/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
/* Driver identity strings and hardware tunables. */
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.00"
33 #define DRV_MODULE_RELDATE "Apr 7, 2006"
/* NOTE(review): the value of B44_DEF_MSG_ENABLE (continuation after the
 * backslash) is missing from this extract — confirm against the full file. */
35 #define B44_DEF_MSG_ENABLE \
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
/* RX/TX descriptor ring geometry; ring sizes must stay powers of two
 * because NEXT_TX and the rx producer/consumer math mask with SIZE-1. */
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
/* Chip can only DMA below 1GB (30-bit mask); mappings above this are
 * rejected and bounced — see b44_alloc_rx_skb()/b44_start_xmit(). */
62 #define B44_DMA_MASK 0x3fffffff
/* Free TX descriptor accounting over a circular ring. */
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
/* RX buffer includes room for the chip-written rx_header (bp->rx_offset). */
72 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 static char version[] __devinitdata =
79 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
82 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_MODULE_VERSION);
86 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
87 module_param(b44_debug, int, 0);
88 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
89 MODULE_VERSION(DRV_MODULE_VERSION);
91 static struct pci_device_id b44_pci_tbl[] = {
92 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
93 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
94 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
95 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
96 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
97 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
98 { } /* terminate list with empty entry */
101 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
/* Forward declarations for routines used before their definitions. */
103 static void b44_halt(struct b44 *);
104 static void b44_init_rings(struct b44 *);
105 static void b44_init_hw(struct b44 *);
/* Platform DMA descriptor alignment/sync granularity, used by the
 * b44_sync_dma_desc_for_* helpers below. */
107 static int dma_desc_align_mask;
108 static int dma_desc_sync_size;
/* Ethtool statistics names.  NOTE(review): the _B44() entries that
 * populate this array are missing from this extract. */
110 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
111 #define _B44(x...) # x,
116 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
118 unsigned long offset,
119 enum dma_data_direction dir)
121 dma_sync_single_range_for_device(&pdev->dev, dma_base,
122 offset & dma_desc_align_mask,
123 dma_desc_sync_size, dir);
126 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
128 unsigned long offset,
129 enum dma_data_direction dir)
131 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
132 offset & dma_desc_align_mask,
133 dma_desc_sync_size, dir);
136 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
138 return readl(bp->regs + reg);
141 static inline void bw32(const struct b44 *bp,
142 unsigned long reg, unsigned long val)
144 writel(val, bp->regs + reg);
147 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
148 u32 bit, unsigned long timeout, const int clear)
152 for (i = 0; i < timeout; i++) {
153 u32 val = br32(bp, reg);
155 if (clear && !(val & bit))
157 if (!clear && (val & bit))
162 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
166 (clear ? "clear" : "set"));
/* Sonics SiliconBackplane support routines.
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */
179 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
180 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
182 static u32 ssb_get_core_rev(struct b44 *bp)
184 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
187 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
189 u32 bar_orig, pci_rev, val;
191 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
192 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
193 pci_rev = ssb_get_core_rev(bp);
195 val = br32(bp, B44_SBINTVEC);
197 bw32(bp, B44_SBINTVEC, val);
199 val = br32(bp, SSB_PCI_TRANS_2);
200 val |= SSB_PCI_PREF | SSB_PCI_BURST;
201 bw32(bp, SSB_PCI_TRANS_2, val);
203 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
208 static void ssb_core_disable(struct b44 *bp)
210 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
213 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
214 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
215 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
216 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
217 SBTMSLOW_REJECT | SBTMSLOW_RESET));
218 br32(bp, B44_SBTMSLOW);
220 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
221 br32(bp, B44_SBTMSLOW);
225 static void ssb_core_reset(struct b44 *bp)
229 ssb_core_disable(bp);
230 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
231 br32(bp, B44_SBTMSLOW);
234 /* Clear SERR if set, this is a hw bug workaround. */
235 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
236 bw32(bp, B44_SBTMSHIGH, 0);
238 val = br32(bp, B44_SBIMSTATE);
239 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
240 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
242 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
243 br32(bp, B44_SBTMSLOW);
246 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
247 br32(bp, B44_SBTMSLOW);
/* Determine which core unit this device is, from the SSB address-match
 * register.  NOTE(review): the declarations, switch scaffolding and
 * return statement are missing from this extract — only the per-type
 * base-mask assignments survive.  Confirm against the full file. */
251 static int ssb_core_unit(struct b44 *bp)
254 u32 val = br32(bp, B44_SBADMATCH0);
257 type = val & SBADMATCH0_TYPE_MASK;
260 base = val & SBADMATCH0_BS0_MASK;
264 base = val & SBADMATCH0_BS1_MASK;
269 base = val & SBADMATCH0_BS2_MASK;
276 static int ssb_is_core_up(struct b44 *bp)
278 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
282 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
286 val = ((u32) data[2]) << 24;
287 val |= ((u32) data[3]) << 16;
288 val |= ((u32) data[4]) << 8;
289 val |= ((u32) data[5]) << 0;
290 bw32(bp, B44_CAM_DATA_LO, val);
291 val = (CAM_DATA_HI_VALID |
292 (((u32) data[0]) << 8) |
293 (((u32) data[1]) << 0));
294 bw32(bp, B44_CAM_DATA_HI, val);
295 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
296 (index << CAM_CTRL_INDEX_SHIFT)));
297 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
300 static inline void __b44_disable_ints(struct b44 *bp)
302 bw32(bp, B44_IMASK, 0);
305 static void b44_disable_ints(struct b44 *bp)
307 __b44_disable_ints(bp);
309 /* Flush posted writes. */
313 static void b44_enable_ints(struct b44 *bp)
315 bw32(bp, B44_IMASK, bp->imask);
318 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
322 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
323 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
324 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
325 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
326 (reg << MDIO_DATA_RA_SHIFT) |
327 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
328 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
329 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
334 static int b44_writephy(struct b44 *bp, int reg, u32 val)
336 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
337 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
338 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
339 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
340 (reg << MDIO_DATA_RA_SHIFT) |
341 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
342 (val & MDIO_DATA_DATA)));
343 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
352 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
355 struct b44 *bp = netdev_priv(dev);
356 int rc = b44_readphy(bp, location, &val);
/* miilib write hook; @phy_id is ignored (see FIXME above) and any
 * MDIO error is silently dropped, matching the miilib void contract. */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	struct b44 *bp = netdev_priv(dev);
	b44_writephy(bp, location, val);
}
369 static int b44_phy_reset(struct b44 *bp)
374 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
378 err = b44_readphy(bp, MII_BMCR, &val);
380 if (val & BMCR_RESET) {
381 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
390 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
394 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
395 bp->flags |= pause_flags;
397 val = br32(bp, B44_RXCONFIG);
398 if (pause_flags & B44_FLAG_RX_PAUSE)
399 val |= RXCONFIG_FLOW;
401 val &= ~RXCONFIG_FLOW;
402 bw32(bp, B44_RXCONFIG, val);
404 val = br32(bp, B44_MAC_FLOW);
405 if (pause_flags & B44_FLAG_TX_PAUSE)
406 val |= (MAC_FLOW_PAUSE_ENAB |
407 (0xc0 & MAC_FLOW_RX_HI_WATER));
409 val &= ~MAC_FLOW_PAUSE_ENAB;
410 bw32(bp, B44_MAC_FLOW, val);
413 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
417 /* The driver supports only rx pause by default because
418 the b44 mac tx pause mechanism generates excessive
420 Use ethtool to turn on b44 tx pause if necessary.
422 if ((local & ADVERTISE_PAUSE_CAP) &&
423 (local & ADVERTISE_PAUSE_ASYM)){
424 if ((remote & LPA_PAUSE_ASYM) &&
425 !(remote & LPA_PAUSE_CAP))
426 pause_enab |= B44_FLAG_RX_PAUSE;
429 __b44_set_flow_ctrl(bp, pause_enab);
/* Configure PHY LEDs, then either start autonegotiation with the
 * advertisement bits derived from bp->flags, or force speed/duplex
 * when B44_FLAG_FORCE_LINK is set.  NOTE(review): local declarations,
 * the "goto out" error paths and the closing return are missing from
 * this extract — confirm against the full file. */
432 static int b44_setup_phy(struct b44 *bp)
437 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
439 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
440 val & MII_ALEDCTRL_ALLMSK)) != 0)
442 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
444 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
445 val | MII_TLEDCTRL_ENABLE)) != 0)
/* Autonegotiated path: advertise only what bp->flags permits. */
448 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
449 u32 adv = ADVERTISE_CSMA;
451 if (bp->flags & B44_FLAG_ADV_10HALF)
452 adv |= ADVERTISE_10HALF;
453 if (bp->flags & B44_FLAG_ADV_10FULL)
454 adv |= ADVERTISE_10FULL;
455 if (bp->flags & B44_FLAG_ADV_100HALF)
456 adv |= ADVERTISE_100HALF;
457 if (bp->flags & B44_FLAG_ADV_100FULL)
458 adv |= ADVERTISE_100FULL;
460 if (bp->flags & B44_FLAG_PAUSE_AUTO)
461 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
463 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
465 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
466 BMCR_ANRESTART))) != 0)
/* Forced-link path: write BMCR directly with the requested mode. */
471 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
473 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
474 if (bp->flags & B44_FLAG_100_BASE_T)
475 bmcr |= BMCR_SPEED100;
476 if (bp->flags & B44_FLAG_FULL_DUPLEX)
477 bmcr |= BMCR_FULLDPLX;
478 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
481 /* Since we will not be negotiating there is no safe way
482 * to determine if the link partner supports flow control
483 * or not. So just disable it completely in this case.
485 b44_set_flow_ctrl(bp, 0, 0);
/* Accumulate the chip's clear-on-read MIB counters into bp->hw_stats,
 * walking the TX then RX register blocks in order.  Relies on the
 * hw_stats fields being laid out to mirror the register order.
 * NOTE(review): declarations and the inter-block padding adjustment
 * are missing from this extract. */
492 static void b44_stats_update(struct b44 *bp)
497 val = &bp->hw_stats.tx_good_octets;
498 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
499 *val++ += br32(bp, reg);
505 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
506 *val++ += br32(bp, reg);
510 static void b44_link_report(struct b44 *bp)
512 if (!netif_carrier_ok(bp->dev)) {
513 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
515 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
517 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
518 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
520 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
523 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
524 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
/* Poll PHY status: mirror the AUXCTRL speed/duplex bits into bp->flags,
 * update the MAC duplex setting and carrier state on link transitions,
 * and warn about remote fault / jabber.  Called from b44_timer() with
 * bp->lock held.  NOTE(review): declarations, else-branches and closing
 * braces are missing from this extract. */
528 static void b44_check_phy(struct b44 *bp)
532 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
533 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
535 if (aux & MII_AUXCTRL_SPEED)
536 bp->flags |= B44_FLAG_100_BASE_T;
538 bp->flags &= ~B44_FLAG_100_BASE_T;
539 if (aux & MII_AUXCTRL_DUPLEX)
540 bp->flags |= B44_FLAG_FULL_DUPLEX;
542 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
/* Link came up: program MAC duplex, refresh flow control from the
 * negotiated advertisements, then assert carrier. */
544 if (!netif_carrier_ok(bp->dev) &&
545 (bmsr & BMSR_LSTATUS)) {
546 u32 val = br32(bp, B44_TX_CTRL);
547 u32 local_adv, remote_adv;
549 if (bp->flags & B44_FLAG_FULL_DUPLEX)
550 val |= TX_CTRL_DUPLEX;
552 val &= ~TX_CTRL_DUPLEX;
553 bw32(bp, B44_TX_CTRL, val);
555 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
556 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
557 !b44_readphy(bp, MII_LPA, &remote_adv))
558 b44_set_flow_ctrl(bp, local_adv, remote_adv);
561 netif_carrier_on(bp->dev);
563 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
565 netif_carrier_off(bp->dev);
569 if (bmsr & BMSR_RFAULT)
570 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
573 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
578 static void b44_timer(unsigned long __opaque)
580 struct b44 *bp = (struct b44 *) __opaque;
582 spin_lock_irq(&bp->lock);
586 b44_stats_update(bp);
588 spin_unlock_irq(&bp->lock);
590 bp->timer.expires = jiffies + HZ;
591 add_timer(&bp->timer);
594 static void b44_tx(struct b44 *bp)
598 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
599 cur /= sizeof(struct dma_desc);
601 /* XXX needs updating when NETIF_F_SG is supported */
602 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
603 struct ring_info *rp = &bp->tx_buffers[cons];
604 struct sk_buff *skb = rp->skb;
608 pci_unmap_single(bp->pdev,
609 pci_unmap_addr(rp, mapping),
613 dev_kfree_skb_irq(skb);
617 if (netif_queue_stopped(bp->dev) &&
618 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
619 netif_wake_queue(bp->dev);
621 bw32(bp, B44_GPTIMER, 0);
/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
/* Allocate, map and install a fresh RX skb at @dest_idx_unmasked.
 * Mappings that land above B44_DMA_MASK (chip limit) are retried once
 * from GFP_DMA memory.  Returns the buffer size on success.
 * NOTE(review): declarations, error returns and closing braces are
 * missing from this extract. */
629 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
632 struct ring_info *src_map, *map;
633 struct rx_header *rh;
641 src_map = &bp->rx_buffers[src_idx];
642 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
643 map = &bp->rx_buffers[dest_idx];
644 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
648 mapping = pci_map_single(bp->pdev, skb->data,
652 /* Hardware bug work-around, the chip is unable to do PCI DMA
653 to/from anything above 1GB :-( */
654 if (dma_mapping_error(mapping) ||
655 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
657 if (!dma_mapping_error(mapping))
658 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE)
659 dev_kfree_skb_any(skb);
660 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
663 mapping = pci_map_single(bp->pdev, skb->data,
666 if (dma_mapping_error(mapping) ||
667 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
668 if (!dma_mapping_error(mapping))
669 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE)
670 dev_kfree_skb_any(skb);
676 skb_reserve(skb, bp->rx_offset);
678 rh = (struct rx_header *)
679 (skb->data - bp->rx_offset);
684 pci_unmap_addr_set(map, mapping, mapping);
689 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
690 if (dest_idx == (B44_RX_RING_SIZE - 1))
691 ctrl |= DESC_CTRL_EOT;
693 dp = &bp->rx_ring[dest_idx];
694 dp->ctrl = cpu_to_le32(ctrl);
695 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
697 if (bp->flags & B44_FLAG_RX_RING_HACK)
/* NOTE(review): "sizeof(dp)" below is pointer size, not descriptor
 * size — looks like it should be sizeof(*dp); confirm and fix. */
698 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
699 dest_idx * sizeof(dp),
702 return RX_PKT_BUF_SZ;
/* Re-queue the buffer at @src_idx into the ring slot @dest_idx_unmasked
 * without reallocating: move the skb/mapping bookkeeping and copy the
 * descriptor, adjusting the end-of-table bit for the new position.
 * NOTE(review): declarations, the rx_header reset and closing braces
 * are missing from this extract. */
705 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
707 struct dma_desc *src_desc, *dest_desc;
708 struct ring_info *src_map, *dest_map;
709 struct rx_header *rh;
713 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
714 dest_desc = &bp->rx_ring[dest_idx];
715 dest_map = &bp->rx_buffers[dest_idx];
716 src_desc = &bp->rx_ring[src_idx];
717 src_map = &bp->rx_buffers[src_idx];
719 dest_map->skb = src_map->skb;
720 rh = (struct rx_header *) src_map->skb->data;
723 pci_unmap_addr_set(dest_map, mapping,
724 pci_unmap_addr(src_map, mapping));
726 if (bp->flags & B44_FLAG_RX_RING_HACK)
/* NOTE(review): "sizeof(src_desc)" / "sizeof(dest_desc)" below are
 * pointer sizes — likely should be sizeof(*...); confirm and fix. */
727 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
728 src_idx * sizeof(src_desc),
731 ctrl = src_desc->ctrl;
732 if (dest_idx == (B44_RX_RING_SIZE - 1))
733 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
735 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
737 dest_desc->ctrl = ctrl;
738 dest_desc->addr = src_desc->addr;
742 if (bp->flags & B44_FLAG_RX_RING_HACK)
743 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
744 dest_idx * sizeof(dest_desc),
747 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
/* NAPI receive: walk completed descriptors up to the chip's producer
 * index, at most @budget packets.  Large frames get a fresh ring
 * buffer; small ones are copied into a new skb and the ring buffer
 * recycled.  Returns the number of packets delivered.
 * NOTE(review): declarations, the zero-length re-read loop header and
 * closing braces are missing from this extract. */
752 static int b44_rx(struct b44 *bp, int budget)
758 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
759 prod /= sizeof(struct dma_desc);
762 while (cons != prod && budget > 0) {
763 struct ring_info *rp = &bp->rx_buffers[cons];
764 struct sk_buff *skb = rp->skb;
765 dma_addr_t map = pci_unmap_addr(rp, mapping);
766 struct rx_header *rh;
769 pci_dma_sync_single_for_cpu(bp->pdev, map,
772 rh = (struct rx_header *) skb->data;
/* NOTE(review): cpu_to_le16() on a value *read from* the chip should
 * be le16_to_cpu(); identical on little-endian, wrong on big-endian —
 * confirm and fix. */
773 len = cpu_to_le16(rh->len);
774 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
775 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
777 b44_recycle_rx(bp, cons, bp->rx_prod);
779 bp->stats.rx_dropped++;
789 len = cpu_to_le16(rh->len);
790 } while (len == 0 && i++ < 5);
/* Big packet: hand the mapped buffer up and refill the slot. */
798 if (len > RX_COPY_THRESHOLD) {
800 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
803 pci_unmap_single(bp->pdev, map,
804 skb_size, PCI_DMA_FROMDEVICE);
805 /* Leave out rx_header */
806 skb_put(skb, len+bp->rx_offset);
807 skb_pull(skb,bp->rx_offset);
/* Small packet: copy into a fresh skb, recycle the ring buffer. */
809 struct sk_buff *copy_skb;
811 b44_recycle_rx(bp, cons, bp->rx_prod);
812 copy_skb = dev_alloc_skb(len + 2);
813 if (copy_skb == NULL)
814 goto drop_it_no_recycle;
816 copy_skb->dev = bp->dev;
817 skb_reserve(copy_skb, 2);
818 skb_put(copy_skb, len);
819 /* DMA sync done above, copy just the actual packet */
820 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
824 skb->ip_summed = CHECKSUM_NONE;
825 skb->protocol = eth_type_trans(skb, bp->dev);
826 netif_receive_skb(skb);
827 bp->dev->last_rx = jiffies;
831 bp->rx_prod = (bp->rx_prod + 1) &
832 (B44_RX_RING_SIZE - 1);
833 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
837 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
/* NAPI poll: reap TX completions, receive up to the smaller of
 * *budget and the device quota, recover from error interrupts with a
 * full halt/re-init, and re-enable interrupts once all work is done.
 * Returns 0 when done, 1 to be polled again.  NOTE(review): several
 * statements (b44_tx call, done bookkeeping, halt/init sequence,
 * b44_enable_ints) are missing from this extract. */
842 static int b44_poll(struct net_device *netdev, int *budget)
844 struct b44 *bp = netdev_priv(netdev);
847 spin_lock_irq(&bp->lock);
849 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
850 /* spin_lock(&bp->tx_lock); */
852 /* spin_unlock(&bp->tx_lock); */
854 spin_unlock_irq(&bp->lock);
857 if (bp->istat & ISTAT_RX) {
858 int orig_budget = *budget;
861 if (orig_budget > netdev->quota)
862 orig_budget = netdev->quota;
864 work_done = b44_rx(bp, orig_budget);
866 *budget -= work_done;
867 netdev->quota -= work_done;
869 if (work_done >= orig_budget)
873 if (bp->istat & ISTAT_ERRORS) {
874 spin_lock_irq(&bp->lock);
878 netif_wake_queue(bp->dev);
879 spin_unlock_irq(&bp->lock);
884 netif_rx_complete(netdev);
888 return (done ? 0 : 1);
/* Shared interrupt handler: read and mask the interrupt status, then
 * schedule NAPI with chip interrupts disabled; the status write is
 * acknowledged and flushed before returning.  NOTE(review):
 * declarations, the istat &= imask masking, the handled bookkeeping
 * and the ISTAT readback are missing from this extract. */
891 static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
893 struct net_device *dev = dev_id;
894 struct b44 *bp = netdev_priv(dev);
898 spin_lock(&bp->lock);
900 istat = br32(bp, B44_ISTAT);
901 imask = br32(bp, B44_IMASK);
/* ??? Why must the status be masked by hand when the hardware
 * ??? already has an interrupt mask register? */
910 if (unlikely(!netif_running(dev))) {
911 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
915 if (netif_rx_schedule_prep(dev)) {
916 /* NOTE: These writes are posted by the readback of
917 * the ISTAT register below.
920 __b44_disable_ints(bp);
921 __netif_rx_schedule(dev);
923 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
928 bw32(bp, B44_ISTAT, istat);
931 spin_unlock(&bp->lock);
932 return IRQ_RETVAL(handled);
935 static void b44_tx_timeout(struct net_device *dev)
937 struct b44 *bp = netdev_priv(dev);
939 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
942 spin_lock_irq(&bp->lock);
948 spin_unlock_irq(&bp->lock);
952 netif_wake_queue(dev);
/* netdev hard_start_xmit: map the skb, bouncing it through a GFP_DMA
 * buffer if the mapping lands above the chip's 1GB DMA limit, fill
 * the next TX descriptor and kick the DMA engine.  Returns NETDEV_TX_OK
 * or NETDEV_TX_BUSY.  NOTE(review): declarations, "len = skb->len",
 * several error-path gotos and closing braces are missing from this
 * extract. */
955 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
957 struct b44 *bp = netdev_priv(dev);
958 struct sk_buff *bounce_skb;
959 int rc = NETDEV_TX_OK;
961 u32 len, entry, ctrl;
964 spin_lock_irq(&bp->lock);
966 /* This is a hard error, log it. */
967 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
968 netif_stop_queue(dev);
969 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
974 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
975 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
976 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
977 if (!dma_mapping_error(mapping))
978 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
980 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
985 mapping = pci_map_single(bp->pdev, bounce_skb->data,
986 len, PCI_DMA_TODEVICE);
987 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
988 if (!dma_mapping_error(mapping))
989 pci_unmap_single(bp->pdev, mapping,
990 len, PCI_DMA_TODEVICE);
991 dev_kfree_skb_any(bounce_skb);
/* NOTE(review): skb_put reserves "len" bytes but "skb->len" bytes are
 * copied — these should agree; confirm against the full file. */
995 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
996 dev_kfree_skb_any(skb);
1000 entry = bp->tx_prod;
1001 bp->tx_buffers[entry].skb = skb;
1002 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1004 ctrl = (len & DESC_CTRL_LEN);
1005 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1006 if (entry == (B44_TX_RING_SIZE - 1))
1007 ctrl |= DESC_CTRL_EOT;
1009 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1010 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1012 if (bp->flags & B44_FLAG_TX_RING_HACK)
1013 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1014 entry * sizeof(bp->tx_ring[0]),
1017 entry = NEXT_TX(entry);
1019 bp->tx_prod = entry;
/* Kick the DMA engine; some chip revisions need a double write or a
 * readback to cope with posted-write reordering. */
1023 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1024 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1025 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1026 if (bp->flags & B44_FLAG_REORDER_BUG)
1027 br32(bp, B44_DMATX_PTR);
1029 if (TX_BUFFS_AVAIL(bp) < 1)
1030 netif_stop_queue(dev);
1032 dev->trans_start = jiffies;
1035 spin_unlock_irq(&bp->lock);
1040 rc = NETDEV_TX_BUSY;
1044 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1046 struct b44 *bp = netdev_priv(dev);
1048 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1051 if (!netif_running(dev)) {
1052 /* We'll just catch it later when the
1059 spin_lock_irq(&bp->lock);
1064 spin_unlock_irq(&bp->lock);
1066 b44_enable_ints(bp);
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
/* NOTE(review): loop-continue statements, rp->skb = NULL resets and
 * closing braces are missing from this extract. */
1078 static void b44_free_rings(struct b44 *bp)
1080 struct ring_info *rp;
1083 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1084 rp = &bp->rx_buffers[i];
1086 if (rp->skb == NULL)
1088 pci_unmap_single(bp->pdev,
1089 pci_unmap_addr(rp, mapping),
1091 PCI_DMA_FROMDEVICE);
1092 dev_kfree_skb_any(rp->skb);
1096 /* XXX needs changes once NETIF_F_SG is set... */
1097 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1098 rp = &bp->tx_buffers[i];
1100 if (rp->skb == NULL)
1102 pci_unmap_single(bp->pdev,
1103 pci_unmap_addr(rp, mapping),
1106 dev_kfree_skb_any(rp->skb);
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
/* Zero both descriptor rings (flushing them to the device when they
 * live in streaming-mapped memory) and pre-fill rx_pending RX buffers.
 * NOTE(review): b44_free_rings() call, sync sizes and loop body/braces
 * are missing from this extract. */
1117 static void b44_init_rings(struct b44 *bp)
1123 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1124 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1126 if (bp->flags & B44_FLAG_RX_RING_HACK)
1127 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1129 PCI_DMA_BIDIRECTIONAL);
1131 if (bp->flags & B44_FLAG_TX_RING_HACK)
1132 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1136 for (i = 0; i < bp->rx_pending; i++) {
1137 if (b44_alloc_rx_skb(bp, -1, i) < 0)
/* Release ring bookkeeping and descriptor memory.
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
/* Rings allocated via the RING_HACK fallback were kmalloc'd and
 * streaming-mapped, so they are unmapped and kfree'd; otherwise the
 * coherent allocation is returned with pci_free_consistent().
 * NOTE(review): the kfree() of hacked rings, NULL resets and closing
 * braces are missing from this extract. */
1146 static void b44_free_consistent(struct b44 *bp)
1148 kfree(bp->rx_buffers);
1149 bp->rx_buffers = NULL;
1150 kfree(bp->tx_buffers);
1151 bp->tx_buffers = NULL;
1153 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1154 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1159 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1160 bp->rx_ring, bp->rx_ring_dma);
1162 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1165 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1166 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1171 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1172 bp->tx_ring, bp->tx_ring_dma);
1174 bp->flags &= ~B44_FLAG_TX_RING_HACK;
/* Allocate ring bookkeeping and descriptor memory.
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
/* For each ring, a coherent allocation is tried first; if it lands
 * above the chip's 1GB DMA limit the RING_HACK fallback kmallocs the
 * ring and streaming-maps it instead.  NOTE(review): size/DMA-limit
 * checks on the coherent allocations, error gotos and the success
 * return are missing from this extract. */
1182 static int b44_alloc_consistent(struct b44 *bp)
1186 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1187 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1188 if (!bp->rx_buffers)
1191 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1192 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1193 if (!bp->tx_buffers)
1196 size = DMA_TABLE_BYTES;
1197 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1199 /* Allocation may have failed due to pci_alloc_consistent
1200 insisting on use of GFP_DMA, which is more restrictive
1201 than necessary... */
1202 struct dma_desc *rx_ring;
1203 dma_addr_t rx_ring_dma;
1205 rx_ring = kzalloc(size, GFP_KERNEL);
1209 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1213 if (dma_mapping_error(rx_ring_dma) ||
1214 rx_ring_dma + size > B44_DMA_MASK) {
1219 bp->rx_ring = rx_ring;
1220 bp->rx_ring_dma = rx_ring_dma;
1221 bp->flags |= B44_FLAG_RX_RING_HACK;
1224 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1226 /* Allocation may have failed due to pci_alloc_consistent
1227 insisting on use of GFP_DMA, which is more restrictive
1228 than necessary... */
1229 struct dma_desc *tx_ring;
1230 dma_addr_t tx_ring_dma;
1232 tx_ring = kzalloc(size, GFP_KERNEL);
1236 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1240 if (dma_mapping_error(tx_ring_dma) ||
1241 tx_ring_dma + size > B44_DMA_MASK) {
1246 bp->tx_ring = tx_ring;
1247 bp->tx_ring_dma = tx_ring_dma;
1248 bp->flags |= B44_FLAG_TX_RING_HACK;
/* Failure path: undo any partial allocations. */
1254 b44_free_consistent(bp);
1258 /* bp->lock is held. */
/* Enable clear-on-read mode and read every TX/RX MIB counter once to
 * zero the hardware statistics.  NOTE(review): the br32() reads inside
 * the loop bodies and closing brace are missing from this extract. */
1259 static void b44_clear_stats(struct b44 *bp)
1263 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1264 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1266 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1270 /* bp->lock is held. */
/* Full chip reset: quiesce the MAC and DMA engines if the core is up,
 * reset the SSB core, clear statistics, make the PHY accessible over
 * MDIO and select internal vs external PHY from DEVCTRL.
 * NOTE(review): the ssb_core_reset() call, wait timeouts and several
 * braces are missing from this extract. */
1271 static void b44_chip_reset(struct b44 *bp)
1273 if (ssb_is_core_up(bp)) {
1274 bw32(bp, B44_RCV_LAZY, 0);
1275 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1276 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1277 bw32(bp, B44_DMATX_CTRL, 0);
1278 bp->tx_prod = bp->tx_cons = 0;
1279 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1280 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1283 bw32(bp, B44_DMARX_CTRL, 0);
1284 bp->rx_prod = bp->rx_cons = 0;
1286 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1293 b44_clear_stats(bp);
1295 /* Make PHY accessible. */
1296 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1297 (0x0d & MDIO_CTRL_MAXF_MASK)));
1298 br32(bp, B44_MDIO_CTRL);
1300 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1301 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1302 br32(bp, B44_ENET_CTRL);
1303 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1305 u32 val = br32(bp, B44_DEVCTRL);
1307 if (val & DEVCTRL_EPR) {
1308 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1309 br32(bp, B44_DEVCTRL);
1312 bp->flags |= B44_FLAG_INTERNAL_PHY;
/* bp->lock is held. */
/* Stop all chip activity: mask interrupts, then reset the chip. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}
1323 /* bp->lock is held. */
1324 static void __b44_set_mac_addr(struct b44 *bp)
1326 bw32(bp, B44_CAM_CTRL, 0);
1327 if (!(bp->dev->flags & IFF_PROMISC)) {
1330 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1331 val = br32(bp, B44_CAM_CTRL);
1332 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1336 static int b44_set_mac_addr(struct net_device *dev, void *p)
1338 struct b44 *bp = netdev_priv(dev);
1339 struct sockaddr *addr = p;
1341 if (netif_running(dev))
1344 if (!is_valid_ether_addr(addr->sa_data))
1347 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1349 spin_lock_irq(&bp->lock);
1350 __b44_set_mac_addr(bp);
1351 spin_unlock_irq(&bp->lock);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
1359 static void __b44_set_rx_mode(struct net_device *);
/* Reset the chip, program MAC/RX mode, frame-size limits, DMA ring
 * addresses and watermarks, then enable the MAC.  NOTE(review): the
 * b44_chip_reset() call, declarations and closing brace are missing
 * from this extract. */
1360 static void b44_init_hw(struct b44 *bp)
1368 /* Enable CRC32, set proper LED modes and power on PHY */
1369 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1370 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1372 /* This sets the MAC address too. */
1373 __b44_set_rx_mode(bp->dev);
1375 /* MTU + eth header + possible VLAN tag + struct rx_header */
1376 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1377 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1379 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1380 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1381 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1382 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1383 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1384 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1386 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1387 bp->rx_prod = bp->rx_pending;
1389 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1391 val = br32(bp, B44_ENET_CTRL);
1392 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
/* netdev open hook: allocate DMA memory, bring up the hardware,
 * request the (shared) IRQ, start the 1 Hz service timer and the TX
 * queue.  NOTE(review): the ring/hw init calls between allocation and
 * request_irq, the halt on IRQ failure and the return statements are
 * missing from this extract. */
1395 static int b44_open(struct net_device *dev)
1397 struct b44 *bp = netdev_priv(dev);
1400 err = b44_alloc_consistent(bp);
1409 err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1410 if (unlikely(err < 0)) {
1413 b44_free_consistent(bp);
1417 init_timer(&bp->timer);
1418 bp->timer.expires = jiffies + HZ;
1419 bp->timer.data = (unsigned long) bp;
1420 bp->timer.function = b44_timer;
1421 add_timer(&bp->timer);
1423 b44_enable_ints(bp);
1424 netif_start_queue(dev);
/* Debug-only helper that dumps PCI and chip register state to the
 * console; deliberately non-static so it can be called from a
 * debugger.  NOTE(review): most of the register dump body is missing
 * from this extract. */
1430 /*static*/ void b44_dump_state(struct b44 *bp)
1432 u32 val32, val32_2, val32_3, val32_4, val32_5;
1435 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1436 printk("DEBUG: PCI status [%04x] \n", val16);
1441 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
/* Runs the normal interrupt handler with the IRQ line masked.
 * NOTE(review): the closing brace and matching #endif are missing
 * from this extract. */
1446 static void b44_poll_controller(struct net_device *dev)
1448 disable_irq(dev->irq);
1449 b44_interrupt(dev->irq, dev, NULL);
1450 enable_irq(dev->irq);
/* netdev stop hook: quiesce TX/NAPI, kill the service timer, halt the
 * chip and free the rings under bp->lock, then release the IRQ and
 * DMA memory.  NOTE(review): the halt/free_rings calls inside the
 * locked region and the return are missing from this extract. */
1454 static int b44_close(struct net_device *dev)
1456 struct b44 *bp = netdev_priv(dev);
1458 netif_stop_queue(dev);
1460 netif_poll_disable(dev);
1462 del_timer_sync(&bp->timer);
1464 spin_lock_irq(&bp->lock);
1471 netif_carrier_off(dev);
1473 spin_unlock_irq(&bp->lock);
1475 free_irq(dev->irq, dev);
1477 netif_poll_enable(dev);
1479 b44_free_consistent(bp);
/* net_device ->get_stats handler: translate the cached hardware MIB
 * counters (bp->hw_stats) into the generic net_device_stats layout. */
1484 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1486 	struct b44 *bp = netdev_priv(dev);
1487 	struct net_device_stats *nstat = &bp->stats;
1488 	struct b44_hw_stats *hwstat = &bp->hw_stats;
1490 	/* Convert HW stats into netdevice stats. */
1491 	nstat->rx_packets = hwstat->rx_pkts;
1492 	nstat->tx_packets = hwstat->tx_pkts;
1493 	nstat->rx_bytes = hwstat->rx_octets;
1494 	nstat->tx_bytes = hwstat->tx_octets;
1495 	nstat->tx_errors = (hwstat->tx_jabber_pkts +
1496 			    hwstat->tx_oversize_pkts +
1497 			    hwstat->tx_underruns +
1498 			    hwstat->tx_excessive_cols +
1499 			    hwstat->tx_late_cols);
/* NOTE(review): net_device_stats.multicast counts *received* multicast
 * frames, but this fills it from a TX counter (tx_multicast_pkts) --
 * looks like a copy/paste slip; verify against the rx_ counter set. */
1500 	nstat->multicast = hwstat->tx_multicast_pkts;
1501 	nstat->collisions = hwstat->tx_total_cols;
1503 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1504 				   hwstat->rx_undersize);
1505 	nstat->rx_over_errors = hwstat->rx_missed_pkts;
1506 	nstat->rx_frame_errors = hwstat->rx_align_errs;
1507 	nstat->rx_crc_errors = hwstat->rx_crc_errs;
/* Aggregate RX error count: sum of every distinct RX failure counter
 * the MIB exposes. */
1508 	nstat->rx_errors = (hwstat->rx_jabber_pkts +
1509 			    hwstat->rx_oversize_pkts +
1510 			    hwstat->rx_missed_pkts +
1511 			    hwstat->rx_crc_align_errs +
1512 			    hwstat->rx_undersize +
1513 			    hwstat->rx_crc_errs +
1514 			    hwstat->rx_align_errs +
1515 			    hwstat->rx_symbol_errs);
1517 	nstat->tx_aborted_errors = hwstat->tx_underruns;
1519 	/* Carrier lost counter seems to be broken for some devices */
1520 	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
/* Load the device multicast list into the chip's CAM filter, capped at
 * B44_MCAST_TABLE_SIZE entries.  CAM slot 0 is the unicast MAC, so
 * multicast entries start at index i + 1.  Caller holds bp->lock
 * (double-underscore prefix = lock already taken). */
1526 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1528 	struct dev_mc_list *mclist;
1531 	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1532 	mclist = dev->mc_list;
1533 	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1534 		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
/* Program the RX filter from dev->flags: promiscuous bypasses the CAM
 * entirely; otherwise reload the MAC + multicast CAM entries, zero the
 * unused slots, and re-enable the CAM.  Caller holds bp->lock. */
1539 static void __b44_set_rx_mode(struct net_device *dev)
1541 	struct b44 *bp = netdev_priv(dev);
1544 	val = br32(bp, B44_RXCONFIG);
1545 	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1546 	if (dev->flags & IFF_PROMISC) {
1547 		val |= RXCONFIG_PROMISC;
1548 		bw32(bp, B44_RXCONFIG, val);
/* Non-promiscuous path (the else-branch of the gapped if above). */
1550 		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1553 		__b44_set_mac_addr(bp);
1555 		if (dev->flags & IFF_ALLMULTI)
1556 			val |= RXCONFIG_ALLMULTI;
/* Returns the next free CAM index after the loaded multicast entries. */
1558 			i = __b44_load_mcast(bp, dev);
/* Clear any stale CAM entries left over from a longer previous list. */
1560 		for (; i < 64; i++) {
1561 			__b44_cam_write(bp, zero, i);
1563 		bw32(bp, B44_RXCONFIG, val);
1564 		val = br32(bp, B44_CAM_CTRL);
1565 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* net_device ->set_multicast_list handler: locked wrapper around
 * __b44_set_rx_mode(). */
1569 static void b44_set_rx_mode(struct net_device *dev)
1571 	struct b44 *bp = netdev_priv(dev);
1573 	spin_lock_irq(&bp->lock);
1574 	__b44_set_rx_mode(dev);
1575 	spin_unlock_irq(&bp->lock);
/* ethtool ->get_msglevel: report the driver's debug message bitmask. */
1578 static u32 b44_get_msglevel(struct net_device *dev)
1580 	struct b44 *bp = netdev_priv(dev);
1581 	return bp->msg_enable;
/* ethtool ->set_msglevel: replace the driver's debug message bitmask. */
1584 static void b44_set_msglevel(struct net_device *dev, u32 value)
1586 	struct b44 *bp = netdev_priv(dev);
1587 	bp->msg_enable = value;
/* ethtool ->get_drvinfo: fill in driver name, version and PCI bus id.
 * The unbounded strcpy()s rely on the DRV_* strings and pci_name()
 * being shorter than the fixed ethtool_drvinfo fields. */
1590 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1592 	struct b44 *bp = netdev_priv(dev);
1593 	struct pci_dev *pci_dev = bp->pdev;
1595 	strcpy (info->driver, DRV_MODULE_NAME);
1596 	strcpy (info->version, DRV_MODULE_VERSION);
1597 	strcpy (info->bus_info, pci_name(pci_dev));
/* ethtool ->nway_reset: restart PHY autonegotiation, but only if
 * autoneg is currently enabled in BMCR. */
1600 static int b44_nway_reset(struct net_device *dev)
1602 	struct b44 *bp = netdev_priv(dev);
1606 	spin_lock_irq(&bp->lock);
/* BMCR is read twice; presumably the first read flushes latched status
 * bits so the second reflects current state -- TODO confirm against the
 * PHY datasheet. */
1607 	b44_readphy(bp, MII_BMCR, &bmcr);
1608 	b44_readphy(bp, MII_BMCR, &bmcr);
1610 	if (bmcr & BMCR_ANENABLE) {
1611 		b44_writephy(bp, MII_BMCR,
1612 			     bmcr | BMCR_ANRESTART);
1615 	spin_unlock_irq(&bp->lock);
/* ethtool ->get_settings: report link capabilities and the currently
 * negotiated/forced speed, duplex and autoneg state, all derived from
 * bp->flags.  Fails if the interface is down (gapped return). */
1620 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1622 	struct b44 *bp = netdev_priv(dev);
1624 	if (!netif_running(dev))
/* 10/100 Mb MII PHY: advertise everything up to 100baseT full. */
1626 	cmd->supported = (SUPPORTED_Autoneg);
1627 	cmd->supported |= (SUPPORTED_100baseT_Half |
1628 			  SUPPORTED_100baseT_Full |
1629 			  SUPPORTED_10baseT_Half |
1630 			  SUPPORTED_10baseT_Full |
/* Rebuild the advertising mask from the driver's ADV_* flag bits. */
1633 	cmd->advertising = 0;
1634 	if (bp->flags & B44_FLAG_ADV_10HALF)
1635 		cmd->advertising |= ADVERTISED_10baseT_Half;
1636 	if (bp->flags & B44_FLAG_ADV_10FULL)
1637 		cmd->advertising |= ADVERTISED_10baseT_Full;
1638 	if (bp->flags & B44_FLAG_ADV_100HALF)
1639 		cmd->advertising |= ADVERTISED_100baseT_Half;
1640 	if (bp->flags & B44_FLAG_ADV_100FULL)
1641 		cmd->advertising |= ADVERTISED_100baseT_Full;
1642 	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1643 	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1644 		SPEED_100 : SPEED_10;
1645 	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1646 		DUPLEX_FULL : DUPLEX_HALF;
1648 	cmd->phy_address = bp->phy_addr;
1649 	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1650 		XCVR_INTERNAL : XCVR_EXTERNAL;
1651 	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1652 		AUTONEG_DISABLE : AUTONEG_ENABLE;
/* ethtool ->set_settings: validate the requested link configuration
 * (no gigabit; forced mode must be 10/100 half/full), then translate it
 * into bp->flags under the lock.  The PHY re-setup after the flag
 * update is outside this gapped extract. */
1658 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1660 	struct b44 *bp = netdev_priv(dev);
1662 	if (!netif_running(dev))
1665 	/* We do not support gigabit. */
1666 	if (cmd->autoneg == AUTONEG_ENABLE) {
1667 		if (cmd->advertising &
1668 		    (ADVERTISED_1000baseT_Half |
1669 		     ADVERTISED_1000baseT_Full))
1671 	} else if ((cmd->speed != SPEED_100 &&
1672 		    cmd->speed != SPEED_10) ||
1673 		   (cmd->duplex != DUPLEX_HALF &&
1674 		    cmd->duplex != DUPLEX_FULL)) {
1678 	spin_lock_irq(&bp->lock);
1680 	if (cmd->autoneg == AUTONEG_ENABLE) {
1681 		bp->flags &= ~B44_FLAG_FORCE_LINK;
1682 		bp->flags &= ~(B44_FLAG_ADV_10HALF |
1683 			       B44_FLAG_ADV_10FULL |
1684 			       B44_FLAG_ADV_100HALF |
1685 			       B44_FLAG_ADV_100FULL);
/* NOTE(review): cmd->advertising holds ethtool ADVERTISED_* bits, but
 * these tests use the MII-register ADVERTISE_* masks (different bit
 * positions, from <linux/mii.h>).  Line 1667 above correctly uses
 * ADVERTISED_*; these four look like a constant-namespace mixup --
 * verify against the ethtool/mii headers. */
1686 		if (cmd->advertising & ADVERTISE_10HALF)
1687 			bp->flags |= B44_FLAG_ADV_10HALF;
1688 		if (cmd->advertising & ADVERTISE_10FULL)
1689 			bp->flags |= B44_FLAG_ADV_10FULL;
1690 		if (cmd->advertising & ADVERTISE_100HALF)
1691 			bp->flags |= B44_FLAG_ADV_100HALF;
1692 		if (cmd->advertising & ADVERTISE_100FULL)
1693 			bp->flags |= B44_FLAG_ADV_100FULL;
/* Forced-link path (else-branch of the gapped if above). */
1695 		bp->flags |= B44_FLAG_FORCE_LINK;
1696 		if (cmd->speed == SPEED_100)
1697 			bp->flags |= B44_FLAG_100_BASE_T;
1698 		if (cmd->duplex == DUPLEX_FULL)
1699 			bp->flags |= B44_FLAG_FULL_DUPLEX;
1704 	spin_unlock_irq(&bp->lock);
/* ethtool ->get_ringparam: report RX ring limits.  Max pending is one
 * less than the ring size so producer != consumer when full. */
1709 static void b44_get_ringparam(struct net_device *dev,
1710 			      struct ethtool_ringparam *ering)
1712 	struct b44 *bp = netdev_priv(dev);
1714 	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1715 	ering->rx_pending = bp->rx_pending;
1717 	/* XXX ethtool lacks a tx_max_pending, oops... */
/* ethtool ->set_ringparam: validate the requested ring depths (no mini
 * or jumbo rings on this hardware), store them, and restart the device
 * under the lock (restart lines are outside this gapped extract). */
1720 static int b44_set_ringparam(struct net_device *dev,
1721 			     struct ethtool_ringparam *ering)
1723 	struct b44 *bp = netdev_priv(dev);
1725 	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1726 	    (ering->rx_mini_pending != 0) ||
1727 	    (ering->rx_jumbo_pending != 0) ||
1728 	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
1731 	spin_lock_irq(&bp->lock);
1733 	bp->rx_pending = ering->rx_pending;
1734 	bp->tx_pending = ering->tx_pending;
1739 	netif_wake_queue(bp->dev);
1740 	spin_unlock_irq(&bp->lock);
1742 	b44_enable_ints(bp);
/* ethtool ->get_pauseparam: expose the flow-control flag bits as the
 * three ethtool pause booleans (autoneg / rx_pause / tx_pause). */
1747 static void b44_get_pauseparam(struct net_device *dev,
1748 				struct ethtool_pauseparam *epause)
1750 	struct b44 *bp = netdev_priv(dev);
1753 		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1755 		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
1757 		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
/* ethtool ->set_pauseparam: map the three ethtool pause booleans onto
 * bp->flags, then either restart the chip (autoneg pause, gapped lines)
 * or program flow control directly from the flags. */
1760 static int b44_set_pauseparam(struct net_device *dev,
1761 				struct ethtool_pauseparam *epause)
1763 	struct b44 *bp = netdev_priv(dev);
1765 	spin_lock_irq(&bp->lock);
1766 	if (epause->autoneg)
1767 		bp->flags |= B44_FLAG_PAUSE_AUTO;
1769 		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1770 	if (epause->rx_pause)
1771 		bp->flags |= B44_FLAG_RX_PAUSE;
1773 		bp->flags &= ~B44_FLAG_RX_PAUSE;
1774 	if (epause->tx_pause)
1775 		bp->flags |= B44_FLAG_TX_PAUSE;
1777 		bp->flags &= ~B44_FLAG_TX_PAUSE;
1778 	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
/* Manual pause configuration (else-branch of the gapped if above). */
1783 		__b44_set_flow_ctrl(bp, bp->flags);
1785 	spin_unlock_irq(&bp->lock);
1787 	b44_enable_ints(bp);
/* ethtool ->get_strings: copy the flat statistics-name table out.
 * b44_gstrings is a 2-D char array; *b44_gstrings points at its first
 * row, i.e. the start of the contiguous name block. */
1792 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1796 		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
/* ethtool ->get_stats_count: number of entries in the stats/name table. */
1801 static int b44_get_stats_count(struct net_device *dev)
1803 	return ARRAY_SIZE(b44_gstrings);
/* ethtool ->get_ethtool_stats: refresh the hardware MIB counters, then
 * copy them into the u64 output array.  Treats bp->hw_stats as a flat
 * u32 array starting at tx_good_octets, in b44_gstrings order -- the
 * struct layout must stay in sync with the name table. */
1806 static void b44_get_ethtool_stats(struct net_device *dev,
1807 				  struct ethtool_stats *stats, u64 *data)
1809 	struct b44 *bp = netdev_priv(dev);
1810 	u32 *val = &bp->hw_stats.tx_good_octets;
1813 	spin_lock_irq(&bp->lock);
1815 	b44_stats_update(bp);
1817 	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1820 	spin_unlock_irq(&bp->lock);
/* ethtool operations table wired into the net_device at probe time. */
1823 static struct ethtool_ops b44_ethtool_ops = {
1824 	.get_drvinfo		= b44_get_drvinfo,
1825 	.get_settings		= b44_get_settings,
1826 	.set_settings		= b44_set_settings,
1827 	.nway_reset		= b44_nway_reset,
1828 	.get_link		= ethtool_op_get_link,
1829 	.get_ringparam		= b44_get_ringparam,
1830 	.set_ringparam		= b44_set_ringparam,
1831 	.get_pauseparam		= b44_get_pauseparam,
1832 	.set_pauseparam		= b44_set_pauseparam,
1833 	.get_msglevel		= b44_get_msglevel,
1834 	.set_msglevel		= b44_set_msglevel,
1835 	.get_strings		= b44_get_strings,
1836 	.get_stats_count	= b44_get_stats_count,
1837 	.get_ethtool_stats	= b44_get_ethtool_stats,
1838 	.get_perm_addr		= ethtool_op_get_perm_addr,
/* net_device ->do_ioctl handler: forward MII ioctls (SIOCGMIIREG etc.)
 * to the generic MII layer under the device lock; rejected when the
 * interface is down. */
1841 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1843 	struct mii_ioctl_data *data = if_mii(ifr);
1844 	struct b44 *bp = netdev_priv(dev);
1847 	if (!netif_running(dev))
1850 	spin_lock_irq(&bp->lock);
1851 	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1852 	spin_unlock_irq(&bp->lock);
1857 /* Read 128-bytes of EEPROM. */
/* The EEPROM shadow lives at offset 4096 in the register BAR and is
 * read 16 bits at a time.
 * NOTE(review): readw returns the value raw -- no le16_to_cpu here, so
 * the byte order within each u16 is whatever the bus delivers; the MAC
 * parsing in b44_get_invariants compensates with swapped indices.
 * Verify on big-endian before touching either side. */
1858 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1861 	u16 *ptr = (u16 *) data;
1863 	for (i = 0; i < 128; i += 2)
1864 		ptr[i / 2] = readw(bp->regs + 4096 + i);
/* Probe-time setup: pull the MAC address and PHY address out of the
 * EEPROM, validate the MAC, and initialize per-device constants
 * (interrupt mask, SSB core unit, DMA offset). */
1869 static int __devinit b44_get_invariants(struct b44 *bp)
1874 	err = b44_read_eeprom(bp, &eeprom[0]);
/* MAC bytes are stored byte-swapped within each 16-bit EEPROM word,
 * hence the 79/78, 81/80, 83/82 index pairs. */
1878 	bp->dev->dev_addr[0] = eeprom[79];
1879 	bp->dev->dev_addr[1] = eeprom[78];
1880 	bp->dev->dev_addr[2] = eeprom[81];
1881 	bp->dev->dev_addr[3] = eeprom[80];
1882 	bp->dev->dev_addr[4] = eeprom[83];
1883 	bp->dev->dev_addr[5] = eeprom[82];
1885 	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1886 		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
/* Record the factory address for ethtool's get_perm_addr. */
1890 	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1892 	bp->phy_addr = eeprom[90] & 0x1f;
1894 	/* With this, plus the rx_header prepended to the data by the
1895 	 * hardware, we'll land the ethernet header on a 2-byte boundary.
1899 	bp->imask = IMASK_DEF;
1901 	bp->core_unit = ssb_core_unit(bp);
1902 	bp->dma_offset = SB_PCI_DMA;
1904 	/* XXX - really required?
1905 	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
/* PCI probe: enable and map the device, set the 30-bit DMA mask the
 * chip requires, allocate the net_device, wire up all the netdev ops,
 * read the chip invariants, and register with the network stack.
 * Unwinds through the err_out_* labels on failure.
 * NOTE(review): gapped extract -- several error-branch bodies, the
 * iounmap label and the final `return 0` are not visible here. */
1911 static int __devinit b44_init_one(struct pci_dev *pdev,
1912 				  const struct pci_device_id *ent)
/* Print the version banner only on the first probed device. */
1914 	static int b44_version_printed = 0;
1915 	unsigned long b44reg_base, b44reg_len;
1916 	struct net_device *dev;
1920 	if (b44_version_printed++ == 0)
1921 		printk(KERN_INFO "%s", version);
1923 	err = pci_enable_device(pdev);
1925 		printk(KERN_ERR PFX "Cannot enable PCI device, "
1930 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1931 		printk(KERN_ERR PFX "Cannot find proper PCI device "
1932 		       "base address, aborting.\n");
1934 		goto err_out_disable_pdev;
1937 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
1939 		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
1941 		goto err_out_disable_pdev;
1944 	pci_set_master(pdev);
/* B44_DMA_MASK = 0x3fffffff: the DMA engine can only address the low
 * 1GB, for both streaming and consistent mappings. */
1946 	err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
1948 		printk(KERN_ERR PFX "No usable DMA configuration, "
1950 		goto err_out_free_res;
1953 	err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
1955 		printk(KERN_ERR PFX "No usable DMA configuration, "
1957 		goto err_out_free_res;
1960 	b44reg_base = pci_resource_start(pdev, 0);
1961 	b44reg_len = pci_resource_len(pdev, 0);
1963 	dev = alloc_etherdev(sizeof(*bp));
1965 		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1967 		goto err_out_free_res;
1970 	SET_MODULE_OWNER(dev);
1971 	SET_NETDEV_DEV(dev,&pdev->dev);
1973 	/* No interesting netdevice features in this card... */
1976 	bp = netdev_priv(dev);
1980 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1982 	spin_lock_init(&bp->lock);
1984 	bp->regs = ioremap(b44reg_base, b44reg_len);
1985 	if (bp->regs == 0UL) {
1986 		printk(KERN_ERR PFX "Cannot map device registers, "
1989 		goto err_out_free_dev;
1992 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
1993 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
/* Hook up the net_device operations (pre-net_device_ops era style). */
1995 	dev->open = b44_open;
1996 	dev->stop = b44_close;
1997 	dev->hard_start_xmit = b44_start_xmit;
1998 	dev->get_stats = b44_get_stats;
1999 	dev->set_multicast_list = b44_set_rx_mode;
2000 	dev->set_mac_address = b44_set_mac_addr;
2001 	dev->do_ioctl = b44_ioctl;
2002 	dev->tx_timeout = b44_tx_timeout;
2003 	dev->poll = b44_poll;
2005 	dev->watchdog_timeo = B44_TX_TIMEOUT;
2006 #ifdef CONFIG_NET_POLL_CONTROLLER
2007 	dev->poll_controller = b44_poll_controller;
2009 	dev->change_mtu = b44_change_mtu;
2010 	dev->irq = pdev->irq;
2011 	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2013 	netif_carrier_off(dev);
2015 	err = b44_get_invariants(bp);
2017 		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
2019 		goto err_out_iounmap;
/* Generic MII glue: our read/write accessors, 5-bit phy/reg spaces. */
2022 	bp->mii_if.dev = dev;
2023 	bp->mii_if.mdio_read = b44_mii_read;
2024 	bp->mii_if.mdio_write = b44_mii_write;
2025 	bp->mii_if.phy_id = bp->phy_addr;
2026 	bp->mii_if.phy_id_mask = 0x1f;
2027 	bp->mii_if.reg_num_mask = 0x1f;
2029 	/* By default, advertise all speed/duplex settings. */
2030 	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2031 		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2033 	/* By default, auto-negotiate PAUSE. */
2034 	bp->flags |= B44_FLAG_PAUSE_AUTO;
2036 	err = register_netdev(dev);
2038 		printk(KERN_ERR PFX "Cannot register net device, "
2040 		goto err_out_iounmap;
2043 	pci_set_drvdata(pdev, dev);
/* Saved config space is restored on resume (see b44_resume). */
2045 	pci_save_state(bp->pdev);
2047 	/* Chip reset provides power to the b44 MAC & PCI cores, which
2048 	 * is necessary for MAC register access.
2052 	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2053 	for (i = 0; i < 6; i++)
2054 		printk("%2.2x%c", dev->dev_addr[i],
2055 		       i == 5 ? '\n' : ':');
/* Error unwind chain: release in reverse order of acquisition. */
2066 	pci_release_regions(pdev);
2068 err_out_disable_pdev:
2069 	pci_disable_device(pdev);
2070 	pci_set_drvdata(pdev, NULL);
/* PCI remove: unregister the netdev and release PCI resources.  The
 * iounmap/free_netdev calls are outside this gapped extract. */
2074 static void __devexit b44_remove_one(struct pci_dev *pdev)
2076 	struct net_device *dev = pci_get_drvdata(pdev);
2077 	struct b44 *bp = netdev_priv(dev);
2079 	unregister_netdev(dev);
2082 	pci_release_regions(pdev);
2083 	pci_disable_device(pdev);
2084 	pci_set_drvdata(pdev, NULL);
/* PCI suspend: if the interface is up, stop the link timer, halt the
 * chip and detach the netdev under the lock, then release the IRQ and
 * power down the PCI device. */
2087 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2089 	struct net_device *dev = pci_get_drvdata(pdev);
2090 	struct b44 *bp = netdev_priv(dev);
/* Nothing to quiesce if the interface was never opened. */
2092 	if (!netif_running(dev))
2095 	del_timer_sync(&bp->timer);
2097 	spin_lock_irq(&bp->lock);
2100 	netif_carrier_off(bp->dev);
2101 	netif_device_detach(bp->dev);
2104 	spin_unlock_irq(&bp->lock);
2106 	free_irq(dev->irq, dev);
2107 	pci_disable_device(pdev);
/* PCI resume: restore config space and bus mastering, then -- if the
 * interface was up -- re-request the IRQ, reattach the netdev, restart
 * the 1-second link timer and re-enable interrupts and TX.
 * NOTE(review): a request_irq failure is only logged; the code proceeds
 * as if the IRQ were live -- confirm that is intentional. */
2111 static int b44_resume(struct pci_dev *pdev)
2113 	struct net_device *dev = pci_get_drvdata(pdev);
2114 	struct b44 *bp = netdev_priv(dev);
2116 	pci_restore_state(pdev);
2117 	pci_enable_device(pdev);
2118 	pci_set_master(pdev);
2120 	if (!netif_running(dev))
2123 	if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2124 		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2126 	spin_lock_irq(&bp->lock);
2130 	netif_device_attach(bp->dev);
2131 	spin_unlock_irq(&bp->lock);
2133 	bp->timer.expires = jiffies + HZ;
2134 	add_timer(&bp->timer);
2136 	b44_enable_ints(bp);
2137 	netif_wake_queue(dev);
/* PCI driver registration table: probe/remove plus power management. */
2141 static struct pci_driver b44_driver = {
2142 	.name		= DRV_MODULE_NAME,
2143 	.id_table	= b44_pci_tbl,
2144 	.probe		= b44_init_one,
2145 	.remove		= __devexit_p(b44_remove_one),
2146 	.suspend	= b44_suspend,
2147 	.resume		= b44_resume,
/* Module init: derive the DMA-descriptor sync alignment/size from the
 * CPU cache line (descriptors must be synced in cache-line units), then
 * register the PCI driver. */
2150 static int __init b44_init(void)
2152 	unsigned int dma_desc_align_size = dma_get_cache_alignment();
2154 	/* Setup paramaters for syncing RX/TX DMA descriptors */
2155 	dma_desc_align_mask = ~(dma_desc_align_size - 1);
2156 	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2158 	return pci_module_init(&b44_driver);
/* Module exit: unregister the PCI driver (triggers remove per device). */
2161 static void __exit b44_cleanup(void)
2163 	pci_unregister_driver(&b44_driver);
/* Module entry/exit hookup. */
2166 module_init(b44_init);
2167 module_exit(b44_cleanup);