1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
6 * Distribute under GPL. */
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/netdevice.h>
13 #include <linux/ethtool.h>
14 #include <linux/mii.h>
15 #include <linux/if_ether.h>
16 #include <linux/etherdevice.h>
17 #include <linux/pci.h>
18 #include <linux/delay.h>
19 #include <linux/init.h>
20 #include <linux/version.h>
22 #include <asm/uaccess.h>
28 #define DRV_MODULE_NAME "b44"
29 #define PFX DRV_MODULE_NAME ": "
30 #define DRV_MODULE_VERSION "0.93"
31 #define DRV_MODULE_RELDATE "Mar, 2004"
33 #define B44_DEF_MSG_ENABLE \
43 /* length of time before we decide the hardware is borked,
44 * and dev->tx_timeout() should be called to fix the problem. */
46 #define B44_TX_TIMEOUT (5 * HZ)
48 /* hardware minimum and maximum for a single frame's data payload */
49 #define B44_MIN_MTU 60
50 #define B44_MAX_MTU 1500
52 #define B44_RX_RING_SIZE 512
53 #define B44_DEF_RX_RING_PENDING 200
54 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
	 B44_RX_RING_SIZE)
56 #define B44_TX_RING_SIZE 512
57 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
58 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
	 B44_TX_RING_SIZE)
61 #define TX_RING_GAP(BP) \
62 (B44_TX_RING_SIZE - (BP)->tx_pending)
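/* Number of TX descriptors currently available to the driver.  The
 * producer/consumer indices wrap around the ring, and TX_RING_GAP(BP)
 * entries are always left unused so a completely full ring can be told
 * apart from an empty one.
 */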
63 #define TX_BUFFS_AVAIL(BP) \
64 (((BP)->tx_cons <= (BP)->tx_prod) ? \
65 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
66 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
67 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
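/* Each RX buffer must hold a maximum-size frame (1536 bytes) plus the
 * rx_header area used by the chip (bp->rx_offset bytes) and a little slack.
 */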
69 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
71 /* minimum number of free TX descriptors required to wake up TX process */
72 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
74 static char version[] __devinitdata =
75 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77 MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
78 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
79 MODULE_LICENSE("GPL");
80 MODULE_PARM(b44_debug, "i");
81 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
83 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
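/* Example (illustrative values): loading with "modprobe b44 b44_debug=6"
 * makes the probe code set msg_enable to (1 << 6) - 1, i.e. the six lowest
 * NETIF_MSG_* categories; the default of -1 selects B44_DEF_MSG_ENABLE.
 */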
85 static struct pci_device_id b44_pci_tbl[] = {
86 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
87 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
88 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
89 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
90 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
91 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
92 { }	/* terminate list with empty entry */
};
95 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
97 static void b44_halt(struct b44 *);
98 static void b44_init_rings(struct b44 *);
99 static int b44_init_hw(struct b44 *);
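/* Poll 'reg' until 'bit' is cleared (when 'clear' is non-zero) or set
 * (when 'clear' is zero).  Gives up after 'timeout' polls and logs an
 * error; returns zero on success and non-zero on timeout.
 */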
101 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
102 u32 bit, unsigned long timeout, const int clear)
106 for (i = 0; i < timeout; i++) {
109 if (clear && !(val & bit))
111 if (!clear && (val & bit))
116 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
120 (clear ? "clear" : "set"));
126 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
127 * buzz words used on this company's website :-)
129 * All of these routines must be invoked with bp->lock held and
130 * interrupts disabled. */
134 #define SBID_PCI_MEM 1
135 #define SBID_PCI_CFG 2
136 #define SBID_PCI_DMA 3
137 #define SBID_SDRAM_SWAPPED 4
139 #define SBID_REG_SDRAM 6
140 #define SBID_REG_ILINE20 7
141 #define SBID_REG_EMAC 8
142 #define SBID_REG_CODEC 9
143 #define SBID_REG_USB 10
144 #define SBID_REG_PCI 11
145 #define SBID_REG_MIPS 12
146 #define SBID_REG_EXTIF 13
147 #define SBID_EXTIF 14
148 #define SBID_EJTAG 15
151 static u32 ssb_get_addr(struct b44 *bp, u32 id, u32 instance)
169 static u32 ssb_get_core_rev(struct b44 *bp)
171 return (br32(B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
174 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
176 u32 bar_orig, pci_rev, val;
178 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
179 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN,
180 ssb_get_addr(bp, SBID_REG_PCI, 0));
181 pci_rev = ssb_get_core_rev(bp);
183 val = br32(B44_SBINTVEC);
185 bw32(B44_SBINTVEC, val);
187 val = br32(SSB_PCI_TRANS_2);
188 val |= SSB_PCI_PREF | SSB_PCI_BURST;
189 bw32(SSB_PCI_TRANS_2, val);
191 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
196 static void ssb_core_disable(struct b44 *bp)
198 if (br32(B44_SBTMSLOW) & SBTMSLOW_RESET)
201 bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
202 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
203 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
204 bw32(B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
205 SBTMSLOW_REJECT | SBTMSLOW_RESET));
208 bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
213 static void ssb_core_reset(struct b44 *bp)
217 ssb_core_disable(bp);
218 bw32(B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
222 /* Clear SERR if set, this is a hw bug workaround. */
223 if (br32(B44_SBTMSHIGH) & SBTMSHIGH_SERR)
224 bw32(B44_SBTMSHIGH, 0);
226 val = br32(B44_SBIMSTATE);
227 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
228 bw32(B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
230 bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
234 bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK));
239 static int ssb_core_unit(struct b44 *bp)
242 u32 val = br32(B44_SBADMATCH0);
245 type = val & SBADMATCH0_TYPE_MASK;
248 base = val & SBADMATCH0_BS0_MASK;
252 base = val & SBADMATCH0_BS1_MASK;
257 base = val & SBADMATCH0_BS2_MASK;
264 static int ssb_is_core_up(struct b44 *bp)
266 return ((br32(B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
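/* Program one perfect-match filter entry: the last four bytes of the MAC
 * address go into CAM_DATA_LO, the first two bytes plus a valid flag into
 * CAM_DATA_HI, then a write command is issued and we spin until the CAM
 * controller is no longer busy.
 */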
270 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
274 val = ((u32) data[2]) << 24;
275 val |= ((u32) data[3]) << 16;
276 val |= ((u32) data[4]) << 8;
277 val |= ((u32) data[5]) << 0;
278 bw32(B44_CAM_DATA_LO, val);
279 val = (CAM_DATA_HI_VALID |
280 (((u32) data[0]) << 8) |
281 (((u32) data[1]) << 0));
282 bw32(B44_CAM_DATA_HI, val);
283 bw32(B44_CAM_CTRL, (CAM_CTRL_WRITE |
284 (index << CAM_CTRL_INDEX_SHIFT)));
285 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
288 static inline void __b44_disable_ints(struct b44 *bp)
293 static void b44_disable_ints(struct b44 *bp)
295 __b44_disable_ints(bp);
297 /* Flush posted writes. */
301 static void b44_enable_ints(struct b44 *bp)
303 bw32(B44_IMASK, bp->imask);
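/* MII management access.  Each PHY read/write builds a complete MDIO
 * frame in the B44_MDIO_DATA register (start bits, opcode, PHY address,
 * register address, turnaround, data) and then waits for the EMAC to
 * signal completion through the EMAC_INT_MII status bit.
 */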
306 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
310 bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
311 bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
312 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
313 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
314 (reg << MDIO_DATA_RA_SHIFT) |
315 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
316 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
317 *val = br32(B44_MDIO_DATA) & MDIO_DATA_DATA;
322 static int b44_writephy(struct b44 *bp, int reg, u32 val)
324 bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
325 bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
326 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
327 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
328 (reg << MDIO_DATA_RA_SHIFT) |
329 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
330 (val & MDIO_DATA_DATA)));
331 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
334 static int b44_phy_reset(struct b44 *bp)
339 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
343 err = b44_readphy(bp, MII_BMCR, &val);
345 if (val & BMCR_RESET) {
346 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
355 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
359 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
360 bp->flags |= pause_flags;
362 val = br32(B44_RXCONFIG);
363 if (pause_flags & B44_FLAG_RX_PAUSE)
364 val |= RXCONFIG_FLOW;
366 val &= ~RXCONFIG_FLOW;
367 bw32(B44_RXCONFIG, val);
369 val = br32(B44_MAC_FLOW);
370 if (pause_flags & B44_FLAG_TX_PAUSE)
371 val |= (MAC_FLOW_PAUSE_ENAB |
372 (0xc0 & MAC_FLOW_RX_HI_WATER));
374 val &= ~MAC_FLOW_PAUSE_ENAB;
375 bw32(B44_MAC_FLOW, val);
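/* Decide whether to generate and/or honour PAUSE frames based on what we
 * advertised ('local') and what the link partner advertised ('remote'),
 * following the usual 802.3x symmetric/asymmetric pause resolution rules.
 */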
378 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
380 u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE |
383 if (local & ADVERTISE_PAUSE_CAP) {
384 if (local & ADVERTISE_PAUSE_ASYM) {
385 if (remote & LPA_PAUSE_CAP)
386 pause_enab |= (B44_FLAG_TX_PAUSE |
388 else if (remote & LPA_PAUSE_ASYM)
389 pause_enab |= B44_FLAG_RX_PAUSE;
391 if (remote & LPA_PAUSE_CAP)
392 pause_enab |= (B44_FLAG_TX_PAUSE |
395 } else if (local & ADVERTISE_PAUSE_ASYM) {
396 if ((remote & LPA_PAUSE_CAP) &&
397 (remote & LPA_PAUSE_ASYM))
398 pause_enab |= B44_FLAG_TX_PAUSE;
401 __b44_set_flow_ctrl(bp, pause_enab);
404 static int b44_setup_phy(struct b44 *bp)
409 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
411 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
412 val & MII_ALEDCTRL_ALLMSK)) != 0)
414 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
416 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
417 val | MII_TLEDCTRL_ENABLE)) != 0)
420 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
421 u32 adv = ADVERTISE_CSMA;
423 if (bp->flags & B44_FLAG_ADV_10HALF)
424 adv |= ADVERTISE_10HALF;
425 if (bp->flags & B44_FLAG_ADV_10FULL)
426 adv |= ADVERTISE_10FULL;
427 if (bp->flags & B44_FLAG_ADV_100HALF)
428 adv |= ADVERTISE_100HALF;
429 if (bp->flags & B44_FLAG_ADV_100FULL)
430 adv |= ADVERTISE_100FULL;
432 if (bp->flags & B44_FLAG_PAUSE_AUTO)
433 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
435 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
437 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
438 BMCR_ANRESTART))) != 0)
443 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
445 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
446 if (bp->flags & B44_FLAG_100_BASE_T)
447 bmcr |= BMCR_SPEED100;
448 if (bp->flags & B44_FLAG_FULL_DUPLEX)
449 bmcr |= BMCR_FULLDPLX;
450 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
453 /* Since we will not be negotiating, there is no safe way
454  * to determine if the link partner supports flow control
455  * or not. So just disable it completely in this case. */
457 b44_set_flow_ctrl(bp, 0, 0);
464 static void b44_stats_update(struct b44 *bp)
469 val = &bp->hw_stats.tx_good_octets;
470 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
473 val = &bp->hw_stats.rx_good_octets;
474 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
479 static void b44_link_report(struct b44 *bp)
481 if (!netif_carrier_ok(bp->dev)) {
482 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
484 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
486 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
487 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
489 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
492 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
493 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
497 static void b44_check_phy(struct b44 *bp)
501 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
502 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
504 if (aux & MII_AUXCTRL_SPEED)
505 bp->flags |= B44_FLAG_100_BASE_T;
507 bp->flags &= ~B44_FLAG_100_BASE_T;
508 if (aux & MII_AUXCTRL_DUPLEX)
509 bp->flags |= B44_FLAG_FULL_DUPLEX;
511 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
513 if (!netif_carrier_ok(bp->dev) &&
514 (bmsr & BMSR_LSTATUS)) {
515 u32 val = br32(B44_TX_CTRL);
516 u32 local_adv, remote_adv;
518 if (bp->flags & B44_FLAG_FULL_DUPLEX)
519 val |= TX_CTRL_DUPLEX;
521 val &= ~TX_CTRL_DUPLEX;
522 bw32(B44_TX_CTRL, val);
524 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
525 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
526 !b44_readphy(bp, MII_LPA, &remote_adv))
527 b44_set_flow_ctrl(bp, local_adv, remote_adv);
530 netif_carrier_on(bp->dev);
532 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
534 netif_carrier_off(bp->dev);
538 if (bmsr & BMSR_RFAULT)
539 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
542 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
547 static void b44_timer(unsigned long __opaque)
549 struct b44 *bp = (struct b44 *) __opaque;
551 spin_lock_irq(&bp->lock);
555 b44_stats_update(bp);
557 spin_unlock_irq(&bp->lock);
559 bp->timer.expires = jiffies + HZ;
560 add_timer(&bp->timer);
563 static void b44_tx(struct b44 *bp)
567 cur = br32(B44_DMATX_STAT) & DMATX_STAT_CDMASK;
568 cur /= sizeof(struct dma_desc);
570 /* XXX needs updating when NETIF_F_SG is supported */
571 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
572 struct ring_info *rp = &bp->tx_buffers[cons];
573 struct sk_buff *skb = rp->skb;
575 if (unlikely(skb == NULL))
578 pci_unmap_single(bp->pdev,
579 pci_unmap_addr(rp, mapping),
583 dev_kfree_skb_irq(skb);
587 if (netif_queue_stopped(bp->dev) &&
588 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
589 netif_wake_queue(bp->dev);
591 bw32(B44_GPTIMER, 0);
594 /* Works like this. This chip writes a "struct rx_header" 30 bytes
595  * before the DMA address you give it. So we allocate 30 more bytes
596  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
597  * point the chip at 30 bytes past where the rx_header will go. */
599 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
602 struct ring_info *src_map, *map;
603 struct rx_header *rh;
611 src_map = &bp->rx_buffers[src_idx];
612 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
613 map = &bp->rx_buffers[dest_idx];
614 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
619 mapping = pci_map_single(bp->pdev, skb->data,
622 skb_reserve(skb, bp->rx_offset);
624 rh = (struct rx_header *)
625 (skb->data - bp->rx_offset);
630 pci_unmap_addr_set(map, mapping, mapping);
635 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
636 if (dest_idx == (B44_RX_RING_SIZE - 1))
637 ctrl |= DESC_CTRL_EOT;
639 dp = &bp->rx_ring[dest_idx];
640 dp->ctrl = cpu_to_le32(ctrl);
641 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
643 return RX_PKT_BUF_SZ;
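/* Reuse the DMA buffer from slot 'src_idx' at slot 'dest_idx' without
 * allocating anything: the skb and its DMA mapping move to the new slot
 * and the destination descriptor is rewritten, taking care to set or
 * clear the end-of-table bit as appropriate for the new position.
 */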
646 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
648 struct dma_desc *src_desc, *dest_desc;
649 struct ring_info *src_map, *dest_map;
650 struct rx_header *rh;
654 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
655 dest_desc = &bp->rx_ring[dest_idx];
656 dest_map = &bp->rx_buffers[dest_idx];
657 src_desc = &bp->rx_ring[src_idx];
658 src_map = &bp->rx_buffers[src_idx];
660 dest_map->skb = src_map->skb;
661 rh = (struct rx_header *) src_map->skb->data;
664 pci_unmap_addr_set(dest_map, mapping,
665 pci_unmap_addr(src_map, mapping));
667 ctrl = src_desc->ctrl;
668 if (dest_idx == (B44_RX_RING_SIZE - 1))
669 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
671 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
673 dest_desc->ctrl = ctrl;
674 dest_desc->addr = src_desc->addr;
677 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
682 static int b44_rx(struct b44 *bp, int budget)
688 prod = br32(B44_DMARX_STAT) & DMARX_STAT_CDMASK;
689 prod /= sizeof(struct dma_desc);
692 while (cons != prod && budget > 0) {
693 struct ring_info *rp = &bp->rx_buffers[cons];
694 struct sk_buff *skb = rp->skb;
695 dma_addr_t map = pci_unmap_addr(rp, mapping);
696 struct rx_header *rh;
699 pci_dma_sync_single_for_cpu(bp->pdev, map,
702 rh = (struct rx_header *) skb->data;
703 len = cpu_to_le16(rh->len);
704 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
705 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
707 b44_recycle_rx(bp, cons, bp->rx_prod);
709 bp->stats.rx_dropped++;
719 len = cpu_to_le16(rh->len);
720 } while (len == 0 && i++ < 5);
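		/* Copybreak: large frames are handed straight up the stack and a
		 * fresh buffer is allocated for the ring; small frames are copied
		 * into a new skb so that the original DMA buffer can be recycled.
		 */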
728 if (len > RX_COPY_THRESHOLD) {
730 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
733 pci_unmap_single(bp->pdev, map,
734 skb_size, PCI_DMA_FROMDEVICE);
735 /* Leave out rx_header */
736 skb_put(skb, len+bp->rx_offset);
737 skb_pull(skb,bp->rx_offset);
739 struct sk_buff *copy_skb;
741 b44_recycle_rx(bp, cons, bp->rx_prod);
742 copy_skb = dev_alloc_skb(len + 2);
743 if (copy_skb == NULL)
744 goto drop_it_no_recycle;
746 copy_skb->dev = bp->dev;
747 skb_reserve(copy_skb, 2);
748 skb_put(copy_skb, len);
749 /* DMA sync done above, copy just the actual packet */
750 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
754 skb->ip_summed = CHECKSUM_NONE;
755 skb->protocol = eth_type_trans(skb, bp->dev);
756 netif_receive_skb(skb);
757 bp->dev->last_rx = jiffies;
761 bp->rx_prod = (bp->rx_prod + 1) &
762 (B44_RX_RING_SIZE - 1);
763 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
767 bw32(B44_DMARX_PTR, cons * sizeof(struct dma_desc));
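/* NAPI poll callback.  TX completions and error handling are done under
 * bp->lock; receive processing is bounded by the smaller of *budget and
 * dev->quota, and interrupts are only re-enabled once all pending work
 * has been completed.
 */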
772 static int b44_poll(struct net_device *netdev, int *budget)
774 struct b44 *bp = netdev->priv;
777 spin_lock_irq(&bp->lock);
779 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
780 /* spin_lock(&bp->tx_lock); */
782 /* spin_unlock(&bp->tx_lock); */
784 spin_unlock_irq(&bp->lock);
787 if (bp->istat & ISTAT_RX) {
788 int orig_budget = *budget;
791 if (orig_budget > netdev->quota)
792 orig_budget = netdev->quota;
794 work_done = b44_rx(bp, orig_budget);
796 *budget -= work_done;
797 netdev->quota -= work_done;
799 if (work_done >= orig_budget)
803 if (bp->istat & ISTAT_ERRORS) {
804 spin_lock_irq(&bp->lock);
808 netif_wake_queue(bp->dev);
809 spin_unlock_irq(&bp->lock);
814 netif_rx_complete(netdev);
818 return (done ? 0 : 1);
821 static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
823 struct net_device *dev = dev_id;
824 struct b44 *bp = dev->priv;
829 spin_lock_irqsave(&bp->lock, flags);
831 istat = br32(B44_ISTAT);
832 imask = br32(B44_IMASK);
834 /* ??? What is the purpose of the interrupt mask
835  * ??? register if we have to mask it out by hand anyway? */
840 if (netif_rx_schedule_prep(dev)) {
841 /* NOTE: These writes are flushed by the readback of
842  * the ISTAT register below. */
845 __b44_disable_ints(bp);
846 __netif_rx_schedule(dev);
848 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
852 bw32(B44_ISTAT, istat);
855 spin_unlock_irqrestore(&bp->lock, flags);
856 return IRQ_RETVAL(handled);
859 static void b44_tx_timeout(struct net_device *dev)
861 struct b44 *bp = dev->priv;
863 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
866 spin_lock_irq(&bp->lock);
872 spin_unlock_irq(&bp->lock);
876 netif_wake_queue(dev);
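/* Queue a single frame for transmission: map the skb for DMA, fill in
 * the next TX descriptor (length, SOF/EOF, interrupt-on-completion, and
 * EOT on the last ring slot), then write the new producer index to
 * B44_DMATX_PTR so the chip picks up the descriptor.
 */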
879 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
881 struct b44 *bp = dev->priv;
883 u32 len, entry, ctrl;
886 spin_lock_irq(&bp->lock);
888 /* This is a hard error, log it. */
889 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
890 netif_stop_queue(dev);
891 spin_unlock_irq(&bp->lock);
892 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
898 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
900 bp->tx_buffers[entry].skb = skb;
901 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
903 ctrl = (len & DESC_CTRL_LEN);
904 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
905 if (entry == (B44_TX_RING_SIZE - 1))
906 ctrl |= DESC_CTRL_EOT;
908 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
909 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
911 entry = NEXT_TX(entry);
917 bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
918 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
919 bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
920 if (bp->flags & B44_FLAG_REORDER_BUG)
923 if (TX_BUFFS_AVAIL(bp) < 1)
924 netif_stop_queue(dev);
926 spin_unlock_irq(&bp->lock);
928 dev->trans_start = jiffies;
933 static int b44_change_mtu(struct net_device *dev, int new_mtu)
935 struct b44 *bp = dev->priv;
937 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
940 if (!netif_running(dev)) {
941 /* We'll just catch it later when the
 * device is brought up. */
948 spin_lock_irq(&bp->lock);
953 spin_unlock_irq(&bp->lock);
960 /* Free up pending packets in all rx/tx rings.
962 * The chip has been shut down and the driver detached from
963 * the networking stack, so no interrupts or new tx packets will
964 * end up in the driver. bp->lock is not held and we are not
965 * in an interrupt context and thus may sleep. */
967 static void b44_free_rings(struct b44 *bp)
969 struct ring_info *rp;
972 for (i = 0; i < B44_RX_RING_SIZE; i++) {
973 rp = &bp->rx_buffers[i];
977 pci_unmap_single(bp->pdev,
978 pci_unmap_addr(rp, mapping),
981 dev_kfree_skb_any(rp->skb);
985 /* XXX needs changes once NETIF_F_SG is set... */
986 for (i = 0; i < B44_TX_RING_SIZE; i++) {
987 rp = &bp->tx_buffers[i];
991 pci_unmap_single(bp->pdev,
992 pci_unmap_addr(rp, mapping),
995 dev_kfree_skb_any(rp->skb);
1000 /* Initialize tx/rx rings for packet processing.
1002 * The chip has been shut down and the driver detached from
1003 * the networking stack, so no interrupts or new tx packets will
1004 * end up in the driver. bp->lock is not held and we are not
1005 * in an interrupt context and thus may sleep. */
1007 static void b44_init_rings(struct b44 *bp)
1013 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1014 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1016 for (i = 0; i < bp->rx_pending; i++) {
1017 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1023 /* Must not be invoked with interrupt sources disabled and
1024  * the hardware shut down. */
1026 static void b44_free_consistent(struct b44 *bp)
1028 if (bp->rx_buffers) {
1029 kfree(bp->rx_buffers);
1030 bp->rx_buffers = NULL;
1032 if (bp->tx_buffers) {
1033 kfree(bp->tx_buffers);
1034 bp->tx_buffers = NULL;
1037 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1038 bp->rx_ring, bp->rx_ring_dma);
1042 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1043 bp->tx_ring, bp->tx_ring_dma);
1049 /* Must not be invoked with interrupt sources disabled and
1050  * the hardware shut down. Can sleep. */
1052 static int b44_alloc_consistent(struct b44 *bp)
1056 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1057 bp->rx_buffers = kmalloc(size, GFP_KERNEL);
1058 if (!bp->rx_buffers)
1060 memset(bp->rx_buffers, 0, size);
1062 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1063 bp->tx_buffers = kmalloc(size, GFP_KERNEL);
1064 if (!bp->tx_buffers)
1066 memset(bp->tx_buffers, 0, size);
1068 size = DMA_TABLE_BYTES;
1069 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1073 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1080 b44_free_consistent(bp);
1084 /* bp->lock is held. */
1085 static void b44_clear_stats(struct b44 *bp)
1089 bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1090 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1092 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1096 /* bp->lock is held. */
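/* Bring the ethernet core into a known state: quiesce the transmit and
 * receive DMA engines, reset the core over the Sonics backplane, clear
 * the MIB counters, and make the PHY reachable over MDIO (selecting the
 * internal or an external PHY as appropriate).
 */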
1097 static void b44_chip_reset(struct b44 *bp)
1099 if (ssb_is_core_up(bp)) {
1100 bw32(B44_RCV_LAZY, 0);
1101 bw32(B44_ENET_CTRL, ENET_CTRL_DISABLE);
1102 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1103 bw32(B44_DMATX_CTRL, 0);
1104 bp->tx_prod = bp->tx_cons = 0;
1105 if (br32(B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1106 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1109 bw32(B44_DMARX_CTRL, 0);
1110 bp->rx_prod = bp->rx_cons = 0;
1112 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1119 b44_clear_stats(bp);
1121 /* Make PHY accessible. */
1122 bw32(B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1123 (0x0d & MDIO_CTRL_MAXF_MASK)));
1124 br32(B44_MDIO_CTRL);
1126 if (!(br32(B44_DEVCTRL) & DEVCTRL_IPP)) {
1127 bw32(B44_ENET_CTRL, ENET_CTRL_EPSEL);
1128 br32(B44_ENET_CTRL);
1129 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1131 u32 val = br32(B44_DEVCTRL);
1133 if (val & DEVCTRL_EPR) {
1134 bw32(B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1138 bp->flags |= B44_FLAG_INTERNAL_PHY;
1142 /* bp->lock is held. */
1143 static void b44_halt(struct b44 *bp)
1145 b44_disable_ints(bp);
1149 /* bp->lock is held. */
1150 static void __b44_set_mac_addr(struct b44 *bp)
1152 bw32(B44_CAM_CTRL, 0);
1153 if (!(bp->dev->flags & IFF_PROMISC)) {
1156 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1157 val = br32(B44_CAM_CTRL);
1158 bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1162 static int b44_set_mac_addr(struct net_device *dev, void *p)
1164 struct b44 *bp = dev->priv;
1165 struct sockaddr *addr = p;
1167 if (netif_running(dev))
1170 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1172 spin_lock_irq(&bp->lock);
1173 __b44_set_mac_addr(bp);
1174 spin_unlock_irq(&bp->lock);
1179 /* Called at device open time to get the chip ready for
1180 * packet processing. Invoked with bp->lock held. */
1182 static void __b44_set_rx_mode(struct net_device *);
1183 static int b44_init_hw(struct b44 *bp)
1190 val = br32(B44_MAC_CTRL);
1191 bw32(B44_MAC_CTRL, val | MAC_CTRL_CRC32_ENAB);
1192 bw32(B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1194 /* This sets the MAC address too. */
1195 __b44_set_rx_mode(bp->dev);
1197 /* MTU + eth header + possible VLAN tag + struct rx_header */
1198 bw32(B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1199 bw32(B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1201 bw32(B44_TX_WMARK, 56); /* XXX magic */
1202 bw32(B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1203 bw32(B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1204 bw32(B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1205 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1206 bw32(B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1208 bw32(B44_DMARX_PTR, bp->rx_pending);
1209 bp->rx_prod = bp->rx_pending;
1211 bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1213 val = br32(B44_ENET_CTRL);
1214 bw32(B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1219 static int b44_open(struct net_device *dev)
1221 struct b44 *bp = dev->priv;
1224 err = b44_alloc_consistent(bp);
1228 err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1232 spin_lock_irq(&bp->lock);
1235 err = b44_init_hw(bp);
1237 goto err_out_noinit;
1238 bp->flags |= B44_FLAG_INIT_COMPLETE;
1240 spin_unlock_irq(&bp->lock);
1242 init_timer(&bp->timer);
1243 bp->timer.expires = jiffies + HZ;
1244 bp->timer.data = (unsigned long) bp;
1245 bp->timer.function = b44_timer;
1246 add_timer(&bp->timer);
1248 b44_enable_ints(bp);
1255 spin_unlock_irq(&bp->lock);
1256 free_irq(dev->irq, dev);
1258 b44_free_consistent(bp);
1263 /*static*/ void b44_dump_state(struct b44 *bp)
1265 u32 val32, val32_2, val32_3, val32_4, val32_5;
1268 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1269 printk("DEBUG: PCI status [%04x] \n", val16);
1274 static int b44_close(struct net_device *dev)
1276 struct b44 *bp = dev->priv;
1278 netif_stop_queue(dev);
1280 del_timer_sync(&bp->timer);
1282 spin_lock_irq(&bp->lock);
1289 bp->flags &= ~B44_FLAG_INIT_COMPLETE;
1290 netif_carrier_off(bp->dev);
1292 spin_unlock_irq(&bp->lock);
1294 free_irq(dev->irq, dev);
1296 b44_free_consistent(bp);
1301 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1303 struct b44 *bp = dev->priv;
1304 struct net_device_stats *nstat = &bp->stats;
1305 struct b44_hw_stats *hwstat = &bp->hw_stats;
1307 /* Convert HW stats into netdevice stats. */
1308 nstat->rx_packets = hwstat->rx_pkts;
1309 nstat->tx_packets = hwstat->tx_pkts;
1310 nstat->rx_bytes = hwstat->rx_octets;
1311 nstat->tx_bytes = hwstat->tx_octets;
1312 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1313 hwstat->tx_oversize_pkts +
1314 hwstat->tx_underruns +
1315 hwstat->tx_excessive_cols +
1316 hwstat->tx_late_cols);
1317 nstat->multicast = hwstat->tx_multicast_pkts;
1318 nstat->collisions = hwstat->tx_total_cols;
1320 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1321 hwstat->rx_undersize);
1322 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1323 nstat->rx_frame_errors = hwstat->rx_align_errs;
1324 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1325 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1326 hwstat->rx_oversize_pkts +
1327 hwstat->rx_missed_pkts +
1328 hwstat->rx_crc_align_errs +
1329 hwstat->rx_undersize +
1330 hwstat->rx_crc_errs +
1331 hwstat->rx_align_errs +
1332 hwstat->rx_symbol_errs);
1334 nstat->tx_aborted_errors = hwstat->tx_underruns;
1335 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1340 static void __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1342 struct dev_mc_list *mclist;
1345 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1346 mclist = dev->mc_list;
1347 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1348 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1352 static void __b44_set_rx_mode(struct net_device *dev)
1354 struct b44 *bp = dev->priv;
1357 val = br32(B44_RXCONFIG);
1358 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1359 if (dev->flags & IFF_PROMISC) {
1360 val |= RXCONFIG_PROMISC;
1361 bw32(B44_RXCONFIG, val);
1363 __b44_set_mac_addr(bp);
1365 if (dev->flags & IFF_ALLMULTI)
1366 val |= RXCONFIG_ALLMULTI;
1368 __b44_load_mcast(bp, dev);
1370 bw32(B44_RXCONFIG, val);
1371 val = br32(B44_CAM_CTRL);
1372 bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1376 static void b44_set_rx_mode(struct net_device *dev)
1378 struct b44 *bp = dev->priv;
1380 spin_lock_irq(&bp->lock);
1381 __b44_set_rx_mode(dev);
1382 spin_unlock_irq(&bp->lock);
1385 static int b44_ethtool_ioctl (struct net_device *dev, void __user *useraddr)
1387 struct b44 *bp = dev->priv;
1388 struct pci_dev *pci_dev = bp->pdev;
1391 if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
1395 case ETHTOOL_GDRVINFO:{
1396 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1397 strcpy (info.driver, DRV_MODULE_NAME);
1398 strcpy (info.version, DRV_MODULE_VERSION);
1399 memset(&info.fw_version, 0, sizeof(info.fw_version));
1400 strcpy (info.bus_info, pci_name(pci_dev));
1401 info.eedump_len = 0;
1402 info.regdump_len = 0;
1403 if (copy_to_user (useraddr, &info, sizeof (info)))
1408 case ETHTOOL_GSET: {
1409 struct ethtool_cmd cmd = { ETHTOOL_GSET };
1411 if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
1413 cmd.supported = (SUPPORTED_Autoneg);
1414 cmd.supported |= (SUPPORTED_100baseT_Half |
1415 SUPPORTED_100baseT_Full |
1416 SUPPORTED_10baseT_Half |
1417 SUPPORTED_10baseT_Full |
1420 cmd.advertising = 0;
1421 if (bp->flags & B44_FLAG_ADV_10HALF)
1422 cmd.advertising |= ADVERTISE_10HALF;
1423 if (bp->flags & B44_FLAG_ADV_10FULL)
1424 cmd.advertising |= ADVERTISE_10FULL;
1425 if (bp->flags & B44_FLAG_ADV_100HALF)
1426 cmd.advertising |= ADVERTISE_100HALF;
1427 if (bp->flags & B44_FLAG_ADV_100FULL)
1428 cmd.advertising |= ADVERTISE_100FULL;
1429 cmd.advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1430 cmd.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1431 SPEED_100 : SPEED_10;
1432 cmd.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1433 DUPLEX_FULL : DUPLEX_HALF;
1435 cmd.phy_address = bp->phy_addr;
1436 cmd.transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1437 XCVR_INTERNAL : XCVR_EXTERNAL;
1438 cmd.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1439 AUTONEG_DISABLE : AUTONEG_ENABLE;
1442 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
1446 case ETHTOOL_SSET: {
1447 struct ethtool_cmd cmd;
1449 if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
1452 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1455 /* We do not support gigabit. */
1456 if (cmd.autoneg == AUTONEG_ENABLE) {
1457 if (cmd.advertising &
1458 (ADVERTISED_1000baseT_Half |
1459 ADVERTISED_1000baseT_Full))
1461 } else if ((cmd.speed != SPEED_100 &&
1462 cmd.speed != SPEED_10) ||
1463 (cmd.duplex != DUPLEX_HALF &&
1464 cmd.duplex != DUPLEX_FULL)) {
1468 spin_lock_irq(&bp->lock);
1470 if (cmd.autoneg == AUTONEG_ENABLE) {
1471 bp->flags &= ~B44_FLAG_FORCE_LINK;
1472 bp->flags &= ~(B44_FLAG_ADV_10HALF |
1473 B44_FLAG_ADV_10FULL |
1474 B44_FLAG_ADV_100HALF |
1475 B44_FLAG_ADV_100FULL);
1476 if (cmd.advertising & ADVERTISE_10HALF)
1477 bp->flags |= B44_FLAG_ADV_10HALF;
1478 if (cmd.advertising & ADVERTISE_10FULL)
1479 bp->flags |= B44_FLAG_ADV_10FULL;
1480 if (cmd.advertising & ADVERTISE_100HALF)
1481 bp->flags |= B44_FLAG_ADV_100HALF;
1482 if (cmd.advertising & ADVERTISE_100FULL)
1483 bp->flags |= B44_FLAG_ADV_100FULL;
1485 bp->flags |= B44_FLAG_FORCE_LINK;
1486 if (cmd.speed == SPEED_100)
1487 bp->flags |= B44_FLAG_100_BASE_T;
1488 if (cmd.duplex == DUPLEX_FULL)
1489 bp->flags |= B44_FLAG_FULL_DUPLEX;
1494 spin_unlock_irq(&bp->lock);
1499 case ETHTOOL_GMSGLVL: {
1500 struct ethtool_value edata = { ETHTOOL_GMSGLVL };
1501 edata.data = bp->msg_enable;
1502 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1506 case ETHTOOL_SMSGLVL: {
1507 struct ethtool_value edata;
1508 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1510 bp->msg_enable = edata.data;
1513 case ETHTOOL_NWAY_RST: {
1517 spin_lock_irq(&bp->lock);
1518 b44_readphy(bp, MII_BMCR, &bmcr);
1519 b44_readphy(bp, MII_BMCR, &bmcr);
1521 if (bmcr & BMCR_ANENABLE) {
1522 b44_writephy(bp, MII_BMCR,
1523 bmcr | BMCR_ANRESTART);
1526 spin_unlock_irq(&bp->lock);
1530 case ETHTOOL_GLINK: {
1531 struct ethtool_value edata = { ETHTOOL_GLINK };
1532 edata.data = netif_carrier_ok(bp->dev) ? 1 : 0;
1533 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1537 case ETHTOOL_GRINGPARAM: {
1538 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
1540 ering.rx_max_pending = B44_RX_RING_SIZE - 1;
1541 ering.rx_pending = bp->rx_pending;
1543 /* XXX ethtool lacks a tx_max_pending, oops... */
1545 if (copy_to_user(useraddr, &ering, sizeof(ering)))
1549 case ETHTOOL_SRINGPARAM: {
1550 struct ethtool_ringparam ering;
1552 if (copy_from_user(&ering, useraddr, sizeof(ering)))
1555 if ((ering.rx_pending > B44_RX_RING_SIZE - 1) ||
1556 (ering.rx_mini_pending != 0) ||
1557 (ering.rx_jumbo_pending != 0) ||
1558 (ering.tx_pending > B44_TX_RING_SIZE - 1))
1561 spin_lock_irq(&bp->lock);
1563 bp->rx_pending = ering.rx_pending;
1564 bp->tx_pending = ering.tx_pending;
1569 netif_wake_queue(bp->dev);
1570 spin_unlock_irq(&bp->lock);
1572 b44_enable_ints(bp);
1576 case ETHTOOL_GPAUSEPARAM: {
1577 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
1580 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1582 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1584 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1585 if (copy_to_user(useraddr, &epause, sizeof(epause)))
1589 case ETHTOOL_SPAUSEPARAM: {
1590 struct ethtool_pauseparam epause;
1592 if (copy_from_user(&epause, useraddr, sizeof(epause)))
1595 spin_lock_irq(&bp->lock);
1597 bp->flags |= B44_FLAG_PAUSE_AUTO;
1599 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1600 if (epause.rx_pause)
1601 bp->flags |= B44_FLAG_RX_PAUSE;
1603 bp->flags &= ~B44_FLAG_RX_PAUSE;
1604 if (epause.tx_pause)
1605 bp->flags |= B44_FLAG_TX_PAUSE;
1607 bp->flags &= ~B44_FLAG_TX_PAUSE;
1608 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1613 __b44_set_flow_ctrl(bp, bp->flags);
1615 spin_unlock_irq(&bp->lock);
1617 b44_enable_ints(bp);
1626 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1628 struct mii_ioctl_data __user *data = (struct mii_ioctl_data __user *)&ifr->ifr_data;
1629 struct b44 *bp = dev->priv;
1634 return b44_ethtool_ioctl(dev, (void __user*) ifr->ifr_data);
1637 data->phy_id = bp->phy_addr;
1643 spin_lock_irq(&bp->lock);
1644 err = b44_readphy(bp, data->reg_num & 0x1f, &mii_regval);
1645 spin_unlock_irq(&bp->lock);
1647 data->val_out = mii_regval;
1653 if (!capable(CAP_NET_ADMIN))
1656 spin_lock_irq(&bp->lock);
1657 err = b44_writephy(bp, data->reg_num & 0x1f, data->val_in);
1658 spin_unlock_irq(&bp->lock);
1669 /* Read 128 bytes of EEPROM. */
1670 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1673 u16 *ptr = (u16 *) data;
1675 for (i = 0; i < 128; i += 2)
1676 ptr[i / 2] = readw(bp->regs + 4096 + i);
1681 static int __devinit b44_get_invariants(struct b44 *bp)
1686 err = b44_read_eeprom(bp, &eeprom[0]);
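	/* The MAC address bytes appear pair-swapped in the SPROM image (each
	 * 16-bit word is stored big-endian), hence the shuffled indices below.
	 */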
1690 bp->dev->dev_addr[0] = eeprom[79];
1691 bp->dev->dev_addr[1] = eeprom[78];
1692 bp->dev->dev_addr[2] = eeprom[81];
1693 bp->dev->dev_addr[3] = eeprom[80];
1694 bp->dev->dev_addr[4] = eeprom[83];
1695 bp->dev->dev_addr[5] = eeprom[82];
1697 bp->phy_addr = eeprom[90] & 0x1f;
1698 bp->mdc_port = (eeprom[90] >> 14) & 0x1;
1700 /* With this, plus the rx_header prepended to the data by the
1701 * hardware, we'll land the ethernet header on a 2-byte boundary. */
1705 bp->imask = IMASK_DEF;
1707 bp->core_unit = ssb_core_unit(bp);
1708 bp->dma_offset = ssb_get_addr(bp, SBID_PCI_DMA, 0);
1710 /* XXX - really required?
1711 bp->flags |= B44_FLAG_BUGGY_TXPTR; */
1717 static int __devinit b44_init_one(struct pci_dev *pdev,
1718 const struct pci_device_id *ent)
1720 static int b44_version_printed = 0;
1721 unsigned long b44reg_base, b44reg_len;
1722 struct net_device *dev;
1726 if (b44_version_printed++ == 0)
1727 printk(KERN_INFO "%s", version);
1729 err = pci_enable_device(pdev);
1731 printk(KERN_ERR PFX "Cannot enable PCI device, "
1736 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1737 printk(KERN_ERR PFX "Cannot find proper PCI device "
1738 "base address, aborting.\n");
1740 goto err_out_disable_pdev;
1743 err = pci_request_regions(pdev, DRV_MODULE_NAME);
1745 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
1747 goto err_out_disable_pdev;
1750 pci_set_master(pdev);
1752 err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
1754 printk(KERN_ERR PFX "No usable DMA configuration, "
1756 goto err_out_free_res;
1759 b44reg_base = pci_resource_start(pdev, 0);
1760 b44reg_len = pci_resource_len(pdev, 0);
1762 dev = alloc_etherdev(sizeof(*bp));
1764 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1766 goto err_out_free_res;
1769 SET_MODULE_OWNER(dev);
1770 SET_NETDEV_DEV(dev,&pdev->dev);
1772 /* No interesting netdevice features in this card... */
1779 bp->msg_enable = (1 << b44_debug) - 1;
1781 bp->msg_enable = B44_DEF_MSG_ENABLE;
1783 spin_lock_init(&bp->lock);
1785 bp->regs = (unsigned long) ioremap(b44reg_base, b44reg_len);
1786 if (bp->regs == 0UL) {
1787 printk(KERN_ERR PFX "Cannot map device registers, "
1790 goto err_out_free_dev;
1793 bp->rx_pending = B44_DEF_RX_RING_PENDING;
1794 bp->tx_pending = B44_DEF_TX_RING_PENDING;
1796 dev->open = b44_open;
1797 dev->stop = b44_close;
1798 dev->hard_start_xmit = b44_start_xmit;
1799 dev->get_stats = b44_get_stats;
1800 dev->set_multicast_list = b44_set_rx_mode;
1801 dev->set_mac_address = b44_set_mac_addr;
1802 dev->do_ioctl = b44_ioctl;
1803 dev->tx_timeout = b44_tx_timeout;
1804 dev->poll = b44_poll;
1806 dev->watchdog_timeo = B44_TX_TIMEOUT;
1807 dev->change_mtu = b44_change_mtu;
1808 dev->irq = pdev->irq;
1810 err = b44_get_invariants(bp);
1812 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
1814 goto err_out_iounmap;
1817 /* By default, advertise all speed/duplex settings. */
1818 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
1819 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
1821 /* By default, auto-negotiate PAUSE. */
1822 bp->flags |= B44_FLAG_PAUSE_AUTO;
1824 err = register_netdev(dev);
1826 printk(KERN_ERR PFX "Cannot register net device, "
1828 goto err_out_iounmap;
1831 pci_set_drvdata(pdev, dev);
1833 pci_save_state(bp->pdev, bp->pci_cfg_state);
1835 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
1836 for (i = 0; i < 6; i++)
1837 printk("%2.2x%c", dev->dev_addr[i],
1838 i == 5 ? '\n' : ':');
1843 iounmap((void *) bp->regs);
1849 pci_release_regions(pdev);
1851 err_out_disable_pdev:
1852 pci_disable_device(pdev);
1853 pci_set_drvdata(pdev, NULL);
1857 static void __devexit b44_remove_one(struct pci_dev *pdev)
1859 struct net_device *dev = pci_get_drvdata(pdev);
1862 unregister_netdev(dev);
1863 iounmap((void *) ((struct b44 *)(dev->priv))->regs);
1865 pci_release_regions(pdev);
1866 pci_disable_device(pdev);
1867 pci_set_drvdata(pdev, NULL);
1871 static int b44_suspend(struct pci_dev *pdev, u32 state)
1873 struct net_device *dev = pci_get_drvdata(pdev);
1874 struct b44 *bp = dev->priv;
1876 if (!netif_running(dev))
1879 del_timer_sync(&bp->timer);
1881 spin_lock_irq(&bp->lock);
1884 netif_carrier_off(bp->dev);
1885 netif_device_detach(bp->dev);
1888 spin_unlock_irq(&bp->lock);
1892 static int b44_resume(struct pci_dev *pdev)
1894 struct net_device *dev = pci_get_drvdata(pdev);
1895 struct b44 *bp = dev->priv;
1897 if (!netif_running(dev))
1900 pci_restore_state(pdev, bp->pci_cfg_state);
1902 spin_lock_irq(&bp->lock);
1906 netif_device_attach(bp->dev);
1907 spin_unlock_irq(&bp->lock);
1909 bp->timer.expires = jiffies + HZ;
1910 add_timer(&bp->timer);
1912 b44_enable_ints(bp);
1916 static struct pci_driver b44_driver = {
1917 .name = DRV_MODULE_NAME,
1918 .id_table = b44_pci_tbl,
1919 .probe = b44_init_one,
1920 .remove = __devexit_p(b44_remove_one),
1921 .suspend = b44_suspend,
1922 .resume = b44_resume,
1925 static int __init b44_init(void)
1927 return pci_module_init(&b44_driver);
1930 static void __exit b44_cleanup(void)
1932 pci_unregister_driver(&b44_driver);
1935 module_init(b44_init);
1936 module_exit(b44_cleanup);