ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / drivers / net / b44.c
1 /* b44.c: Broadcom 4400 device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5  *
6  * Distribute under GPL.
7  */
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/netdevice.h>
13 #include <linux/ethtool.h>
14 #include <linux/mii.h>
15 #include <linux/if_ether.h>
16 #include <linux/etherdevice.h>
17 #include <linux/pci.h>
18 #include <linux/delay.h>
19 #include <linux/init.h>
20 #include <linux/version.h>
21
22 #include <asm/uaccess.h>
23 #include <asm/io.h>
24 #include <asm/irq.h>
25
26 #include "b44.h"
27
28 #define DRV_MODULE_NAME         "b44"
29 #define PFX DRV_MODULE_NAME     ": "
30 #define DRV_MODULE_VERSION      "0.93"
31 #define DRV_MODULE_RELDATE      "Mar, 2004"
32
33 #define B44_DEF_MSG_ENABLE        \
34         (NETIF_MSG_DRV          | \
35          NETIF_MSG_PROBE        | \
36          NETIF_MSG_LINK         | \
37          NETIF_MSG_TIMER        | \
38          NETIF_MSG_IFDOWN       | \
39          NETIF_MSG_IFUP         | \
40          NETIF_MSG_RX_ERR       | \
41          NETIF_MSG_TX_ERR)
42
43 /* length of time before we decide the hardware is borked,
44  * and dev->tx_timeout() should be called to fix the problem
45  */
46 #define B44_TX_TIMEOUT                  (5 * HZ)
47
48 /* hardware minimum and maximum for a single frame's data payload */
49 #define B44_MIN_MTU                     60
50 #define B44_MAX_MTU                     1500
51
52 #define B44_RX_RING_SIZE                512
53 #define B44_DEF_RX_RING_PENDING         200
54 #define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
55                                  B44_RX_RING_SIZE)
56 #define B44_TX_RING_SIZE                512
57 #define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
58 #define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
59                                  B44_TX_RING_SIZE)
60
61 #define TX_RING_GAP(BP) \
62         (B44_TX_RING_SIZE - (BP)->tx_pending)
63 #define TX_BUFFS_AVAIL(BP)                                              \
64         (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
65           (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
66           (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
67 #define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
68
69 #define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
70
71 /* minimum number of free TX descriptors required to wake up TX process */
72 #define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
73
74 static char version[] __devinitdata =
75         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
78 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
79 MODULE_LICENSE("GPL");
80 MODULE_PARM(b44_debug, "i");
81 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
82
83 static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
84
85 static struct pci_device_id b44_pci_tbl[] = {
86         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
87           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
88         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
89           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
90         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
91           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
92         { }     /* terminate list with empty entry */
93 };
94
95 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
96
97 static void b44_halt(struct b44 *);
98 static void b44_init_rings(struct b44 *);
99 static int b44_init_hw(struct b44 *);
100
101 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
102                         u32 bit, unsigned long timeout, const int clear)
103 {
104         unsigned long i;
105
106         for (i = 0; i < timeout; i++) {
107                 u32 val = br32(reg);
108
109                 if (clear && !(val & bit))
110                         break;
111                 if (!clear && (val & bit))
112                         break;
113                 udelay(10);
114         }
115         if (i == timeout) {
116                 printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
117                        "%lx to %s.\n",
118                        bp->dev->name,
119                        bit, reg,
120                        (clear ? "clear" : "set"));
121                 return -ENODEV;
122         }
123         return 0;
124 }
125
126 /* Sonics SiliconBackplane support routines.  ROFL, you should see all the
127  * buzz words used on this company's website :-)
128  *
129  * All of these routines must be invoked with bp->lock held and
130  * interrupts disabled.
131  */
132
133 #define SBID_SDRAM              0
134 #define SBID_PCI_MEM            1
135 #define SBID_PCI_CFG            2
136 #define SBID_PCI_DMA            3
137 #define SBID_SDRAM_SWAPPED      4
138 #define SBID_ENUM               5
139 #define SBID_REG_SDRAM          6
140 #define SBID_REG_ILINE20        7
141 #define SBID_REG_EMAC           8
142 #define SBID_REG_CODEC          9
143 #define SBID_REG_USB            10
144 #define SBID_REG_PCI            11
145 #define SBID_REG_MIPS           12
146 #define SBID_REG_EXTIF          13
147 #define SBID_EXTIF              14
148 #define SBID_EJTAG              15
149 #define SBID_MAX                16
150
151 static u32 ssb_get_addr(struct b44 *bp, u32 id, u32 instance)
152 {
153         switch (id) {
154         case SBID_PCI_DMA:
155                 return 0x40000000;
156         case SBID_ENUM:
157                 return 0x18000000;
158         case SBID_REG_EMAC:
159                 return 0x18000000;
160         case SBID_REG_CODEC:
161                 return 0x18001000;
162         case SBID_REG_PCI:
163                 return 0x18002000;
164         default:
165                 return 0;
166         };
167 }
168
/* Return the revision-code field of the currently mapped backplane
 * core, read from its SBIDHIGH identification register.
 */
static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}
173
/* Route backplane interrupts for @cores through the SSB PCI core and
 * enable PCI prefetch/burst.  Temporarily retargets the BAR0 window at
 * the PCI core's register space and restores it before returning.
 * Returns the PCI core's revision code.
 * Caller must hold bp->lock with interrupts disabled (see the section
 * comment above these routines).
 */
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	/* Point the BAR0 window at the PCI core so its registers become
	 * visible through our mapping. */
	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN,
			       ssb_get_addr(bp, SBID_REG_PCI, 0));
	pci_rev = ssb_get_core_rev(bp);

	/* Forward the requested cores' interrupts to PCI. */
	val = br32(B44_SBINTVEC);
	val |= cores;
	bw32(B44_SBINTVEC, val);

	val = br32(SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(SSB_PCI_TRANS_2, val);

	/* Restore the original BAR0 window. */
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}
195
/* Put the currently mapped core into reset: assert REJECT, wait for it
 * to latch and for any in-flight backplane transaction to drain, then
 * assert RESET (first with the clock force-gated, then without).
 * The br32() readbacks flush the posted writes before each delay.
 */
static void ssb_core_disable(struct b44 *bp)
{
	/* Already held in reset: nothing to do. */
	if (br32(B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(B44_SBTMSLOW);	/* flush posted write */
	udelay(1);
	bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(B44_SBTMSLOW);	/* flush posted write */
	udelay(1);
}
212
/* Fully reset the currently mapped core: disable it, re-enable it with
 * RESET asserted and the clock force-gated, clear any latched error
 * state, then release the force-gate and reset bits one step at a time.
 * Each br32() readback flushes the preceding posted write before the
 * settle delay.
 */
static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(B44_SBTMSHIGH, 0);

	/* Clear latched inband-error / timeout state. */
	val = br32(B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	/* Deassert RESET, still forcing the clock. */
	bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(B44_SBTMSLOW);
	udelay(1);

	/* Finally release the force-gated clock. */
	bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(B44_SBTMSLOW);
	udelay(1);
}
238
/* Return the unit number of this device's core.
 *
 * Only single-unit configurations are supported, so this is always 0.
 * (A previous "#if 0" draft decoded B44_SBADMATCH0 to derive the
 * core's base address, but it was dead code that even referenced an
 * undeclared variable; it has been removed.)
 */
static int ssb_core_unit(struct b44 *bp)
{
	return 0;
}
263
264 static int ssb_is_core_up(struct b44 *bp)
265 {
266         return ((br32(B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
267                 == SBTMSLOW_CLOCK);
268 }
269
/* Write one 6-byte MAC address (@data, network byte order) into the
 * receive-filter CAM at slot @index.  Bytes 2-5 go to the low data
 * register, bytes 0-1 plus the valid bit to the high one; then the
 * write is triggered and we busy-wait for the CAM to go idle.
 */
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID | 
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(B44_CAM_DATA_HI, val);
	bw32(B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);	
}
287
/* Mask all chip interrupts.  The write is posted; callers needing it
 * to have reached the device must flush (see b44_disable_ints).
 */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(B44_IMASK, 0);
}
292
/* Mask all chip interrupts and flush the posted write, so the disable
 * is guaranteed to have reached the device on return.
 */
static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(B44_IMASK);
}
300
/* Re-enable the interrupt set recorded in bp->imask (write not
 * explicitly flushed here).
 */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(B44_IMASK, bp->imask);
}
305
/* Read PHY register @reg over MDIO into *@val.
 * Acks any prior MII completion event, issues the read frame, then
 * waits for the MII interrupt status bit.  Returns 0 on success or
 * -ENODEV on timeout; note *val is written unconditionally and holds
 * garbage in the timeout case.
 */
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(B44_EMAC_ISTAT, EMAC_INT_MII);	/* ack previous MII event */
	bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
321
/* Write @val to PHY register @reg over MDIO.
 * Acks any prior MII completion event, issues the write frame, then
 * waits for completion.  Returns 0 on success, -ENODEV on timeout.
 */
static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(B44_EMAC_ISTAT, EMAC_INT_MII);	/* ack previous MII event */
	bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
333
334 static int b44_phy_reset(struct b44 *bp)
335 {
336         u32 val;
337         int err;
338
339         err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
340         if (err)
341                 return err;
342         udelay(100);
343         err = b44_readphy(bp, MII_BMCR, &val);
344         if (!err) {
345                 if (val & BMCR_RESET) {
346                         printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
347                                bp->dev->name);
348                         err = -ENODEV;
349                 }
350         }
351
352         return 0;
353 }
354
/* Program the MAC for the pause configuration in @pause_flags
 * (B44_FLAG_TX_PAUSE / B44_FLAG_RX_PAUSE) and record it in bp->flags.
 * Caller holds bp->lock.
 */
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	/* RX pause: honor received pause frames. */
	val = br32(B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(B44_RXCONFIG, val);

	/* TX pause: emit pause frames, with an RX FIFO high-water mark. */
	val = br32(B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(B44_MAC_FLOW, val);
}
377
378 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
379 {
380         u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE |
381                                       B44_FLAG_RX_PAUSE);
382
383         if (local & ADVERTISE_PAUSE_CAP) {
384                 if (local & ADVERTISE_PAUSE_ASYM) {
385                         if (remote & LPA_PAUSE_CAP)
386                                 pause_enab |= (B44_FLAG_TX_PAUSE |
387                                                B44_FLAG_RX_PAUSE);
388                         else if (remote & LPA_PAUSE_ASYM)
389                                 pause_enab |= B44_FLAG_RX_PAUSE;
390                 } else {
391                         if (remote & LPA_PAUSE_CAP)
392                                 pause_enab |= (B44_FLAG_TX_PAUSE |
393                                                B44_FLAG_RX_PAUSE);
394                 }
395         } else if (local & ADVERTISE_PAUSE_ASYM) {
396                 if ((remote & LPA_PAUSE_CAP) &&
397                     (remote & LPA_PAUSE_ASYM))
398                         pause_enab |= B44_FLAG_TX_PAUSE;
399         }
400
401         __b44_set_flow_ctrl(bp, pause_enab);
402 }
403
/* Configure the PHY's LED control registers and its link parameters.
 *
 * Without B44_FLAG_FORCE_LINK, advertise the modes enabled in
 * bp->flags (plus pause capabilities if B44_FLAG_PAUSE_AUTO) and
 * restart autonegotiation.  With a forced link, program speed/duplex
 * directly into BMCR and disable flow control entirely.
 * Returns 0, or the first failing MDIO error code.
 */
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	/* Adjust the ALED/TLED control registers (masks per b44.h). */
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		/* Autonegotiate: build the advertisement word from flags. */
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		/* Forced link: set speed/duplex directly, autoneg off. */
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
463
/* Fold the chip's MIB counter registers into bp->hw_stats.
 *
 * NOTE(review): this walks tx_good_octets / rx_good_octets as the
 * first of a run of consecutive u32 fields that must mirror the
 * register layout B44_TX_GOOD_O..B44_TX_PAUSE and
 * B44_RX_GOOD_O..B44_RX_NPAUSE exactly -- reordering the stats struct
 * in b44.h silently corrupts the accounting.  Values are accumulated,
 * which presumes the counters are clear-on-read -- confirm against the
 * chip documentation.
 */
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(reg);
	}
	val = &bp->hw_stats.rx_good_octets;
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(reg);
	}
}
478
479 static void b44_link_report(struct b44 *bp)
480 {
481         if (!netif_carrier_ok(bp->dev)) {
482                 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
483         } else {
484                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
485                        bp->dev->name,
486                        (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
487                        (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
488
489                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
490                        "%s for RX.\n",
491                        bp->dev->name,
492                        (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
493                        (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
494         }
495 }
496
/* Poll PHY status (from b44_timer, bp->lock held): mirror the PHY's
 * reported speed/duplex into bp->flags, detect link-state transitions
 * (programming the MAC duplex bit and re-resolving flow control on
 * link-up), and warn about remote-fault / jabber conditions.
 * A BMSR of 0xffff (absent/dead PHY) skips everything.
 */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		/* Track current speed/duplex from the aux ctrl register. */
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(B44_TX_CTRL);
			u32 local_adv, remote_adv;

			/* Mirror the duplex setting into the MAC. */
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(B44_TX_CTRL, val);

			/* Re-resolve pause from the negotiated abilities. */
			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
546
/* Periodic (1 Hz) housekeeping timer: check PHY/link state and fold
 * the hardware MIB counters into the soft stats, then re-arm.
 * Takes bp->lock with IRQs disabled, as the helpers require.
 */
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);
}
562
/* Reclaim completed TX descriptors (runs from b44_poll with bp->lock
 * held).  DMATX_STAT reports how far the DMA engine has advanced;
 * every slot from tx_cons up to there is done and can be unmapped and
 * its skb freed.  Wakes the queue once enough slots are free again.
 */
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		/* A completed slot without an skb means ring bookkeeping
		 * is corrupt. */
		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	/* NOTE(review): GPTIMER is cleared here -- presumably a TX
	 * watchdog armed elsewhere in the driver; confirm. */
	bw32(B44_GPTIMER, 0);
}
593
/* Works like this.  This chip writes a 'struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
/* Allocate and map a fresh RX skb into ring slot @dest_idx_unmasked
 * (masked to ring size).  If @src_idx >= 0, that slot's skb pointer is
 * cleared, since its buffer is being migrated to the new position.
 * Returns the mapped buffer size (RX_PKT_BUF_SZ) or -ENOMEM.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	/* NOTE(review): the pci_map_single() result is not checked for
	 * a mapping error (typical of this driver's era). */
	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
	skb_reserve(skb, bp->rx_offset);

	/* The chip deposits the rx_header at the start of the buffer,
	 * bp->rx_offset bytes below the DMA target; pre-clear it. */
	rh = (struct rx_header *)
		(skb->data - bp->rx_offset);
	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* last slot: end-of-table wrap */

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

	return RX_PKT_BUF_SZ;
}
645
/* Re-queue the buffer at ring slot @src_idx into @dest_idx_unmasked
 * without reallocating: move the skb pointer and DMA mapping, reset
 * the chip-written rx_header, copy the descriptor address, and fix up
 * the EOT bit for the destination slot.  Descriptor ctrl words are
 * manipulated in their little-endian on-ring form throughout.
 */
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	u32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;
	src_map->skb = NULL;

	/* NOTE(review): src_desc->addr is the little-endian bus address
	 * including bp->dma_offset; handing it straight to the sync call
	 * looks suspect (esp. on big-endian) -- confirm; the unmap
	 * address would be the conventional choice. */
	pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
				       RX_PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
}
681
/* Receive up to @budget packets (NAPI path, bp->lock NOT held).
 * For each completed slot, validate the chip-written rx_header; then
 * either hand a freshly allocated buffer to the ring and pass the old
 * skb up (large packets), or copy into a small new skb and recycle the
 * original buffer (packets <= RX_COPY_THRESHOLD).
 * Returns the number of packets delivered to the stack.
 */
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		/* rh->len is little-endian; cpu_to_le16() performs the
		 * same byteswap as the more accurately named
		 * le16_to_cpu(), so the result is correct. */
		len = cpu_to_le16(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			/* The completion can be visible before the
			 * header length lands in memory; poll briefly
			 * before giving up on the frame. */
			do {
				udelay(2);
				barrier();
				len = cpu_to_le16(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len+bp->rx_offset);
			skb_pull(skb,bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			memcpy(copy_skb->data, skb->data+bp->rx_offset, len);

			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;	/* no hw checksum offload */
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	/* Tell the chip how far we have consumed. */
	bw32(B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
771
/* NAPI poll callback.  Reclaims TX under bp->lock, receives up to the
 * smaller of *budget and dev->quota packets, and on a fatal error
 * status (ISTAT_ERRORS) performs a full chip reset.  Returns 0 (and
 * re-enables interrupts) when all work is done, 1 to remain on the
 * poll list.
 */
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev->priv;
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		/* Budget exhausted: more RX may be pending, poll again. */
		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		/* Fatal chip error: halt, rebuild rings, re-init. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}
820
/* Interrupt handler.  Latches the pending, unmasked events into
 * bp->istat, masks the chip's interrupts and schedules NAPI; the real
 * work then happens in b44_poll().
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = dev->priv;
	unsigned long flags;
	u32 istat, imask;
	int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	istat = br32(B44_ISTAT);
	imask = br32(B44_IMASK);

	/* ISTAT reports events regardless of IMASK, so events we are
	 * not interested in must be filtered out by hand.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;
		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

		/* Ack the handled events and flush the posted writes. */
		bw32(B44_ISTAT, istat);
		br32(B44_ISTAT);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
	return IRQ_RETVAL(handled);
}
858
/* dev->tx_timeout hook: the TX path stalled for B44_TX_TIMEOUT, so
 * reset the chip (halt, rebuild rings, re-init hardware), re-enable
 * interrupts and restart the queue.
 */
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = dev->priv;

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
878
/* hard_start_xmit: queue one skb on the TX ring (one descriptor per
 * packet; no NETIF_F_SG support).  Returns 0 on success, or 1 after
 * stopping the queue on the "ring full while queue awake" hard error.
 */
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

	entry = bp->tx_prod;
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	/* Whole packet in one descriptor; interrupt on completion. */
	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* end-of-table wrap */

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Descriptor must be visible in memory before the doorbell. */
	wmb();

	bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	/* Chip-bug workarounds: double-write the pointer, and/or read it
	 * back to defeat write reordering, depending on chip flags. */
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return 0;
}
932
933 static int b44_change_mtu(struct net_device *dev, int new_mtu)
934 {
935         struct b44 *bp = dev->priv;
936
937         if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
938                 return -EINVAL;
939
940         if (!netif_running(dev)) {
941                 /* We'll just catch it later when the
942                  * device is up'd.
943                  */
944                 dev->mtu = new_mtu;
945                 return 0;
946         }
947
948         spin_lock_irq(&bp->lock);
949         b44_halt(bp);
950         dev->mtu = new_mtu;
951         b44_init_rings(bp);
952         b44_init_hw(bp);
953         spin_unlock_irq(&bp->lock);
954
955         b44_enable_ints(bp);
956         
957         return 0;
958 }
959
960 /* Free up pending packets in all rx/tx rings.
961  *
962  * The chip has been shut down and the driver detached from
963  * the networking, so no interrupts or new tx packets will
964  * end up in the driver.  bp->lock is not held and we are not
965  * in an interrupt context and thus may sleep.
966  */
967 static void b44_free_rings(struct b44 *bp)
968 {
969         struct ring_info *rp;
970         int i;
971
972         for (i = 0; i < B44_RX_RING_SIZE; i++) {
973                 rp = &bp->rx_buffers[i];
974
975                 if (rp->skb == NULL)
976                         continue;
977                 pci_unmap_single(bp->pdev,
978                                  pci_unmap_addr(rp, mapping),
979                                  RX_PKT_BUF_SZ,
980                                  PCI_DMA_FROMDEVICE);
981                 dev_kfree_skb_any(rp->skb);
982                 rp->skb = NULL;
983         }
984
985         /* XXX needs changes once NETIF_F_SG is set... */
986         for (i = 0; i < B44_TX_RING_SIZE; i++) {
987                 rp = &bp->tx_buffers[i];
988
989                 if (rp->skb == NULL)
990                         continue;
991                 pci_unmap_single(bp->pdev,
992                                  pci_unmap_addr(rp, mapping),
993                                  rp->skb->len,
994                                  PCI_DMA_TODEVICE);
995                 dev_kfree_skb_any(rp->skb);
996                 rp->skb = NULL;
997         }
998 }
999
1000 /* Initialize tx/rx rings for packet processing.
1001  *
1002  * The chip has been shut down and the driver detached from
1003  * the networking, so no interrupts or new tx packets will
1004  * end up in the driver.  bp->lock is not held and we are not
1005  * in an interrupt context and thus may sleep.
1006  */
1007 static void b44_init_rings(struct b44 *bp)
1008 {
1009         int i;
1010
1011         b44_free_rings(bp);
1012
1013         memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1014         memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1015
1016         for (i = 0; i < bp->rx_pending; i++) {
1017                 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1018                         break;
1019         }
1020 }
1021
1022 /*
1023  * Must not be invoked with interrupt sources disabled and
1024  * the hardware shutdown down.
1025  */
1026 static void b44_free_consistent(struct b44 *bp)
1027 {
1028         if (bp->rx_buffers) {
1029                 kfree(bp->rx_buffers);
1030                 bp->rx_buffers = NULL;
1031         }
1032         if (bp->tx_buffers) {
1033                 kfree(bp->tx_buffers);
1034                 bp->tx_buffers = NULL;
1035         }
1036         if (bp->rx_ring) {
1037                 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1038                                     bp->rx_ring, bp->rx_ring_dma);
1039                 bp->rx_ring = NULL;
1040         }
1041         if (bp->tx_ring) {
1042                 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1043                                     bp->tx_ring, bp->tx_ring_dma);
1044                 bp->tx_ring = NULL;
1045         }
1046 }
1047
1048 /*
1049  * Must not be invoked with interrupt sources disabled and
1050  * the hardware shutdown down.  Can sleep.
1051  */
1052 static int b44_alloc_consistent(struct b44 *bp)
1053 {
1054         int size;
1055
1056         size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1057         bp->rx_buffers = kmalloc(size, GFP_KERNEL);
1058         if (!bp->rx_buffers)
1059                 goto out_err;
1060         memset(bp->rx_buffers, 0, size);
1061
1062         size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1063         bp->tx_buffers = kmalloc(size, GFP_KERNEL);
1064         if (!bp->tx_buffers)
1065                 goto out_err;
1066         memset(bp->tx_buffers, 0, size);
1067
1068         size = DMA_TABLE_BYTES;
1069         bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1070         if (!bp->rx_ring)
1071                 goto out_err;
1072
1073         bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1074         if (!bp->tx_ring)
1075                 goto out_err;
1076
1077         return 0;
1078
1079 out_err:
1080         b44_free_consistent(bp);
1081         return -ENOMEM;
1082 }
1083
1084 /* bp->lock is held. */
1085 static void b44_clear_stats(struct b44 *bp)
1086 {
1087         unsigned long reg;
1088
1089         bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1090         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1091                 br32(reg);
1092         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1093                 br32(reg);
1094 }
1095
/* bp->lock is held.  Bring the ethernet core to a known state: if it
 * is currently up, cleanly stop the MAC and both DMA engines first;
 * then reset the SSB core, clear the MIB counters, and configure MDIO
 * so the PHY is reachable.
 */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(B44_RCV_LAZY, 0);
		bw32(B44_ENET_CTRL, ENET_CTRL_DISABLE);
		/* Wait for the MAC to acknowledge the disable. */
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			/* RX engine still active: wait until it reports idle. */
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		/* Core is down: route this unit's SSB interrupt to PCI
		 * before resetting the core.
		 */
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(B44_MDIO_CTRL);	/* flush the posted write */

	if (!(br32(B44_DEVCTRL) & DEVCTRL_IPP)) {
		/* No internal PHY (per DEVCTRL_IPP): select external PHY. */
		bw32(B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			/* Release the internal PHY from reset and give
			 * it time to come up.
			 */
			bw32(B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
1141
/* bp->lock is held.  Fully stop the chip: mask all interrupt sources,
 * then reset the core so the MAC and DMA engines are quiesced.
 */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}
1148
/* bp->lock is held.  Program the station MAC address into CAM entry 0
 * and enable the CAM -- unless the interface is promiscuous, in which
 * case the CAM is left disabled (everything is received anyway).
 */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(B44_CAM_CTRL);
		bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1161
1162 static int b44_set_mac_addr(struct net_device *dev, void *p)
1163 {
1164         struct b44 *bp = dev->priv;
1165         struct sockaddr *addr = p;
1166
1167         if (netif_running(dev))
1168                 return -EBUSY;
1169
1170         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1171
1172         spin_lock_irq(&bp->lock);
1173         __b44_set_mac_addr(bp);
1174         spin_unlock_irq(&bp->lock);
1175
1176         return 0;
1177 }
1178
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static int b44_init_hw(struct b44 *bp)
{
	u32 val;

	b44_chip_reset(bp);
	b44_phy_reset(bp);
	b44_setup_phy(bp);
	/* Have the MAC append the ethernet CRC on transmit. */
	val = br32(B44_MAC_CTRL);
	bw32(B44_MAC_CTRL, val | MAC_CTRL_CRC32_ENAB);
	bw32(B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(B44_TX_WMARK, 56); /* XXX magic */
	/* Point both DMA engines at their rings (offset into the
	 * chip's DMA address window) and enable them.
	 */
	bw32(B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
	bw32(B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
	bw32(B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	bw32(B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

	/* Hand the RX buffers posted by b44_init_rings() to the chip. */
	bw32(B44_DMARX_PTR, bp->rx_pending);
	bp->rx_prod = bp->rx_pending;	/* producer index matches what was posted */

	bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

	/* Finally, enable the MAC. */
	val = br32(B44_ENET_CTRL);
	bw32(B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

	return 0;
}
1218
/* net_device->open: allocate rings and the (shared) IRQ, program the
 * hardware, start the periodic driver timer, and enable interrupts.
 * Returns 0 or a negative errno with everything rolled back.
 */
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		return err;

	err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
	if (err)
		goto err_out_free;

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	err = b44_init_hw(bp);
	if (err)
		goto err_out_noinit;
	bp->flags |= B44_FLAG_INIT_COMPLETE;

	spin_unlock_irq(&bp->lock);

	/* Arm the periodic driver timer (first fires after ~1 second). */
	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);

	return 0;

err_out_noinit:
	/* Hardware init failed: quiesce the chip and drop ring skbs
	 * while still holding the lock, then release IRQ and memory.
	 */
	b44_halt(bp);
	b44_free_rings(bp);
	spin_unlock_irq(&bp->lock);
	free_irq(dev->irq, dev);
err_out_free:
	b44_free_consistent(bp);
	return err;
}
1261
#if 0
/* Debug-only helper, compiled out.  Dumps the PCI status word.
 * NOTE(review): val32..val32_5 are unused leftovers of removed
 * register-dump code.
 */
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x] \n", val16);

}
#endif
1273
/* net_device->stop: quiesce the chip and release every resource
 * acquired by b44_open().
 */
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = dev->priv;

	netif_stop_queue(dev);

	/* Kill the periodic timer before tearing anything down. */
	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

#if 0
	b44_dump_state(bp);
#endif
	b44_halt(bp);
	b44_free_rings(bp);
	bp->flags &= ~B44_FLAG_INIT_COMPLETE;
	netif_carrier_off(bp->dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	b44_free_consistent(bp);

	return 0;
}
1300
1301 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1302 {
1303         struct b44 *bp = dev->priv;
1304         struct net_device_stats *nstat = &bp->stats;
1305         struct b44_hw_stats *hwstat = &bp->hw_stats;
1306
1307         /* Convert HW stats into netdevice stats. */
1308         nstat->rx_packets = hwstat->rx_pkts;
1309         nstat->tx_packets = hwstat->tx_pkts;
1310         nstat->rx_bytes   = hwstat->rx_octets;
1311         nstat->tx_bytes   = hwstat->tx_octets;
1312         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1313                              hwstat->tx_oversize_pkts +
1314                              hwstat->tx_underruns +
1315                              hwstat->tx_excessive_cols +
1316                              hwstat->tx_late_cols);
1317         nstat->multicast  = hwstat->tx_multicast_pkts;
1318         nstat->collisions = hwstat->tx_total_cols;
1319
1320         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1321                                    hwstat->rx_undersize);
1322         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1323         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1324         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1325         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1326                                    hwstat->rx_oversize_pkts +
1327                                    hwstat->rx_missed_pkts +
1328                                    hwstat->rx_crc_align_errs +
1329                                    hwstat->rx_undersize +
1330                                    hwstat->rx_crc_errs +
1331                                    hwstat->rx_align_errs +
1332                                    hwstat->rx_symbol_errs);
1333
1334         nstat->tx_aborted_errors = hwstat->tx_underruns;
1335         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1336
1337         return nstat;
1338 }
1339
1340 static void __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1341 {
1342         struct dev_mc_list *mclist;
1343         int i, num_ents;
1344
1345         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1346         mclist = dev->mc_list;
1347         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1348                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1349         }
1350 }
1351
/* bp->lock is held.  Program the receive filter from dev->flags:
 * promiscuous mode bypasses the CAM entirely; otherwise install the
 * station address plus either the all-multicast bit or the explicit
 * multicast list, then enable the CAM.
 */
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	u32 val;

	val = br32(B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(B44_RXCONFIG, val);
	} else {
		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
		else
			__b44_load_mcast(bp, dev);

		bw32(B44_RXCONFIG, val);
		val = br32(B44_CAM_CTRL);
		bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1375
/* net_device->set_multicast_list callback: takes the device lock and
 * defers to __b44_set_rx_mode() for the actual filter programming.
 */
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = dev->priv;

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
1384
1385 static int b44_ethtool_ioctl (struct net_device *dev, void __user *useraddr)
1386 {
1387         struct b44 *bp = dev->priv;
1388         struct pci_dev *pci_dev = bp->pdev;
1389         u32 ethcmd;
1390
1391         if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
1392                 return -EFAULT;
1393
1394         switch (ethcmd) {
1395         case ETHTOOL_GDRVINFO:{
1396                 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1397                 strcpy (info.driver, DRV_MODULE_NAME);
1398                 strcpy (info.version, DRV_MODULE_VERSION);
1399                 memset(&info.fw_version, 0, sizeof(info.fw_version));
1400                 strcpy (info.bus_info, pci_name(pci_dev));
1401                 info.eedump_len = 0;
1402                 info.regdump_len = 0;
1403                 if (copy_to_user (useraddr, &info, sizeof (info)))
1404                         return -EFAULT;
1405                 return 0;
1406         }
1407
1408         case ETHTOOL_GSET: {
1409                 struct ethtool_cmd cmd = { ETHTOOL_GSET };
1410
1411                 if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
1412                         return -EAGAIN;
1413                 cmd.supported = (SUPPORTED_Autoneg);
1414                 cmd.supported |= (SUPPORTED_100baseT_Half |
1415                                   SUPPORTED_100baseT_Full |
1416                                   SUPPORTED_10baseT_Half |
1417                                   SUPPORTED_10baseT_Full |
1418                                   SUPPORTED_MII);
1419
1420                 cmd.advertising = 0;
1421                 if (bp->flags & B44_FLAG_ADV_10HALF)
1422                         cmd.advertising |= ADVERTISE_10HALF;
1423                 if (bp->flags & B44_FLAG_ADV_10FULL)
1424                         cmd.advertising |= ADVERTISE_10FULL;
1425                 if (bp->flags & B44_FLAG_ADV_100HALF)
1426                         cmd.advertising |= ADVERTISE_100HALF;
1427                 if (bp->flags & B44_FLAG_ADV_100FULL)
1428                         cmd.advertising |= ADVERTISE_100FULL;
1429                 cmd.advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1430                 cmd.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1431                         SPEED_100 : SPEED_10;
1432                 cmd.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1433                         DUPLEX_FULL : DUPLEX_HALF;
1434                 cmd.port = 0;
1435                 cmd.phy_address = bp->phy_addr;
1436                 cmd.transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1437                         XCVR_INTERNAL : XCVR_EXTERNAL;
1438                 cmd.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1439                         AUTONEG_DISABLE : AUTONEG_ENABLE;
1440                 cmd.maxtxpkt = 0;
1441                 cmd.maxrxpkt = 0;
1442                 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
1443                         return -EFAULT;
1444                 return 0;
1445         }
1446         case ETHTOOL_SSET: {
1447                 struct ethtool_cmd cmd;
1448
1449                 if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
1450                         return -EAGAIN;
1451
1452                 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1453                         return -EFAULT;
1454
1455                 /* We do not support gigabit. */
1456                 if (cmd.autoneg == AUTONEG_ENABLE) {
1457                         if (cmd.advertising &
1458                             (ADVERTISED_1000baseT_Half |
1459                              ADVERTISED_1000baseT_Full))
1460                                 return -EINVAL;
1461                 } else if ((cmd.speed != SPEED_100 &&
1462                             cmd.speed != SPEED_10) ||
1463                            (cmd.duplex != DUPLEX_HALF &&
1464                             cmd.duplex != DUPLEX_FULL)) {
1465                                 return -EINVAL;
1466                 }
1467
1468                 spin_lock_irq(&bp->lock);
1469
1470                 if (cmd.autoneg == AUTONEG_ENABLE) {
1471                         bp->flags &= ~B44_FLAG_FORCE_LINK;
1472                         bp->flags &= ~(B44_FLAG_ADV_10HALF |
1473                                        B44_FLAG_ADV_10FULL |
1474                                        B44_FLAG_ADV_100HALF |
1475                                        B44_FLAG_ADV_100FULL);
1476                         if (cmd.advertising & ADVERTISE_10HALF)
1477                                 bp->flags |= B44_FLAG_ADV_10HALF;
1478                         if (cmd.advertising & ADVERTISE_10FULL)
1479                                 bp->flags |= B44_FLAG_ADV_10FULL;
1480                         if (cmd.advertising & ADVERTISE_100HALF)
1481                                 bp->flags |= B44_FLAG_ADV_100HALF;
1482                         if (cmd.advertising & ADVERTISE_100FULL)
1483                                 bp->flags |= B44_FLAG_ADV_100FULL;
1484                 } else {
1485                         bp->flags |= B44_FLAG_FORCE_LINK;
1486                         if (cmd.speed == SPEED_100)
1487                                 bp->flags |= B44_FLAG_100_BASE_T;
1488                         if (cmd.duplex == DUPLEX_FULL)
1489                                 bp->flags |= B44_FLAG_FULL_DUPLEX;
1490                 }
1491
1492                 b44_setup_phy(bp);
1493
1494                 spin_unlock_irq(&bp->lock);
1495
1496                 return 0;
1497         }
1498
1499         case ETHTOOL_GMSGLVL: {
1500                 struct ethtool_value edata = { ETHTOOL_GMSGLVL };
1501                 edata.data = bp->msg_enable;
1502                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1503                         return -EFAULT;
1504                 return 0;
1505         }
1506         case ETHTOOL_SMSGLVL: {
1507                 struct ethtool_value edata;
1508                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1509                         return -EFAULT;
1510                 bp->msg_enable = edata.data;
1511                 return 0;
1512         }
1513         case ETHTOOL_NWAY_RST: {
1514                 u32 bmcr;
1515                 int r;
1516
1517                 spin_lock_irq(&bp->lock);
1518                 b44_readphy(bp, MII_BMCR, &bmcr);
1519                 b44_readphy(bp, MII_BMCR, &bmcr);
1520                 r = -EINVAL;
1521                 if (bmcr & BMCR_ANENABLE) {
1522                         b44_writephy(bp, MII_BMCR,
1523                                      bmcr | BMCR_ANRESTART);
1524                         r = 0;
1525                 }
1526                 spin_unlock_irq(&bp->lock);
1527
1528                 return r;
1529         }
1530         case ETHTOOL_GLINK: {
1531                 struct ethtool_value edata = { ETHTOOL_GLINK };
1532                 edata.data = netif_carrier_ok(bp->dev) ? 1 : 0;
1533                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1534                         return -EFAULT;
1535                 return 0;
1536         }
1537         case ETHTOOL_GRINGPARAM: {
1538                 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
1539
1540                 ering.rx_max_pending = B44_RX_RING_SIZE - 1;
1541                 ering.rx_pending = bp->rx_pending;
1542
1543                 /* XXX ethtool lacks a tx_max_pending, oops... */
1544
1545                 if (copy_to_user(useraddr, &ering, sizeof(ering)))
1546                         return -EFAULT;
1547                 return 0;
1548         }
1549         case ETHTOOL_SRINGPARAM: {
1550                 struct ethtool_ringparam ering;
1551
1552                 if (copy_from_user(&ering, useraddr, sizeof(ering)))
1553                         return -EFAULT;
1554
1555                 if ((ering.rx_pending > B44_RX_RING_SIZE - 1) ||
1556                     (ering.rx_mini_pending != 0) ||
1557                     (ering.rx_jumbo_pending != 0) ||
1558                     (ering.tx_pending > B44_TX_RING_SIZE - 1))
1559                         return -EINVAL;
1560
1561                 spin_lock_irq(&bp->lock);
1562
1563                 bp->rx_pending = ering.rx_pending;
1564                 bp->tx_pending = ering.tx_pending;
1565
1566                 b44_halt(bp);
1567                 b44_init_rings(bp);
1568                 b44_init_hw(bp);
1569                 netif_wake_queue(bp->dev);
1570                 spin_unlock_irq(&bp->lock);
1571
1572                 b44_enable_ints(bp);
1573                 
1574                 return 0;
1575         }
1576         case ETHTOOL_GPAUSEPARAM: {
1577                 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
1578
1579                 epause.autoneg =
1580                         (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1581                 epause.rx_pause =
1582                         (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1583                 epause.tx_pause =
1584                         (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1585                 if (copy_to_user(useraddr, &epause, sizeof(epause)))
1586                         return -EFAULT;
1587                 return 0;
1588         }
1589         case ETHTOOL_SPAUSEPARAM: {
1590                 struct ethtool_pauseparam epause;
1591
1592                 if (copy_from_user(&epause, useraddr, sizeof(epause)))
1593                         return -EFAULT;
1594
1595                 spin_lock_irq(&bp->lock);
1596                 if (epause.autoneg)
1597                         bp->flags |= B44_FLAG_PAUSE_AUTO;
1598                 else
1599                         bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1600                 if (epause.rx_pause)
1601                         bp->flags |= B44_FLAG_RX_PAUSE;
1602                 else
1603                         bp->flags &= ~B44_FLAG_RX_PAUSE;
1604                 if (epause.tx_pause)
1605                         bp->flags |= B44_FLAG_TX_PAUSE;
1606                 else
1607                         bp->flags &= ~B44_FLAG_TX_PAUSE;
1608                 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1609                         b44_halt(bp);
1610                         b44_init_rings(bp);
1611                         b44_init_hw(bp);
1612                 } else {
1613                         __b44_set_flow_ctrl(bp, bp->flags);
1614                 }
1615                 spin_unlock_irq(&bp->lock);
1616
1617                 b44_enable_ints(bp);
1618                 
1619                 return 0;
1620         }
1621         };
1622
1623         return -EOPNOTSUPP;
1624 }
1625
/* net_device->do_ioctl: ETHTOOL dispatch plus raw MII register access.
 * NOTE(review): `data' is derived from &ifr->ifr_data, which at this
 * point is a kernel-space copy of the user's ifreq (dev_ioctl copied
 * it in), so the direct dereferences below are intentional despite
 * the __user annotation on the cast -- confirm against net/core
 * ioctl handling for this kernel version.
 */
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data __user *data = (struct mii_ioctl_data __user *)&ifr->ifr_data;
	struct b44 *bp = dev->priv;
	int err;

	switch (cmd) {
	case SIOCETHTOOL:
		return b44_ethtool_ioctl(dev, (void __user*) ifr->ifr_data);

	case SIOCGMIIPHY:
		/* Report our PHY address, then fall into the register
		 * read below.
		 */
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_irq(&bp->lock);
		err = b44_readphy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_irq(&bp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers requires admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_irq(&bp->lock);
		err = b44_writephy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&bp->lock);

		return err;

	default:
		/* do nothing */
		break;
	};
	return -EOPNOTSUPP;
}
1668
1669 /* Read 128-bytes of EEPROM. */
1670 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1671 {
1672         long i;
1673         u16 *ptr = (u16 *) data;
1674
1675         for (i = 0; i < 128; i += 2)
1676                 ptr[i / 2] = readw(bp->regs + 4096 + i);
1677
1678         return 0;
1679 }
1680
/* Probe-time setup: pull the MAC address and PHY address out of the
 * on-board EEPROM image and record chip invariants (interrupt mask,
 * SSB core unit, DMA address offset).
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	/* The EEPROM stores the MAC address with the two bytes of
	 * each 16-bit word swapped, hence the crossed indices.
	 */
	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	bp->phy_addr = eeprom[90] & 0x1f;
	/* NOTE(review): eeprom[90] is a u8, so (eeprom[90] >> 14) is
	 * always 0 -- this looks like it was meant to shift a 16-bit
	 * EEPROM word.  Confirm against the SPROM layout.
	 */
	bp->mdc_port = (eeprom[90] >> 14) & 0x1;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = ssb_get_addr(bp, SBID_PCI_DMA, 0);

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	 */
out:
	return err;
}
1716
/* Probe routine: called by the PCI core once per matching Broadcom 4400
 * PCI function.
 *
 * Enables the PCI device, maps its register BAR, allocates the
 * net_device plus the struct b44 private area, reads the chip
 * invariants, and registers the network interface.
 *
 * Returns 0 on success or a negative errno; on failure, everything
 * acquired so far is released in reverse order through the chained
 * error labels at the bottom.
 */
static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;	/* banner printed only once */
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* The chip's registers live in BAR 0 and must be memory-mapped. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Restrict DMA to 32-bit addresses. */
	err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	/* Allocate the net_device with struct b44 appended as dev->priv. */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev,&pdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = dev->priv;
	bp->pdev = pdev;
	bp->dev = dev;
	/* A non-negative b44_debug module parameter selects the lowest
	 * (b44_debug) message classes; otherwise use the built-in default.
	 */
	if (b44_debug >= 0)
		bp->msg_enable = (1 << b44_debug) - 1;
	else
		bp->msg_enable = B44_DEF_MSG_ENABLE;

	spin_lock_init(&bp->lock);

	bp->regs = (unsigned long) ioremap(b44reg_base, b44reg_len);
	if (bp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	/* Wire up the netdevice entry points. */
	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;		/* NAPI poll budget */
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;

	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Snapshot PCI config space so b44_resume() can restore it. */
	pci_save_state(bp->pdev, bp->pci_cfg_state);

	/* Announce the interface and its MAC address. */
	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

	/* Error unwind: each label releases what was acquired before it. */
err_out_iounmap:
	iounmap((void *) bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1856
1857 static void __devexit b44_remove_one(struct pci_dev *pdev)
1858 {
1859         struct net_device *dev = pci_get_drvdata(pdev);
1860
1861         if (dev) {
1862                 unregister_netdev(dev);
1863                 iounmap((void *) ((struct b44 *)(dev->priv))->regs);
1864                 free_netdev(dev);
1865                 pci_release_regions(pdev);
1866                 pci_disable_device(pdev);
1867                 pci_set_drvdata(pdev, NULL);
1868         }
1869 }
1870
1871 static int b44_suspend(struct pci_dev *pdev, u32 state)
1872 {
1873         struct net_device *dev = pci_get_drvdata(pdev);
1874         struct b44 *bp = dev->priv;
1875
1876         if (!netif_running(dev))
1877                  return 0;
1878
1879         del_timer_sync(&bp->timer);
1880
1881         spin_lock_irq(&bp->lock); 
1882
1883         b44_halt(bp);
1884         netif_carrier_off(bp->dev); 
1885         netif_device_detach(bp->dev);
1886         b44_free_rings(bp);
1887
1888         spin_unlock_irq(&bp->lock);
1889         return 0;
1890 }
1891
1892 static int b44_resume(struct pci_dev *pdev)
1893 {
1894         struct net_device *dev = pci_get_drvdata(pdev);
1895         struct b44 *bp = dev->priv;
1896
1897         if (!netif_running(dev))
1898                 return 0;
1899
1900         pci_restore_state(pdev, bp->pci_cfg_state);
1901
1902         spin_lock_irq(&bp->lock);
1903
1904         b44_init_rings(bp);
1905         b44_init_hw(bp);
1906         netif_device_attach(bp->dev);
1907         spin_unlock_irq(&bp->lock);
1908
1909         bp->timer.expires = jiffies + HZ;
1910         add_timer(&bp->timer);
1911
1912         b44_enable_ints(bp);
1913         return 0;
1914 }
1915
/* PCI driver descriptor tying the b44 entry points to the device ID
 * table; registered with the PCI core from b44_init().
 */
static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
1924
1925 static int __init b44_init(void)
1926 {
1927         return pci_module_init(&b44_driver);
1928 }
1929
/* Module exit point: unregister the driver from the PCI core. */
static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}
1934
/* Register the module's load/unload handlers with the kernel. */
module_init(b44_init);
module_exit(b44_cleanup);
1937