/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/version.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "0.94"
#define DRV_MODULE_RELDATE      "May 4, 2004"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
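
/* Worked example of the accounting above, assuming the defaults
 * (ring size 512, tx_pending 511, so TX_RING_GAP == 1):
 *
 *   tx_cons = 5,  tx_prod = 10  ->  5 + 511 - 10 == 506 slots free
 *   tx_cons = 10, tx_prod = 5   ->  10 - 5 - 1   == 4 slots free
 *                                   (tx_prod has wrapped around)
 *
 * Either way, at most tx_pending descriptors are ever outstanding.
 */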

#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
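
/* Sizing note: 1536 comfortably holds a maximum-size Ethernet frame
 * (1518 bytes including FCS), and rx_offset reserves room in front of
 * the packet for the chip-written rx_header (see b44_alloc_rx_skb).
 * The extra 64 bytes are presumably slack against overruns; the
 * driver does not document their exact purpose.
 */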

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(b44_debug, "i");
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *);

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SBID_SDRAM              0
#define SBID_PCI_MEM            1
#define SBID_PCI_CFG            2
#define SBID_PCI_DMA            3
#define SBID_SDRAM_SWAPPED      4
#define SBID_ENUM               5
#define SBID_REG_SDRAM          6
#define SBID_REG_ILINE20        7
#define SBID_REG_EMAC           8
#define SBID_REG_CODEC          9
#define SBID_REG_USB            10
#define SBID_REG_PCI            11
#define SBID_REG_MIPS           12
#define SBID_REG_EXTIF          13
#define SBID_EXTIF              14
#define SBID_EJTAG              15
#define SBID_MAX                16
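
/* How the cores are reached (a note inferred from ssb_pci_setup()
 * below): ssb_get_addr() returns the fixed backplane base address of
 * a core, and the SSB_BAR0_WIN PCI config register selects which of
 * those addresses the chip's BAR0 aperture currently maps.  The setup
 * code temporarily points the window at the PCI core's registers,
 * programs them, then restores the original window.
 */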

static u32 ssb_get_addr(struct b44 *bp, u32 id, u32 instance)
{
        switch (id) {
        case SBID_PCI_DMA:
                return 0x40000000;
        case SBID_ENUM:
                return 0x18000000;
        case SBID_REG_EMAC:
                return 0x18000000;
        case SBID_REG_CODEC:
                return 0x18001000;
        case SBID_REG_PCI:
                return 0x18002000;
        default:
                return 0;
        }
}

static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN,
                               ssb_get_addr(bp, SBID_REG_PCI, 0));
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 base, type;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE |
                                      B44_FLAG_RX_PAUSE);

        if (local & ADVERTISE_PAUSE_CAP) {
                if (local & ADVERTISE_PAUSE_ASYM) {
                        if (remote & LPA_PAUSE_CAP)
                                pause_enab |= (B44_FLAG_TX_PAUSE |
                                               B44_FLAG_RX_PAUSE);
                        else if (remote & LPA_PAUSE_ASYM)
                                pause_enab |= B44_FLAG_RX_PAUSE;
                } else {
                        if (remote & LPA_PAUSE_CAP)
                                pause_enab |= (B44_FLAG_TX_PAUSE |
                                               B44_FLAG_RX_PAUSE);
                }
        } else if (local & ADVERTISE_PAUSE_ASYM) {
                if ((remote & LPA_PAUSE_CAP) &&
                    (remote & LPA_PAUSE_ASYM))
                        pause_enab |= B44_FLAG_TX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
        val = &bp->hw_stats.rx_good_octets;
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
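/* Sketch of the resulting buffer layout (rx_offset is the 30 bytes
 * described above):
 *
 *   mapping                        mapping + rx_offset
 *   |                              |
 *   [ struct rx_header | pad ... ] [ packet data ............ ]
 *     ^ chip writes the header       ^ chip DMAs the frame here;
 *       here                           skb->data also points here
 *                                      after skb_reserve()
 */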
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = dev_alloc_skb(RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        skb->dev = bp->dev;
        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
        skb_reserve(skb, bp->rx_offset);

        rh = (struct rx_header *)
                (skb->data - bp->rx_offset);
        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        u32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;
        src_map->skb = NULL;

        pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + bp->rx_offset);
                        skb_pull(skb, bp->rx_offset);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

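/* ->poll() callback for the 2.6-era NAPI interface: consume up to
 * min(*budget, netdev->quota) RX packets, decrement both counters by
 * the work done, and return 0 (after netif_rx_complete() and
 * re-enabling interrupts) once the ring is drained, or 1 to stay on
 * the poll list.
 */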
static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                spin_lock_irq(&bp->lock);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp);
                netif_wake_queue(bp->dev);
                spin_unlock_irq(&bp->lock);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}

static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        unsigned long flags;
        u32 istat, imask;
        int handled = 0;

        spin_lock_irqsave(&bp->lock, flags);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The ISTAT register apparently reports status bits even when
         * they are masked off in IMASK, so we have to mask them out
         * by hand here.
         */
        istat &= imask;
        if (istat) {
                handled = 1;
                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                spin_unlock_irq(&bp->lock);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                return 1;
        }

        entry = bp->tx_prod;
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        spin_unlock_irq(&bp->lock);

        dev->trans_start = jiffies;

        return 0;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        if (bp->rx_buffers) {
                kfree(bp->rx_buffers);
                bp->rx_buffers = NULL;
        }
        if (bp->tx_buffers) {
                kfree(bp->tx_buffers);
                bp->tx_buffers = NULL;
        }
        if (bp->rx_ring) {
                pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                    bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
        }
        if (bp->tx_ring) {
                pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                    bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
        }
}

/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kmalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;
        memset(bp->rx_buffers, 0, size);

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kmalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;
        memset(bp->tx_buffers, 0, size);

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring)
                goto out_err;

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring)
                goto out_err;

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp)
{
        u32 val;

        b44_chip_reset(bp);
        b44_phy_reset(bp);
        b44_setup_phy(bp);

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
        bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
        bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                              (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

        bw32(bp, B44_DMARX_PTR, bp->rx_pending);
        bp->rx_prod = bp->rx_pending;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                return err;

        err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
        if (err)
                goto err_out_free;

        spin_lock_irq(&bp->lock);

        b44_init_rings(bp);
        b44_init_hw(bp);
        bp->flags |= B44_FLAG_INIT_COMPLETE;

        spin_unlock_irq(&bp->lock);

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);

        return 0;

err_out_free:
        b44_free_consistent(bp);
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);

}
#endif

static int b44_close(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        netif_stop_queue(dev);

        del_timer_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

#if 0
        b44_dump_state(bp);
#endif
        b44_halt(bp);
        b44_free_rings(bp);
        bp->flags &= ~B44_FLAG_INIT_COMPLETE;
        netif_carrier_off(bp->dev);

        spin_unlock_irq(&bp->lock);

        free_irq(dev->irq, dev);

        b44_free_consistent(bp);

        return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct net_device_stats *nstat = &bp->stats;
        struct b44_hw_stats *hwstat = &bp->hw_stats;

        /* Convert HW stats into netdevice stats. */
        nstat->rx_packets = hwstat->rx_pkts;
        nstat->tx_packets = hwstat->tx_pkts;
        nstat->rx_bytes   = hwstat->rx_octets;
        nstat->tx_bytes   = hwstat->tx_octets;
        nstat->tx_errors  = (hwstat->tx_jabber_pkts +
                             hwstat->tx_oversize_pkts +
                             hwstat->tx_underruns +
                             hwstat->tx_excessive_cols +
                             hwstat->tx_late_cols);
        nstat->multicast  = hwstat->rx_multicast_pkts;
1344         nstat->collisions = hwstat->tx_total_cols;
1345
1346         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1347                                    hwstat->rx_undersize);
1348         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1349         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1350         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1351         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1352                                    hwstat->rx_oversize_pkts +
1353                                    hwstat->rx_missed_pkts +
1354                                    hwstat->rx_crc_align_errs +
1355                                    hwstat->rx_undersize +
1356                                    hwstat->rx_crc_errs +
1357                                    hwstat->rx_align_errs +
1358                                    hwstat->rx_symbol_errs);
1359
1360         nstat->tx_aborted_errors = hwstat->tx_underruns;
1361         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1362
1363         return nstat;
1364 }
1365
1366 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1367 {
1368         struct dev_mc_list *mclist;
1369         int i, num_ents;
1370
1371         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1372         mclist = dev->mc_list;
1373         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1374                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1375         }
1376         return i+1;
1377 }
1378
1379 static void __b44_set_rx_mode(struct net_device *dev)
1380 {
1381         struct b44 *bp = netdev_priv(dev);
1382         u32 val;
1383         int i=0;
1384         unsigned char zero[6] = {0,0,0,0,0,0};
1385
1386         val = br32(bp, B44_RXCONFIG);
1387         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1388         if (dev->flags & IFF_PROMISC) {
1389                 val |= RXCONFIG_PROMISC;
1390                 bw32(bp, B44_RXCONFIG, val);
1391         } else {
1392                 __b44_set_mac_addr(bp);
1393
1394                 if (dev->flags & IFF_ALLMULTI)
1395                         val |= RXCONFIG_ALLMULTI;
1396                 else
1397                         i=__b44_load_mcast(bp, dev);
1398                 
1399                 for(;i<64;i++) {
1400                         __b44_cam_write(bp, zero, i);                   
1401                 }
1402                 bw32(bp, B44_RXCONFIG, val);
1403                 val = br32(bp, B44_CAM_CTRL);
1404                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1405         }
1406 }
1407
1408 static void b44_set_rx_mode(struct net_device *dev)
1409 {
1410         struct b44 *bp = netdev_priv(dev);
1411
1412         spin_lock_irq(&bp->lock);
1413         __b44_set_rx_mode(dev);
1414         spin_unlock_irq(&bp->lock);
1415 }
1416
1417 static u32 b44_get_msglevel(struct net_device *dev)
1418 {
1419         struct b44 *bp = netdev_priv(dev);
1420         return bp->msg_enable;
1421 }
1422
1423 static void b44_set_msglevel(struct net_device *dev, u32 value)
1424 {
1425         struct b44 *bp = netdev_priv(dev);
1426         bp->msg_enable = value;
1427 }
1428
1429 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1430 {
1431         struct b44 *bp = netdev_priv(dev);
1432         struct pci_dev *pci_dev = bp->pdev;
1433
1434         strcpy (info->driver, DRV_MODULE_NAME);
1435         strcpy (info->version, DRV_MODULE_VERSION);
1436         strcpy (info->bus_info, pci_name(pci_dev));
1437 }
1438
1439 static int b44_nway_reset(struct net_device *dev)
1440 {
1441         struct b44 *bp = netdev_priv(dev);
1442         u32 bmcr;
1443         int r;
1444
1445         spin_lock_irq(&bp->lock);
1446         b44_readphy(bp, MII_BMCR, &bmcr);
1447         b44_readphy(bp, MII_BMCR, &bmcr);
1448         r = -EINVAL;
1449         if (bmcr & BMCR_ANENABLE) {
1450                 b44_writephy(bp, MII_BMCR,
1451                              bmcr | BMCR_ANRESTART);
1452                 r = 0;
1453         }
1454         spin_unlock_irq(&bp->lock);
1455
1456         return r;
1457 }
1458
1459 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1460 {
1461         struct b44 *bp = netdev_priv(dev);
1462
1463         if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
1464                 return -EAGAIN;
1465         cmd->supported = (SUPPORTED_Autoneg);
1466         cmd->supported |= (SUPPORTED_100baseT_Half |
1467                           SUPPORTED_100baseT_Full |
1468                           SUPPORTED_10baseT_Half |
1469                           SUPPORTED_10baseT_Full |
1470                           SUPPORTED_MII);
1471
1472         cmd->advertising = 0;
1473         if (bp->flags & B44_FLAG_ADV_10HALF)
1474                 cmd->advertising |= ADVERTISE_10HALF;
1475         if (bp->flags & B44_FLAG_ADV_10FULL)
1476                 cmd->advertising |= ADVERTISE_10FULL;
1477         if (bp->flags & B44_FLAG_ADV_100HALF)
1478                 cmd->advertising |= ADVERTISE_100HALF;
1479         if (bp->flags & B44_FLAG_ADV_100FULL)
1480                 cmd->advertising |= ADVERTISE_100FULL;
1481         cmd->advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1482         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1483                 SPEED_100 : SPEED_10;
1484         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1485                 DUPLEX_FULL : DUPLEX_HALF;
1486         cmd->port = 0;
1487         cmd->phy_address = bp->phy_addr;
1488         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1489                 XCVR_INTERNAL : XCVR_EXTERNAL;
1490         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1491                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1492         cmd->maxtxpkt = 0;
1493         cmd->maxrxpkt = 0;
1494         return 0;
1495 }
1496
1497 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1498 {
1499         struct b44 *bp = netdev_priv(dev);
1500
1501         if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
1502                 return -EAGAIN;
1503
1504         /* We do not support gigabit. */
1505         if (cmd->autoneg == AUTONEG_ENABLE) {
1506                 if (cmd->advertising &
1507                     (ADVERTISED_1000baseT_Half |
1508                      ADVERTISED_1000baseT_Full))
1509                         return -EINVAL;
1510         } else if ((cmd->speed != SPEED_100 &&
1511                     cmd->speed != SPEED_10) ||
1512                    (cmd->duplex != DUPLEX_HALF &&
1513                     cmd->duplex != DUPLEX_FULL)) {
1514                         return -EINVAL;
1515         }
1516
1517         spin_lock_irq(&bp->lock);
1518
1519         if (cmd->autoneg == AUTONEG_ENABLE) {
1520                 bp->flags &= ~B44_FLAG_FORCE_LINK;
1521                 bp->flags &= ~(B44_FLAG_ADV_10HALF |
1522                                B44_FLAG_ADV_10FULL |
1523                                B44_FLAG_ADV_100HALF |
1524                                B44_FLAG_ADV_100FULL);
1525                 if (cmd->advertising & ADVERTISE_10HALF)
1526                         bp->flags |= B44_FLAG_ADV_10HALF;
1527                 if (cmd->advertising & ADVERTISE_10FULL)
1528                         bp->flags |= B44_FLAG_ADV_10FULL;
1529                 if (cmd->advertising & ADVERTISE_100HALF)
1530                         bp->flags |= B44_FLAG_ADV_100HALF;
1531                 if (cmd->advertising & ADVERTISE_100FULL)
1532                         bp->flags |= B44_FLAG_ADV_100FULL;
1533         } else {
1534                 bp->flags |= B44_FLAG_FORCE_LINK;
1535                 if (cmd->speed == SPEED_100)
1536                         bp->flags |= B44_FLAG_100_BASE_T;
1537                 if (cmd->duplex == DUPLEX_FULL)
1538                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1539         }
1540
1541         b44_setup_phy(bp);
1542
1543         spin_unlock_irq(&bp->lock);
1544
1545         return 0;
1546 }
1547
1548 static void b44_get_ringparam(struct net_device *dev,
1549                               struct ethtool_ringparam *ering)
1550 {
1551         struct b44 *bp = netdev_priv(dev);
1552
1553         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1554         ering->rx_pending = bp->rx_pending;
1555
1556         /* XXX ethtool lacks a tx_max_pending, oops... */
1557 }
1558
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

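	/* Quiesce the chip and rebuild both rings at the new sizes before
	 * letting the stack queue packets again; interrupts come back on
	 * only after the lock is dropped.
	 */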
	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
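	/* Autonegotiated pause needs the link renegotiated, so restart
	 * the chip; a forced pause setting can be programmed directly
	 * via __b44_set_flow_ctrl().
	 */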
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);

	return err;
}

/* Read 128 bytes of EEPROM. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
	long i;
	u16 *ptr = (u16 *) data;

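	/* The EEPROM contents are mapped at offset 4096 into the core's
	 * register window; each readw() fetches one 16-bit word of it.
	 */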
	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = readw(bp->regs + 4096 + i);

	return 0;
}

static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

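	/* The EEPROM is read as little-endian 16-bit words, so each pair
	 * of MAC-address bytes lands byte-swapped in the buffer; swap
	 * them back while copying.
	 */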
	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	/* The PHY address and MDC port share one 16-bit EEPROM word;
	 * eeprom[90] is a single byte, so shifting it right by 14 would
	 * always yield zero.  Reassemble the full word (assuming the same
	 * little-endian byte order as the MAC address above) before
	 * extracting bit 14.
	 */
	bp->phy_addr = eeprom[90] & 0x1f;
	bp->mdc_port = ((eeprom[90] | (eeprom[91] << 8)) >> 14) & 0x1;
	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;
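	/* The arithmetic: 30 % 4 == 2, so the 14-byte Ethernet header
	 * starts on a 2-byte boundary and ends at offset 44, leaving the
	 * IP header 4-byte aligned for the stack.
	 */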

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = ssb_get_addr(bp, SBID_PCI_DMA, 0);

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	 */
out:
	return err;
}

static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}
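	/* Note: the 4400 core can only address the low 1GB of DMA space;
	 * later revisions of this driver tightened the mask to 30 bits
	 * and bounce-buffered anything mapped above that limit.
	 */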

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	if (b44_debug >= 0)
		bp->msg_enable = (1 << b44_debug) - 1;
	else
		bp->msg_enable = B44_DEF_MSG_ENABLE;

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (bp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
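	/* Of the handlers wired above, dev->poll plus dev->weight form
	 * the NAPI contract: b44_poll() may process up to 64 packets per
	 * poll before yielding back to the softirq loop.
	 */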

	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev, bp->pci_cfg_state);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

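/* Error unwind: the labels run in reverse order of acquisition, each
 * releasing one resource and falling through to the next.
 */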
err_out_iounmap:
	iounmap(bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit b44_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct b44 *bp = netdev_priv(dev);

		unregister_netdev(dev);
		iounmap(bp->regs);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static int b44_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);
	return 0;
}

static int b44_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	pci_restore_state(pdev, bp->pci_cfg_state);

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	return 0;
}

static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

static int __init b44_init(void)
{
	return pci_module_init(&b44_driver);
}

static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);