/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.01"
#define DRV_MODULE_RELDATE      "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)
#define B44_DMA_MASK 0x3fffffff

#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

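/* Worked example of TX_BUFFS_AVAIL (illustrative numbers only, not part
 * of the driver): with tx_pending = 511, tx_cons = 5 and tx_prod = 10,
 * the consumer trails the producer, so 5 + 511 - 10 = 506 slots remain.
 * After the producer wraps (say tx_prod = 2, tx_cons = 500), the other
 * branch applies: 500 - 2 - TX_RING_GAP = 500 - 2 - 1 = 497 slots.
 */
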
#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */

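#if 0
/* Illustrative sketch only (not part of the driver): the calling
 * convention stated above.  The function name is hypothetical; it just
 * shows a caller taking bp->lock with interrupts disabled before
 * touching the backplane routines below.
 */
static void example_backplane_caller(struct b44 *bp)
{
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);    /* irqs off, lock held */
        ssb_core_reset(bp);                     /* backplane access is now safe */
        spin_unlock_irqrestore(&bp->lock, flags);
}
#endif
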
static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 type, base;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
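
/* Worked example of the packing above (the MAC address is hypothetical):
 * for 00:11:22:33:44:55, data[2..5] go into CAM_DATA_LO = 0x22334455 and
 * data[0..1] plus the valid bit into CAM_DATA_HI = CAM_DATA_HI_VALID |
 * 0x0011.
 */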

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
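
/* Illustrative note on the word assembled above: it is a standard
 * clause-22 MII management frame, i.e. start bits, a read/write opcode,
 * the 5-bit PHY address, the 5-bit register address, the turnaround
 * bits and (for writes) 16 data bits.  E.g. assuming phy_addr = 1 and
 * reg = MII_BMCR, the PMD and RA fields carry 1 and 0 respectively.
 */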

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}
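
/* Illustrative example of the resolution above: if we advertise
 * PAUSE_CAP|PAUSE_ASYM and the link partner advertises only
 * LPA_PAUSE_ASYM (asymmetric pause toward us), rx pause is enabled.
 * If the partner also sets LPA_PAUSE_CAP, the inner condition fails
 * and no pause flags are set, matching the rx-only policy described
 * in the comment above.
 */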

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
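/* Illustrative buffer layout under the scheme above, assuming
 * bp->rx_offset = 30:
 *
 *   mapping                        mapping + 30
 *   |<-- chip writes rx_header    |<-- chip DMAs packet data here
 *   [ 30 reserved bytes           ][ packet data ...              ]
 *
 * After skb_reserve(skb, 30), skb->data points at the packet data
 * while the header the chip wrote sits in the gap just before it.
 */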
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = dev_alloc_skb(RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        skb->dev = bp->dev;
        skb_reserve(skb, bp->rx_offset);

        rh = (struct rx_header *)
                (skb->data - bp->rx_offset);
        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        u32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + bp->rx_offset);
                        skb_pull(skb, bp->rx_offset);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct sk_buff *bounce_skb;
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                         len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}
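
/* Worked example of the bounce-buffer test above (the address is
 * hypothetical): B44_DMA_MASK is 0x3fffffff, i.e. the chip can only
 * address the low 1GB.  A mapping at 0x50000000 fails the
 * `mapping + len > B44_DMA_MASK' check, so the packet is copied into
 * a GFP_DMA skb, which comes from the low DMA zone and so falls below
 * that boundary on x86.
 */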

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                        rx_ring_dma + size > B44_DMA_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, GFP_KERNEL);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                        tx_ring_dma + size > B44_DMA_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
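
/* Worked example of the frame limit above (illustrative): with the
 * default mtu of 1500 the register value is 1500 + 14 (ETH_HLEN) + 8
 * (slack for a possible VLAN tag, per the comment above) +
 * RX_HEADER_LEN from b44.h, so the MAC accepts frames up to that many
 * bytes including the prepended rx_header.
 */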

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
        int magicsync = 6;
        int k, j, len = offset;
        int ethaddr_bytes = ETH_ALEN;

        memset(ppattern + offset, 0xff, magicsync);
        for (j = 0; j < magicsync; j++)
                set_bit(len++, (unsigned long *) pmask);

        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
                        ethaddr_bytes = ETH_ALEN;
                else
                        ethaddr_bytes = B44_PATTERN_SIZE - len;
                if (ethaddr_bytes <= 0)
                        break;
                for (k = 0; k < ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                (j * ETH_ALEN) + k] = macaddr[k];
                        len++;
                        set_bit(len, (unsigned long *) pmask);
                }
        }
        return len - 1;
}
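
/* Illustrative layout of the pattern built above, starting at `offset'
 * within the pattern buffer (header bytes before it are left zero):
 *
 *   [ 6 x 0xff sync bytes ][ MAC ][ MAC ][ MAC ] ...
 *
 * i.e. the standard Wake-on-LAN magic frame: a sync stream of six 0xff
 * bytes followed by the station address repeated (up to 16 copies here,
 * truncated to fit B44_PATTERN_SIZE).
 */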
1515
1516 /* Setup magic packet patterns in the b44 WOL
1517  * pattern matching filter.
1518  */
1519 static void b44_setup_pseudo_magicp(struct b44 *bp)
1520 {
1521
1522         u32 val;
1523         int plen0, plen1, plen2;
1524         u8 *pwol_pattern;
1525         u8 pwol_mask[B44_PMASK_SIZE];
1526
1527         pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1528         if (!pwol_pattern) {
1529                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1530                 return;
1531         }
1532
1533         /* IPv4 magic packet pattern - pattern 0. */
1534         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1535         memset(pwol_mask, 0, B44_PMASK_SIZE);
1536         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1537                                   B44_ETHIPV4UDP_HLEN);
1538
1539         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1540         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1541
1542         /* Raw Ethernet II magic packet pattern - pattern 1 */
1543         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1544         memset(pwol_mask, 0, B44_PMASK_SIZE);
1545         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1546                                   ETH_HLEN);
1547
1548         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1549                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1550         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1551                        B44_PMASK_BASE + B44_PMASK_SIZE);
1552
1553         /* IPv6 magic packet pattern - pattern 2 */
1554         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1555         memset(pwol_mask, 0, B44_PMASK_SIZE);
1556         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1557                                   B44_ETHIPV6UDP_HLEN);
1558
1559         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1560                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1561         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1562                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1563
1564         kfree(pwol_pattern);
1565
1566         /* Set these patterns' lengths: one less than each real length. */
1567         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1568         bw32(bp, B44_WKUP_LEN, val);
1569
1570         /* enable wakeup pattern matching */
1571         val = br32(bp, B44_DEVCTRL);
1572         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1573
1574 }
1575
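/* Arm the chip for Wake-on-LAN before powering down.  B0 and later
 * cores can match magic packets in hardware (DEVCTRL_MPM); older
 * revisions rely on the pseudo-magic pattern filters set up above.
 */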
1576 static void b44_setup_wol(struct b44 *bp)
1577 {
1578         u32 val;
1579         u16 pmval;
1580
1581         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1582
1583         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1584
1585                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1586
1587                 val = bp->dev->dev_addr[2] << 24 |
1588                         bp->dev->dev_addr[3] << 16 |
1589                         bp->dev->dev_addr[4] << 8 |
1590                         bp->dev->dev_addr[5];
1591                 bw32(bp, B44_ADDR_LO, val);
1592
1593                 val = bp->dev->dev_addr[0] << 8 |
1594                         bp->dev->dev_addr[1];
1595                 bw32(bp, B44_ADDR_HI, val);
1596
1597                 val = br32(bp, B44_DEVCTRL);
1598                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1599
1600         } else {
1601                 b44_setup_pseudo_magicp(bp);
1602         }
1603
1604         val = br32(bp, B44_SBTMSLOW);
1605         bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1606
1607         pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1608         pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1609
1610 }
1611
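/* Take the interface down: stop the queue and timer, halt the DMA
 * engines, release the rings and IRQ, and re-arm WOL if requested.
 */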
1612 static int b44_close(struct net_device *dev)
1613 {
1614         struct b44 *bp = netdev_priv(dev);
1615
1616         netif_stop_queue(dev);
1617
1618         netif_poll_disable(dev);
1619
1620         del_timer_sync(&bp->timer);
1621
1622         spin_lock_irq(&bp->lock);
1623
1624 #if 0
1625         b44_dump_state(bp);
1626 #endif
1627         b44_halt(bp);
1628         b44_free_rings(bp);
1629         netif_carrier_off(dev);
1630
1631         spin_unlock_irq(&bp->lock);
1632
1633         free_irq(dev->irq, dev);
1634
1635         netif_poll_enable(dev);
1636
1637         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1638                 b44_init_hw(bp, B44_PARTIAL_RESET);
1639                 b44_setup_wol(bp);
1640         }
1641
1642         b44_free_consistent(bp);
1643
1644         return 0;
1645 }
1646
1647 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1648 {
1649         struct b44 *bp = netdev_priv(dev);
1650         struct net_device_stats *nstat = &bp->stats;
1651         struct b44_hw_stats *hwstat = &bp->hw_stats;
1652
1653         /* Convert HW stats into netdevice stats. */
1654         nstat->rx_packets = hwstat->rx_pkts;
1655         nstat->tx_packets = hwstat->tx_pkts;
1656         nstat->rx_bytes   = hwstat->rx_octets;
1657         nstat->tx_bytes   = hwstat->tx_octets;
1658         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1659                              hwstat->tx_oversize_pkts +
1660                              hwstat->tx_underruns +
1661                              hwstat->tx_excessive_cols +
1662                              hwstat->tx_late_cols);
1663         nstat->multicast  = hwstat->tx_multicast_pkts;
1664         nstat->collisions = hwstat->tx_total_cols;
1665
1666         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1667                                    hwstat->rx_undersize);
1668         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1669         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1670         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1671         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1672                                    hwstat->rx_oversize_pkts +
1673                                    hwstat->rx_missed_pkts +
1674                                    hwstat->rx_crc_align_errs +
1675                                    hwstat->rx_undersize +
1676                                    hwstat->rx_crc_errs +
1677                                    hwstat->rx_align_errs +
1678                                    hwstat->rx_symbol_errs);
1679
1680         nstat->tx_aborted_errors = hwstat->tx_underruns;
1681 #if 0
1682         /* Carrier lost counter seems to be broken for some devices */
1683         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1684 #endif
1685
1686         return nstat;
1687 }
1688
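/* Load the multicast list into the CAM.  Slot 0 holds the unicast
 * address, so multicast entries start at slot 1; returns the next
 * free CAM index.
 */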
1689 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1690 {
1691         struct dev_mc_list *mclist;
1692         int i, num_ents;
1693
1694         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1695         mclist = dev->mc_list;
1696         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1697                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1698         }
1699         return i + 1;
1700 }
1701
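/* Program the receive filter: promiscuous, all-multicast, or exact
 * CAM matches, clearing any unused CAM slots.  The caller must hold
 * bp->lock.
 */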
1702 static void __b44_set_rx_mode(struct net_device *dev)
1703 {
1704         struct b44 *bp = netdev_priv(dev);
1705         u32 val;
1706
1707         val = br32(bp, B44_RXCONFIG);
1708         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1709         if (dev->flags & IFF_PROMISC) {
1710                 val |= RXCONFIG_PROMISC;
1711                 bw32(bp, B44_RXCONFIG, val);
1712         } else {
1713                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1714                 int i = 0;
1715
1716                 __b44_set_mac_addr(bp);
1717
1718                 if ((dev->flags & IFF_ALLMULTI) ||
1719                     (dev->mc_count > B44_MCAST_TABLE_SIZE))
1720                         val |= RXCONFIG_ALLMULTI;
1721                 else
1722                         i = __b44_load_mcast(bp, dev);
1723
1724                 for (; i < 64; i++)
1725                         __b44_cam_write(bp, zero, i);
1726
1727                 bw32(bp, B44_RXCONFIG, val);
1728                 val = br32(bp, B44_CAM_CTRL);
1729                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1730         }
1731 }
1732
1733 static void b44_set_rx_mode(struct net_device *dev)
1734 {
1735         struct b44 *bp = netdev_priv(dev);
1736
1737         spin_lock_irq(&bp->lock);
1738         __b44_set_rx_mode(dev);
1739         spin_unlock_irq(&bp->lock);
1740 }
1741
1742 static u32 b44_get_msglevel(struct net_device *dev)
1743 {
1744         struct b44 *bp = netdev_priv(dev);
1745         return bp->msg_enable;
1746 }
1747
1748 static void b44_set_msglevel(struct net_device *dev, u32 value)
1749 {
1750         struct b44 *bp = netdev_priv(dev);
1751         bp->msg_enable = value;
1752 }
1753
1754 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1755 {
1756         struct b44 *bp = netdev_priv(dev);
1757         struct pci_dev *pci_dev = bp->pdev;
1758
1759         strcpy (info->driver, DRV_MODULE_NAME);
1760         strcpy (info->version, DRV_MODULE_VERSION);
1761         strcpy (info->bus_info, pci_name(pci_dev));
1762 }
1763
1764 static int b44_nway_reset(struct net_device *dev)
1765 {
1766         struct b44 *bp = netdev_priv(dev);
1767         u32 bmcr;
1768         int r;
1769
1770         spin_lock_irq(&bp->lock);
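        /* BMCR is read twice; apparently the first read can return
         * stale data on this PHY, so only the second value is used.
         */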
1771         b44_readphy(bp, MII_BMCR, &bmcr);
1772         b44_readphy(bp, MII_BMCR, &bmcr);
1773         r = -EINVAL;
1774         if (bmcr & BMCR_ANENABLE) {
1775                 b44_writephy(bp, MII_BMCR,
1776                              bmcr | BMCR_ANRESTART);
1777                 r = 0;
1778         }
1779         spin_unlock_irq(&bp->lock);
1780
1781         return r;
1782 }
1783
1784 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1785 {
1786         struct b44 *bp = netdev_priv(dev);
1787
1788         cmd->supported = (SUPPORTED_Autoneg);
1789         cmd->supported |= (SUPPORTED_100baseT_Half |
1790                           SUPPORTED_100baseT_Full |
1791                           SUPPORTED_10baseT_Half |
1792                           SUPPORTED_10baseT_Full |
1793                           SUPPORTED_MII);
1794
1795         cmd->advertising = 0;
1796         if (bp->flags & B44_FLAG_ADV_10HALF)
1797                 cmd->advertising |= ADVERTISED_10baseT_Half;
1798         if (bp->flags & B44_FLAG_ADV_10FULL)
1799                 cmd->advertising |= ADVERTISED_10baseT_Full;
1800         if (bp->flags & B44_FLAG_ADV_100HALF)
1801                 cmd->advertising |= ADVERTISED_100baseT_Half;
1802         if (bp->flags & B44_FLAG_ADV_100FULL)
1803                 cmd->advertising |= ADVERTISED_100baseT_Full;
1804         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1805         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1806                 SPEED_100 : SPEED_10;
1807         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1808                 DUPLEX_FULL : DUPLEX_HALF;
1809         cmd->port = 0;
1810         cmd->phy_address = bp->phy_addr;
1811         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1812                 XCVR_INTERNAL : XCVR_EXTERNAL;
1813         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1814                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1815         if (cmd->autoneg == AUTONEG_ENABLE)
1816                 cmd->advertising |= ADVERTISED_Autoneg;
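        /* Report unknown speed/duplex while the interface is down. */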
1817         if (!netif_running(dev)) {
1818                 cmd->speed = 0;
1819                 cmd->duplex = 0xff;
1820         }
1821         cmd->maxtxpkt = 0;
1822         cmd->maxrxpkt = 0;
1823         return 0;
1824 }
1825
1826 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1827 {
1828         struct b44 *bp = netdev_priv(dev);
1829
1830         /* We do not support gigabit. */
1831         if (cmd->autoneg == AUTONEG_ENABLE) {
1832                 if (cmd->advertising &
1833                     (ADVERTISED_1000baseT_Half |
1834                      ADVERTISED_1000baseT_Full))
1835                         return -EINVAL;
1836         } else if ((cmd->speed != SPEED_100 &&
1837                     cmd->speed != SPEED_10) ||
1838                    (cmd->duplex != DUPLEX_HALF &&
1839                     cmd->duplex != DUPLEX_FULL)) {
1840                 return -EINVAL;
1841         }
1842
1843         spin_lock_irq(&bp->lock);
1844
1845         if (cmd->autoneg == AUTONEG_ENABLE) {
1846                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1847                                B44_FLAG_100_BASE_T |
1848                                B44_FLAG_FULL_DUPLEX |
1849                                B44_FLAG_ADV_10HALF |
1850                                B44_FLAG_ADV_10FULL |
1851                                B44_FLAG_ADV_100HALF |
1852                                B44_FLAG_ADV_100FULL);
1853                 if (cmd->advertising == 0) {
1854                         bp->flags |= (B44_FLAG_ADV_10HALF |
1855                                       B44_FLAG_ADV_10FULL |
1856                                       B44_FLAG_ADV_100HALF |
1857                                       B44_FLAG_ADV_100FULL);
1858                 } else {
1859                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1860                                 bp->flags |= B44_FLAG_ADV_10HALF;
1861                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1862                                 bp->flags |= B44_FLAG_ADV_10FULL;
1863                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1864                                 bp->flags |= B44_FLAG_ADV_100HALF;
1865                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1866                                 bp->flags |= B44_FLAG_ADV_100FULL;
1867                 }
1868         } else {
1869                 bp->flags |= B44_FLAG_FORCE_LINK;
1870                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1871                 if (cmd->speed == SPEED_100)
1872                         bp->flags |= B44_FLAG_100_BASE_T;
1873                 if (cmd->duplex == DUPLEX_FULL)
1874                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1875         }
1876
1877         if (netif_running(dev))
1878                 b44_setup_phy(bp);
1879
1880         spin_unlock_irq(&bp->lock);
1881
1882         return 0;
1883 }
1884
1885 static void b44_get_ringparam(struct net_device *dev,
1886                               struct ethtool_ringparam *ering)
1887 {
1888         struct b44 *bp = netdev_priv(dev);
1889
1890         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1891         ering->rx_pending = bp->rx_pending;
1892
1893         /* XXX ethtool lacks a tx_max_pending, oops... */
1894 }
1895
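/* Changing the ring sizes requires a full halt and re-init of the
 * hardware, so any in-flight packets are dropped.
 */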
1896 static int b44_set_ringparam(struct net_device *dev,
1897                              struct ethtool_ringparam *ering)
1898 {
1899         struct b44 *bp = netdev_priv(dev);
1900
1901         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1902             (ering->rx_mini_pending != 0) ||
1903             (ering->rx_jumbo_pending != 0) ||
1904             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1905                 return -EINVAL;
1906
1907         spin_lock_irq(&bp->lock);
1908
1909         bp->rx_pending = ering->rx_pending;
1910         bp->tx_pending = ering->tx_pending;
1911
1912         b44_halt(bp);
1913         b44_init_rings(bp);
1914         b44_init_hw(bp, B44_FULL_RESET);
1915         netif_wake_queue(bp->dev);
1916         spin_unlock_irq(&bp->lock);
1917
1918         b44_enable_ints(bp);
1919
1920         return 0;
1921 }
1922
1923 static void b44_get_pauseparam(struct net_device *dev,
1924                                 struct ethtool_pauseparam *epause)
1925 {
1926         struct b44 *bp = netdev_priv(dev);
1927
1928         epause->autoneg =
1929                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1930         epause->rx_pause =
1931                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1932         epause->tx_pause =
1933                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1934 }
1935
1936 static int b44_set_pauseparam(struct net_device *dev,
1937                                 struct ethtool_pauseparam *epause)
1938 {
1939         struct b44 *bp = netdev_priv(dev);
1940
1941         spin_lock_irq(&bp->lock);
1942         if (epause->autoneg)
1943                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1944         else
1945                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1946         if (epause->rx_pause)
1947                 bp->flags |= B44_FLAG_RX_PAUSE;
1948         else
1949                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1950         if (epause->tx_pause)
1951                 bp->flags |= B44_FLAG_TX_PAUSE;
1952         else
1953                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1954         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1955                 b44_halt(bp);
1956                 b44_init_rings(bp);
1957                 b44_init_hw(bp, B44_FULL_RESET);
1958         } else {
1959                 __b44_set_flow_ctrl(bp, bp->flags);
1960         }
1961         spin_unlock_irq(&bp->lock);
1962
1963         b44_enable_ints(bp);
1964
1965         return 0;
1966 }
1967
1968 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1969 {
1970         switch (stringset) {
1971         case ETH_SS_STATS:
1972                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1973                 break;
1974         }
1975 }
1976
1977 static int b44_get_stats_count(struct net_device *dev)
1978 {
1979         return ARRAY_SIZE(b44_gstrings);
1980 }
1981
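/* This walk relies on the counters in b44_hw_stats being contiguous
 * u32s laid out in exactly the order named by b44_gstrings.
 */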
1982 static void b44_get_ethtool_stats(struct net_device *dev,
1983                                   struct ethtool_stats *stats, u64 *data)
1984 {
1985         struct b44 *bp = netdev_priv(dev);
1986         u32 *val = &bp->hw_stats.tx_good_octets;
1987         u32 i;
1988
1989         spin_lock_irq(&bp->lock);
1990
1991         b44_stats_update(bp);
1992
1993         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1994                 *data++ = *val++;
1995
1996         spin_unlock_irq(&bp->lock);
1997 }
1998
1999 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2000 {
2001         struct b44 *bp = netdev_priv(dev);
2002
2003         wol->supported = WAKE_MAGIC;
2004         if (bp->flags & B44_FLAG_WOL_ENABLE)
2005                 wol->wolopts = WAKE_MAGIC;
2006         else
2007                 wol->wolopts = 0;
2008         memset(&wol->sopass, 0, sizeof(wol->sopass));
2009 }
2010
2011 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2012 {
2013         struct b44 *bp = netdev_priv(dev);
2014
2015         spin_lock_irq(&bp->lock);
2016         if (wol->wolopts & WAKE_MAGIC)
2017                 bp->flags |= B44_FLAG_WOL_ENABLE;
2018         else
2019                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2020         spin_unlock_irq(&bp->lock);
2021
2022         return 0;
2023 }
2024
2025 static const struct ethtool_ops b44_ethtool_ops = {
2026         .get_drvinfo            = b44_get_drvinfo,
2027         .get_settings           = b44_get_settings,
2028         .set_settings           = b44_set_settings,
2029         .nway_reset             = b44_nway_reset,
2030         .get_link               = ethtool_op_get_link,
2031         .get_wol                = b44_get_wol,
2032         .set_wol                = b44_set_wol,
2033         .get_ringparam          = b44_get_ringparam,
2034         .set_ringparam          = b44_set_ringparam,
2035         .get_pauseparam         = b44_get_pauseparam,
2036         .set_pauseparam         = b44_set_pauseparam,
2037         .get_msglevel           = b44_get_msglevel,
2038         .set_msglevel           = b44_set_msglevel,
2039         .get_strings            = b44_get_strings,
2040         .get_stats_count        = b44_get_stats_count,
2041         .get_ethtool_stats      = b44_get_ethtool_stats,
2042         .get_perm_addr          = ethtool_op_get_perm_addr,
2043 };
2044
2045 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2046 {
2047         struct mii_ioctl_data *data = if_mii(ifr);
2048         struct b44 *bp = netdev_priv(dev);
2049         int err = -EINVAL;
2050
2051         if (!netif_running(dev))
2052                 goto out;
2053
2054         spin_lock_irq(&bp->lock);
2055         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2056         spin_unlock_irq(&bp->lock);
2057 out:
2058         return err;
2059 }
2060
2061 /* Read 128 bytes of EEPROM. */
2062 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2063 {
2064         long i;
2065         u16 *ptr = (u16 *) data;
2066
2067         for (i = 0; i < 128; i += 2)
2068                 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2069
2070         return 0;
2071 }
2072
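/* Read the per-device configuration out of the EEPROM: MAC and PHY
 * addresses, plus the DMA offset and core-revision flags.
 */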
2073 static int __devinit b44_get_invariants(struct b44 *bp)
2074 {
2075         u8 eeprom[128];
2076         int err;
2077
2078         err = b44_read_eeprom(bp, &eeprom[0]);
2079         if (err)
2080                 goto out;
2081
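        /* The EEPROM stores the MAC address as 16-bit little-endian
         * words, hence the swapped byte pairs below.
         */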
2082         bp->dev->dev_addr[0] = eeprom[79];
2083         bp->dev->dev_addr[1] = eeprom[78];
2084         bp->dev->dev_addr[2] = eeprom[81];
2085         bp->dev->dev_addr[3] = eeprom[80];
2086         bp->dev->dev_addr[4] = eeprom[83];
2087         bp->dev->dev_addr[5] = eeprom[82];
2088
2089         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2090                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2091                 return -EINVAL;
2092         }
2093
2094         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2095
2096         bp->phy_addr = eeprom[90] & 0x1f;
2097
2098         /* With this, plus the rx_header prepended to the data by the
2099          * hardware, we'll land the ethernet header on a 2-byte boundary.
2100          */
2101         bp->rx_offset = 30;
2102
2103         bp->imask = IMASK_DEF;
2104
2105         bp->core_unit = ssb_core_unit(bp);
2106         bp->dma_offset = SB_PCI_DMA;
2107
2108         /* XXX - really required?
2109            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2110          */
2111
2112         if (ssb_get_core_rev(bp) >= 7)
2113                 bp->flags |= B44_FLAG_B0_ANDLATER;
2114
2115 out:
2116         return err;
2117 }
2118
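/* One-time probe of a single 4400 instance: map its registers, read
 * the invariants and register the netdevice.
 */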
2119 static int __devinit b44_init_one(struct pci_dev *pdev,
2120                                   const struct pci_device_id *ent)
2121 {
2122         static int b44_version_printed = 0;
2123         unsigned long b44reg_base, b44reg_len;
2124         struct net_device *dev;
2125         struct b44 *bp;
2126         int err, i;
2127
2128         if (b44_version_printed++ == 0)
2129                 printk(KERN_INFO "%s", version);
2130
2131         err = pci_enable_device(pdev);
2132         if (err) {
2133                 dev_err(&pdev->dev, "Cannot enable PCI device, "
2134                        "aborting.\n");
2135                 return err;
2136         }
2137
2138         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2139                 dev_err(&pdev->dev,
2140                         "Cannot find proper PCI device "
2141                        "base address, aborting.\n");
2142                 err = -ENODEV;
2143                 goto err_out_disable_pdev;
2144         }
2145
2146         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2147         if (err) {
2148                 dev_err(&pdev->dev,
2149                         "Cannot obtain PCI resources, aborting.\n");
2150                 goto err_out_disable_pdev;
2151         }
2152
2153         pci_set_master(pdev);
2154
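        /* The 4400 can only DMA within the low 1GB of the address
         * space (30-bit mask).
         */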
2155         err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2156         if (err) {
2157                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2158                 goto err_out_free_res;
2159         }
2160
2161         err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2162         if (err) {
2163                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2164                 goto err_out_free_res;
2165         }
2166
2167         b44reg_base = pci_resource_start(pdev, 0);
2168         b44reg_len = pci_resource_len(pdev, 0);
2169
2170         dev = alloc_etherdev(sizeof(*bp));
2171         if (!dev) {
2172                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2173                 err = -ENOMEM;
2174                 goto err_out_free_res;
2175         }
2176
2177         SET_MODULE_OWNER(dev);
2178         SET_NETDEV_DEV(dev, &pdev->dev);
2179
2180         /* No interesting netdevice features in this card... */
2181         dev->features |= 0;
2182
2183         bp = netdev_priv(dev);
2184         bp->pdev = pdev;
2185         bp->dev = dev;
2186
2187         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2188
2189         spin_lock_init(&bp->lock);
2190
2191         bp->regs = ioremap(b44reg_base, b44reg_len);
2192         if (!bp->regs) {
2193                 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2194                 err = -ENOMEM;
2195                 goto err_out_free_dev;
2196         }
2197
2198         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2199         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2200
2201         dev->open = b44_open;
2202         dev->stop = b44_close;
2203         dev->hard_start_xmit = b44_start_xmit;
2204         dev->get_stats = b44_get_stats;
2205         dev->set_multicast_list = b44_set_rx_mode;
2206         dev->set_mac_address = b44_set_mac_addr;
2207         dev->do_ioctl = b44_ioctl;
2208         dev->tx_timeout = b44_tx_timeout;
2209         dev->poll = b44_poll;
2210         dev->weight = 64;
2211         dev->watchdog_timeo = B44_TX_TIMEOUT;
2212 #ifdef CONFIG_NET_POLL_CONTROLLER
2213         dev->poll_controller = b44_poll_controller;
2214 #endif
2215         dev->change_mtu = b44_change_mtu;
2216         dev->irq = pdev->irq;
2217         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2218
2219         netif_carrier_off(dev);
2220
2221         err = b44_get_invariants(bp);
2222         if (err) {
2223                 dev_err(&pdev->dev,
2224                         "Problem fetching invariants of chip, aborting.\n");
2225                 goto err_out_iounmap;
2226         }
2227
2228         bp->mii_if.dev = dev;
2229         bp->mii_if.mdio_read = b44_mii_read;
2230         bp->mii_if.mdio_write = b44_mii_write;
2231         bp->mii_if.phy_id = bp->phy_addr;
2232         bp->mii_if.phy_id_mask = 0x1f;
2233         bp->mii_if.reg_num_mask = 0x1f;
2234
2235         /* By default, advertise all speed/duplex settings. */
2236         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2237                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2238
2239         /* By default, auto-negotiate PAUSE. */
2240         bp->flags |= B44_FLAG_PAUSE_AUTO;
2241
2242         err = register_netdev(dev);
2243         if (err) {
2244                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2245                 goto err_out_iounmap;
2246         }
2247
2248         pci_set_drvdata(pdev, dev);
2249
2250         pci_save_state(bp->pdev);
2251
2252         /* Chip reset provides power to the b44 MAC & PCI cores, which
2253          * is necessary for MAC register access.
2254          */
2255         b44_chip_reset(bp);
2256
2257         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2258         for (i = 0; i < 6; i++)
2259                 printk("%2.2x%c", dev->dev_addr[i],
2260                        i == 5 ? '\n' : ':');
2261
2262         return 0;
2263
2264 err_out_iounmap:
2265         iounmap(bp->regs);
2266
2267 err_out_free_dev:
2268         free_netdev(dev);
2269
2270 err_out_free_res:
2271         pci_release_regions(pdev);
2272
2273 err_out_disable_pdev:
2274         pci_disable_device(pdev);
2275         pci_set_drvdata(pdev, NULL);
2276         return err;
2277 }
2278
2279 static void __devexit b44_remove_one(struct pci_dev *pdev)
2280 {
2281         struct net_device *dev = pci_get_drvdata(pdev);
2282         struct b44 *bp = netdev_priv(dev);
2283
2284         unregister_netdev(dev);
2285         iounmap(bp->regs);
2286         free_netdev(dev);
2287         pci_release_regions(pdev);
2288         pci_disable_device(pdev);
2289         pci_set_drvdata(pdev, NULL);
2290 }
2291
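/* Quiesce the chip for suspend, leaving just enough powered up for
 * Wake-on-LAN if it has been requested.
 */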
2292 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2293 {
2294         struct net_device *dev = pci_get_drvdata(pdev);
2295         struct b44 *bp = netdev_priv(dev);
2296
2297         if (!netif_running(dev))
2298                  return 0;
2299
2300         del_timer_sync(&bp->timer);
2301
2302         spin_lock_irq(&bp->lock);
2303
2304         b44_halt(bp);
2305         netif_carrier_off(bp->dev);
2306         netif_device_detach(bp->dev);
2307         b44_free_rings(bp);
2308
2309         spin_unlock_irq(&bp->lock);
2310
2311         free_irq(dev->irq, dev);
2312         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2313                 b44_init_hw(bp, B44_PARTIAL_RESET);
2314                 b44_setup_wol(bp);
2315         }
2316         pci_disable_device(pdev);
2317         return 0;
2318 }
2319
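/* Undo b44_suspend(): re-enable the PCI device and, if the interface
 * was running, rebuild the rings and restart the periodic timer.
 */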
2320 static int b44_resume(struct pci_dev *pdev)
2321 {
2322         struct net_device *dev = pci_get_drvdata(pdev);
2323         struct b44 *bp = netdev_priv(dev);
2324         int rc = 0;
2325
2326         pci_restore_state(pdev);
2327         rc = pci_enable_device(pdev);
2328         if (rc) {
2329                 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2330                         dev->name);
2331                 return rc;
2332         }
2333
2334         pci_set_master(pdev);
2335
2336         if (!netif_running(dev))
2337                 return 0;
2338
2339         rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2340         if (rc) {
2341                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2342                 pci_disable_device(pdev);
2343                 return rc;
2344         }
2345
2346         spin_lock_irq(&bp->lock);
2347
2348         b44_init_rings(bp);
2349         b44_init_hw(bp, B44_FULL_RESET);
2350         netif_device_attach(bp->dev);
2351         spin_unlock_irq(&bp->lock);
2352
2353         bp->timer.expires = jiffies + HZ;
2354         add_timer(&bp->timer);
2355
2356         b44_enable_ints(bp);
2357         netif_wake_queue(dev);
2358         return 0;
2359 }
2360
2361 static struct pci_driver b44_driver = {
2362         .name           = DRV_MODULE_NAME,
2363         .id_table       = b44_pci_tbl,
2364         .probe          = b44_init_one,
2365         .remove         = __devexit_p(b44_remove_one),
2366         .suspend        = b44_suspend,
2367         .resume         = b44_resume,
2368 };
2369
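/* Descriptor syncs must cover whole cache lines on non-coherent
 * platforms, so round the sync size up to the cache alignment before
 * registering the driver.
 */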
2370 static int __init b44_init(void)
2371 {
2372         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2373
2374         /* Set up parameters for syncing RX/TX DMA descriptors */
2375         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2376         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2377
2378         return pci_register_driver(&b44_driver);
2379 }
2380
2381 static void __exit b44_cleanup(void)
2382 {
2383         pci_unregister_driver(&b44_driver);
2384 }
2385
2386 module_init(b44_init);
2387 module_exit(b44_cleanup);
2388