/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.01"
#define DRV_MODULE_RELDATE      "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)
#define B44_DMA_MASK 0x3fffffff

#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

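/* Worked example (editor's illustration, not in the original source):
 * with tx_pending = 511, tx_cons = 500 and tx_prod = 10 the producer
 * has wrapped, so the second branch of TX_BUFFS_AVAIL applies:
 *   500 - 10 - (512 - 511) = 489 descriptors still free.
 * NEXT_TX depends on B44_TX_RING_SIZE being a power of two, which
 * makes the AND-mask equivalent to a modulo. */
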
#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}
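
/* Typical use (sketch, mirroring the call in b44_chip_reset()): poll
 * until ENET_CTRL_DISABLE self-clears, giving up after roughly 1 ms
 * (100 iterations x 10 us):
 *
 *   b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
 *
 * A non-zero return means the bit never reached the wanted state. */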

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}
static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 base, type;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
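
/* Example (editor's illustration): for the MAC 00:11:22:33:44:55,
 * CAM_DATA_LO is written with 0x22334455 (address bytes 2..5) and
 * CAM_DATA_HI with CAM_DATA_HI_VALID | 0x0011 (bytes 0..1), so a
 * single CAM entry spans both registers. */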

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
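
/* Both helpers assemble a standard clause-22 MII management frame in
 * the MDIO_DATA register: start bits, read/write opcode, 5-bit PHY
 * address, 5-bit register address, turnaround pattern and (for
 * writes) 16 data bits, then poll EMAC_INT_MII for completion. */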

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}
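
/* As the comment above notes, only RX pause is resolved from the
 * autonegotiation result; the (local, remote) advertisement bits are
 * checked roughly per IEEE 802.3 annex 28B, and TX pause stays off
 * unless forced through ethtool. */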

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}
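
/* The MIB counters are put into clear-on-read mode during chip reset
 * (MIB_CTRL_CLR_ON_READ), so each br32() above returns only the delta
 * since the previous poll and '+=' accumulates it into the software
 * copy in bp->hw_stats. */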

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}
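
/* The current-descriptor field of DMATX_STAT is a byte offset into
 * the ring, hence the divide by sizeof(struct dma_desc) above to turn
 * the hardware cursor into a ring index before walking completions. */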

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = dev_alloc_skb(RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        skb->dev = bp->dev;
        skb_reserve(skb, bp->rx_offset);

        rh = (struct rx_header *)
                (skb->data - bp->rx_offset);
        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}
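
/* Resulting buffer layout (editor's sketch):
 *
 *   skb->head: [bp->rx_offset bytes of headroom: rx_header + pad]
 *              [packet data ...]
 *
 * The descriptor address is mapping + rx_offset, so the chip writes
 * the rx_header backwards from that point, landing in the reserved
 * headroom rather than clobbering packet data. */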

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        u32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + bp->rx_offset);
                        skb_pull(skb, bp->rx_offset);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                spin_lock_irq(&bp->lock);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, 1);
                netif_wake_queue(bp->dev);
                spin_unlock_irq(&bp->lock);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}
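
/* Pre-2.6.24 NAPI semantics: *budget is the global quota and
 * netdev->quota the per-device one; returning 0 declares this device
 * done (it leaves the poll list and interrupts are re-enabled),
 * returning 1 keeps it scheduled for another pass. */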

static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* ??? What the fuck is the purpose of the interrupt mask
         * ??? register if we have to mask it out by hand anyways?
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, 1);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct sk_buff *bounce_skb;
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                         len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}
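
/* Note the deliberate double write of B44_DMATX_PTR for parts flagged
 * B44_FLAG_BUGGY_TXPTR and the flushing read for B44_FLAG_REORDER_BUG
 * parts: both are hardware-errata workarounds, not typos. */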

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, 1);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                        rx_ring_dma + size > B44_DMA_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, GFP_KERNEL);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                        tx_ring_dma + size > B44_DMA_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int full_reset)
{
        u32 val;

        b44_chip_reset(bp);
        if (full_reset) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (full_reset) {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        } else {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
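
/* In the paths shown in this file (open, MTU change, error recovery)
 * the full_reset=1 variant is always used; the full_reset=0 path
 * skips the PHY bring-up and DMA ring re-programming, only
 * re-enabling the RX DMA engine with the configured header offset. */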

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        b44_init_rings(bp);
        b44_init_hw(bp, 1);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);

}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev, NULL);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
        int magicsync = 6;
        int k, j, len = offset;
        int ethaddr_bytes = ETH_ALEN;

        memset(ppattern + offset, 0xff, magicsync);
        for (j = 0; j < magicsync; j++)
                set_bit(len++, (unsigned long *) pmask);

        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
                        ethaddr_bytes = ETH_ALEN;
                else
                        ethaddr_bytes = B44_PATTERN_SIZE - len;
                if (ethaddr_bytes <= 0)
                        break;
                for (k = 0; k < ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                (j * ETH_ALEN) + k] = macaddr[k];
                        len++;
                        set_bit(len, (unsigned long *) pmask);
                }
        }
        return len - 1;
}
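
/* Magic-packet layout being matched (editor's sketch): at 'offset'
 * bytes into the frame comes a sync field of six 0xff bytes followed
 * by the station MAC repeated 16 times; pmask gets one bit set per
 * byte the filter must actually compare. */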
1507
1508 /* Setup magic packet patterns in the b44 WOL
1509  * pattern matching filter.
1510  */
1511 static void b44_setup_pseudo_magicp(struct b44 *bp)
1512 {
1513
1514         u32 val;
1515         int plen0, plen1, plen2;
1516         u8 *pwol_pattern;
1517         u8 pwol_mask[B44_PMASK_SIZE];
1518
1519         pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1520         if (!pwol_pattern) {
1521                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1522                 return;
1523         }
1524
1525         /* Ipv4 magic packet pattern - pattern 0.*/
1526         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1527         memset(pwol_mask, 0, B44_PMASK_SIZE);
1528         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1529                                   B44_ETHIPV4UDP_HLEN);
1530
1531         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1532         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1533
1534         /* Raw Ethernet II magic packet pattern - pattern 1 */
1535         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1536         memset(pwol_mask, 0, B44_PMASK_SIZE);
1537         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1538                                   ETH_HLEN);
1539
1540         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1541                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1542         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1543                        B44_PMASK_BASE + B44_PMASK_SIZE);
1544
1545         /* IPv6 magic packet pattern - pattern 2 */
1546         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1547         memset(pwol_mask, 0, B44_PMASK_SIZE);
1548         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1549                                   B44_ETHIPV6UDP_HLEN);
1550
1551         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1552                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1553         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1554                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1555
1556         kfree(pwol_pattern);
1557
1558         /* set these patterns' lengths: one less than each real length */
1559         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1560         bw32(bp, B44_WKUP_LEN, val);
1561
1562         /* enable wakeup pattern matching */
1563         val = br32(bp, B44_DEVCTRL);
1564         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1566 }
1567
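     /* Arm the chip for Wake-on-LAN.  B0 and later cores can match
      * magic packets in hardware (DEVCTRL_MPM), so only the station
      * address needs to be loaded; earlier cores fall back to the
      * pseudo-magic pattern filter built above.  Finally the core
      * (SBTMSLOW_PE) and the PCI function (SSB_PE) are switched into
      * their power-save, wakeup-capable states.
      */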
1568 static void b44_setup_wol(struct b44 *bp)
1569 {
1570         u32 val;
1571         u16 pmval;
1572
1573         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1574
1575         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1577                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1578
1579                 val = bp->dev->dev_addr[2] << 24 |
1580                         bp->dev->dev_addr[3] << 16 |
1581                         bp->dev->dev_addr[4] << 8 |
1582                         bp->dev->dev_addr[5];
1583                 bw32(bp, B44_ADDR_LO, val);
1584
1585                 val = bp->dev->dev_addr[0] << 8 |
1586                         bp->dev->dev_addr[1];
1587                 bw32(bp, B44_ADDR_HI, val);
1588
1589                 val = br32(bp, B44_DEVCTRL);
1590                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1591
1592         } else {
1593                 b44_setup_pseudo_magicp(bp);
1594         }
1595
1596         val = br32(bp, B44_SBTMSLOW);
1597         bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1598
1599         pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1600         pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1602 }
1603
1604 static int b44_close(struct net_device *dev)
1605 {
1606         struct b44 *bp = netdev_priv(dev);
1607
1608         netif_stop_queue(dev);
1609
1610         netif_poll_disable(dev);
1611
1612         del_timer_sync(&bp->timer);
1613
1614         spin_lock_irq(&bp->lock);
1615
1616 #if 0
1617         b44_dump_state(bp);
1618 #endif
1619         b44_halt(bp);
1620         b44_free_rings(bp);
1621         netif_carrier_off(dev);
1622
1623         spin_unlock_irq(&bp->lock);
1624
1625         free_irq(dev->irq, dev);
1626
1627         netif_poll_enable(dev);
1628
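             /* If WOL is active, bring the core back up just far
              * enough (partial init) to arm the wakeup logic before
              * the remaining resources are released.
              */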
1629         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1630                 b44_init_hw(bp, 0);
1631                 b44_setup_wol(bp);
1632         }
1633
1634         b44_free_consistent(bp);
1635
1636         return 0;
1637 }
1638
1639 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1640 {
1641         struct b44 *bp = netdev_priv(dev);
1642         struct net_device_stats *nstat = &bp->stats;
1643         struct b44_hw_stats *hwstat = &bp->hw_stats;
1644
1645         /* Convert HW stats into netdevice stats. */
1646         nstat->rx_packets = hwstat->rx_pkts;
1647         nstat->tx_packets = hwstat->tx_pkts;
1648         nstat->rx_bytes   = hwstat->rx_octets;
1649         nstat->tx_bytes   = hwstat->tx_octets;
1650         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1651                              hwstat->tx_oversize_pkts +
1652                              hwstat->tx_underruns +
1653                              hwstat->tx_excessive_cols +
1654                              hwstat->tx_late_cols);
1655         nstat->multicast  = hwstat->rx_multicast_pkts;
1656         nstat->collisions = hwstat->tx_total_cols;
1657
1658         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1659                                    hwstat->rx_undersize);
1660         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1661         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1662         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1663         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1664                                    hwstat->rx_oversize_pkts +
1665                                    hwstat->rx_missed_pkts +
1666                                    hwstat->rx_crc_align_errs +
1667                                    hwstat->rx_undersize +
1668                                    hwstat->rx_crc_errs +
1669                                    hwstat->rx_align_errs +
1670                                    hwstat->rx_symbol_errs);
1671
1672         nstat->tx_aborted_errors = hwstat->tx_underruns;
1673 #if 0
1674         /* Carrier lost counter seems to be broken for some devices */
1675         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1676 #endif
1677
1678         return nstat;
1679 }
1680
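     /* Load the multicast list into the chip's CAM.  Entry 0 is
      * reserved for the unicast station address (written by
      * __b44_set_mac_addr()), so multicast entries start at slot 1;
      * the return value is the first unused slot.
      */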
1681 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1682 {
1683         struct dev_mc_list *mclist;
1684         int i, num_ents;
1685
1686         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1687         mclist = dev->mc_list;
1688         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1689                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1690         }
1691         return i + 1;
1692 }
1693
1694 static void __b44_set_rx_mode(struct net_device *dev)
1695 {
1696         struct b44 *bp = netdev_priv(dev);
1697         u32 val;
1698
1699         val = br32(bp, B44_RXCONFIG);
1700         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1701         if (dev->flags & IFF_PROMISC) {
1702                 val |= RXCONFIG_PROMISC;
1703                 bw32(bp, B44_RXCONFIG, val);
1704         } else {
1705                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1706                 int i = 0;
1707
1708                 __b44_set_mac_addr(bp);
1709
1710                 if (dev->flags & IFF_ALLMULTI)
1711                         val |= RXCONFIG_ALLMULTI;
1712                 else
1713                         i = __b44_load_mcast(bp, dev);
1714
1715                 for (; i < 64; i++) {
1716                         __b44_cam_write(bp, zero, i);
1717                 }
1718                 bw32(bp, B44_RXCONFIG, val);
1719                 val = br32(bp, B44_CAM_CTRL);
1720                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1721         }
1722 }
1723
1724 static void b44_set_rx_mode(struct net_device *dev)
1725 {
1726         struct b44 *bp = netdev_priv(dev);
1727
1728         spin_lock_irq(&bp->lock);
1729         __b44_set_rx_mode(dev);
1730         spin_unlock_irq(&bp->lock);
1731 }
1732
1733 static u32 b44_get_msglevel(struct net_device *dev)
1734 {
1735         struct b44 *bp = netdev_priv(dev);
1736         return bp->msg_enable;
1737 }
1738
1739 static void b44_set_msglevel(struct net_device *dev, u32 value)
1740 {
1741         struct b44 *bp = netdev_priv(dev);
1742         bp->msg_enable = value;
1743 }
1744
1745 static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1746 {
1747         struct b44 *bp = netdev_priv(dev);
1748         struct pci_dev *pci_dev = bp->pdev;
1749
1750         strcpy(info->driver, DRV_MODULE_NAME);
1751         strcpy(info->version, DRV_MODULE_VERSION);
1752         strcpy(info->bus_info, pci_name(pci_dev));
1753 }
1754
1755 static int b44_nway_reset(struct net_device *dev)
1756 {
1757         struct b44 *bp = netdev_priv(dev);
1758         u32 bmcr;
1759         int r;
1760
1761         spin_lock_irq(&bp->lock);
1762         b44_readphy(bp, MII_BMCR, &bmcr);
1763         b44_readphy(bp, MII_BMCR, &bmcr);
1764         r = -EINVAL;
1765         if (bmcr & BMCR_ANENABLE) {
1766                 b44_writephy(bp, MII_BMCR,
1767                              bmcr | BMCR_ANRESTART);
1768                 r = 0;
1769         }
1770         spin_unlock_irq(&bp->lock);
1771
1772         return r;
1773 }
1774
1775 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1776 {
1777         struct b44 *bp = netdev_priv(dev);
1778
1779         cmd->supported = (SUPPORTED_Autoneg);
1780         cmd->supported |= (SUPPORTED_100baseT_Half |
1781                           SUPPORTED_100baseT_Full |
1782                           SUPPORTED_10baseT_Half |
1783                           SUPPORTED_10baseT_Full |
1784                           SUPPORTED_MII);
1785
1786         cmd->advertising = 0;
1787         if (bp->flags & B44_FLAG_ADV_10HALF)
1788                 cmd->advertising |= ADVERTISED_10baseT_Half;
1789         if (bp->flags & B44_FLAG_ADV_10FULL)
1790                 cmd->advertising |= ADVERTISED_10baseT_Full;
1791         if (bp->flags & B44_FLAG_ADV_100HALF)
1792                 cmd->advertising |= ADVERTISED_100baseT_Half;
1793         if (bp->flags & B44_FLAG_ADV_100FULL)
1794                 cmd->advertising |= ADVERTISED_100baseT_Full;
1795         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1796         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1797                 SPEED_100 : SPEED_10;
1798         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1799                 DUPLEX_FULL : DUPLEX_HALF;
1800         cmd->port = 0;
1801         cmd->phy_address = bp->phy_addr;
1802         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1803                 XCVR_INTERNAL : XCVR_EXTERNAL;
1804         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1805                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1806         if (cmd->autoneg == AUTONEG_ENABLE)
1807                 cmd->advertising |= ADVERTISED_Autoneg;
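             /* While the interface is down, speed and duplex are
              * indeterminate; report them as such.
              */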
1808         if (!netif_running(dev)) {
1809                 cmd->speed = 0;
1810                 cmd->duplex = 0xff;
1811         }
1812         cmd->maxtxpkt = 0;
1813         cmd->maxrxpkt = 0;
1814         return 0;
1815 }
1816
1817 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1818 {
1819         struct b44 *bp = netdev_priv(dev);
1820
1821         /* We do not support gigabit. */
1822         if (cmd->autoneg == AUTONEG_ENABLE) {
1823                 if (cmd->advertising &
1824                     (ADVERTISED_1000baseT_Half |
1825                      ADVERTISED_1000baseT_Full))
1826                         return -EINVAL;
1827         } else if ((cmd->speed != SPEED_100 &&
1828                     cmd->speed != SPEED_10) ||
1829                    (cmd->duplex != DUPLEX_HALF &&
1830                     cmd->duplex != DUPLEX_FULL)) {
1831                 return -EINVAL;
1832         }
1833
1834         spin_lock_irq(&bp->lock);
1835
1836         if (cmd->autoneg == AUTONEG_ENABLE) {
1837                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1838                                B44_FLAG_100_BASE_T |
1839                                B44_FLAG_FULL_DUPLEX |
1840                                B44_FLAG_ADV_10HALF |
1841                                B44_FLAG_ADV_10FULL |
1842                                B44_FLAG_ADV_100HALF |
1843                                B44_FLAG_ADV_100FULL);
1844                 if (cmd->advertising == 0) {
1845                         bp->flags |= (B44_FLAG_ADV_10HALF |
1846                                       B44_FLAG_ADV_10FULL |
1847                                       B44_FLAG_ADV_100HALF |
1848                                       B44_FLAG_ADV_100FULL);
1849                 } else {
1850                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1851                                 bp->flags |= B44_FLAG_ADV_10HALF;
1852                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1853                                 bp->flags |= B44_FLAG_ADV_10FULL;
1854                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1855                                 bp->flags |= B44_FLAG_ADV_100HALF;
1856                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1857                                 bp->flags |= B44_FLAG_ADV_100FULL;
1858                 }
1859         } else {
1860                 bp->flags |= B44_FLAG_FORCE_LINK;
1861                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1862                 if (cmd->speed == SPEED_100)
1863                         bp->flags |= B44_FLAG_100_BASE_T;
1864                 if (cmd->duplex == DUPLEX_FULL)
1865                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1866         }
1867
1868         if (netif_running(dev))
1869                 b44_setup_phy(bp);
1870
1871         spin_unlock_irq(&bp->lock);
1872
1873         return 0;
1874 }
1875
1876 static void b44_get_ringparam(struct net_device *dev,
1877                               struct ethtool_ringparam *ering)
1878 {
1879         struct b44 *bp = netdev_priv(dev);
1880
1881         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1882         ering->rx_pending = bp->rx_pending;
1883
1884         ering->tx_max_pending = B44_TX_RING_SIZE - 1;
1885         ering->tx_pending = bp->tx_pending;
1886 }
1886
1887 static int b44_set_ringparam(struct net_device *dev,
1888                              struct ethtool_ringparam *ering)
1889 {
1890         struct b44 *bp = netdev_priv(dev);
1891
1892         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1893             (ering->rx_mini_pending != 0) ||
1894             (ering->rx_jumbo_pending != 0) ||
1895             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1896                 return -EINVAL;
1897
1898         spin_lock_irq(&bp->lock);
1899
1900         bp->rx_pending = ering->rx_pending;
1901         bp->tx_pending = ering->tx_pending;
1902
1903         b44_halt(bp);
1904         b44_init_rings(bp);
1905         b44_init_hw(bp, 1);
1906         netif_wake_queue(bp->dev);
1907         spin_unlock_irq(&bp->lock);
1908
1909         b44_enable_ints(bp);
1910
1911         return 0;
1912 }
1913
1914 static void b44_get_pauseparam(struct net_device *dev,
1915                                 struct ethtool_pauseparam *epause)
1916 {
1917         struct b44 *bp = netdev_priv(dev);
1918
1919         epause->autoneg =
1920                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1921         epause->rx_pause =
1922                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1923         epause->tx_pause =
1924                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1925 }
1926
1927 static int b44_set_pauseparam(struct net_device *dev,
1928                                 struct ethtool_pauseparam *epause)
1929 {
1930         struct b44 *bp = netdev_priv(dev);
1931
1932         spin_lock_irq(&bp->lock);
1933         if (epause->autoneg)
1934                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1935         else
1936                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1937         if (epause->rx_pause)
1938                 bp->flags |= B44_FLAG_RX_PAUSE;
1939         else
1940                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1941         if (epause->tx_pause)
1942                 bp->flags |= B44_FLAG_TX_PAUSE;
1943         else
1944                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1945         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1946                 b44_halt(bp);
1947                 b44_init_rings(bp);
1948                 b44_init_hw(bp, 1);
1949         } else {
1950                 __b44_set_flow_ctrl(bp, bp->flags);
1951         }
1952         spin_unlock_irq(&bp->lock);
1953
1954         b44_enable_ints(bp);
1955
1956         return 0;
1957 }
1958
1959 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1960 {
1961         switch (stringset) {
1962         case ETH_SS_STATS:
1963                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1964                 break;
1965         }
1966 }
1967
1968 static int b44_get_stats_count(struct net_device *dev)
1969 {
1970         return ARRAY_SIZE(b44_gstrings);
1971 }
1972
1973 static void b44_get_ethtool_stats(struct net_device *dev,
1974                                   struct ethtool_stats *stats, u64 *data)
1975 {
1976         struct b44 *bp = netdev_priv(dev);
1977         u32 *val = &bp->hw_stats.tx_good_octets;
1978         u32 i;
1979
1980         spin_lock_irq(&bp->lock);
1981
1982         b44_stats_update(bp);
1983
1984         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1985                 *data++ = *val++;
1986
1987         spin_unlock_irq(&bp->lock);
1988 }
1989
1990 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1991 {
1992         struct b44 *bp = netdev_priv(dev);
1993
1994         wol->supported = WAKE_MAGIC;
1995         if (bp->flags & B44_FLAG_WOL_ENABLE)
1996                 wol->wolopts = WAKE_MAGIC;
1997         else
1998                 wol->wolopts = 0;
1999         memset(&wol->sopass, 0, sizeof(wol->sopass));
2000 }
2001
2002 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2003 {
2004         struct b44 *bp = netdev_priv(dev);
2005
2006         spin_lock_irq(&bp->lock);
2007         if (wol->wolopts & WAKE_MAGIC)
2008                 bp->flags |= B44_FLAG_WOL_ENABLE;
2009         else
2010                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2011         spin_unlock_irq(&bp->lock);
2012
2013         return 0;
2014 }
2015
2016 static struct ethtool_ops b44_ethtool_ops = {
2017         .get_drvinfo            = b44_get_drvinfo,
2018         .get_settings           = b44_get_settings,
2019         .set_settings           = b44_set_settings,
2020         .nway_reset             = b44_nway_reset,
2021         .get_link               = ethtool_op_get_link,
2022         .get_wol                = b44_get_wol,
2023         .set_wol                = b44_set_wol,
2024         .get_ringparam          = b44_get_ringparam,
2025         .set_ringparam          = b44_set_ringparam,
2026         .get_pauseparam         = b44_get_pauseparam,
2027         .set_pauseparam         = b44_set_pauseparam,
2028         .get_msglevel           = b44_get_msglevel,
2029         .set_msglevel           = b44_set_msglevel,
2030         .get_strings            = b44_get_strings,
2031         .get_stats_count        = b44_get_stats_count,
2032         .get_ethtool_stats      = b44_get_ethtool_stats,
2033         .get_perm_addr          = ethtool_op_get_perm_addr,
2034 };
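     /* These ops are exercised from userspace via ethtool(8), e.g.:
      *
      *   ethtool -s eth0 speed 100 duplex full autoneg off
      *   ethtool -G eth0 rx 200 tx 511
      *   ethtool -s eth0 wol g
      *
      * which reach b44_set_settings(), b44_set_ringparam() and
      * b44_set_wol() respectively.
      */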
2035
2036 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2037 {
2038         struct mii_ioctl_data *data = if_mii(ifr);
2039         struct b44 *bp = netdev_priv(dev);
2040         int err = -EINVAL;
2041
2042         if (!netif_running(dev))
2043                 goto out;
2044
2045         spin_lock_irq(&bp->lock);
2046         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2047         spin_unlock_irq(&bp->lock);
2048 out:
2049         return err;
2050 }
2051
2052 /* Read 128 bytes of EEPROM (memory-mapped at offset 4096 of the register BAR). */
2053 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2054 {
2055         long i;
2056         u16 *ptr = (u16 *) data;
2057
2058         for (i = 0; i < 128; i += 2)
2059                 ptr[i / 2] = readw(bp->regs + 4096 + i);
2060
2061         return 0;
2062 }
2063
2064 static int __devinit b44_get_invariants(struct b44 *bp)
2065 {
2066         u8 eeprom[128];
2067         int err;
2068
2069         err = b44_read_eeprom(bp, &eeprom[0]);
2070         if (err)
2071                 goto out;
2072
2073         bp->dev->dev_addr[0] = eeprom[79];
2074         bp->dev->dev_addr[1] = eeprom[78];
2075         bp->dev->dev_addr[2] = eeprom[81];
2076         bp->dev->dev_addr[3] = eeprom[80];
2077         bp->dev->dev_addr[4] = eeprom[83];
2078         bp->dev->dev_addr[5] = eeprom[82];
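             /* The station address is stored in the EEPROM as 16-bit
              * words, hence the byte-swapped indices above.
              */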
2079
2080         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2081                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2082                 return -EINVAL;
2083         }
2084
2085         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2086
2087         bp->phy_addr = eeprom[90] & 0x1f;
2088
2089         /* With this, plus the rx_header prepended to the data by the
2090          * hardware, we'll land the ethernet header on a 2-byte boundary.
2091          */
2092         bp->rx_offset = 30;
2093
2094         bp->imask = IMASK_DEF;
2095
2096         bp->core_unit = ssb_core_unit(bp);
2097         bp->dma_offset = SB_PCI_DMA;
2098
2099         /* XXX - really required?
2100            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2101          */
2102
2103         if (ssb_get_core_rev(bp) >= 7)
2104                 bp->flags |= B44_FLAG_B0_ANDLATER;
2105
2106 out:
2107         return err;
2108 }
2109
2110 static int __devinit b44_init_one(struct pci_dev *pdev,
2111                                   const struct pci_device_id *ent)
2112 {
2113         static int b44_version_printed = 0;
2114         unsigned long b44reg_base, b44reg_len;
2115         struct net_device *dev;
2116         struct b44 *bp;
2117         int err, i;
2118
2119         if (b44_version_printed++ == 0)
2120                 printk(KERN_INFO "%s", version);
2121
2122         err = pci_enable_device(pdev);
2123         if (err) {
2124                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
2126                 return err;
2127         }
2128
2129         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2130                 dev_err(&pdev->dev, "Cannot find proper PCI device "
2131                         "base address, aborting.\n");
2133                 err = -ENODEV;
2134                 goto err_out_disable_pdev;
2135         }
2136
2137         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2138         if (err) {
2139                 dev_err(&pdev->dev,
2140                         "Cannot obtain PCI resources, aborting.\n");
2141                 goto err_out_disable_pdev;
2142         }
2143
2144         pci_set_master(pdev);
2145
2146         err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2147         if (err) {
2148                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2149                 goto err_out_free_res;
2150         }
2151
2152         err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2153         if (err) {
2154                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2155                 goto err_out_free_res;
2156         }
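             /* Note: B44_DMA_MASK is only 30 bits wide; the 4400 core
              * can DMA only within the first 1GB of physical memory.
              */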
2157
2158         b44reg_base = pci_resource_start(pdev, 0);
2159         b44reg_len = pci_resource_len(pdev, 0);
2160
2161         dev = alloc_etherdev(sizeof(*bp));
2162         if (!dev) {
2163                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2164                 err = -ENOMEM;
2165                 goto err_out_free_res;
2166         }
2167
2168         SET_MODULE_OWNER(dev);
2169         SET_NETDEV_DEV(dev, &pdev->dev);
2170
2171         /* No interesting netdevice features in this card... */
2172         dev->features |= 0;
2173
2174         bp = netdev_priv(dev);
2175         bp->pdev = pdev;
2176         bp->dev = dev;
2177
2178         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2179
2180         spin_lock_init(&bp->lock);
2181
2182         bp->regs = ioremap(b44reg_base, b44reg_len);
2183         if (!bp->regs) {
2184                 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2185                 err = -ENOMEM;
2186                 goto err_out_free_dev;
2187         }
2188
2189         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2190         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2191
2192         dev->open = b44_open;
2193         dev->stop = b44_close;
2194         dev->hard_start_xmit = b44_start_xmit;
2195         dev->get_stats = b44_get_stats;
2196         dev->set_multicast_list = b44_set_rx_mode;
2197         dev->set_mac_address = b44_set_mac_addr;
2198         dev->do_ioctl = b44_ioctl;
2199         dev->tx_timeout = b44_tx_timeout;
2200         dev->poll = b44_poll;
2201         dev->weight = 64;
2202         dev->watchdog_timeo = B44_TX_TIMEOUT;
2203 #ifdef CONFIG_NET_POLL_CONTROLLER
2204         dev->poll_controller = b44_poll_controller;
2205 #endif
2206         dev->change_mtu = b44_change_mtu;
2207         dev->irq = pdev->irq;
2208         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2209
2210         netif_carrier_off(dev);
2211
2212         err = b44_get_invariants(bp);
2213         if (err) {
2214                 dev_err(&pdev->dev,
2215                         "Problem fetching invariants of chip, aborting.\n");
2216                 goto err_out_iounmap;
2217         }
2218
2219         bp->mii_if.dev = dev;
2220         bp->mii_if.mdio_read = b44_mii_read;
2221         bp->mii_if.mdio_write = b44_mii_write;
2222         bp->mii_if.phy_id = bp->phy_addr;
2223         bp->mii_if.phy_id_mask = 0x1f;
2224         bp->mii_if.reg_num_mask = 0x1f;
2225
2226         /* By default, advertise all speed/duplex settings. */
2227         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2228                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2229
2230         /* By default, auto-negotiate PAUSE. */
2231         bp->flags |= B44_FLAG_PAUSE_AUTO;
2232
2233         err = register_netdev(dev);
2234         if (err) {
2235                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2236                 goto err_out_iounmap;
2237         }
2238
2239         pci_set_drvdata(pdev, dev);
2240
2241         pci_save_state(bp->pdev);
2242
2243         /* Chip reset provides power to the b44 MAC & PCI cores, which
2244          * is necessary for MAC register access.
2245          */
2246         b44_chip_reset(bp);
2247
2248         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2249         for (i = 0; i < 6; i++)
2250                 printk("%2.2x%c", dev->dev_addr[i],
2251                        i == 5 ? '\n' : ':');
2252
2253         return 0;
2254
2255 err_out_iounmap:
2256         iounmap(bp->regs);
2257
2258 err_out_free_dev:
2259         free_netdev(dev);
2260
2261 err_out_free_res:
2262         pci_release_regions(pdev);
2263
2264 err_out_disable_pdev:
2265         pci_disable_device(pdev);
2266         pci_set_drvdata(pdev, NULL);
2267         return err;
2268 }
2269
2270 static void __devexit b44_remove_one(struct pci_dev *pdev)
2271 {
2272         struct net_device *dev = pci_get_drvdata(pdev);
2273         struct b44 *bp = netdev_priv(dev);
2274
2275         unregister_netdev(dev);
2276         iounmap(bp->regs);
2277         free_netdev(dev);
2278         pci_release_regions(pdev);
2279         pci_disable_device(pdev);
2280         pci_set_drvdata(pdev, NULL);
2281 }
2282
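     /* PCI power management: suspend halts the core and, if WOL is
      * enabled, re-arms the wakeup logic (mirroring b44_close());
      * resume restores PCI state and rebuilds the rings before
      * restarting the interface.
      */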
2283 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2284 {
2285         struct net_device *dev = pci_get_drvdata(pdev);
2286         struct b44 *bp = netdev_priv(dev);
2287
2288         if (!netif_running(dev))
2289                 return 0;
2290
2291         del_timer_sync(&bp->timer);
2292
2293         spin_lock_irq(&bp->lock);
2294
2295         b44_halt(bp);
2296         netif_carrier_off(bp->dev);
2297         netif_device_detach(bp->dev);
2298         b44_free_rings(bp);
2299
2300         spin_unlock_irq(&bp->lock);
2301
2302         free_irq(dev->irq, dev);
2303         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2304                 b44_init_hw(bp, 0);
2305                 b44_setup_wol(bp);
2306         }
2307         pci_disable_device(pdev);
2308         return 0;
2309 }
2310
2311 static int b44_resume(struct pci_dev *pdev)
2312 {
2313         struct net_device *dev = pci_get_drvdata(pdev);
2314         struct b44 *bp = netdev_priv(dev);
2315
2316         pci_restore_state(pdev);
2317         pci_enable_device(pdev);
2318         pci_set_master(pdev);
2319
2320         if (!netif_running(dev))
2321                 return 0;
2322
2323         if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
2324                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2325
2326         spin_lock_irq(&bp->lock);
2327
2328         b44_init_rings(bp);
2329         b44_init_hw(bp, 1);
2330         netif_device_attach(bp->dev);
2331         spin_unlock_irq(&bp->lock);
2332
2333         bp->timer.expires = jiffies + HZ;
2334         add_timer(&bp->timer);
2335
2336         b44_enable_ints(bp);
2337         netif_wake_queue(dev);
2338         return 0;
2339 }
2340
2341 static struct pci_driver b44_driver = {
2342         .name           = DRV_MODULE_NAME,
2343         .id_table       = b44_pci_tbl,
2344         .probe          = b44_init_one,
2345         .remove         = __devexit_p(b44_remove_one),
2346         .suspend        = b44_suspend,
2347         .resume         = b44_resume,
2348 };
2349
2350 static int __init b44_init(void)
2351 {
2352         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2353
2354         /* Set up parameters for syncing RX/TX DMA descriptors */
2355         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2356         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
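             /* e.g. with 32-byte cache lines: the alignment mask is
              * ~31 = 0xffffffe0, and descriptors are synced in chunks
              * of max(32, sizeof(struct dma_desc)) bytes.
              */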
2357
2358         return pci_module_init(&b44_driver);
2359 }
2360
2361 static void __exit b44_cleanup(void)
2362 {
2363         pci_unregister_driver(&b44_driver);
2364 }
2365
2366 module_init(b44_init);
2367 module_exit(b44_cleanup);
2368