1 /* b44.c: Broadcom 4400 device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5  * Copyright (C) 2006 Broadcom Corporation.
6  *
7  * Distribute under GPL.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
23
24 #include <asm/uaccess.h>
25 #include <asm/io.h>
26 #include <asm/irq.h>
27
28 #include "b44.h"
29
30 #define DRV_MODULE_NAME         "b44"
31 #define PFX DRV_MODULE_NAME     ": "
32 #define DRV_MODULE_VERSION      "1.00"
33 #define DRV_MODULE_RELDATE      "Apr 7, 2006"
34
35 #define B44_DEF_MSG_ENABLE        \
36         (NETIF_MSG_DRV          | \
37          NETIF_MSG_PROBE        | \
38          NETIF_MSG_LINK         | \
39          NETIF_MSG_TIMER        | \
40          NETIF_MSG_IFDOWN       | \
41          NETIF_MSG_IFUP         | \
42          NETIF_MSG_RX_ERR       | \
43          NETIF_MSG_TX_ERR)
44
45 /* length of time before we decide the hardware is borked,
46  * and dev->tx_timeout() should be called to fix the problem
47  */
48 #define B44_TX_TIMEOUT                  (5 * HZ)
49
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU                     60
52 #define B44_MAX_MTU                     1500
53
54 #define B44_RX_RING_SIZE                512
55 #define B44_DEF_RX_RING_PENDING         200
56 #define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
57                                  B44_RX_RING_SIZE)
58 #define B44_TX_RING_SIZE                512
59 #define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
61                                  B44_TX_RING_SIZE)
62 #define B44_DMA_MASK 0x3fffffff
63
64 #define TX_RING_GAP(BP) \
65         (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP)                                              \
67         (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
68           (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
69           (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
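/* Example: with the default tx_pending of 511 and a wrapped ring
 * (tx_cons == 500, tx_prod == 10), 22 descriptors are in flight and
 * the second branch yields 500 - 10 - TX_RING_GAP = 489 free slots.
 */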
70 #define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
71
72 #define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)
74
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
77
78 static char version[] __devinitdata =
79         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
80
81 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
82 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_MODULE_VERSION);
85
86 static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
87 module_param(b44_debug, int, 0);
88 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
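/* Usage example: the eight NETIF_MSG_* classes in B44_DEF_MSG_ENABLE
 * sum to 0xff, so "modprobe b44 b44_debug=0xff" requests the same
 * messages as the default.
 */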
90
91 static struct pci_device_id b44_pci_tbl[] = {
92         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
93           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
94         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
95           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
96         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
97           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
98         { }     /* terminate list with empty entry */
99 };
100
101 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
102
103 static void b44_halt(struct b44 *);
104 static void b44_init_rings(struct b44 *);
105 static void b44_init_hw(struct b44 *);
106
107 static int dma_desc_align_mask;
108 static int dma_desc_sync_size;
109
110 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
111 #define _B44(x...)      # x,
112 B44_STAT_REG_DECLARE
113 #undef _B44
114 };
115
116 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
117                                                 dma_addr_t dma_base,
118                                                 unsigned long offset,
119                                                 enum dma_data_direction dir)
120 {
121         dma_sync_single_range_for_device(&pdev->dev, dma_base,
122                                          offset & dma_desc_align_mask,
123                                          dma_desc_sync_size, dir);
124 }
125
126 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
127                                              dma_addr_t dma_base,
128                                              unsigned long offset,
129                                              enum dma_data_direction dir)
130 {
131         dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
132                                       offset & dma_desc_align_mask,
133                                       dma_desc_sync_size, dir);
134 }
135
136 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
137 {
138         return readl(bp->regs + reg);
139 }
140
141 static inline void bw32(const struct b44 *bp,
142                         unsigned long reg, unsigned long val)
143 {
144         writel(val, bp->regs + reg);
145 }
146
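/* Poll 'reg' every 10 usec, for at most 'timeout' iterations, until
 * 'bit' is cleared (clear != 0) or set (clear == 0).  Returns 0 on
 * success or -ENODEV if the bit never reaches the requested state.
 */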
147 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
148                         u32 bit, unsigned long timeout, const int clear)
149 {
150         unsigned long i;
151
152         for (i = 0; i < timeout; i++) {
153                 u32 val = br32(bp, reg);
154
155                 if (clear && !(val & bit))
156                         break;
157                 if (!clear && (val & bit))
158                         break;
159                 udelay(10);
160         }
161         if (i == timeout) {
162                 printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
163                        "%lx to %s.\n",
164                        bp->dev->name,
165                        bit, reg,
166                        (clear ? "clear" : "set"));
167                 return -ENODEV;
168         }
169         return 0;
170 }
171
172 /* Sonics SiliconBackplane support routines.  ROFL, you should see all the
173  * buzz words used on this company's website :-)
174  *
175  * All of these routines must be invoked with bp->lock held and
176  * interrupts disabled.
177  */
178
179 #define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
180 #define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */
181
182 static u32 ssb_get_core_rev(struct b44 *bp)
183 {
184         return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
185 }
186
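/* Temporarily point the BAR0 window at the PCI core so we can read
 * its revision and enable interrupt forwarding plus prefetch/burst,
 * then restore the original window.
 */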
187 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
188 {
189         u32 bar_orig, pci_rev, val;
190
191         pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
192         pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
193         pci_rev = ssb_get_core_rev(bp);
194
195         val = br32(bp, B44_SBINTVEC);
196         val |= cores;
197         bw32(bp, B44_SBINTVEC, val);
198
199         val = br32(bp, SSB_PCI_TRANS_2);
200         val |= SSB_PCI_PREF | SSB_PCI_BURST;
201         bw32(bp, SSB_PCI_TRANS_2, val);
202
203         pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
204
205         return pci_rev;
206 }
207
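/* Take the core offline: ask the backplane to reject new
 * transactions, wait for the core to go idle, assert reset while the
 * clock is still forced on, then drop the clock with reset held.
 */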
208 static void ssb_core_disable(struct b44 *bp)
209 {
210         if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
211                 return;
212
213         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
214         b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
215         b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
216         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
217                             SBTMSLOW_REJECT | SBTMSLOW_RESET));
218         br32(bp, B44_SBTMSLOW);
219         udelay(1);
220         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
221         br32(bp, B44_SBTMSLOW);
222         udelay(1);
223 }
224
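/* Full reset sequence: disable the core, re-enable its clock with
 * reset asserted, clear latched SERR/timeout error state, then
 * release reset and the forced-clock bit in two steps.
 */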
225 static void ssb_core_reset(struct b44 *bp)
226 {
227         u32 val;
228
229         ssb_core_disable(bp);
230         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
231         br32(bp, B44_SBTMSLOW);
232         udelay(1);
233
234         /* Clear SERR if set, this is a hw bug workaround.  */
235         if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
236                 bw32(bp, B44_SBTMSHIGH, 0);
237
238         val = br32(bp, B44_SBIMSTATE);
239         if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
240                 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
241
242         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
243         br32(bp, B44_SBTMSLOW);
244         udelay(1);
245
246         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
247         br32(bp, B44_SBTMSLOW);
248         udelay(1);
249 }
250
251 static int ssb_core_unit(struct b44 *bp)
252 {
253 #if 0
254         u32 val = br32(bp, B44_SBADMATCH0);
255         u32 base, type;
256
257         type = val & SBADMATCH0_TYPE_MASK;
258         switch (type) {
259         case 0:
260                 base = val & SBADMATCH0_BS0_MASK;
261                 break;
262
263         case 1:
264                 base = val & SBADMATCH0_BS1_MASK;
265                 break;
266
267         case 2:
268         default:
269                 base = val & SBADMATCH0_BS2_MASK;
270                 break;
271         }
272 #endif
273         return 0;
274 }
275
276 static int ssb_is_core_up(struct b44 *bp)
277 {
278         return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
279                 == SBTMSLOW_CLOCK);
280 }
281
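/* Write a 6-byte MAC address into CAM slot 'index': address bytes
 * 2-5 go in CAM_DATA_LO, bytes 0-1 plus the valid bit in
 * CAM_DATA_HI, then the write is kicked off via CAM_CTRL.
 */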
282 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
283 {
284         u32 val;
285
286         val  = ((u32) data[2]) << 24;
287         val |= ((u32) data[3]) << 16;
288         val |= ((u32) data[4]) <<  8;
289         val |= ((u32) data[5]) <<  0;
290         bw32(bp, B44_CAM_DATA_LO, val);
291         val = (CAM_DATA_HI_VALID |
292                (((u32) data[0]) << 8) |
293                (((u32) data[1]) << 0));
294         bw32(bp, B44_CAM_DATA_HI, val);
295         bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
296                             (index << CAM_CTRL_INDEX_SHIFT)));
297         b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
298 }
299
300 static inline void __b44_disable_ints(struct b44 *bp)
301 {
302         bw32(bp, B44_IMASK, 0);
303 }
304
305 static void b44_disable_ints(struct b44 *bp)
306 {
307         __b44_disable_ints(bp);
308
309         /* Flush posted writes. */
310         br32(bp, B44_IMASK);
311 }
312
313 static void b44_enable_ints(struct b44 *bp)
314 {
315         bw32(bp, B44_IMASK, bp->imask);
316 }
317
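/* Issue a single MDIO read: clear the latched MII-done interrupt,
 * write a read frame for (bp->phy_addr, reg) to MDIO_DATA, then wait
 * for the EMAC to signal completion before sampling the result.
 */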
318 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
319 {
320         int err;
321
322         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
323         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
324                              (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
325                              (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
326                              (reg << MDIO_DATA_RA_SHIFT) |
327                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
328         err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
329         *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
330
331         return err;
332 }
333
334 static int b44_writephy(struct b44 *bp, int reg, u32 val)
335 {
336         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
337         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
338                              (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
339                              (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
340                              (reg << MDIO_DATA_RA_SHIFT) |
341                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
342                              (val & MDIO_DATA_DATA)));
343         return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
344 }
345
346 /* miilib interface */
347 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
348  * due to code existing before miilib use was added to this driver.
349  * Someone should remove this artificial driver limitation in
350  * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
351  */
352 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
353 {
354         u32 val;
355         struct b44 *bp = netdev_priv(dev);
356         int rc = b44_readphy(bp, location, &val);
357         if (rc)
358                 return 0xffffffff;
359         return val;
360 }
361
362 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
363                          int val)
364 {
365         struct b44 *bp = netdev_priv(dev);
366         b44_writephy(bp, location, val);
367 }
368
369 static int b44_phy_reset(struct b44 *bp)
370 {
371         u32 val;
372         int err;
373
374         err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
375         if (err)
376                 return err;
377         udelay(100);
378         err = b44_readphy(bp, MII_BMCR, &val);
379         if (!err) {
380                 if (val & BMCR_RESET) {
381                         printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
382                                bp->dev->name);
383                         err = -ENODEV;
384                 }
385         }
386
387         return err;
388 }
389
390 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
391 {
392         u32 val;
393
394         bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
395         bp->flags |= pause_flags;
396
397         val = br32(bp, B44_RXCONFIG);
398         if (pause_flags & B44_FLAG_RX_PAUSE)
399                 val |= RXCONFIG_FLOW;
400         else
401                 val &= ~RXCONFIG_FLOW;
402         bw32(bp, B44_RXCONFIG, val);
403
404         val = br32(bp, B44_MAC_FLOW);
405         if (pause_flags & B44_FLAG_TX_PAUSE)
406                 val |= (MAC_FLOW_PAUSE_ENAB |
407                         (0xc0 & MAC_FLOW_RX_HI_WATER));
408         else
409                 val &= ~MAC_FLOW_PAUSE_ENAB;
410         bw32(bp, B44_MAC_FLOW, val);
411 }
412
413 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
414 {
415         u32 pause_enab = 0;
416
417         /* The driver supports only rx pause by default because
418            the b44 mac tx pause mechanism generates excessive
419            pause frames.
420            Use ethtool to turn on b44 tx pause if necessary.
421          */
422         if ((local & ADVERTISE_PAUSE_CAP) &&
423             (local & ADVERTISE_PAUSE_ASYM)){
424                 if ((remote & LPA_PAUSE_ASYM) &&
425                     !(remote & LPA_PAUSE_CAP))
426                         pause_enab |= B44_FLAG_RX_PAUSE;
427         }
428
429         __b44_set_flow_ctrl(bp, pause_enab);
430 }
431
432 static int b44_setup_phy(struct b44 *bp)
433 {
434         u32 val;
435         int err;
436
437         if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
438                 goto out;
439         if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
440                                 val & MII_ALEDCTRL_ALLMSK)) != 0)
441                 goto out;
442         if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
443                 goto out;
444         if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
445                                 val | MII_TLEDCTRL_ENABLE)) != 0)
446                 goto out;
447
448         if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
449                 u32 adv = ADVERTISE_CSMA;
450
451                 if (bp->flags & B44_FLAG_ADV_10HALF)
452                         adv |= ADVERTISE_10HALF;
453                 if (bp->flags & B44_FLAG_ADV_10FULL)
454                         adv |= ADVERTISE_10FULL;
455                 if (bp->flags & B44_FLAG_ADV_100HALF)
456                         adv |= ADVERTISE_100HALF;
457                 if (bp->flags & B44_FLAG_ADV_100FULL)
458                         adv |= ADVERTISE_100FULL;
459
460                 if (bp->flags & B44_FLAG_PAUSE_AUTO)
461                         adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
462
463                 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
464                         goto out;
465                 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
466                                                        BMCR_ANRESTART))) != 0)
467                         goto out;
468         } else {
469                 u32 bmcr;
470
471                 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
472                         goto out;
473                 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
474                 if (bp->flags & B44_FLAG_100_BASE_T)
475                         bmcr |= BMCR_SPEED100;
476                 if (bp->flags & B44_FLAG_FULL_DUPLEX)
477                         bmcr |= BMCR_FULLDPLX;
478                 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
479                         goto out;
480
481                 /* Since we will not be negotiating there is no safe way
482                  * to determine if the link partner supports flow control
483                  * or not.  So just disable it completely in this case.
484                  */
485                 b44_set_flow_ctrl(bp, 0, 0);
486         }
487
488 out:
489         return err;
490 }
491
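/* Fold the chip's clear-on-read MIB counters into bp->hw_stats.  The
 * counter registers are consecutive 32-bit words laid out in the
 * same order as the fields of struct b44_hw_stats, so a walking
 * pointer suffices.
 */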
492 static void b44_stats_update(struct b44 *bp)
493 {
494         unsigned long reg;
495         u32 *val;
496
497         val = &bp->hw_stats.tx_good_octets;
498         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
499                 *val++ += br32(bp, reg);
500         }
501
502         /* Unused pad registers sit between the TX and RX counter
503            blocks; the RX loop below reloads 'reg', so no skip is needed. */
504
505         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
506                 *val++ += br32(bp, reg);
507         }
508 }
509
510 static void b44_link_report(struct b44 *bp)
511 {
512         if (!netif_carrier_ok(bp->dev)) {
513                 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
514         } else {
515                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
516                        bp->dev->name,
517                        (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
518                        (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
519
520                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
521                        "%s for RX.\n",
522                        bp->dev->name,
523                        (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
524                        (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
525         }
526 }
527
528 static void b44_check_phy(struct b44 *bp)
529 {
530         u32 bmsr, aux;
531
532         if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
533             !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
534             (bmsr != 0xffff)) {
535                 if (aux & MII_AUXCTRL_SPEED)
536                         bp->flags |= B44_FLAG_100_BASE_T;
537                 else
538                         bp->flags &= ~B44_FLAG_100_BASE_T;
539                 if (aux & MII_AUXCTRL_DUPLEX)
540                         bp->flags |= B44_FLAG_FULL_DUPLEX;
541                 else
542                         bp->flags &= ~B44_FLAG_FULL_DUPLEX;
543
544                 if (!netif_carrier_ok(bp->dev) &&
545                     (bmsr & BMSR_LSTATUS)) {
546                         u32 val = br32(bp, B44_TX_CTRL);
547                         u32 local_adv, remote_adv;
548
549                         if (bp->flags & B44_FLAG_FULL_DUPLEX)
550                                 val |= TX_CTRL_DUPLEX;
551                         else
552                                 val &= ~TX_CTRL_DUPLEX;
553                         bw32(bp, B44_TX_CTRL, val);
554
555                         if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
556                             !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
557                             !b44_readphy(bp, MII_LPA, &remote_adv))
558                                 b44_set_flow_ctrl(bp, local_adv, remote_adv);
559
560                         /* Link now up */
561                         netif_carrier_on(bp->dev);
562                         b44_link_report(bp);
563                 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
564                         /* Link now down */
565                         netif_carrier_off(bp->dev);
566                         b44_link_report(bp);
567                 }
568
569                 if (bmsr & BMSR_RFAULT)
570                         printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
571                                bp->dev->name);
572                 if (bmsr & BMSR_JCD)
573                         printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
574                                bp->dev->name);
575         }
576 }
577
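/* Once-a-second timer: poll PHY/link state and harvest the MIB
 * counters under bp->lock, then re-arm.
 */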
578 static void b44_timer(unsigned long __opaque)
579 {
580         struct b44 *bp = (struct b44 *) __opaque;
581
582         spin_lock_irq(&bp->lock);
583
584         b44_check_phy(bp);
585
586         b44_stats_update(bp);
587
588         spin_unlock_irq(&bp->lock);
589
590         bp->timer.expires = jiffies + HZ;
591         add_timer(&bp->timer);
592 }
593
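/* Reclaim completed TX buffers: read the hardware's current
 * descriptor index, unmap and free every skb from the software
 * consumer index up to it, and wake the queue once enough
 * descriptors are free again.
 */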
594 static void b44_tx(struct b44 *bp)
595 {
596         u32 cur, cons;
597
598         cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
599         cur /= sizeof(struct dma_desc);
600
601         /* XXX needs updating when NETIF_F_SG is supported */
602         for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
603                 struct ring_info *rp = &bp->tx_buffers[cons];
604                 struct sk_buff *skb = rp->skb;
605
606                 BUG_ON(skb == NULL);
607
608                 pci_unmap_single(bp->pdev,
609                                  pci_unmap_addr(rp, mapping),
610                                  skb->len,
611                                  PCI_DMA_TODEVICE);
612                 rp->skb = NULL;
613                 dev_kfree_skb_irq(skb);
614         }
615
616         bp->tx_cons = cons;
617         if (netif_queue_stopped(bp->dev) &&
618             TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
619                 netif_wake_queue(bp->dev);
620
621         bw32(bp, B44_GPTIMER, 0);
622 }
623
624 /* Works like this.  This chip writes a "struct rx_header" 30 bytes
625  * before the DMA address you give it.  So we allocate 30 more bytes
626  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
627  * point the chip at 30 bytes past where the rx_header will go.
628  */
629 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
630 {
631         struct dma_desc *dp;
632         struct ring_info *src_map, *map;
633         struct rx_header *rh;
634         struct sk_buff *skb;
635         dma_addr_t mapping;
636         int dest_idx;
637         u32 ctrl;
638
639         src_map = NULL;
640         if (src_idx >= 0)
641                 src_map = &bp->rx_buffers[src_idx];
642         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
643         map = &bp->rx_buffers[dest_idx];
644         skb = dev_alloc_skb(RX_PKT_BUF_SZ);
645         if (skb == NULL)
646                 return -ENOMEM;
647
648         mapping = pci_map_single(bp->pdev, skb->data,
649                                  RX_PKT_BUF_SZ,
650                                  PCI_DMA_FROMDEVICE);
651
652         /* Hardware bug work-around, the chip is unable to do PCI DMA
653            to/from anything above 1GB :-( */
654         if (dma_mapping_error(mapping) ||
655                 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
656                 /* Sigh... */
657                 if (!dma_mapping_error(mapping))
658                         pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
659                 dev_kfree_skb_any(skb);
660                 skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
661                 if (skb == NULL)
662                         return -ENOMEM;
663                 mapping = pci_map_single(bp->pdev, skb->data,
664                                          RX_PKT_BUF_SZ,
665                                          PCI_DMA_FROMDEVICE);
666                 if (dma_mapping_error(mapping) ||
667                         mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
668                         if (!dma_mapping_error(mapping))
669                                 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
670                         dev_kfree_skb_any(skb);
671                         return -ENOMEM;
672                 }
673         }
674
675         skb->dev = bp->dev;
676         skb_reserve(skb, bp->rx_offset);
677
678         rh = (struct rx_header *)
679                 (skb->data - bp->rx_offset);
680         rh->len = 0;
681         rh->flags = 0;
682
683         map->skb = skb;
684         pci_unmap_addr_set(map, mapping, mapping);
685
686         if (src_map != NULL)
687                 src_map->skb = NULL;
688
689         ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
690         if (dest_idx == (B44_RX_RING_SIZE - 1))
691                 ctrl |= DESC_CTRL_EOT;
692
693         dp = &bp->rx_ring[dest_idx];
694         dp->ctrl = cpu_to_le32(ctrl);
695         dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
696
697         if (bp->flags & B44_FLAG_RX_RING_HACK)
698                 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
699                                              dest_idx * sizeof(*dp),
700                                              DMA_BIDIRECTIONAL);
701
702         return RX_PKT_BUF_SZ;
703 }
704
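/* Recycle an already-mapped RX buffer: move the skb and its DMA
 * mapping from src_idx to dest_idx and rewrite the descriptor, so
 * the error and small-packet copy paths avoid a fresh allocation.
 */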
705 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
706 {
707         struct dma_desc *src_desc, *dest_desc;
708         struct ring_info *src_map, *dest_map;
709         struct rx_header *rh;
710         int dest_idx;
711         u32 ctrl;
712
713         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
714         dest_desc = &bp->rx_ring[dest_idx];
715         dest_map = &bp->rx_buffers[dest_idx];
716         src_desc = &bp->rx_ring[src_idx];
717         src_map = &bp->rx_buffers[src_idx];
718
719         dest_map->skb = src_map->skb;
720         rh = (struct rx_header *) src_map->skb->data;
721         rh->len = 0;
722         rh->flags = 0;
723         pci_unmap_addr_set(dest_map, mapping,
724                            pci_unmap_addr(src_map, mapping));
725
726         if (bp->flags & B44_FLAG_RX_RING_HACK)
727                 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
728                                           src_idx * sizeof(*src_desc),
729                                           DMA_BIDIRECTIONAL);
730
731         ctrl = src_desc->ctrl;
732         if (dest_idx == (B44_RX_RING_SIZE - 1))
733                 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
734         else
735                 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
736
737         dest_desc->ctrl = ctrl;
738         dest_desc->addr = src_desc->addr;
739
740         src_map->skb = NULL;
741
742         if (bp->flags & B44_FLAG_RX_RING_HACK)
743                 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
744                                              dest_idx * sizeof(*dest_desc),
745                                              DMA_BIDIRECTIONAL);
746
747         pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
748                                        RX_PKT_BUF_SZ,
749                                        PCI_DMA_FROMDEVICE);
750 }
751
752 static int b44_rx(struct b44 *bp, int budget)
753 {
754         int received;
755         u32 cons, prod;
756
757         received = 0;
758         prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
759         prod /= sizeof(struct dma_desc);
760         cons = bp->rx_cons;
761
762         while (cons != prod && budget > 0) {
763                 struct ring_info *rp = &bp->rx_buffers[cons];
764                 struct sk_buff *skb = rp->skb;
765                 dma_addr_t map = pci_unmap_addr(rp, mapping);
766                 struct rx_header *rh;
767                 u16 len;
768
769                 pci_dma_sync_single_for_cpu(bp->pdev, map,
770                                             RX_PKT_BUF_SZ,
771                                             PCI_DMA_FROMDEVICE);
772                 rh = (struct rx_header *) skb->data;
773                 len = le16_to_cpu(rh->len);
774                 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
775                     (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
776                 drop_it:
777                         b44_recycle_rx(bp, cons, bp->rx_prod);
778                 drop_it_no_recycle:
779                         bp->stats.rx_dropped++;
780                         goto next_pkt;
781                 }
782
783                 if (len == 0) {
784                         int i = 0;
785
786                         do {
787                                 udelay(2);
788                                 barrier();
789                                 len = le16_to_cpu(rh->len);
790                         } while (len == 0 && i++ < 5);
791                         if (len == 0)
792                                 goto drop_it;
793                 }
794
795                 /* Omit CRC. */
796                 len -= 4;
797
798                 if (len > RX_COPY_THRESHOLD) {
799                         int skb_size;
800                         skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
801                         if (skb_size < 0)
802                                 goto drop_it;
803                         pci_unmap_single(bp->pdev, map,
804                                          skb_size, PCI_DMA_FROMDEVICE);
805                         /* Leave out rx_header */
806                         skb_put(skb, len+bp->rx_offset);
807                         skb_pull(skb, bp->rx_offset);
808                 } else {
809                         struct sk_buff *copy_skb;
810
811                         b44_recycle_rx(bp, cons, bp->rx_prod);
812                         copy_skb = dev_alloc_skb(len + 2);
813                         if (copy_skb == NULL)
814                                 goto drop_it_no_recycle;
815
816                         copy_skb->dev = bp->dev;
817                         skb_reserve(copy_skb, 2);
818                         skb_put(copy_skb, len);
819                         /* DMA sync done above, copy just the actual packet */
820                         memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
821
822                         skb = copy_skb;
823                 }
824                 skb->ip_summed = CHECKSUM_NONE;
825                 skb->protocol = eth_type_trans(skb, bp->dev);
826                 netif_receive_skb(skb);
827                 bp->dev->last_rx = jiffies;
828                 received++;
829                 budget--;
830         next_pkt:
831                 bp->rx_prod = (bp->rx_prod + 1) &
832                         (B44_RX_RING_SIZE - 1);
833                 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
834         }
835
836         bp->rx_cons = cons;
837         bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
838
839         return received;
840 }
841
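/* NAPI poll callback: reap TX completions, process up to *budget RX
 * packets, and fully re-initialize the chip on error interrupts.
 * Returns 0 (re-enabling interrupts) when all work is done, 1 to
 * remain on the poll list.
 */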
842 static int b44_poll(struct net_device *netdev, int *budget)
843 {
844         struct b44 *bp = netdev_priv(netdev);
845         int done;
846
847         spin_lock_irq(&bp->lock);
848
849         if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
850                 /* spin_lock(&bp->tx_lock); */
851                 b44_tx(bp);
852                 /* spin_unlock(&bp->tx_lock); */
853         }
854         spin_unlock_irq(&bp->lock);
855
856         done = 1;
857         if (bp->istat & ISTAT_RX) {
858                 int orig_budget = *budget;
859                 int work_done;
860
861                 if (orig_budget > netdev->quota)
862                         orig_budget = netdev->quota;
863
864                 work_done = b44_rx(bp, orig_budget);
865
866                 *budget -= work_done;
867                 netdev->quota -= work_done;
868
869                 if (work_done >= orig_budget)
870                         done = 0;
871         }
872
873         if (bp->istat & ISTAT_ERRORS) {
874                 spin_lock_irq(&bp->lock);
875                 b44_halt(bp);
876                 b44_init_rings(bp);
877                 b44_init_hw(bp);
878                 netif_wake_queue(bp->dev);
879                 spin_unlock_irq(&bp->lock);
880                 done = 1;
881         }
882
883         if (done) {
884                 netif_rx_complete(netdev);
885                 b44_enable_ints(bp);
886         }
887
888         return (done ? 0 : 1);
889 }
890
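/* Interrupt handler: sample ISTAT, mask it against IMASK by hand,
 * and defer all real work to NAPI -- interrupts are disabled here
 * and the latched status is kept in bp->istat for b44_poll().
 */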
891 static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
892 {
893         struct net_device *dev = dev_id;
894         struct b44 *bp = netdev_priv(dev);
895         u32 istat, imask;
896         int handled = 0;
897
898         spin_lock(&bp->lock);
899
900         istat = br32(bp, B44_ISTAT);
901         imask = br32(bp, B44_IMASK);
902
903         /* ??? What is the point of the interrupt mask register if
904          * ??? we still have to mask the status out by hand anyway?
905          */
906         istat &= imask;
907         if (istat) {
908                 handled = 1;
909
910                 if (unlikely(!netif_running(dev))) {
911                         printk(KERN_INFO "%s: late interrupt.\n", dev->name);
912                         goto irq_ack;
913                 }
914
915                 if (netif_rx_schedule_prep(dev)) {
916                         /* NOTE: These writes are flushed by the readback of
917                          *       the ISTAT register below.
918                          */
919                         bp->istat = istat;
920                         __b44_disable_ints(bp);
921                         __netif_rx_schedule(dev);
922                 } else {
923                         printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
924                                dev->name);
925                 }
926
927 irq_ack:
928                 bw32(bp, B44_ISTAT, istat);
929                 br32(bp, B44_ISTAT);
930         }
931         spin_unlock(&bp->lock);
932         return IRQ_RETVAL(handled);
933 }
934
935 static void b44_tx_timeout(struct net_device *dev)
936 {
937         struct b44 *bp = netdev_priv(dev);
938
939         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
940                dev->name);
941
942         spin_lock_irq(&bp->lock);
943
944         b44_halt(bp);
945         b44_init_rings(bp);
946         b44_init_hw(bp);
947
948         spin_unlock_irq(&bp->lock);
949
950         b44_enable_ints(bp);
951
952         netif_wake_queue(dev);
953 }
954
955 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
956 {
957         struct b44 *bp = netdev_priv(dev);
958         struct sk_buff *bounce_skb;
959         int rc = NETDEV_TX_OK;
960         dma_addr_t mapping;
961         u32 len, entry, ctrl;
962
963         len = skb->len;
964         spin_lock_irq(&bp->lock);
965
966         /* This is a hard error, log it. */
967         if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
968                 netif_stop_queue(dev);
969                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
970                        dev->name);
971                 goto err_out;
972         }
973
974         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
975         if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
976                 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
977                 if (!dma_mapping_error(mapping))
978                         pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
979
980                 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
981                                              GFP_ATOMIC|GFP_DMA);
982                 if (!bounce_skb)
983                         goto err_out;
984
985                 mapping = pci_map_single(bp->pdev, bounce_skb->data,
986                                          len, PCI_DMA_TODEVICE);
987                 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
988                         if (!dma_mapping_error(mapping))
989                                 pci_unmap_single(bp->pdev, mapping,
990                                          len, PCI_DMA_TODEVICE);
991                         dev_kfree_skb_any(bounce_skb);
992                         goto err_out;
993                 }
994
995                 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
996                 dev_kfree_skb_any(skb);
997                 skb = bounce_skb;
998         }
999
1000         entry = bp->tx_prod;
1001         bp->tx_buffers[entry].skb = skb;
1002         pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1003
1004         ctrl  = (len & DESC_CTRL_LEN);
1005         ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1006         if (entry == (B44_TX_RING_SIZE - 1))
1007                 ctrl |= DESC_CTRL_EOT;
1008
1009         bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1010         bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1011
1012         if (bp->flags & B44_FLAG_TX_RING_HACK)
1013                 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1014                                              entry * sizeof(bp->tx_ring[0]),
1015                                              DMA_TO_DEVICE);
1016
1017         entry = NEXT_TX(entry);
1018
1019         bp->tx_prod = entry;
1020
1021         wmb();
1022
1023         bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1024         if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1025                 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1026         if (bp->flags & B44_FLAG_REORDER_BUG)
1027                 br32(bp, B44_DMATX_PTR);
1028
1029         if (TX_BUFFS_AVAIL(bp) < 1)
1030                 netif_stop_queue(dev);
1031
1032         dev->trans_start = jiffies;
1033
1034 out_unlock:
1035         spin_unlock_irq(&bp->lock);
1036
1037         return rc;
1038
1039 err_out:
1040         rc = NETDEV_TX_BUSY;
1041         goto out_unlock;
1042 }
1043
1044 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1045 {
1046         struct b44 *bp = netdev_priv(dev);
1047
1048         if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1049                 return -EINVAL;
1050
1051         if (!netif_running(dev)) {
1052                 /* We'll just catch it later when the
1053                  * device is up'd.
1054                  */
1055                 dev->mtu = new_mtu;
1056                 return 0;
1057         }
1058
1059         spin_lock_irq(&bp->lock);
1060         b44_halt(bp);
1061         dev->mtu = new_mtu;
1062         b44_init_rings(bp);
1063         b44_init_hw(bp);
1064         spin_unlock_irq(&bp->lock);
1065
1066         b44_enable_ints(bp);
1067
1068         return 0;
1069 }
1070
1071 /* Free up pending packets in all rx/tx rings.
1072  *
1073  * The chip has been shut down and the driver detached from
1074  * the networking, so no interrupts or new tx packets will
1075  * end up in the driver.  bp->lock is not held and we are not
1076  * in an interrupt context and thus may sleep.
1077  */
1078 static void b44_free_rings(struct b44 *bp)
1079 {
1080         struct ring_info *rp;
1081         int i;
1082
1083         for (i = 0; i < B44_RX_RING_SIZE; i++) {
1084                 rp = &bp->rx_buffers[i];
1085
1086                 if (rp->skb == NULL)
1087                         continue;
1088                 pci_unmap_single(bp->pdev,
1089                                  pci_unmap_addr(rp, mapping),
1090                                  RX_PKT_BUF_SZ,
1091                                  PCI_DMA_FROMDEVICE);
1092                 dev_kfree_skb_any(rp->skb);
1093                 rp->skb = NULL;
1094         }
1095
1096         /* XXX needs changes once NETIF_F_SG is set... */
1097         for (i = 0; i < B44_TX_RING_SIZE; i++) {
1098                 rp = &bp->tx_buffers[i];
1099
1100                 if (rp->skb == NULL)
1101                         continue;
1102                 pci_unmap_single(bp->pdev,
1103                                  pci_unmap_addr(rp, mapping),
1104                                  rp->skb->len,
1105                                  PCI_DMA_TODEVICE);
1106                 dev_kfree_skb_any(rp->skb);
1107                 rp->skb = NULL;
1108         }
1109 }
1110
1111 /* Initialize tx/rx rings for packet processing.
1112  *
1113  * The chip has been shut down and the driver detached from
1114  * the networking, so no interrupts or new tx packets will
1115  * end up in the driver.
1116  */
1117 static void b44_init_rings(struct b44 *bp)
1118 {
1119         int i;
1120
1121         b44_free_rings(bp);
1122
1123         memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1124         memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1125
1126         if (bp->flags & B44_FLAG_RX_RING_HACK)
1127                 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1128                                            DMA_TABLE_BYTES,
1129                                            DMA_BIDIRECTIONAL);
1130
1131         if (bp->flags & B44_FLAG_TX_RING_HACK)
1132                 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1133                                            DMA_TABLE_BYTES,
1134                                            DMA_TO_DEVICE);
1135
1136         for (i = 0; i < bp->rx_pending; i++) {
1137                 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1138                         break;
1139         }
1140 }
1141
1142 /*
1143  * Must not be invoked with interrupt sources disabled and
1144  * the hardware shut down.
1145  */
1146 static void b44_free_consistent(struct b44 *bp)
1147 {
1148         kfree(bp->rx_buffers);
1149         bp->rx_buffers = NULL;
1150         kfree(bp->tx_buffers);
1151         bp->tx_buffers = NULL;
1152         if (bp->rx_ring) {
1153                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1154                         dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1155                                          DMA_TABLE_BYTES,
1156                                          DMA_BIDIRECTIONAL);
1157                         kfree(bp->rx_ring);
1158                 } else
1159                         pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1160                                             bp->rx_ring, bp->rx_ring_dma);
1161                 bp->rx_ring = NULL;
1162                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1163         }
1164         if (bp->tx_ring) {
1165                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1166                         dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1167                                          DMA_TABLE_BYTES,
1168                                          DMA_TO_DEVICE);
1169                         kfree(bp->tx_ring);
1170                 } else
1171                         pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1172                                             bp->tx_ring, bp->tx_ring_dma);
1173                 bp->tx_ring = NULL;
1174                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1175         }
1176 }
1177
1178 /*
1179  * Must not be invoked with interrupt sources disabled and
1180  * the hardware shut down.  Can sleep.
1181  */
1182 static int b44_alloc_consistent(struct b44 *bp)
1183 {
1184         int size;
1185
1186         size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1187         bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1188         if (!bp->rx_buffers)
1189                 goto out_err;
1190
1191         size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1192         bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1193         if (!bp->tx_buffers)
1194                 goto out_err;
1195
1196         size = DMA_TABLE_BYTES;
1197         bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1198         if (!bp->rx_ring) {
1199                 /* Allocation may have failed due to pci_alloc_consistent
1200                    insisting on use of GFP_DMA, which is more restrictive
1201                    than necessary...  */
1202                 struct dma_desc *rx_ring;
1203                 dma_addr_t rx_ring_dma;
1204
1205                 rx_ring = kzalloc(size, GFP_KERNEL);
1206                 if (!rx_ring)
1207                         goto out_err;
1208
1209                 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1210                                              DMA_TABLE_BYTES,
1211                                              DMA_BIDIRECTIONAL);
1212
1213                 if (dma_mapping_error(rx_ring_dma) ||
1214                         rx_ring_dma + size > B44_DMA_MASK) {
1215                         kfree(rx_ring);
1216                         goto out_err;
1217                 }
1218
1219                 bp->rx_ring = rx_ring;
1220                 bp->rx_ring_dma = rx_ring_dma;
1221                 bp->flags |= B44_FLAG_RX_RING_HACK;
1222         }
1223
1224         bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1225         if (!bp->tx_ring) {
1226                 /* Allocation may have failed due to pci_alloc_consistent
1227                    insisting on use of GFP_DMA, which is more restrictive
1228                    than necessary...  */
1229                 struct dma_desc *tx_ring;
1230                 dma_addr_t tx_ring_dma;
1231
1232                 tx_ring = kzalloc(size, GFP_KERNEL);
1233                 if (!tx_ring)
1234                         goto out_err;
1235
1236                 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1237                                              DMA_TABLE_BYTES,
1238                                              DMA_TO_DEVICE);
1239
1240                 if (dma_mapping_error(tx_ring_dma) ||
1241                         tx_ring_dma + size > B44_DMA_MASK) {
1242                         kfree(tx_ring);
1243                         goto out_err;
1244                 }
1245
1246                 bp->tx_ring = tx_ring;
1247                 bp->tx_ring_dma = tx_ring_dma;
1248                 bp->flags |= B44_FLAG_TX_RING_HACK;
1249         }
1250
1251         return 0;
1252
1253 out_err:
1254         b44_free_consistent(bp);
1255         return -ENOMEM;
1256 }
1257
1258 /* bp->lock is held. */
1259 static void b44_clear_stats(struct b44 *bp)
1260 {
1261         unsigned long reg;
1262
1263         bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1264         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1265                 br32(bp, reg);
1266         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1267                 br32(bp, reg);
1268 }
1269
1270 /* bp->lock is held. */
1271 static void b44_chip_reset(struct b44 *bp)
1272 {
1273         if (ssb_is_core_up(bp)) {
1274                 bw32(bp, B44_RCV_LAZY, 0);
1275                 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1276                 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1277                 bw32(bp, B44_DMATX_CTRL, 0);
1278                 bp->tx_prod = bp->tx_cons = 0;
1279                 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1280                         b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1281                                      100, 0);
1282                 }
1283                 bw32(bp, B44_DMARX_CTRL, 0);
1284                 bp->rx_prod = bp->rx_cons = 0;
1285         } else {
1286                 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1287                                    SBINTVEC_ENET0 :
1288                                    SBINTVEC_ENET1));
1289         }
1290
1291         ssb_core_reset(bp);
1292
1293         b44_clear_stats(bp);
1294
1295         /* Make PHY accessible. */
1296         bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1297                              (0x0d & MDIO_CTRL_MAXF_MASK)));
1298         br32(bp, B44_MDIO_CTRL);
1299
1300         if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1301                 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1302                 br32(bp, B44_ENET_CTRL);
1303                 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1304         } else {
1305                 u32 val = br32(bp, B44_DEVCTRL);
1306
1307                 if (val & DEVCTRL_EPR) {
1308                         bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1309                         br32(bp, B44_DEVCTRL);
1310                         udelay(100);
1311                 }
1312                 bp->flags |= B44_FLAG_INTERNAL_PHY;
1313         }
1314 }
1315
1316 /* bp->lock is held. */
1317 static void b44_halt(struct b44 *bp)
1318 {
1319         b44_disable_ints(bp);
1320         b44_chip_reset(bp);
1321 }
1322
1323 /* bp->lock is held. */
1324 static void __b44_set_mac_addr(struct b44 *bp)
1325 {
1326         bw32(bp, B44_CAM_CTRL, 0);
1327         if (!(bp->dev->flags & IFF_PROMISC)) {
1328                 u32 val;
1329
1330                 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1331                 val = br32(bp, B44_CAM_CTRL);
1332                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1333         }
1334 }
1335
1336 static int b44_set_mac_addr(struct net_device *dev, void *p)
1337 {
1338         struct b44 *bp = netdev_priv(dev);
1339         struct sockaddr *addr = p;
1340
1341         if (netif_running(dev))
1342                 return -EBUSY;
1343
1344         if (!is_valid_ether_addr(addr->sa_data))
1345                 return -EINVAL;
1346
1347         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1348
1349         spin_lock_irq(&bp->lock);
1350         __b44_set_mac_addr(bp);
1351         spin_unlock_irq(&bp->lock);
1352
1353         return 0;
1354 }
1355
1356 /* Called at device open time to get the chip ready for
1357  * packet processing.  Invoked with bp->lock held.
1358  */
1359 static void __b44_set_rx_mode(struct net_device *);
1360 static void b44_init_hw(struct b44 *bp)
1361 {
1362         u32 val;
1363
1364         b44_chip_reset(bp);
1365         b44_phy_reset(bp);
1366         b44_setup_phy(bp);
1367
1368         /* Enable CRC32, set proper LED modes and power on PHY */
1369         bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1370         bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1371
1372         /* This sets the MAC address too.  */
1373         __b44_set_rx_mode(bp->dev);
1374
1375         /* MTU + eth header + possible VLAN tag + struct rx_header */
1376         bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1377         bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1378
1379         bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1380         bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1381         bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1382         bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1383                               (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1384         bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1385
1386         bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1387         bp->rx_prod = bp->rx_pending;
1388
1389         bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1390
1391         val = br32(bp, B44_ENET_CTRL);
1392         bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1393 }
1394
1395 static int b44_open(struct net_device *dev)
1396 {
1397         struct b44 *bp = netdev_priv(dev);
1398         int err;
1399
1400         err = b44_alloc_consistent(bp);
1401         if (err)
1402                 goto out;
1403
1404         b44_init_rings(bp);
1405         b44_init_hw(bp);
1406
1407         b44_check_phy(bp);
1408
1409         err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1410         if (unlikely(err < 0)) {
1411                 b44_chip_reset(bp);
1412                 b44_free_rings(bp);
1413                 b44_free_consistent(bp);
1414                 goto out;
1415         }
1416
1417         init_timer(&bp->timer);
1418         bp->timer.expires = jiffies + HZ;
1419         bp->timer.data = (unsigned long) bp;
1420         bp->timer.function = b44_timer;
1421         add_timer(&bp->timer);
1422
1423         b44_enable_ints(bp);
1424         netif_start_queue(dev);
1425 out:
1426         return err;
1427 }
1428
1429 #if 0
1430 /*static*/ void b44_dump_state(struct b44 *bp)
1431 {
1432         u32 val32, val32_2, val32_3, val32_4, val32_5;
1433         u16 val16;
1434
1435         pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1436         printk("DEBUG: PCI status [%04x] \n", val16);
1437
1438 }
1439 #endif
1440
1441 #ifdef CONFIG_NET_POLL_CONTROLLER
1442 /*
1443  * Polling receive - used by netconsole and other diagnostic tools
1444  * to allow network i/o with interrupts disabled.
1445  */
1446 static void b44_poll_controller(struct net_device *dev)
1447 {
1448         disable_irq(dev->irq);
1449         b44_interrupt(dev->irq, dev, NULL);
1450         enable_irq(dev->irq);
1451 }
1452 #endif
1453
1454 static int b44_close(struct net_device *dev)
1455 {
1456         struct b44 *bp = netdev_priv(dev);
1457
1458         netif_stop_queue(dev);
1459
1460         netif_poll_disable(dev);
1461
1462         del_timer_sync(&bp->timer);
1463
1464         spin_lock_irq(&bp->lock);
1465
1466 #if 0
1467         b44_dump_state(bp);
1468 #endif
1469         b44_halt(bp);
1470         b44_free_rings(bp);
1471         netif_carrier_off(dev);
1472
1473         spin_unlock_irq(&bp->lock);
1474
1475         free_irq(dev->irq, dev);
1476
1477         netif_poll_enable(dev);
1478
1479         b44_free_consistent(bp);
1480
1481         return 0;
1482 }
1483
1484 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1485 {
1486         struct b44 *bp = netdev_priv(dev);
1487         struct net_device_stats *nstat = &bp->stats;
1488         struct b44_hw_stats *hwstat = &bp->hw_stats;
1489
1490         /* Convert HW stats into netdevice stats. */
1491         nstat->rx_packets = hwstat->rx_pkts;
1492         nstat->tx_packets = hwstat->tx_pkts;
1493         nstat->rx_bytes   = hwstat->rx_octets;
1494         nstat->tx_bytes   = hwstat->tx_octets;
1495         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1496                              hwstat->tx_oversize_pkts +
1497                              hwstat->tx_underruns +
1498                              hwstat->tx_excessive_cols +
1499                              hwstat->tx_late_cols);
1500         nstat->multicast  = hwstat->rx_multicast_pkts;
1501         nstat->collisions = hwstat->tx_total_cols;
1502
1503         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1504                                    hwstat->rx_undersize);
1505         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1506         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1507         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1508         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1509                                    hwstat->rx_oversize_pkts +
1510                                    hwstat->rx_missed_pkts +
1511                                    hwstat->rx_crc_align_errs +
1512                                    hwstat->rx_undersize +
1513                                    hwstat->rx_crc_errs +
1514                                    hwstat->rx_align_errs +
1515                                    hwstat->rx_symbol_errs);
1516
1517         nstat->tx_aborted_errors = hwstat->tx_underruns;
1518 #if 0
1519         /* Carrier lost counter seems to be broken for some devices */
1520         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1521 #endif
1522
1523         return nstat;
1524 }
1525
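/* Load up to B44_MCAST_TABLE_SIZE multicast addresses into CAM slots
 * 1..n (slot 0 holds the unicast address).  Returns the index of the
 * first unused slot.
 */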
1526 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1527 {
1528         struct dev_mc_list *mclist;
1529         int i, num_ents;
1530
1531         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1532         mclist = dev->mc_list;
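        /* CAM entry 0 holds the unicast address (written by
         * __b44_set_mac_addr()), so multicast entries start at index 1.
         * Return the next free CAM index so the caller can zero out the
         * remainder of the table.
         */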
1533         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1534                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1535         }
1536         return i + 1;
1537 }
1538
1539 static void __b44_set_rx_mode(struct net_device *dev)
1540 {
1541         struct b44 *bp = netdev_priv(dev);
1542         u32 val;
1543
1544         val = br32(bp, B44_RXCONFIG);
1545         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1546         if (dev->flags & IFF_PROMISC) {
1547                 val |= RXCONFIG_PROMISC;
1548                 bw32(bp, B44_RXCONFIG, val);
1549         } else {
1550                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1551                 int i = 0;
1552
1553                 __b44_set_mac_addr(bp);
1554
1555                 if (dev->flags & IFF_ALLMULTI)
1556                         val |= RXCONFIG_ALLMULTI;
1557                 else
1558                         i = __b44_load_mcast(bp, dev);
1559
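                /* Clear every CAM entry not claimed above so stale
                 * addresses cannot match; the CAM holds 64 entries.
                 */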
1560                 for (; i < 64; i++) {
1561                         __b44_cam_write(bp, zero, i);
1562                 }
1563                 bw32(bp, B44_RXCONFIG, val);
1564                 val = br32(bp, B44_CAM_CTRL);
1565                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1566         }
1567 }
1568
1569 static void b44_set_rx_mode(struct net_device *dev)
1570 {
1571         struct b44 *bp = netdev_priv(dev);
1572
1573         spin_lock_irq(&bp->lock);
1574         __b44_set_rx_mode(dev);
1575         spin_unlock_irq(&bp->lock);
1576 }
1577
1578 static u32 b44_get_msglevel(struct net_device *dev)
1579 {
1580         struct b44 *bp = netdev_priv(dev);
1581         return bp->msg_enable;
1582 }
1583
1584 static void b44_set_msglevel(struct net_device *dev, u32 value)
1585 {
1586         struct b44 *bp = netdev_priv(dev);
1587         bp->msg_enable = value;
1588 }
1589
1590 static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1591 {
1592         struct b44 *bp = netdev_priv(dev);
1593         struct pci_dev *pci_dev = bp->pdev;
1594
1595         strcpy(info->driver, DRV_MODULE_NAME);
1596         strcpy(info->version, DRV_MODULE_VERSION);
1597         strcpy(info->bus_info, pci_name(pci_dev));
1598 }
1599
1600 static int b44_nway_reset(struct net_device *dev)
1601 {
1602         struct b44 *bp = netdev_priv(dev);
1603         u32 bmcr;
1604         int r;
1605
1606         spin_lock_irq(&bp->lock);
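        /* BMCR is read twice; presumably the first read flushes a stale
         * value so the test below sees the PHY's current
         * autonegotiation state.
         */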
1607         b44_readphy(bp, MII_BMCR, &bmcr);
1608         b44_readphy(bp, MII_BMCR, &bmcr);
1609         r = -EINVAL;
1610         if (bmcr & BMCR_ANENABLE) {
1611                 b44_writephy(bp, MII_BMCR,
1612                              bmcr | BMCR_ANRESTART);
1613                 r = 0;
1614         }
1615         spin_unlock_irq(&bp->lock);
1616
1617         return r;
1618 }
1619
1620 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1621 {
1622         struct b44 *bp = netdev_priv(dev);
1623
1624         if (!netif_running(dev))
1625                 return -EAGAIN;
1626         cmd->supported = SUPPORTED_Autoneg;
1627         cmd->supported |= (SUPPORTED_100baseT_Half |
1628                            SUPPORTED_100baseT_Full |
1629                            SUPPORTED_10baseT_Half |
1630                            SUPPORTED_10baseT_Full |
1631                            SUPPORTED_MII);
1632
1633         cmd->advertising = 0;
1634         if (bp->flags & B44_FLAG_ADV_10HALF)
1635                 cmd->advertising |= ADVERTISED_10baseT_Half;
1636         if (bp->flags & B44_FLAG_ADV_10FULL)
1637                 cmd->advertising |= ADVERTISED_10baseT_Full;
1638         if (bp->flags & B44_FLAG_ADV_100HALF)
1639                 cmd->advertising |= ADVERTISED_100baseT_Half;
1640         if (bp->flags & B44_FLAG_ADV_100FULL)
1641                 cmd->advertising |= ADVERTISED_100baseT_Full;
1642         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1643         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1644                 SPEED_100 : SPEED_10;
1645         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1646                 DUPLEX_FULL : DUPLEX_HALF;
1647         cmd->port = 0;
1648         cmd->phy_address = bp->phy_addr;
1649         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1650                 XCVR_INTERNAL : XCVR_EXTERNAL;
1651         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1652                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1653         cmd->maxtxpkt = 0;
1654         cmd->maxrxpkt = 0;
1655         return 0;
1656 }
1657
1658 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1659 {
1660         struct b44 *bp = netdev_priv(dev);
1661
1662         if (!netif_running(dev))
1663                 return -EAGAIN;
1664
1665         /* We do not support gigabit. */
1666         if (cmd->autoneg == AUTONEG_ENABLE) {
1667                 if (cmd->advertising &
1668                     (ADVERTISED_1000baseT_Half |
1669                      ADVERTISED_1000baseT_Full))
1670                         return -EINVAL;
1671         } else if ((cmd->speed != SPEED_100 &&
1672                     cmd->speed != SPEED_10) ||
1673                    (cmd->duplex != DUPLEX_HALF &&
1674                     cmd->duplex != DUPLEX_FULL)) {
1675                         return -EINVAL;
1676         }
1677
1678         spin_lock_irq(&bp->lock);
1679
1680         if (cmd->autoneg == AUTONEG_ENABLE) {
1681                 bp->flags &= ~B44_FLAG_FORCE_LINK;
1682                 bp->flags &= ~(B44_FLAG_ADV_10HALF |
1683                                B44_FLAG_ADV_10FULL |
1684                                B44_FLAG_ADV_100HALF |
1685                                B44_FLAG_ADV_100FULL);
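                /* Note: cmd->advertising carries ethtool ADVERTISED_*
                 * bits, not MII ADVERTISE_* register bits.
                 */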
1686                 if (cmd->advertising & ADVERTISED_10baseT_Half)
1687                         bp->flags |= B44_FLAG_ADV_10HALF;
1688                 if (cmd->advertising & ADVERTISED_10baseT_Full)
1689                         bp->flags |= B44_FLAG_ADV_10FULL;
1690                 if (cmd->advertising & ADVERTISED_100baseT_Half)
1691                         bp->flags |= B44_FLAG_ADV_100HALF;
1692                 if (cmd->advertising & ADVERTISED_100baseT_Full)
1693                         bp->flags |= B44_FLAG_ADV_100FULL;
1694         } else {
1695                 bp->flags |= B44_FLAG_FORCE_LINK;
1696                 if (cmd->speed == SPEED_100)
1697                         bp->flags |= B44_FLAG_100_BASE_T;
1698                 if (cmd->duplex == DUPLEX_FULL)
1699                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1700         }
1701
1702         b44_setup_phy(bp);
1703
1704         spin_unlock_irq(&bp->lock);
1705
1706         return 0;
1707 }
1708
1709 static void b44_get_ringparam(struct net_device *dev,
1710                               struct ethtool_ringparam *ering)
1711 {
1712         struct b44 *bp = netdev_priv(dev);
1713
1714         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1715         ering->rx_pending = bp->rx_pending;
1716         ering->tx_max_pending = B44_TX_RING_SIZE - 1;
1717         ering->tx_pending = bp->tx_pending;
1718 }
1719
1720 static int b44_set_ringparam(struct net_device *dev,
1721                              struct ethtool_ringparam *ering)
1722 {
1723         struct b44 *bp = netdev_priv(dev);
1724
1725         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1726             (ering->rx_mini_pending != 0) ||
1727             (ering->rx_jumbo_pending != 0) ||
1728             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1729                 return -EINVAL;
1730
1731         spin_lock_irq(&bp->lock);
1732
1733         bp->rx_pending = ering->rx_pending;
1734         bp->tx_pending = ering->tx_pending;
1735
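        /* New ring sizes only take effect through a full restart: halt
         * DMA, rebuild both rings at the new lengths, then re-program
         * the hardware before waking the queue.
         */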
1736         b44_halt(bp);
1737         b44_init_rings(bp);
1738         b44_init_hw(bp);
1739         netif_wake_queue(bp->dev);
1740         spin_unlock_irq(&bp->lock);
1741
1742         b44_enable_ints(bp);
1743
1744         return 0;
1745 }
1746
1747 static void b44_get_pauseparam(struct net_device *dev,
1748                                 struct ethtool_pauseparam *epause)
1749 {
1750         struct b44 *bp = netdev_priv(dev);
1751
1752         epause->autoneg =
1753                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1754         epause->rx_pause =
1755                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1756         epause->tx_pause =
1757                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1758 }
1759
1760 static int b44_set_pauseparam(struct net_device *dev,
1761                                 struct ethtool_pauseparam *epause)
1762 {
1763         struct b44 *bp = netdev_priv(dev);
1764
1765         spin_lock_irq(&bp->lock);
1766         if (epause->autoneg)
1767                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1768         else
1769                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1770         if (epause->rx_pause)
1771                 bp->flags |= B44_FLAG_RX_PAUSE;
1772         else
1773                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1774         if (epause->tx_pause)
1775                 bp->flags |= B44_FLAG_TX_PAUSE;
1776         else
1777                 bp->flags &= ~B44_FLAG_TX_PAUSE;
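        /* Autonegotiated pause settings require a link restart to take
         * effect; forced settings can be written straight to the MAC
         * via __b44_set_flow_ctrl().
         */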
1778         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1779                 b44_halt(bp);
1780                 b44_init_rings(bp);
1781                 b44_init_hw(bp);
1782         } else {
1783                 __b44_set_flow_ctrl(bp, bp->flags);
1784         }
1785         spin_unlock_irq(&bp->lock);
1786
1787         b44_enable_ints(bp);
1788
1789         return 0;
1790 }
1791
1792 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1793 {
1794         switch (stringset) {
1795         case ETH_SS_STATS:
1796                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1797                 break;
1798         }
1799 }
1800
1801 static int b44_get_stats_count(struct net_device *dev)
1802 {
1803         return ARRAY_SIZE(b44_gstrings);
1804 }
1805
1806 static void b44_get_ethtool_stats(struct net_device *dev,
1807                                   struct ethtool_stats *stats, u64 *data)
1808 {
1809         struct b44 *bp = netdev_priv(dev);
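        /* This walk assumes struct b44_hw_stats is a contiguous array
         * of u32 counters laid out in the same order as b44_gstrings,
         * starting at tx_good_octets.
         */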
1810         u32 *val = &bp->hw_stats.tx_good_octets;
1811         u32 i;
1812
1813         spin_lock_irq(&bp->lock);
1814
1815         b44_stats_update(bp);
1816
1817         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1818                 *data++ = *val++;
1819
1820         spin_unlock_irq(&bp->lock);
1821 }
1822
1823 static struct ethtool_ops b44_ethtool_ops = {
1824         .get_drvinfo            = b44_get_drvinfo,
1825         .get_settings           = b44_get_settings,
1826         .set_settings           = b44_set_settings,
1827         .nway_reset             = b44_nway_reset,
1828         .get_link               = ethtool_op_get_link,
1829         .get_ringparam          = b44_get_ringparam,
1830         .set_ringparam          = b44_set_ringparam,
1831         .get_pauseparam         = b44_get_pauseparam,
1832         .set_pauseparam         = b44_set_pauseparam,
1833         .get_msglevel           = b44_get_msglevel,
1834         .set_msglevel           = b44_set_msglevel,
1835         .get_strings            = b44_get_strings,
1836         .get_stats_count        = b44_get_stats_count,
1837         .get_ethtool_stats      = b44_get_ethtool_stats,
1838         .get_perm_addr          = ethtool_op_get_perm_addr,
1839 };
1840
1841 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1842 {
1843         struct mii_ioctl_data *data = if_mii(ifr);
1844         struct b44 *bp = netdev_priv(dev);
1845         int err = -EINVAL;
1846
1847         if (!netif_running(dev))
1848                 goto out;
1849
1850         spin_lock_irq(&bp->lock);
1851         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1852         spin_unlock_irq(&bp->lock);
1853 out:
1854         return err;
1855 }
1856
1857 /* Read 128 bytes of EEPROM. */
1858 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1859 {
1860         long i;
1861         u16 *ptr = (u16 *) data;
1862
1863         for (i = 0; i < 128; i += 2)
1864                 ptr[i / 2] = readw(bp->regs + 4096 + i);
1865
1866         return 0;
1867 }
1868
1869 static int __devinit b44_get_invariants(struct b44 *bp)
1870 {
1871         u8 eeprom[128];
1872         int err;
1873
1874         err = b44_read_eeprom(bp, &eeprom[0]);
1875         if (err)
1876                 goto out;
1877
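        /* The EEPROM is read as little-endian 16-bit words, so each
         * byte pair of the MAC address comes back swapped; unswap while
         * copying.
         */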
1878         bp->dev->dev_addr[0] = eeprom[79];
1879         bp->dev->dev_addr[1] = eeprom[78];
1880         bp->dev->dev_addr[2] = eeprom[81];
1881         bp->dev->dev_addr[3] = eeprom[80];
1882         bp->dev->dev_addr[4] = eeprom[83];
1883         bp->dev->dev_addr[5] = eeprom[82];
1884
1885         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
1886                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1887                 return -EINVAL;
1888         }
1889
1890         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1891
1892         bp->phy_addr = eeprom[90] & 0x1f;
1893
1894         /* With this, plus the rx_header prepended to the data by the
1895          * hardware, we'll land the ethernet header on a 2-byte boundary.
1896          */
1897         bp->rx_offset = 30;
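        /* (30 = 28-byte rx_header + 2 bytes of pad, assuming
         * RX_HEADER_LEN is 28: the 14-byte Ethernet header then ends on
         * a 4-byte boundary, word-aligning the IP header behind it.)
         */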
1898
1899         bp->imask = IMASK_DEF;
1900
1901         bp->core_unit = ssb_core_unit(bp);
1902         bp->dma_offset = SB_PCI_DMA;
1903
1904         /* XXX - really required?
1905            bp->flags |= B44_FLAG_BUGGY_TXPTR;
1906          */
1907 out:
1908         return err;
1909 }
1910
1911 static int __devinit b44_init_one(struct pci_dev *pdev,
1912                                   const struct pci_device_id *ent)
1913 {
1914         static int b44_version_printed = 0;
1915         unsigned long b44reg_base, b44reg_len;
1916         struct net_device *dev;
1917         struct b44 *bp;
1918         int err, i;
1919
1920         if (b44_version_printed++ == 0)
1921                 printk(KERN_INFO "%s", version);
1922
1923         err = pci_enable_device(pdev);
1924         if (err) {
1925                 printk(KERN_ERR PFX "Cannot enable PCI device, "
1926                        "aborting.\n");
1927                 return err;
1928         }
1929
1930         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1931                 printk(KERN_ERR PFX "Cannot find proper PCI device "
1932                        "base address, aborting.\n");
1933                 err = -ENODEV;
1934                 goto err_out_disable_pdev;
1935         }
1936
1937         err = pci_request_regions(pdev, DRV_MODULE_NAME);
1938         if (err) {
1939                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
1940                        "aborting.\n");
1941                 goto err_out_disable_pdev;
1942         }
1943
1944         pci_set_master(pdev);
1945
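        /* The 4400 core can only generate DMA addresses in the low 1GB
         * of memory (B44_DMA_MASK is 30 bits wide), so both the
         * streaming and consistent masks must be restricted to it.
         */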
1946         err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
1947         if (err) {
1948                 printk(KERN_ERR PFX "No usable DMA configuration, "
1949                        "aborting.\n");
1950                 goto err_out_free_res;
1951         }
1952
1953         err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
1954         if (err) {
1955                 printk(KERN_ERR PFX "No usable consistent DMA "
1956                        "configuration, aborting.\n");
1957                 goto err_out_free_res;
1958         }
1959
1960         b44reg_base = pci_resource_start(pdev, 0);
1961         b44reg_len = pci_resource_len(pdev, 0);
1962
1963         dev = alloc_etherdev(sizeof(*bp));
1964         if (!dev) {
1965                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1966                 err = -ENOMEM;
1967                 goto err_out_free_res;
1968         }
1969
1970         SET_MODULE_OWNER(dev);
1971         SET_NETDEV_DEV(dev, &pdev->dev);
1972
1973         /* No interesting netdevice features in this card... */
1974         dev->features |= 0;
1975
1976         bp = netdev_priv(dev);
1977         bp->pdev = pdev;
1978         bp->dev = dev;
1979
1980         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1981
1982         spin_lock_init(&bp->lock);
1983
1984         bp->regs = ioremap(b44reg_base, b44reg_len);
1985         if (!bp->regs) {
1986                 printk(KERN_ERR PFX "Cannot map device registers, "
1987                        "aborting.\n");
1988                 err = -ENOMEM;
1989                 goto err_out_free_dev;
1990         }
1991
1992         bp->rx_pending = B44_DEF_RX_RING_PENDING;
1993         bp->tx_pending = B44_DEF_TX_RING_PENDING;
1994
1995         dev->open = b44_open;
1996         dev->stop = b44_close;
1997         dev->hard_start_xmit = b44_start_xmit;
1998         dev->get_stats = b44_get_stats;
1999         dev->set_multicast_list = b44_set_rx_mode;
2000         dev->set_mac_address = b44_set_mac_addr;
2001         dev->do_ioctl = b44_ioctl;
2002         dev->tx_timeout = b44_tx_timeout;
2003         dev->poll = b44_poll;
2004         dev->weight = 64;
2005         dev->watchdog_timeo = B44_TX_TIMEOUT;
2006 #ifdef CONFIG_NET_POLL_CONTROLLER
2007         dev->poll_controller = b44_poll_controller;
2008 #endif
2009         dev->change_mtu = b44_change_mtu;
2010         dev->irq = pdev->irq;
2011         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2012
2013         netif_carrier_off(dev);
2014
2015         err = b44_get_invariants(bp);
2016         if (err) {
2017                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
2018                        "aborting.\n");
2019                 goto err_out_iounmap;
2020         }
2021
2022         bp->mii_if.dev = dev;
2023         bp->mii_if.mdio_read = b44_mii_read;
2024         bp->mii_if.mdio_write = b44_mii_write;
2025         bp->mii_if.phy_id = bp->phy_addr;
2026         bp->mii_if.phy_id_mask = 0x1f;
2027         bp->mii_if.reg_num_mask = 0x1f;
2028
2029         /* By default, advertise all speed/duplex settings. */
2030         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2031                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2032
2033         /* By default, auto-negotiate PAUSE. */
2034         bp->flags |= B44_FLAG_PAUSE_AUTO;
2035
2036         err = register_netdev(dev);
2037         if (err) {
2038                 printk(KERN_ERR PFX "Cannot register net device, "
2039                        "aborting.\n");
2040                 goto err_out_iounmap;
2041         }
2042
2043         pci_set_drvdata(pdev, dev);
2044
2045         pci_save_state(bp->pdev);
2046
2047         /* Chip reset provides power to the b44 MAC & PCI cores, which
2048          * is necessary for MAC register access.
2049          */
2050         b44_chip_reset(bp);
2051
2052         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2053         for (i = 0; i < 6; i++)
2054                 printk("%2.2x%c", dev->dev_addr[i],
2055                        i == 5 ? '\n' : ':');
2056
2057         return 0;
2058
2059 err_out_iounmap:
2060         iounmap(bp->regs);
2061
2062 err_out_free_dev:
2063         free_netdev(dev);
2064
2065 err_out_free_res:
2066         pci_release_regions(pdev);
2067
2068 err_out_disable_pdev:
2069         pci_disable_device(pdev);
2070         pci_set_drvdata(pdev, NULL);
2071         return err;
2072 }
2073
2074 static void __devexit b44_remove_one(struct pci_dev *pdev)
2075 {
2076         struct net_device *dev = pci_get_drvdata(pdev);
2077         struct b44 *bp = netdev_priv(dev);
2078
2079         unregister_netdev(dev);
2080         iounmap(bp->regs);
2081         free_netdev(dev);
2082         pci_release_regions(pdev);
2083         pci_disable_device(pdev);
2084         pci_set_drvdata(pdev, NULL);
2085 }
2086
2087 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2088 {
2089         struct net_device *dev = pci_get_drvdata(pdev);
2090         struct b44 *bp = netdev_priv(dev);
2091
2092         if (!netif_running(dev))
2093                 return 0;
2094
2095         del_timer_sync(&bp->timer);
2096
2097         spin_lock_irq(&bp->lock);
2098
2099         b44_halt(bp);
2100         netif_carrier_off(bp->dev);
2101         netif_device_detach(bp->dev);
2102         b44_free_rings(bp);
2103
2104         spin_unlock_irq(&bp->lock);
2105
2106         free_irq(dev->irq, dev);
2107         pci_disable_device(pdev);
2108         return 0;
2109 }
2110
2111 static int b44_resume(struct pci_dev *pdev)
2112 {
2113         struct net_device *dev = pci_get_drvdata(pdev);
2114         struct b44 *bp = netdev_priv(dev);
2115         int rc;
2116
2117         pci_restore_state(pdev);
2118         /* Re-enabling the device can fail after a suspend/resume
2119          * cycle; propagate the error instead of ignoring it.
2120          */
2121         rc = pci_enable_device(pdev);
2122         if (rc) {
2123                 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2124                        dev->name);
2125                 return rc;
2126         }
2127         pci_set_master(pdev);
2119
2120         if (!netif_running(dev))
2121                 return 0;
2122
2123         rc = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
2124         if (rc) {
2125                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2126                 pci_disable_device(pdev);
2127                 return rc;
2128         }
2125
2126         spin_lock_irq(&bp->lock);
2127
2128         b44_init_rings(bp);
2129         b44_init_hw(bp);
2130         netif_device_attach(bp->dev);
2131         spin_unlock_irq(&bp->lock);
2132
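        /* Re-arm the driver's once-a-second PHY/link poll timer, which
         * was stopped with del_timer_sync() on suspend.
         */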
2133         bp->timer.expires = jiffies + HZ;
2134         add_timer(&bp->timer);
2135
2136         b44_enable_ints(bp);
2137         netif_wake_queue(dev);
2138         return 0;
2139 }
2140
2141 static struct pci_driver b44_driver = {
2142         .name           = DRV_MODULE_NAME,
2143         .id_table       = b44_pci_tbl,
2144         .probe          = b44_init_one,
2145         .remove         = __devexit_p(b44_remove_one),
2146         .suspend        = b44_suspend,
2147         .resume         = b44_resume,
2148 };
2149
2150 static int __init b44_init(void)
2151 {
2152         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2153
2154         /* Set up parameters for syncing RX/TX DMA descriptors */
2155         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2156         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
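        /* Worked example, assuming a 32-byte cache line:
         *   dma_desc_align_mask = ~(32 - 1) = 0xffffffe0
         *   dma_desc_sync_size  = max(32, sizeof(struct dma_desc))
         * so syncing one descriptor never splits a cache line.
         */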
2157
2158         return pci_module_init(&b44_driver);
2159 }
2160
2161 static void __exit b44_cleanup(void)
2162 {
2163         pci_unregister_driver(&b44_driver);
2164 }
2165
2166 module_init(b44_init);
2167 module_exit(b44_cleanup);
2168