/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>


static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
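
/* Ring index helpers. Slots wrap around at nr_slots. A TX ring starts
 * out with current_slot == -1 (see bcm43xx_setup_dmaring), so the first
 * request_slot() call below yields next_slot(ring, -1) == 0. */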

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->suspended);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	/* Check the number of available slots and suspend TX,
	 * if we are running low on free slots.
	 */
	if (unlikely(free_slots(ring) < ring->suspend_mark)) {
		netif_stop_queue(ring->bcm->net_dev);
		ring->suspended = 1;
	}
#ifdef CONFIG_BCM43XX_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

	return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;

	/* Check if TX is suspended and check if we have
	 * enough free slots to resume it again.
	 */
	if (unlikely(ring->suspended)) {
		if (free_slots(ring) >= ring->resume_mark) {
			ring->suspended = 0;
			netif_wake_queue(ring->bcm->net_dev);
		}
	}
}
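
/* request_slot() and return_slot() implement flow control with a
 * hysteresis: the net queue is stopped once fewer than suspend_mark
 * slots are free, but only woken again once at least resume_mark slots
 * are free. Since suspend_mark < resume_mark (asserted in
 * bcm43xx_setup_dmaring), the queue does not bounce on and off for
 * every single completed descriptor. */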

static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_TO_DEVICE);
	} else {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}
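
/* The two sync helpers above pass ownership of a streaming RX mapping
 * back and forth: dma_rx() syncs a buffer for_cpu before reading it and
 * syncs it for_device again whenever the buffer is recycled to the
 * hardware instead of being passed up the stack. */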

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc *desc,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	assert(meta->skb);
	if (irq_context)
		dev_kfree_skb_irq(meta->skb);
	else
		dev_kfree_skb(meta->skb);
	meta->skb = NULL;
}

static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
					 &(ring->dmabase), GFP_KERNEL);
	if (!ring->vbase) {
		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RINGMEMORY >1G "
				    "(0x%llx, len: %lu)\n",
				(unsigned long long)ring->dmabase,
				BCM43xx_DMA_RINGMEMSIZE);
		dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
				  ring->vbase, ring->dmabase);
		return -ENOMEM;
	}
	assert(!(ring->dmabase & 0x000003FF));
	memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
}
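
/* Two sanity checks above: BCM43xx_DMA_BUSADDRMAX reflects the 1GB
 * window the DMA engine can address (hence the ">1G" in the error
 * message), and the 0x000003FF mask check asserts that the ring base
 * is aligned to 1024 bytes. */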

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->vbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base)
{
	int i;
	u32 value;

	bcm43xx_write32(bcm,
			mmio_base + BCM43xx_DMA_RX_CONTROL,
			0x00000000);
	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_RX_STATUS);
		value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
			i = -1;
			break;
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base)
{
	int i;
	u32 value;

	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_TX_STATUS);
		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
		    value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
		    value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
			break;
		udelay(10);
	}
	bcm43xx_write32(bcm,
			mmio_base + BCM43xx_DMA_TX_CONTROL,
			0x00000000);
	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_TX_STATUS);
		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
			i = -1;
			break;
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	udelay(300);

	return 0;
}

static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr *rxhdr;
	dma_addr_t dmaaddr;
	u32 desc_addr;
	u32 desc_ctl;
	const int slot = (int)(desc - ring->vbase);
	struct sk_buff *skb;

	assert(slot >= 0 && slot < ring->nr_slots);
	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
		unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb_any(skb);
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RX SKB >1G "
				    "(0x%llx, len: %u)\n",
			(unsigned long long)dmaaddr, ring->rx_buffersize);
		return -ENOMEM;
	}
	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	skb->dev = ring->bcm->net_dev;
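	/* Program the hardware descriptor. The device sees the buffer
	 * through the memoffset translation, and the byte count is the
	 * buffer size minus the frameoffset bytes at the head of the
	 * buffer, where the device places its RX header in front of the
	 * frame (see dma_rx(), which pulls those bytes off again). */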
	desc_addr = (u32)(dmaaddr + ring->memoffset);
	desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
		    (u32)(ring->rx_buffersize - ring->frameoffset));
	if (slot == ring->nr_slots - 1)
		desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
	set_desc_addr(desc, desc_addr);
	set_desc_ctl(desc, desc_ctl);

	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
	rxhdr->frame_length = 0;
	rxhdr->flags1 = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err)
			goto err_unwind;
	}
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;

	if (ring->tx) {
		/* Set Transmit Control register to "transmit enable" */
		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
				  BCM43xx_DMA_TXCTRL_ENABLE);
		/* Set Transmit Descriptor ring address. */
		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING,
				  ring->dmabase + ring->memoffset);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		/* Set Receive Control "receive enable" and frame offset */
		value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
		value |= BCM43xx_DMA_RXCTRL_ENABLE;
		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value);
		/* Set Receive Descriptor ring address. */
		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING,
				  ring->dmabase + ring->memoffset);
		/* Init the descriptor pointer. */
		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
		/* Zero out Transmit Descriptor ring address. */
		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
		/* Zero out Receive Descriptor ring address. */
		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0);
	}
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, desc, meta, 0);
	}
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
					       u16 dma_controller_base,
					       int nr_descriptor_slots,
					       int tx)
{
	struct bcm43xx_dmaring *ring;
	int err;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

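	/* memoffset is added to every host bus address (ring base and
	 * buffer addresses) before it is handed to the device, because
	 * the core sees host memory through a shifted address window.
	 * On BCM947xx SoCs the chip-internal bus (bus 0) needs no such
	 * translation. */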
	ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
#ifdef CONFIG_BCM947XX
	if (bcm->pci_dev->bus->number == 0)
		ring->memoffset = 0;
#endif

	ring->bcm = bcm;
	ring->nr_slots = nr_descriptor_slots;
	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
	assert(ring->suspend_mark < ring->resume_mark);
	ring->mmio_base = dma_controller_base;
	if (tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		switch (dma_controller_base) {
		case BCM43xx_MMIO_DMA1_BASE:
			ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
			break;
		case BCM43xx_MMIO_DMA4_BASE:
			ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
			break;
		default:
			assert(0);
		}
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_meta;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with the RX handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->meta);
	kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma;

	if (bcm43xx_using_pio(bcm))
		return;
	dma = bcm43xx_current_dma(bcm);

	bcm43xx_destroy_dmaring(dma->rx_ring1);
	dma->rx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring;
	int err = -ENOMEM;

	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
				     BCM43xx_RXRING_SLOTS, 0);
	if (!ring)
		goto err_destroy_tx3;
	dma->rx_ring0 = ring;

	if (bcm->current_core->rev < 5) {
		ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
					     BCM43xx_RXRING_SLOTS, 0);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring1 = ring;
	}

	dprintk(KERN_INFO PFX "DMA initialized\n");
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0xF000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 */
	switch (ring->mmio_base) {
	default:
		assert(0);
	case BCM43xx_MMIO_DMA1_BASE:
		cookie = 0xA000;
		break;
	case BCM43xx_MMIO_DMA2_BASE:
		cookie = 0xB000;
		break;
	case BCM43xx_MMIO_DMA3_BASE:
		cookie = 0xC000;
		break;
	case BCM43xx_MMIO_DMA4_BASE:
		cookie = 0xD000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}
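
/* For example, a frame queued on tx_ring2 (BCM43xx_MMIO_DMA3_BASE) in
 * slot 5 gets cookie 0xC005; parse_cookie() below inverts this mapping
 * when the xmit status for that frame comes back from the device. */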

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(*slot >= 0 && *slot < ring->nr_slots);

	return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
				  int slot)
{
	/* Everything is ready to start. Buffers are DMA mapped and
	 * associated with slots.
	 * "slot" is the last slot of the new frame we want to transmit.
	 * Close your seat belts now, please.
	 */
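	/* The descriptor index register takes a byte offset into the
	 * ring, pointing one descriptor past the frame's last slot;
	 * writing it is what actually starts the transfer. The wmb()
	 * makes sure the descriptor updates are visible to the device
	 * before that write. */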
	wmb();
	slot = next_slot(ring, slot);
	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
}

static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
			   struct sk_buff *skb,
			   u8 cur_frag)
{
	int slot;
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	u32 desc_ctl;
	u32 desc_addr;

	assert(skb_shinfo(skb)->nr_frags == 0);

	slot = request_slot(ring);
	desc = ring->vbase + slot;
	meta = ring->meta + slot;

	/* Add a device specific TX header. The needed headroom must
	 * already be reserved; __skb_push() expands into it. */
	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
	__skb_push(skb, sizeof(struct bcm43xx_txhdr));
	/* Now calculate and add the tx header.
	 * The tx header includes the PLCP header.
	 */
	bcm43xx_generate_txhdr(ring->bcm,
			       (struct bcm43xx_txhdr *)skb->data,
			       skb->data + sizeof(struct bcm43xx_txhdr),
			       skb->len - sizeof(struct bcm43xx_txhdr),
			       (cur_frag == 0),
			       generate_cookie(ring, slot));

	meta->skb = skb;
	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
		/* Undo the mapping, as done on the analogous RX error
		 * path in setup_rx_descbuffer(), so the streaming
		 * mapping does not leak. */
		unmap_descbuffer(ring, meta->dmaaddr, skb->len, 1);
		return_slot(ring, slot);
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA TX SKB >1G "
				    "(0x%llx, len: %u)\n",
			(unsigned long long)meta->dmaaddr, skb->len);
		return -ENOMEM;
	}

	desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
	desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
	desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
	desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
		     (u32)(meta->skb->len - ring->frameoffset));
	if (slot == ring->nr_slots - 1)
		desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;

	set_desc_ctl(desc, desc_ctl);
	set_desc_addr(desc, desc_addr);
	/* Now transfer the whole frame. */
	dmacontroller_poke_tx(ring, slot);

	return 0;
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	/* We just received a packet from the kernel network subsystem.
	 * Add headers and DMA map the memory. Poke
	 * the device to send the stuff.
	 * Note that this is called from atomic context.
	 */
	struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
	u8 i;
	struct sk_buff *skb;

	assert(ring->tx);
	if (unlikely(free_slots(ring) < txb->nr_frags)) {
		/* The queue should be stopped,
		 * if we are low on free slots.
		 * If this ever triggers, we have to lower the suspend_mark.
		 */
		dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
		return -ENOMEM;
	}

	for (i = 0; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];
		/* Take the skb away from ieee80211_txb_free() below;
		 * ownership is transferred to the DMA ring. */
		txb->fragments[i] = NULL;
		dma_tx_fragment(ring, skb, i);
		/* TODO: handle failure of dma_tx_fragment */
	}
	ieee80211_txb_free(txb);

	return 0;
}

void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status)
{
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int is_last_fragment;
	int slot;

	ring = parse_cookie(bcm, status->cookie, &slot);
	assert(ring);
	assert(ring->tx);
	assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = ring->vbase + slot;
		meta = ring->meta + slot;

		is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
		unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		free_descriptor_buffer(ring, desc, meta, 1);
		/* Everything belonging to the slot is unmapped
		 * and freed, so we can return it.
		 */
		return_slot(ring, slot);

		if (is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	bcm->stats.last_tx = jiffies;
}

static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ring->vbase + *slot;
	meta = ring->meta + *slot;

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
		/* We received an xmit status. */
		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
		struct bcm43xx_xmitstatus stat;
		int i = 0;

		stat.cookie = le16_to_cpu(hw->cookie);
		while (stat.cookie == 0) {
			if (unlikely(++i >= 10000)) {
				assert(0);
				break;
			}
			udelay(2);
			barrier();
			stat.cookie = le16_to_cpu(hw->cookie);
		}
		stat.flags = hw->flags;
		stat.cnt1 = hw->cnt1;
		stat.cnt2 = hw->cnt2;
		stat.seq = le16_to_cpu(hw->seq);
		stat.unknown = le16_to_cpu(hw->unknown);

		bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
		bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr *)skb->data;
	len = le16_to_cpu(rxhdr->frame_length);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_length);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ring->vbase + *slot;
			meta = ring->meta + *slot;
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
				     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}
	len -= IEEE80211_FCS_LEN;

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	err = bcm43xx_rx(ring->bcm, skb, rxhdr);
	if (err) {
		dev_kfree_skb_irq(skb);
		goto drop;
	}

drop:
	return;
}
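
/* Runs in interrupt context (note the GFP_ATOMIC and dev_kfree_skb_irq
 * in dma_rx() above): walk the RX ring from our last position up to
 * the slot the hardware's descriptor pointer reports, hand each filled
 * buffer to dma_rx(), then acknowledge how far we got so the device
 * can reuse the slots. */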
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	u32 status;
	u16 descptr;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS);
	descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
	current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
	ring->current_slot = slot;
}

void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
			  | BCM43xx_DMA_TXCTRL_SUSPEND);
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
			  & ~BCM43xx_DMA_TXCTRL_SUSPEND);
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}