/*
 * BRIEF MODULE DESCRIPTION
 *      The Descriptor Based DMA channel manager that first appeared
 *      on the Au1550.  I started with dma.c, but I think all that is
 *      left is this initial comment :-)
 *
 * Copyright 2004 Embedded Edge, LLC
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/system.h>
/*
 * The Descriptor Based DMA supports up to 16 channels.
 *
 * There are 32 devices defined. We keep an internal structure
 * of devices using these channels, along with additional
 * information about each device.
 *
 * We allocate the descriptors and allow access to them through various
 * functions.  The drivers allocate the data buffers and assign them
 * to the descriptors.
 */
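/*
 * Rough usage sketch of the interface below (illustrative only; srcid,
 * destid, txbuf and nbytes are placeholders a driver would supply).
 * A memory-to-FIFO transfer allocates a channel between two device IDs,
 * sizes the descriptor ring, queues a source buffer and kicks the channel:
 *
 *      u32 chanid = au1xxx_dbdma_chan_alloc(srcid, destid, NULL, NULL);
 *      if (chanid != 0) {
 *              au1xxx_dbdma_set_devwidth(chanid, 16);
 *              au1xxx_dbdma_ring_alloc(chanid, 4);
 *              au1xxx_dbdma_put_source(chanid, txbuf, nbytes);
 *              au1xxx_dbdma_start(chanid);
 *      }
 */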
static spinlock_t au1xxx_dbdma_spin_lock = SPIN_LOCK_UNLOCKED;

/* Round an address up to the next multiple of 'a' (a power of two);
 * I couldn't find an existing macro that did this.
 */
#define ALIGN_ADDR(x, a)        ((((u32)(x)) + (a-1)) & ~(a-1))
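/*
 * For example (illustrative values only): with the 32-byte descriptor
 * alignment used below, ALIGN_ADDR(0x10000304, 32) yields 0x10000320,
 * while an already aligned address such as 0x10000300 is returned
 * unchanged.
 */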
static volatile dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE;
static int dbdma_initialized;

static void au1xxx_dbdma_init(void);
typedef struct dbdma_device_table {
        u32             dev_flags;              /* DEV_FLAGS_* below */
        u32             dev_tsize;              /* Transfer (burst) size, in device widths */
        u32             dev_devwidth;           /* Device width in bits (8, 16 or 32) */
        u32             dev_physaddr;           /* If FIFO */
        u32             dev_intlevel;
        u32             dev_intpolarity;
} dbdev_tab_t;

typedef struct dbdma_chan_config {
        u32                     chan_flags;
        u32                     chan_index;
        dbdev_tab_t             *chan_src;
        dbdev_tab_t             *chan_dest;
        au1x_dma_chan_t         *chan_ptr;
        au1x_ddma_desc_t        *chan_desc_base;
        au1x_ddma_desc_t        *get_ptr, *put_ptr, *cur_ptr;
        void                    *chan_callparam;
        void                    (*chan_callback)(int, void *, struct pt_regs *);
} chan_tab_t;
#define DEV_FLAGS_INUSE         (1 << 0)
#define DEV_FLAGS_ANYUSE        (1 << 1)
#define DEV_FLAGS_OUT           (1 << 2)
#define DEV_FLAGS_IN            (1 << 3)
static dbdev_tab_t dbdev_tab[] = {
        { DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
        { DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
        { DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
        { DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 },

        { 0, 0, 0, 0x00000000, 0, 0 },
        { 0, 0, 0, 0x00000000, 0, 0 },
        { 0, 0, 0, 0x00000000, 0, 0 },
        { 0, 0, 0, 0x00000000, 0, 0 },

        { DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 },
        { DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
        { DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
        { DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
        { DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 },
        { DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 },

        { DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
        { DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },

        { DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
        { DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },

        { DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
        { DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 },

        { DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
        { DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 },

        { 0, 0, 0, 0x00000000, 0, 0 },  /* PCI */
        { 0, 0, 0, 0x00000000, 0, 0 },  /* NAND */

        { DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
        { DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

        { DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
        { DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

        { DEV_FLAGS_INUSE, 0, 0, 0x00000000, 0, 0 },
        { DEV_FLAGS_INUSE, 0, 0, 0x00000000, 0, 0 },

        { DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },   /* throttle */
        { DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },   /* always */
};
static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
/* Allocate a channel and return a non-zero descriptor if successful.
 */
u32
au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
       void (*callback)(int, void *, struct pt_regs *), void *callparam)
{
        unsigned long   flags;
        u32             used, chan, rv;
        u32             dcp;
        int             i;
        dbdev_tab_t     *stp, *dtp;
        chan_tab_t      *ctp;
        volatile au1x_dma_chan_t *cp;

        /* We do the initialization on the first channel allocation.
         * We have to wait because of the interrupt handler initialization
         * which can't be done successfully during board set up.
         */
        if (!dbdma_initialized)
                au1xxx_dbdma_init();
        dbdma_initialized = 1;

        if ((srcid >= DSCR_NDEV_IDS) || (destid >= DSCR_NDEV_IDS))
                return 0;

        stp = &dbdev_tab[srcid];
        dtp = &dbdev_tab[destid];
        used = 0;
        rv = 0;

        /* Check to see if we can get both channels.
         */
        spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
        if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
             (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
                /* Got source */
                stp->dev_flags |= DEV_FLAGS_INUSE;
                if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
                     (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
                        /* Got destination */
                        dtp->dev_flags |= DEV_FLAGS_INUSE;
                } else {
                        /* Can't get dest.  Release src.
                         */
                        stp->dev_flags &= ~DEV_FLAGS_INUSE;
                        used++;
                }
        } else {
                used++;
        }
        spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

        if (used)
                return 0;

        /* Let's see if we can allocate a channel for it.
         */
        ctp = NULL;
        chan = 0;
        spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
        for (i=0; i<NUM_DBDMA_CHANS; i++) {
                if (chan_tab_ptr[i] == NULL) {
                        /* If kmalloc fails, it is caught below same
                         * as a channel not available.  GFP_ATOMIC because
                         * we hold a spinlock with interrupts disabled.
                         */
                        ctp = (chan_tab_t *)kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
                        chan_tab_ptr[i] = ctp;
                        break;
                }
        }
        spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

        if (ctp != NULL) {
                memset(ctp, 0, sizeof(chan_tab_t));
                ctp->chan_index = chan = i;
                dcp = DDMA_CHANNEL_BASE;
                dcp += (0x0100 * chan);
                ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
                cp = (volatile au1x_dma_chan_t *)dcp;
                ctp->chan_src = stp;
                ctp->chan_dest = dtp;
                ctp->chan_callback = callback;
                ctp->chan_callparam = callparam;

                /* Initialize channel configuration.
                 */
                i = 0;
                if (stp->dev_intlevel)
                        i |= DDMA_CFG_SED;
                if (stp->dev_intpolarity)
                        i |= DDMA_CFG_SP;
                if (dtp->dev_intlevel)
                        i |= DDMA_CFG_DED;
                if (dtp->dev_intpolarity)
                        i |= DDMA_CFG_DP;
                cp->ddma_cfg = i;

                /* Return a non-zero value that can be used to
                 * find the channel information in subsequent
                 * operations.
                 */
                rv = (u32)(&chan_tab_ptr[chan]);
        } else {
                /* Can't get a channel; release the devices.
                 */
                stp->dev_flags &= ~DEV_FLAGS_INUSE;
                dtp->dev_flags &= ~DEV_FLAGS_INUSE;
        }
        return rv;
}
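/*
 * Note on the returned "chanid": it is the address of the chan_tab_ptr[]
 * slot, not of the chan_tab_t itself, so every routine below recovers the
 * channel state with a double indirection:
 *
 *      chan_tab_t *ctp = *((chan_tab_t **)chanid);
 */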
/* Set the device width if source or destination is a FIFO.
 * Should be 8, 16, or 32 bits.
 */
u32
au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
{
        u32             rv = 0;
        chan_tab_t      *ctp;
        dbdev_tab_t     *stp, *dtp;

        ctp = *((chan_tab_t **)chanid);
        stp = ctp->chan_src;
        dtp = ctp->chan_dest;

        if (stp->dev_flags & DEV_FLAGS_IN) {    /* Source in fifo */
                rv = stp->dev_devwidth;
                stp->dev_devwidth = bits;
        }
        if (dtp->dev_flags & DEV_FLAGS_OUT) {   /* Destination out fifo */
                rv = dtp->dev_devwidth;
                dtp->dev_devwidth = bits;
        }

        return rv;
}
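/*
 * For example (hypothetical driver), a device with a 16-bit FIFO would
 * call au1xxx_dbdma_set_devwidth(chanid, 16) after allocating the channel
 * and before au1xxx_dbdma_ring_alloc(), since the width is folded into
 * the descriptor command words when the ring is built.
 */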
/* Allocate a descriptor ring, initializing as much as possible.
 */
u32
au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
        int                     i;
        u32                     desc_base, srcid, destid;
        u32                     cmd0, cmd1, src0, src1, dest0, dest1;
        chan_tab_t              *ctp;
        dbdev_tab_t             *stp, *dtp;
        au1x_ddma_desc_t        *dp;

        /* I guess we could check this to be within the
         * range of the table......
         */
        ctp = *((chan_tab_t **)chanid);
        stp = ctp->chan_src;
        dtp = ctp->chan_dest;

        /* The descriptors must be 32-byte aligned.  There is a
         * possibility the allocation will give us such an address,
         * and if we try that first we are likely to not waste larger
         * slabs of memory.
         */
        desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t), GFP_KERNEL);
        if (desc_base == 0)
                return 0;

        if (desc_base & 0x1f) {
                /* Lost....do it again, allocate extra, and round
                 * the address base.
                 */
                kfree((const void *)desc_base);
                i = entries * sizeof(au1x_ddma_desc_t);
                i += (sizeof(au1x_ddma_desc_t) - 1);
                if ((desc_base = (u32)kmalloc(i, GFP_KERNEL)) == 0)
                        return 0;
                desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
        }
        dp = (au1x_ddma_desc_t *)desc_base;

        /* Keep track of the base descriptor.
         */
        ctp->chan_desc_base = dp;

        /* Initialize the rings with as much information as we know.
         */
        srcid = stp - dbdev_tab;        /* Index is channel device ID */
        destid = dtp - dbdev_tab;

        cmd0 = cmd1 = src1 = dest1 = 0;
        src0 = dest0 = 0;
        cmd0 |= DSCR_CMD0_SID(srcid);
        cmd0 |= DSCR_CMD0_DID(destid);
        cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
        cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_CURRENT);

        switch (stp->dev_devwidth) {
        case 8:
                cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
                break;
        case 16:
                cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
                break;
        case 32:
        default:
                cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
                break;
        }

        switch (dtp->dev_devwidth) {
        case 8:
                cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
                break;
        case 16:
                cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
                break;
        case 32:
        default:
                cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
                break;
        }

        /* If the device is marked as an in/out FIFO, ensure it is
         * set non-coherent.
         */
        if (stp->dev_flags & DEV_FLAGS_IN)
                cmd0 |= DSCR_CMD0_SN;           /* Source in fifo */
        if (dtp->dev_flags & DEV_FLAGS_OUT)
                cmd0 |= DSCR_CMD0_DN;           /* Destination out fifo */

        /* Set up source1.  For now, assume no stride and increment.
         * A channel attribute update can change this later.
         */
        switch (stp->dev_tsize) {
        case 1:
                src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
                break;
        case 2:
                src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
                break;
        case 4:
                src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
                break;
        case 8:
        default:
                src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
                break;
        }

        /* If source input is fifo, set static address.
         */
        if (stp->dev_flags & DEV_FLAGS_IN) {
                src0 = stp->dev_physaddr;
                src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
        }

        /* Set up dest1.  For now, assume no stride and increment.
         * A channel attribute update can change this later.
         */
        switch (dtp->dev_tsize) {
        case 1:
                dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
                break;
        case 2:
                dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
                break;
        case 4:
                dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
                break;
        case 8:
        default:
                dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
                break;
        }

        /* If destination output is fifo, set static address.
         */
        if (dtp->dev_flags & DEV_FLAGS_OUT) {
                dest0 = dtp->dev_physaddr;
                dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
        }

        for (i=0; i<entries; i++) {
                dp->dscr_cmd0 = cmd0;
                dp->dscr_cmd1 = cmd1;
                dp->dscr_source0 = src0;
                dp->dscr_source1 = src1;
                dp->dscr_dest0 = dest0;
                dp->dscr_dest1 = dest1;
                dp->dscr_stat = 0;
                dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
                dp++;
        }

        /* Make last descriptor point to the first.
         */
        dp--;
        dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
        ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

        return (u32)(ctp->chan_desc_base);
}
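/*
 * Sizing note (a worked example, assuming the 32-byte au1x_ddma_desc_t
 * implied by the alignment above): an 8-entry ring needs 8 * 32 = 256
 * bytes.  If the first kmalloc() happens to return a 32-byte aligned
 * block it is used directly; otherwise the retry path allocates
 * 256 + 31 bytes and rounds the base up with ALIGN_ADDR().
 */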
/* Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count.  Normally used
 * for memory to fifo transfers.
 */
u32
au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;

        /* I guess we could check this to be within the
         * range of the table......
         */
        ctp = *((chan_tab_t **)chanid);

        /* We should not have multiple callers for a particular channel;
         * an interrupt doesn't affect this pointer nor the descriptor,
         * so no locking should be needed.
         */
        dp = ctp->put_ptr;

        /* If the descriptor is valid, we are way ahead of the DMA
         * engine, so just return an error condition.
         */
        if (dp->dscr_cmd0 & DSCR_CMD0_V) {
                return 0;
        }

        /* Load up buffer address and byte count.
         */
        dp->dscr_source0 = virt_to_phys(buf);
        dp->dscr_cmd1 = nbytes;
        dp->dscr_cmd0 |= DSCR_CMD0_V;   /* Let it rip */
        ctp->chan_ptr->ddma_dbell = 0xffffffff; /* Make it go */

        /* Get next descriptor pointer.
         */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

        /* Return something not zero.
         */
        return nbytes;
}
/* Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count.  Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 */
u32
au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;

        /* I guess we could check this to be within the
         * range of the table......
         */
        ctp = *((chan_tab_t **)chanid);

        /* We should not have multiple callers for a particular channel;
         * an interrupt doesn't affect this pointer nor the descriptor,
         * so no locking should be needed.
         */
        dp = ctp->put_ptr;

        /* If the descriptor is valid, we are way ahead of the DMA
         * engine, so just return an error condition.
         */
        if (dp->dscr_cmd0 & DSCR_CMD0_V)
                return 0;

        /* Load up buffer address and byte count.
         */
        dp->dscr_dest0 = virt_to_phys(buf);
        dp->dscr_cmd1 = nbytes;
        dp->dscr_cmd0 |= DSCR_CMD0_V;   /* Let it rip */

        /* Get next descriptor pointer.
         */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

        /* Return something not zero.
         */
        return nbytes;
}
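/*
 * Receive direction sketch (illustrative; chanid and rxbuf are
 * placeholders): empty buffers go in with au1xxx_dbdma_put_dest(), the
 * channel is kicked with au1xxx_dbdma_start(), and filled buffers are
 * collected later, typically from the channel callback, with
 * au1xxx_dbdma_get_dest():
 *
 *      au1xxx_dbdma_put_dest(chanid, rxbuf, sizeof(rxbuf));
 *      au1xxx_dbdma_start(chanid);
 *      ...
 *      if (au1xxx_dbdma_get_dest(chanid, &buf, &nbytes))
 *              handle nbytes of data at buf, then queue another buffer
 */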
/* Get a destination buffer into the DMA ring.
 * Normally used to get a full buffer from the ring during fifo
 * to memory transfers.  This does not set the valid bit, you will
 * have to put another destination buffer to keep the DMA going.
 */
u32
au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;
        u32                     rv;

        /* I guess we could check this to be within the
         * range of the table......
         */
        ctp = *((chan_tab_t **)chanid);

        /* We should not have multiple callers for a particular channel;
         * an interrupt doesn't affect this pointer nor the descriptor,
         * so no locking should be needed.
         */
        dp = ctp->get_ptr;

        /* If the descriptor is valid, we are way ahead of the DMA
         * engine, so just return an error condition.
         */
        if (dp->dscr_cmd0 & DSCR_CMD0_V)
                return 0;

        /* Return buffer address and byte count.
         */
        *buf = (void *)(phys_to_virt(dp->dscr_dest0));
        *nbytes = dp->dscr_cmd1;
        rv = dp->dscr_stat;

        /* Get next descriptor pointer.
         */
        ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

        /* Return something not zero.
         */
        return rv;
}
void
au1xxx_dbdma_stop(u32 chanid)
{
        chan_tab_t      *ctp;
        volatile au1x_dma_chan_t *cp;
        int halt_timeout = 0;

        ctp = *((chan_tab_t **)chanid);
        cp = ctp->chan_ptr;
        cp->ddma_cfg &= ~DDMA_CFG_EN;   /* Disable channel */
        while (!(cp->ddma_stat & DDMA_STAT_H)) {
                udelay(1);
                halt_timeout++;
                if (halt_timeout > 100) {
                        printk(KERN_WARNING "warning: DMA channel won't halt\n");
                        break;
                }
        }
        /* clear current desc valid and doorbell */
        cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
}
/* Start using the current descriptor pointer.  If the dbdma encounters
 * a not valid descriptor, it will stop.  In this case, we can just
 * continue by adding a buffer to the list and starting again.
 */
void
au1xxx_dbdma_start(u32 chanid)
{
        chan_tab_t      *ctp;
        volatile au1x_dma_chan_t *cp;

        ctp = *((chan_tab_t **)chanid);
        cp = ctp->chan_ptr;
        cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
        cp->ddma_cfg |= DDMA_CFG_EN;    /* Enable channel */
        cp->ddma_dbell = 0xffffffff;    /* Make it go */
}
void
au1xxx_dbdma_reset(u32 chanid)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;

        au1xxx_dbdma_stop(chanid);

        ctp = *((chan_tab_t **)chanid);
        ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

        /* Run through the descriptors and reset the valid indicator.
         */
        dp = ctp->chan_desc_base;
        do {
                dp->dscr_cmd0 &= ~DSCR_CMD0_V;
                dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
        } while (dp != ctp->chan_desc_base);
}
u32
au1xxx_get_dma_residue(u32 chanid)
{
        chan_tab_t *ctp = *((chan_tab_t **)chanid);
        volatile au1x_dma_chan_t *cp = ctp->chan_ptr;

        /* This is only valid if the channel is stopped.
         */
        return cp->ddma_bytecnt;
}
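/*
 * The byte count is only meaningful once the channel has halted, so a
 * caller wanting the residue would stop the channel first (sketch):
 *
 *      au1xxx_dbdma_stop(chanid);
 *      bytes_left = au1xxx_get_dma_residue(chanid);
 */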
void
au1xxx_dbdma_chan_free(u32 chanid)
{
        chan_tab_t      *ctp;
        dbdev_tab_t     *stp, *dtp;

        ctp = *((chan_tab_t **)chanid);
        stp = ctp->chan_src;
        dtp = ctp->chan_dest;

        au1xxx_dbdma_stop(chanid);

        if (ctp->chan_desc_base != NULL)
                kfree(ctp->chan_desc_base);

        stp->dev_flags &= ~DEV_FLAGS_INUSE;
        dtp->dev_flags &= ~DEV_FLAGS_INUSE;
        chan_tab_ptr[ctp->chan_index] = NULL;
        kfree(ctp);
}
static irqreturn_t
dbdma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        u32 intstat, chan_index;
        chan_tab_t      *ctp;
        au1x_ddma_desc_t        *dp;
        volatile au1x_dma_chan_t *cp;

        intstat = dbdma_gptr->ddma_intstat;
        chan_index = au_ffs(intstat) - 1;
        ctp = chan_tab_ptr[chan_index];
        cp = ctp->chan_ptr;
        dp = ctp->cur_ptr;
        cp->ddma_irq = 0;       /* Acknowledge the channel interrupt */

        if (ctp->chan_callback)
                (ctp->chan_callback)(irq, ctp->chan_callparam, regs);
        ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
        return IRQ_HANDLED;
}
static void
au1xxx_dbdma_init(void)
{
        dbdma_gptr->ddma_config = 0;
        dbdma_gptr->ddma_throttle = 0;
        dbdma_gptr->ddma_inten = 0xffff;

        if (request_irq(AU1550_DDMA_INT, dbdma_interrupt, SA_INTERRUPT,
                        "Au1xxx dbdma", (void *)dbdma_gptr))
                printk(KERN_ERR "Can't get 1550 dbdma irq\n");
}
void
au1xxx_dbdma_dump(u32 chanid)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;
        dbdev_tab_t             *stp, *dtp;
        volatile au1x_dma_chan_t *cp;

        ctp = *((chan_tab_t **)chanid);
        stp = ctp->chan_src;
        dtp = ctp->chan_dest;
        cp = ctp->chan_ptr;

        printk("Chan %x, stp %x (dev %d) dtp %x (dev %d)\n",
                (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp, dtp - dbdev_tab);
        printk("desc base %x, get %x, put %x, cur %x\n",
                (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
                (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));

        printk("dbdma chan %x\n", (u32)cp);
        printk("cfg %08x, desptr %08x, statptr %08x\n",
                cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
        printk("dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
                cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat, cp->ddma_bytecnt);

        /* Run through the descriptors
         */
        dp = ctp->chan_desc_base;
        do {
                printk("dp %08x, cmd0 %08x, cmd1 %08x\n",
                        (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
                printk("src0 %08x, src1 %08x, dest0 %08x\n",
                        dp->dscr_source0, dp->dscr_source1, dp->dscr_dest0);
                printk("dest1 %08x, stat %08x, nxtptr %08x\n",
                        dp->dscr_dest1, dp->dscr_stat, dp->dscr_nxtptr);
                dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
        } while (dp != ctp->chan_desc_base);
}