/*
 * linux/arch/arm/omap/dma.c
 *
 * Copyright (C) 2003 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 *
 * Support functions for the OMAP internal DMA channels.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/spinlock.h>
20 #include <linux/errno.h>
21 #include <linux/interrupt.h>
23 #include <asm/system.h>
25 #include <asm/hardware.h>
29 #define OMAP_DMA_ACTIVE 0x01
31 #define OMAP_DMA_CCR_EN (1 << 7)
33 #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
35 static int enable_1510_mode = 0;
43 void (* callback)(int lch, u16 ch_status, void *data);
48 static int dma_chan_count;
50 static spinlock_t dma_chan_lock;
51 static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];
53 const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
54 INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
55 INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
56 INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
57 INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
58 INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
61 static inline int get_gdma_dev(int req)
63 u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
64 int shift = ((req - 1) % 5) * 6;
66 return ((omap_readl(reg) >> shift) & 0x3f) + 1;
69 static inline void set_gdma_dev(int req, int dev)
71 u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
72 int shift = ((req - 1) % 5) * 6;
76 l &= ~(0x3f << shift);
77 l |= (dev - 1) << shift;
81 static void clear_lch_regs(int lch)
84 u32 lch_base = OMAP_DMA_BASE + lch * 0x40;
86 for (i = 0; i < 0x2c; i += 2)
87 omap_writew(0, lch_base + i);
90 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
91 int frame_count, int sync_mode)
95 w = omap_readw(OMAP_DMA_CSDP(lch));
98 omap_writew(w, OMAP_DMA_CSDP(lch));
100 w = omap_readw(OMAP_DMA_CCR(lch));
102 if (sync_mode == OMAP_DMA_SYNC_FRAME)
104 omap_writew(w, OMAP_DMA_CCR(lch));
106 w = omap_readw(OMAP_DMA_CCR2(lch));
108 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
110 omap_writew(w, OMAP_DMA_CCR2(lch));
112 omap_writew(elem_count, OMAP_DMA_CEN(lch));
113 omap_writew(frame_count, OMAP_DMA_CFN(lch));
116 void omap_set_dma_constant_fill(int lch, u32 color)
120 #ifdef CONFIG_DEBUG_KERNEL
121 if (omap_dma_in_1510_mode) {
122 printk(KERN_ERR "OMAP DMA constant fill not available in 1510 mode.");
127 w = omap_readw(OMAP_DMA_CCR2(lch)) & ~0x03;
129 omap_writew(w, OMAP_DMA_CCR2(lch));
131 omap_writew((u16)color, OMAP_DMA_COLOR_L(lch));
132 omap_writew((u16)(color >> 16), OMAP_DMA_COLOR_U(lch));
134 w = omap_readw(OMAP_DMA_LCH_CTRL(lch)) & ~0x0f;
135 w |= 1; /* Channel type G */
136 omap_writew(w, OMAP_DMA_LCH_CTRL(lch));
139 void omap_set_dma_transparent_copy(int lch, u32 color)
143 #ifdef CONFIG_DEBUG_KERNEL
144 if (omap_dma_in_1510_mode) {
145 printk(KERN_ERR "OMAP DMA transparent copy not available in 1510 mode.");
149 w = omap_readw(OMAP_DMA_CCR2(lch)) & ~0x03;
151 omap_writew(w, OMAP_DMA_CCR2(lch));
153 omap_writew((u16)color, OMAP_DMA_COLOR_L(lch));
154 omap_writew((u16)(color >> 16), OMAP_DMA_COLOR_U(lch));
156 w = omap_readw(OMAP_DMA_LCH_CTRL(lch)) & ~0x0f;
157 w |= 1; /* Channel type G */
158 omap_writew(w, OMAP_DMA_LCH_CTRL(lch));
161 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
162 unsigned long src_start)
166 w = omap_readw(OMAP_DMA_CSDP(lch));
169 omap_writew(w, OMAP_DMA_CSDP(lch));
171 w = omap_readw(OMAP_DMA_CCR(lch));
173 w |= src_amode << 12;
174 omap_writew(w, OMAP_DMA_CCR(lch));
176 omap_writew(src_start >> 16, OMAP_DMA_CSSA_U(lch));
177 omap_writew(src_start, OMAP_DMA_CSSA_L(lch));
/* Set source element (CSEI) and frame (CSFI) index increments for `lch`;
 * used by the double-indexed addressing modes. */
void omap_set_dma_src_index(int lch, int eidx, int fidx)
{
	omap_writew(eidx, OMAP_DMA_CSEI(lch));
	omap_writew(fidx, OMAP_DMA_CSFI(lch));
}
186 void omap_set_dma_src_data_pack(int lch, int enable)
190 w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 6);
191 w |= enable ? (1 << 6) : 0;
192 omap_writew(w, OMAP_DMA_CSDP(lch));
195 void omap_set_dma_src_burst_mode(int lch, int burst_mode)
199 w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 7);
200 switch (burst_mode) {
201 case OMAP_DMA_DATA_BURST_4:
204 case OMAP_DMA_DATA_BURST_8:
208 printk(KERN_ERR "Invalid DMA burst mode\n");
212 omap_writew(w, OMAP_DMA_CSDP(lch));
215 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
216 unsigned long dest_start)
220 w = omap_readw(OMAP_DMA_CSDP(lch));
223 omap_writew(w, OMAP_DMA_CSDP(lch));
225 w = omap_readw(OMAP_DMA_CCR(lch));
227 w |= dest_amode << 14;
228 omap_writew(w, OMAP_DMA_CCR(lch));
230 omap_writew(dest_start >> 16, OMAP_DMA_CDSA_U(lch));
231 omap_writew(dest_start, OMAP_DMA_CDSA_L(lch));
/* Set destination element (CDEI) and frame (CDFI) index increments for
 * channel `lch`; used by the double-indexed addressing modes. */
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
	omap_writew(eidx, OMAP_DMA_CDEI(lch));
	omap_writew(fidx, OMAP_DMA_CDFI(lch));
}
240 void omap_set_dma_dest_data_pack(int lch, int enable)
244 w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 13);
245 w |= enable ? (1 << 13) : 0;
246 omap_writew(w, OMAP_DMA_CSDP(lch));
249 void omap_set_dma_dest_burst_mode(int lch, int burst_mode)
253 w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 14);
254 switch (burst_mode) {
255 case OMAP_DMA_DATA_BURST_4:
258 case OMAP_DMA_DATA_BURST_8:
262 printk(KERN_ERR "Invalid DMA burst mode\n");
266 omap_writew(w, OMAP_DMA_CSDP(lch));
269 void omap_start_dma(int lch)
273 if (!omap_dma_in_1510_mode()) {
276 next_lch = dma_chan[lch].next_lch;
278 /* Enable the queue, if needed so. */
279 if (next_lch != -1) {
280 /* Clear the STOP_LNK bits */
281 w = omap_readw(OMAP_DMA_CLNK_CTRL(lch));
283 omap_writew(w, OMAP_DMA_CLNK_CTRL(lch));
284 w = omap_readw(OMAP_DMA_CLNK_CTRL(next_lch));
286 omap_writew(w, OMAP_DMA_CLNK_CTRL(next_lch));
288 /* And set the ENABLE_LNK bits */
289 omap_writew(next_lch | (1 << 15),
290 OMAP_DMA_CLNK_CTRL(lch));
292 if (dma_chan[next_lch].next_lch == lch)
293 omap_writew(lch | (1 << 15),
294 OMAP_DMA_CLNK_CTRL(next_lch));
296 /* Read CSR to make sure it's cleared. */
297 w = omap_readw(OMAP_DMA_CSR(next_lch));
298 /* Enable some nice interrupts. */
299 omap_writew(dma_chan[next_lch].enabled_irqs,
300 OMAP_DMA_CICR(next_lch));
301 dma_chan[next_lch].flags |= OMAP_DMA_ACTIVE;
305 /* Read CSR to make sure it's cleared. */
306 w = omap_readw(OMAP_DMA_CSR(lch));
307 /* Enable some nice interrupts. */
308 omap_writew(dma_chan[lch].enabled_irqs, OMAP_DMA_CICR(lch));
310 w = omap_readw(OMAP_DMA_CCR(lch));
311 w |= OMAP_DMA_CCR_EN;
312 omap_writew(w, OMAP_DMA_CCR(lch));
313 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
316 void omap_stop_dma(int lch)
321 /* Disable all interrupts on the channel */
322 omap_writew(0, OMAP_DMA_CICR(lch));
324 if (omap_dma_in_1510_mode()) {
325 w = omap_readw(OMAP_DMA_CCR(lch));
326 w &= ~OMAP_DMA_CCR_EN;
327 omap_writew(w, OMAP_DMA_CCR(lch));
328 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
332 next_lch = dma_chan[lch].next_lch;
335 * According to thw HW spec, enabling the STOP_LNK bit
336 * resets the CCR_EN bit at the same time.
338 w = omap_readw(OMAP_DMA_CLNK_CTRL(lch));
340 w = omap_writew(w, OMAP_DMA_CLNK_CTRL(lch));
341 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
343 if (next_lch != -1) {
344 omap_writew(0, OMAP_DMA_CICR(next_lch));
345 w = omap_readw(OMAP_DMA_CLNK_CTRL(next_lch));
347 w = omap_writew(w, OMAP_DMA_CLNK_CTRL(next_lch));
348 dma_chan[next_lch].flags &= ~OMAP_DMA_ACTIVE;
352 void omap_enable_dma_irq(int lch, u16 bits)
354 dma_chan[lch].enabled_irqs |= bits;
357 void omap_disable_dma_irq(int lch, u16 bits)
359 dma_chan[lch].enabled_irqs &= ~bits;
362 static int dma_handle_ch(int ch)
366 if (enable_1510_mode && ch >= 6) {
367 csr = dma_chan[ch].saved_csr;
368 dma_chan[ch].saved_csr = 0;
370 csr = omap_readw(OMAP_DMA_CSR(ch));
371 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
372 dma_chan[ch + 6].saved_csr = csr >> 7;
377 if (unlikely(dma_chan[ch].dev_id == -1)) {
378 printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n",
382 if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
383 printk(KERN_WARNING "DMA timeout with device %d\n", dma_chan[ch].dev_id);
384 if (unlikely(csr & OMAP_DMA_DROP_IRQ))
385 printk(KERN_WARNING "DMA synchronization event drop occurred with device %d\n",
386 dma_chan[ch].dev_id);
387 if (likely(csr & OMAP_DMA_BLOCK_IRQ))
388 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
389 if (likely(dma_chan[ch].callback != NULL))
390 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
394 static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
396 int ch = ((int) dev_id) - 1;
402 handled_now += dma_handle_ch(ch);
403 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
404 handled_now += dma_handle_ch(ch + 6);
407 handled += handled_now;
410 return handled ? IRQ_HANDLED : IRQ_NONE;
413 int omap_request_dma(int dev_id, const char *dev_name,
414 void (* callback)(int lch, u16 ch_status, void *data),
415 void *data, int *dma_ch_out)
417 int ch, free_ch = -1;
419 struct omap_dma_lch *chan;
421 spin_lock_irqsave(&dma_chan_lock, flags);
422 for (ch = 0; ch < dma_chan_count; ch++) {
423 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
428 if (dev_id != 0 && dma_chan[ch].dev_id == dev_id) {
429 spin_unlock_irqrestore(&dma_chan_lock, flags);
434 spin_unlock_irqrestore(&dma_chan_lock, flags);
437 chan = dma_chan + free_ch;
438 chan->dev_id = dev_id;
439 clear_lch_regs(free_ch);
440 spin_unlock_irqrestore(&dma_chan_lock, flags);
442 chan->dev_id = dev_id;
443 chan->dev_name = dev_name;
444 chan->callback = callback;
446 chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
448 if (cpu_is_omap1610() || cpu_is_omap5912()) {
449 /* If the sync device is set, configure it dynamically. */
451 set_gdma_dev(free_ch + 1, dev_id);
452 dev_id = free_ch + 1;
454 /* Disable the 1510 compatibility mode and set the sync device
456 omap_writew(dev_id | (1 << 10), OMAP_DMA_CCR(free_ch));
458 omap_writew(dev_id, OMAP_DMA_CCR(free_ch));
460 *dma_ch_out = free_ch;
465 void omap_free_dma(int ch)
469 spin_lock_irqsave(&dma_chan_lock, flags);
470 if (dma_chan[ch].dev_id == -1) {
471 printk("omap_dma: trying to free nonallocated DMA channel %d\n", ch);
472 spin_unlock_irqrestore(&dma_chan_lock, flags);
475 dma_chan[ch].dev_id = -1;
476 spin_unlock_irqrestore(&dma_chan_lock, flags);
478 /* Disable all DMA interrupts for the channel. */
479 omap_writew(0, OMAP_DMA_CICR(ch));
480 /* Make sure the DMA transfer is stopped. */
481 omap_writew(0, OMAP_DMA_CCR(ch));
484 int omap_dma_in_1510_mode(void)
486 return enable_1510_mode;
490 * lch_queue DMA will start right after lch_head one is finished.
491 * For this DMA link to start, you still need to start (see omap_start_dma)
492 * the first one. That will fire up the entire queue.
494 void omap_dma_link_lch (int lch_head, int lch_queue)
496 if (omap_dma_in_1510_mode()) {
497 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
502 if ((dma_chan[lch_head].dev_id == -1) ||
503 (dma_chan[lch_queue].dev_id == -1)) {
504 printk(KERN_ERR "omap_dma: trying to link non requested channels\n");
508 dma_chan[lch_head].next_lch = lch_queue;
512 * Once the DMA queue is stopped, we can destroy it.
514 void omap_dma_unlink_lch (int lch_head, int lch_queue)
516 if (omap_dma_in_1510_mode()) {
517 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
522 if (dma_chan[lch_head].next_lch != lch_queue ||
523 dma_chan[lch_head].next_lch == -1) {
524 printk(KERN_ERR "omap_dma: trying to unlink non linked channels\n");
529 if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
530 (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
531 printk(KERN_ERR "omap_dma: You need to stop the DMA channels before unlinking\n");
535 dma_chan[lch_head].next_lch = -1;
536 dma_chan[lch_queue].next_lch = -1;
540 static struct lcd_dma_info {
543 void (* callback)(u16 status, void *data);
546 unsigned long addr, size;
547 int rotate, data_type, xres, yres;
550 void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
554 lcd_dma.data_type = data_type;
555 lcd_dma.xres = fb_xres;
556 lcd_dma.yres = fb_yres;
559 static void set_b1_regs(void)
561 unsigned long top, bottom;
568 switch (lcd_dma.data_type) {
569 case OMAP_DMA_DATA_TYPE_S8:
572 case OMAP_DMA_DATA_TYPE_S16:
575 case OMAP_DMA_DATA_TYPE_S32:
583 if (lcd_dma.rotate == 0) {
585 bottom = lcd_dma.addr + (lcd_dma.xres * lcd_dma.yres - 1) * es;
586 /* 1510 DMA requires the bottom address to be 2 more than the
587 * actual last memory access location. */
588 if (omap_dma_in_1510_mode() &&
589 lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
596 top = lcd_dma.addr + (lcd_dma.xres - 1) * es;
597 bottom = lcd_dma.addr + (lcd_dma.yres - 1) * lcd_dma.xres * es;
600 ei = (lcd_dma.xres - 1) * es + 1;
601 fi = -(lcd_dma.xres * (lcd_dma.yres - 1) + 2) * 2 + 1;
604 if (omap_dma_in_1510_mode()) {
605 omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
606 omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
607 omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
608 omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);
614 omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
615 omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
616 omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
617 omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);
619 omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
620 omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);
622 w = omap_readw(OMAP1610_DMA_LCD_CSDP);
624 w |= lcd_dma.data_type;
625 omap_writew(w, OMAP1610_DMA_LCD_CSDP);
631 l = omap_readw(OMAP1610_DMA_LCD_CSDP);
632 /* Disable burst access */
634 omap_writew(l, OMAP1610_DMA_LCD_CSDP);
636 l = omap_readw(OMAP1610_DMA_LCD_CCR);
637 /* Set the double-indexed addressing mode */
639 omap_writew(l, OMAP1610_DMA_LCD_CCR);
641 omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
642 omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
643 omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
646 void omap_set_lcd_dma_b1_rotation(int rotate)
648 if (omap_dma_in_1510_mode()) {
649 printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
653 lcd_dma.rotate = rotate;
656 int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
659 spin_lock_irq(&lcd_dma.lock);
660 if (lcd_dma.reserved) {
661 spin_unlock_irq(&lcd_dma.lock);
662 printk(KERN_ERR "LCD DMA channel already reserved\n");
666 lcd_dma.reserved = 1;
667 spin_unlock_irq(&lcd_dma.lock);
668 lcd_dma.callback = callback;
669 lcd_dma.cb_data = data;
674 void omap_free_lcd_dma(void)
676 spin_lock(&lcd_dma.lock);
677 if (!lcd_dma.reserved) {
678 spin_unlock(&lcd_dma.lock);
679 printk(KERN_ERR "LCD DMA is not reserved\n");
683 if (!enable_1510_mode)
684 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
685 lcd_dma.reserved = 0;
686 spin_unlock(&lcd_dma.lock);
689 void omap_start_lcd_dma(void)
691 if (!enable_1510_mode) {
692 /* Set some reasonable defaults */
693 omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
694 omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
695 omap_writew(0x5740, OMAP1610_DMA_LCD_CCR);
698 if (!enable_1510_mode)
699 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) | 1, OMAP1610_DMA_LCD_CCR);
702 void omap_stop_lcd_dma(void)
704 if (!enable_1510_mode)
705 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
708 static int __init omap_init_dma(void)
712 if (cpu_is_omap1510()) {
713 printk(KERN_INFO "DMA support for OMAP1510 initialized\n");
715 enable_1510_mode = 1;
716 } else if (cpu_is_omap1610() || cpu_is_omap5912()) {
717 printk(KERN_INFO "OMAP DMA hardware version %d\n",
718 omap_readw(OMAP_DMA_HW_ID));
719 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
720 (omap_readw(OMAP_DMA_CAPS_0_U) << 16) | omap_readw(OMAP_DMA_CAPS_0_L),
721 (omap_readw(OMAP_DMA_CAPS_1_U) << 16) | omap_readw(OMAP_DMA_CAPS_1_L),
722 omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3),
723 omap_readw(OMAP_DMA_CAPS_4));
724 if (!enable_1510_mode) {
727 /* Disable OMAP 3.0/3.1 compatibility mode. */
728 w = omap_readw(OMAP_DMA_GSCR);
730 omap_writew(w, OMAP_DMA_GSCR);
731 dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT;
739 memset(&lcd_dma, 0, sizeof(lcd_dma));
740 spin_lock_init(&lcd_dma.lock);
741 spin_lock_init(&dma_chan_lock);
742 memset(&dma_chan, 0, sizeof(dma_chan));
744 for (ch = 0; ch < dma_chan_count; ch++) {
745 dma_chan[ch].dev_id = -1;
746 dma_chan[ch].next_lch = -1;
748 if (ch >= 6 && enable_1510_mode)
751 /* request_irq() doesn't like dev_id (ie. ch) being zero,
752 * so we have to kludge around this. */
753 r = request_irq(dma_irq[ch], dma_irq_handler, 0, "DMA",
758 printk(KERN_ERR "unable to request IRQ %d for DMA (error %d)\n",
760 for (i = 0; i < ch; i++)
761 free_irq(dma_irq[i], (void *) (i + 1));
768 arch_initcall(omap_init_dma);
770 EXPORT_SYMBOL(omap_request_dma);
771 EXPORT_SYMBOL(omap_free_dma);
772 EXPORT_SYMBOL(omap_start_dma);
773 EXPORT_SYMBOL(omap_stop_dma);
775 EXPORT_SYMBOL(omap_set_dma_transfer_params);
776 EXPORT_SYMBOL(omap_set_dma_constant_fill);
777 EXPORT_SYMBOL(omap_set_dma_transparent_copy);
779 EXPORT_SYMBOL(omap_set_dma_src_params);
780 EXPORT_SYMBOL(omap_set_dma_src_index);
781 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
782 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
784 EXPORT_SYMBOL(omap_set_dma_dest_params);
785 EXPORT_SYMBOL(omap_set_dma_dest_index);
786 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
787 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
789 EXPORT_SYMBOL(omap_dma_link_lch);
790 EXPORT_SYMBOL(omap_dma_unlink_lch);
792 EXPORT_SYMBOL(omap_request_lcd_dma);
793 EXPORT_SYMBOL(omap_free_lcd_dma);
794 EXPORT_SYMBOL(omap_start_lcd_dma);
795 EXPORT_SYMBOL(omap_stop_lcd_dma);
796 EXPORT_SYMBOL(omap_set_lcd_dma_b1);
797 EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);