2 * linux/arch/arm/omap/dma.c
4 * Copyright (C) 2003 Nokia Corporation
5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
8 * Support functions for the OMAP internal DMA channels.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/spinlock.h>
20 #include <linux/errno.h>
21 #include <linux/interrupt.h>
23 #include <asm/system.h>
25 #include <asm/hardware.h>
/* Flag kept in a channel's .flags while a transfer is active on it. */
29 #define OMAP_DMA_ACTIVE 0x01
/* CCR bit 7: channel enable — setting it starts the transfer. */
31 #define OMAP_DMA_CCR_EN (1 << 7)
/* Base of the FUNC_MUX_CTRL registers used by get_gdma_dev()/set_gdma_dev(). */
33 #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
/* Set by omap_init_dma() on OMAP1510; selects the 1510-compatibility paths. */
35 static int enable_1510_mode = 0;
/* NOTE(review): this is a field of struct omap_dma_lch — the surrounding
 * struct declaration is elided from this listing.  Per-channel completion
 * callback, invoked from dma_handle_ch(). */
43 void (* callback)(int lch, u16 ch_status, void *data);
/* Number of usable logical channels, set once in omap_init_dma(). */
48 static int dma_chan_count;
/* Protects the allocation state (dev_id) of dma_chan[]; initialized with
 * spin_lock_init() in omap_init_dma(). */
50 static spinlock_t dma_chan_lock;
51 static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];
53 const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
54 INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
55 INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
56 INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
57 INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
58 INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
61 static inline int get_gdma_dev(int req)
63 u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
64 int shift = ((req - 1) % 5) * 6;
66 return ((omap_readl(reg) >> shift) & 0x3f) + 1;
69 static inline void set_gdma_dev(int req, int dev)
71 u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
72 int shift = ((req - 1) % 5) * 6;
76 l &= ~(0x3f << shift);
77 l |= (dev - 1) << shift;
81 static void clear_lch_regs(int lch)
84 u32 lch_base = OMAP_DMA_BASE + lch * 0x40;
86 for (i = 0; i < 0x2c; i += 2)
87 omap_writew(0, lch_base + i);
/*
 * Program the transfer geometry of logical channel 'lch': element data
 * type, elements per frame, frames per block, and the sync granularity.
 * NOTE(review): the read-modify-write lines between each readw/writew
 * pair (data-type bits in CSDP, frame/block sync bits in CCR/CCR2) are
 * elided in this listing — confirm against the complete source.
 */
90 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
91 int frame_count, int sync_mode)
/* Element data type presumably merged into CSDP here (elided lines). */
95 w = omap_readw(OMAP_DMA_CSDP_REG(lch));
98 omap_writew(w, OMAP_DMA_CSDP_REG(lch));
/* Frame-sync selection is in CCR. */
100 w = omap_readw(OMAP_DMA_CCR_REG(lch));
102 if (sync_mode == OMAP_DMA_SYNC_FRAME)
104 omap_writew(w, OMAP_DMA_CCR_REG(lch));
/* Block-sync selection is in CCR2. */
106 w = omap_readw(OMAP_DMA_CCR2_REG(lch));
108 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
110 omap_writew(w, OMAP_DMA_CCR2_REG(lch));
/* Element count per frame, and frame count for the transfer. */
112 omap_writew(elem_count, OMAP_DMA_CEN_REG(lch));
113 omap_writew(frame_count, OMAP_DMA_CFN_REG(lch));
/*
 * Program the source side of logical channel 'lch': port, addressing
 * mode, and the 32-bit start address split across two 16-bit registers.
 * NOTE(review): the elided lines after the CSDP/CCR reads presumably
 * merge src_port into CSDP and clear the previous amode bits — confirm
 * against the complete source.
 */
117 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
118 unsigned long src_start)
122 w = omap_readw(OMAP_DMA_CSDP_REG(lch));
125 omap_writew(w, OMAP_DMA_CSDP_REG(lch));
/* Source addressing mode occupies CCR bits 12-13. */
127 w = omap_readw(OMAP_DMA_CCR_REG(lch));
129 w |= src_amode << 12;
130 omap_writew(w, OMAP_DMA_CCR_REG(lch));
/* Start address: upper half, then lower half. */
132 omap_writew(src_start >> 16, OMAP_DMA_CSSA_U_REG(lch));
133 omap_writew(src_start, OMAP_DMA_CSSA_L_REG(lch));
/*
 * Program the destination side of logical channel 'lch'; mirror image of
 * omap_set_dma_src_params().
 * NOTE(review): the elided lines after the CSDP/CCR reads presumably
 * merge dest_port into CSDP and clear the previous amode bits — confirm
 * against the complete source.
 */
136 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
137 unsigned long dest_start)
141 w = omap_readw(OMAP_DMA_CSDP_REG(lch));
144 omap_writew(w, OMAP_DMA_CSDP_REG(lch));
/* Destination addressing mode occupies CCR bits 14-15. */
146 w = omap_readw(OMAP_DMA_CCR_REG(lch));
148 w |= dest_amode << 14;
149 omap_writew(w, OMAP_DMA_CCR_REG(lch));
/* Start address: upper half, then lower half. */
151 omap_writew(dest_start >> 16, OMAP_DMA_CDSA_U_REG(lch));
152 omap_writew(dest_start, OMAP_DMA_CDSA_L_REG(lch));
/*
 * Start a transfer on logical channel 'lch'.  On non-1510 hardware, if
 * the channel was linked with omap_dma_link_lch(), the hardware link is
 * armed first (STOP_LNK cleared, ENABLE_LNK set — in both directions for
 * a two-channel ring) and the queued channel's interrupts are enabled.
 * NOTE(review): local declarations and the STOP_LNK bit-clear between
 * each CLNK_CTRL read/write pair are elided in this listing.
 */
155 void omap_start_dma(int lch)
159 if (!omap_dma_in_1510_mode()) {
162 next_lch = dma_chan[lch].next_lch;
164 /* Enable the queue, if needed so. */
165 if (next_lch != -1) {
166 /* Clear the STOP_LNK bits */
167 w = omap_readw(OMAP_DMA_CLNK_CTRL_REG(lch));
169 omap_writew(w, OMAP_DMA_CLNK_CTRL_REG(lch));
170 w = omap_readw(OMAP_DMA_CLNK_CTRL_REG(next_lch));
172 omap_writew(w, OMAP_DMA_CLNK_CTRL_REG(next_lch));
/* ENABLE_LNK is bit 15; the low bits carry the chained channel id. */
174 /* And set the ENABLE_LNK bits */
175 omap_writew(next_lch | (1 << 15),
176 OMAP_DMA_CLNK_CTRL_REG(lch));
/* Close the ring only when the two channels link to each other. */
178 if (dma_chan[next_lch].next_lch == lch)
179 omap_writew(lch | (1 << 15),
180 OMAP_DMA_CLNK_CTRL_REG(next_lch));
182 /* Read CSR to make sure it's cleared. */
183 w = omap_readw(OMAP_DMA_CSR_REG(next_lch));
184 /* Enable some nice interrupts. */
185 omap_writew(dma_chan[next_lch].enabled_irqs,
186 OMAP_DMA_CICR_REG(next_lch));
187 dma_chan[next_lch].flags |= OMAP_DMA_ACTIVE;
191 /* Read CSR to make sure it's cleared. */
192 w = omap_readw(OMAP_DMA_CSR_REG(lch));
193 /* Enable some nice interrupts. */
194 omap_writew(dma_chan[lch].enabled_irqs, OMAP_DMA_CICR_REG(lch));
/* Finally set CCR_EN to kick off the transfer. */
196 w = omap_readw(OMAP_DMA_CCR_REG(lch));
197 w |= OMAP_DMA_CCR_EN;
198 omap_writew(w, OMAP_DMA_CCR_REG(lch));
199 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
/*
 * Stop logical channel 'lch'.  1510 mode simply clears CCR_EN; newer
 * parts set STOP_LNK instead (which also clears CCR_EN, see comment
 * below) and additionally quiesce a linked successor channel.
 * NOTE(review): the local declarations, the early return after the 1510
 * branch, and the '(1 << 14)' STOP_LNK bit-set between each CLNK_CTRL
 * read/write pair are elided in this listing.
 */
202 void omap_stop_dma(int lch)
207 /* Disable all interrupts on the channel */
208 omap_writew(0, OMAP_DMA_CICR_REG(lch))
210 if (omap_dma_in_1510_mode()) {
211 w = omap_readw(OMAP_DMA_CCR_REG(lch));
212 w &= ~OMAP_DMA_CCR_EN;
213 omap_writew(w, OMAP_DMA_CCR_REG(lch));
214 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
218 next_lch = dma_chan[lch].next_lch;
221 * According to the HW spec, enabling the STOP_LNK bit
222 * resets the CCR_EN bit at the same time.
224 w = omap_readw(OMAP_DMA_CLNK_CTRL_REG(lch));
/* NOTE(review): assigning the result of omap_writew() to 'w' looks like
 * a typo — the stored value is never used; plain omap_writew() intended. */
226 w = omap_writew(w, OMAP_DMA_CLNK_CTRL_REG(lch));
227 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
229 if (next_lch != -1) {
230 omap_writew(0, OMAP_DMA_CICR_REG(next_lch));
231 w = omap_readw(OMAP_DMA_CLNK_CTRL_REG(next_lch));
/* NOTE(review): same suspicious 'w =' assignment as above. */
233 w = omap_writew(w, OMAP_DMA_CLNK_CTRL_REG(next_lch));
234 dma_chan[next_lch].flags &= ~OMAP_DMA_ACTIVE;
/* Arm additional CSR interrupt sources for channel 'lch'; CICR is
 * actually written from enabled_irqs when omap_start_dma() runs. */
238 void omap_enable_dma_irq(int lch, u16 bits)
240 dma_chan[lch].enabled_irqs |= bits;
/* Disarm CSR interrupt sources for channel 'lch' (inverse of
 * omap_enable_dma_irq). */
243 void omap_disable_dma_irq(int lch, u16 bits)
245 dma_chan[lch].enabled_irqs &= ~bits;
/*
 * Service one logical channel's interrupt.  On 1510, the high channels
 * share IRQ lines with channels 0..2: their status arrives in the upper
 * bits of the low channel's CSR and is parked in saved_csr until handled.
 * NOTE(review): the early-return paths (spurious/no event) and the
 * function's return statements are elided in this listing.
 */
248 static int dma_handle_ch(int ch)
/* High channel on 1510: consume the status parked by the low channel. */
252 if (enable_1510_mode && ch >= 6) {
253 csr = dma_chan[ch].saved_csr;
254 dma_chan[ch].saved_csr = 0;
256 csr = omap_readw(OMAP_DMA_CSR_REG(ch));
/* Low channel on 1510: bits 7+ belong to the paired channel ch + 6. */
257 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
258 dma_chan[ch + 6].saved_csr = csr >> 7;
263 if (unlikely(dma_chan[ch].dev_id == -1)) {
264 printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n",
268 if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
269 printk(KERN_WARNING "DMA timeout with device %d\n", dma_chan[ch].dev_id);
270 if (unlikely(csr & OMAP_DMA_DROP_IRQ))
271 printk(KERN_WARNING "DMA synchronization event drop occurred with device %d\n",
272 dma_chan[ch].dev_id);
/* Block completion ends the transfer as far as software state goes. */
273 if (likely(csr & OMAP_DMA_BLOCK_IRQ))
274 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
275 if (likely(dma_chan[ch].callback != NULL))
276 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
/*
 * Shared interrupt handler for the DMA channels.  dev_id carries the
 * channel number + 1 (see the request_irq() kludge in omap_init_dma).
 * NOTE(review): the loop header and its exit condition around the
 * dma_handle_ch() calls are elided in this listing.
 */
280 static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
282 int ch = ((int) dev_id) - 1;
288 handled_now += dma_handle_ch(ch);
/* On 1510 also drain the high channel sharing this IRQ line. */
289 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
290 handled_now += dma_handle_ch(ch + 6);
293 handled += handled_now;
296 return handled ? IRQ_HANDLED : IRQ_NONE;
/*
 * Allocate a free logical channel and bind it to 'dev_id' (hardware sync
 * device; 0 = software-triggered).  The chosen channel is returned via
 * *dma_ch_out.
 * NOTE(review): elided here are the free_ch bookkeeping inside the loop,
 * the error returns (device busy / no free channel), the guard around
 * set_gdma_dev(), and the trailing 'return 0'.
 */
299 int omap_request_dma(int dev_id, const char *dev_name,
300 void (* callback)(int lch, u16 ch_status, void *data),
301 void *data, int *dma_ch_out)
303 int ch, free_ch = -1;
305 struct omap_dma_lch *chan;
307 spin_lock_irqsave(&dma_chan_lock, flags);
308 for (ch = 0; ch < dma_chan_count; ch++) {
/* Remember the first unused channel... */
309 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
/* ...but refuse if the device is already bound to some channel. */
314 if (dev_id != 0 && dma_chan[ch].dev_id == dev_id) {
315 spin_unlock_irqrestore(&dma_chan_lock, flags);
320 spin_unlock_irqrestore(&dma_chan_lock, flags);
/* Claim the channel while still holding the lock. */
323 chan = dma_chan + free_ch;
324 chan->dev_id = dev_id;
325 clear_lch_regs(free_ch);
326 spin_unlock_irqrestore(&dma_chan_lock, flags);
/* NOTE(review): redundant — dev_id was already stored under the lock. */
328 chan->dev_id = dev_id;
329 chan->dev_name = dev_name;
330 chan->callback = callback;
/* Default interrupt set; tune with omap_{enable,disable}_dma_irq(). */
332 chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
334 if (cpu_is_omap1610() || cpu_is_omap5912()) {
335 /* If the sync device is set, configure it dynamically. */
337 set_gdma_dev(free_ch + 1, dev_id);
338 dev_id = free_ch + 1;
340 /* Disable the 1510 compatibility mode and set the sync device
342 omap_writew(dev_id | (1 << 10), OMAP_DMA_CCR_REG(free_ch));
344 omap_writew(dev_id, OMAP_DMA_CCR_REG(free_ch));
346 *dma_ch_out = free_ch;
351 void omap_free_dma(int ch)
355 spin_lock_irqsave(&dma_chan_lock, flags);
356 if (dma_chan[ch].dev_id == -1) {
357 printk("omap_dma: trying to free nonallocated DMA channel %d\n", ch);
358 spin_unlock_irqrestore(&dma_chan_lock, flags);
361 dma_chan[ch].dev_id = -1;
362 spin_unlock_irqrestore(&dma_chan_lock, flags);
364 /* Disable all DMA interrupts for the channel. */
365 omap_writew(0, OMAP_DMA_CICR_REG(ch));
366 /* Make sure the DMA transfer is stopped. */
367 omap_writew(0, OMAP_DMA_CCR_REG(ch));
/* Non-zero when the controller was initialized in OMAP1510-compatibility
 * DMA mode (see omap_init_dma). */
370 int omap_dma_in_1510_mode(void)
372 return enable_1510_mode;
/*
 * Chain lch_queue to run automatically after lch_head completes; the
 * hardware link is armed by omap_start_dma(lch_head).
 * NOTE(review): the bodies of the 1510 error branch and the sanity-check
 * branch are elided in this listing.
 */
376 * lch_queue DMA will start right after lch_head one is finished.
377 * For this DMA link to start, you still need to start (see omap_start_dma)
378 * the first one. That will fire up the entire queue.
380 void omap_dma_link_lch (int lch_head, int lch_queue)
382 if (omap_dma_in_1510_mode()) {
383 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
/* Both ends must have been allocated with omap_request_dma(). */
388 if ((dma_chan[lch_head].dev_id == -1) ||
389 (dma_chan[lch_queue].dev_id == -1)) {
390 printk(KERN_ERR "omap_dma: trying to link non requested channels\n");
/* Record the link; CLNK_CTRL is programmed at start time. */
394 dma_chan[lch_head].next_lch = lch_queue;
398 * Once the DMA queue is stopped, we can destroy it.
400 void omap_dma_unlink_lch (int lch_head, int lch_queue)
402 if (omap_dma_in_1510_mode()) {
403 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
408 if (dma_chan[lch_head].next_lch != lch_queue ||
409 dma_chan[lch_head].next_lch == -1) {
410 printk(KERN_ERR "omap_dma: trying to unlink non linked channels\n");
415 if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
416 (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
417 printk(KERN_ERR "omap_dma: You need to stop the DMA channels before unlinking\n");
421 dma_chan[lch_head].next_lch = -1;
422 dma_chan[lch_queue].next_lch = -1;
/*
 * Book-keeping for the dedicated LCD DMA channel (block B1).
 * NOTE(review): several members are elided in this listing — the code
 * below also uses lcd_dma.lock, lcd_dma.reserved and lcd_dma.cb_data —
 * as is the closing '} lcd_dma;' of the declaration.
 */
426 static struct lcd_dma_info {
/* Completion callback (its argument cb_data is among the elided fields). */
429 void (* callback)(u16 status, void *data);
/* Frame-buffer base address and size (size unused in the visible code). */
432 unsigned long addr, size;
/* Rotation flag, pixel data type and frame-buffer resolution. */
433 int rotate, data_type, xres, yres;
436 void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
440 lcd_dma.data_type = data_type;
441 lcd_dma.xres = fb_xres;
442 lcd_dma.yres = fb_yres;
/*
 * Program the B1 frame registers (top/bottom addresses and, on
 * 1610-class parts, the element/frame counts and indexes) from the
 * current lcd_dma settings.  Rotation uses the double-indexed
 * addressing mode (see comments below).
 * NOTE(review): many lines are elided in this listing — local
 * declarations, the es/en/fn values set in the switch and rotate
 * branches, the 1510 bottom-address adjustment, and the bit-twiddling
 * between several read/write pairs.  Confirm against the full source.
 */
445 static void set_b1_regs(void)
447 unsigned long top, bottom;
/* Element size 'es' is derived from the configured pixel type (elided). */
454 switch (lcd_dma.data_type) {
455 case OMAP_DMA_DATA_TYPE_S8:
458 case OMAP_DMA_DATA_TYPE_S16:
461 case OMAP_DMA_DATA_TYPE_S32:
/* No rotation: one linear frame from addr to its last element. */
469 if (lcd_dma.rotate == 0) {
471 bottom = lcd_dma.addr + (lcd_dma.xres * lcd_dma.yres - 1) * es;
472 /* 1510 DMA requires the bottom address to be 2 more than the
473 * actual last memory access location. */
474 if (omap_dma_in_1510_mode() &&
475 lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
/* Rotated: top is the last element of the first row, bottom the first
 * element of the last row; ei/fi provide the per-element/frame strides. */
482 top = lcd_dma.addr + (lcd_dma.xres - 1) * es;
483 bottom = lcd_dma.addr + (lcd_dma.yres - 1) * lcd_dma.xres * es;
486 ei = (lcd_dma.xres - 1) * es + 1;
/* NOTE(review): the '* 2' hard-codes a 16-bit element stride although
 * 'es' is variable — looks suspicious for S8/S32 pixel types; verify. */
487 fi = -(lcd_dma.xres * (lcd_dma.yres - 1) + 2) * 2 + 1;
/* 1510: only the frame addresses are programmable. */
490 if (omap_dma_in_1510_mode()) {
491 omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
492 omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
493 omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
494 omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);
/* 1610-class: program the full register set. */
500 omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
501 omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
502 omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
503 omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);
505 omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
506 omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);
508 w = omap_readw(OMAP1610_DMA_LCD_CSDP);
510 w |= lcd_dma.data_type;
511 omap_writew(w, OMAP1610_DMA_LCD_CSDP);
/* Rotation-specific configuration (reached in the rotate case). */
517 l = omap_readw(OMAP1610_DMA_LCD_CSDP);
518 /* Disable burst access */
520 omap_writew(l, OMAP1610_DMA_LCD_CSDP);
522 l = omap_readw(OMAP1610_DMA_LCD_CCR);
523 /* Set the double-indexed addressing mode */
525 omap_writew(l, OMAP1610_DMA_LCD_CCR);
527 omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
528 omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
529 omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
/*
 * Select rotated (non-zero) or linear scan-out for LCD block B1; taken
 * into account by set_b1_regs().  Not available in 1510 mode.
 * NOTE(review): the remainder of the 1510 error branch is elided here.
 */
532 void omap_set_lcd_dma_b1_rotation(int rotate)
534 if (omap_dma_in_1510_mode()) {
535 printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
539 lcd_dma.rotate = rotate;
/*
 * Reserve the single LCD DMA channel and register a completion callback.
 * NOTE(review): elided here are the 'void *data' parameter line, the
 * busy-error return and the final 'return 0'.
 * NOTE(review): this path locks with spin_lock_irq() while
 * omap_free_lcd_dma() uses plain spin_lock() — inconsistent IRQ
 * protection; callback/cb_data are also written after the lock is
 * dropped.  Worth revisiting.
 */
542 int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
545 spin_lock_irq(&lcd_dma.lock);
546 if (lcd_dma.reserved) {
547 spin_unlock_irq(&lcd_dma.lock);
548 printk(KERN_ERR "LCD DMA channel already reserved\n");
552 lcd_dma.reserved = 1;
553 spin_unlock_irq(&lcd_dma.lock);
554 lcd_dma.callback = callback;
555 lcd_dma.cb_data = data;
/*
 * Release the LCD DMA channel; on 1610-class parts also clears the CCR
 * enable bit so a running transfer stops.
 * NOTE(review): uses plain spin_lock() where omap_request_lcd_dma()
 * uses spin_lock_irq(); the error branch's 'return' is elided here.
 */
560 void omap_free_lcd_dma(void)
562 spin_lock(&lcd_dma.lock);
563 if (!lcd_dma.reserved) {
564 spin_unlock(&lcd_dma.lock);
565 printk(KERN_ERR "LCD DMA is not reserved\n");
/* Stop the channel before giving it back (bit 0 of CCR = enable). */
569 if (!enable_1510_mode)
570 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
571 lcd_dma.reserved = 0;
572 spin_unlock(&lcd_dma.lock);
575 void omap_start_lcd_dma(void)
577 if (!enable_1510_mode) {
578 /* Set some reasonable defaults */
579 omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
580 omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
581 omap_writew(0x5740, OMAP1610_DMA_LCD_CCR);
584 if (!enable_1510_mode)
585 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) | 1, OMAP1610_DMA_LCD_CCR);
588 void omap_stop_lcd_dma(void)
590 if (!enable_1510_mode)
591 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
/*
 * One-time driver init: detect the DMA flavour (1510-compat vs
 * 1610/5912), reset the software channel table and claim one IRQ per
 * channel.
 * NOTE(review): elided lines include the local declarations, the
 * dma_chan_count setting for the 1510/unknown-CPU cases, the GSCR bit
 * set between the read and write, the request_irq() dev_id argument and
 * its error handling tail, and the final return statements.
 */
594 static int __init omap_init_dma(void)
598 if (cpu_is_omap1510()) {
599 printk(KERN_INFO "DMA support for OMAP1510 initialized\n");
601 enable_1510_mode = 1;
602 } else if (cpu_is_omap1610() || cpu_is_omap5912()) {
603 printk(KERN_INFO "OMAP DMA hardware version %d\n",
604 omap_readw(OMAP_DMA_HW_ID_REG));
605 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
606 (omap_readw(OMAP_DMA_CAPS_0_U_REG) << 16) | omap_readw(OMAP_DMA_CAPS_0_L_REG),
607 (omap_readw(OMAP_DMA_CAPS_1_U_REG) << 16) | omap_readw(OMAP_DMA_CAPS_1_L_REG),
608 omap_readw(OMAP_DMA_CAPS_2_REG), omap_readw(OMAP_DMA_CAPS_3_REG),
609 omap_readw(OMAP_DMA_CAPS_4_REG));
610 if (!enable_1510_mode) {
613 /* Disable OMAP 3.0/3.1 compatibility mode. */
614 w = omap_readw(OMAP_DMA_GSCR_REG);
616 omap_writew(w, OMAP_DMA_GSCR_REG);
617 dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT;
625 memset(&lcd_dma, 0, sizeof(lcd_dma));
626 spin_lock_init(&lcd_dma.lock);
627 spin_lock_init(&dma_chan_lock);
628 memset(&dma_chan, 0, sizeof(dma_chan));
/* Mark every channel free and unlinked. */
630 for (ch = 0; ch < dma_chan_count; ch++) {
631 dma_chan[ch].dev_id = -1;
632 dma_chan[ch].next_lch = -1;
/* 1510: channels >= 6 share IRQ lines with 0..2, none of their own. */
634 if (ch >= 6 && enable_1510_mode)
637 /* request_irq() doesn't like dev_id (ie. ch) being zero,
638 * so we have to kludge around this. */
639 r = request_irq(dma_irq[ch], dma_irq_handler, 0, "DMA",
/* On failure release every IRQ claimed so far (same ch+1 convention). */
644 printk(KERN_ERR "unable to request IRQ %d for DMA (error %d)\n",
646 for (i = 0; i < ch; i++)
647 free_irq(dma_irq[i], (void *) (i + 1));
/* Run early, before drivers that depend on DMA initialize. */
654 arch_initcall(omap_init_dma);
/* System-DMA API exported to modules. */
656 EXPORT_SYMBOL(omap_request_dma);
657 EXPORT_SYMBOL(omap_free_dma);
658 EXPORT_SYMBOL(omap_start_dma);
659 EXPORT_SYMBOL(omap_stop_dma);
660 EXPORT_SYMBOL(omap_set_dma_transfer_params);
661 EXPORT_SYMBOL(omap_set_dma_src_params);
662 EXPORT_SYMBOL(omap_set_dma_dest_params);
663 EXPORT_SYMBOL(omap_dma_link_lch);
664 EXPORT_SYMBOL(omap_dma_unlink_lch);
/* LCD-DMA API exported to modules. */
666 EXPORT_SYMBOL(omap_request_lcd_dma);
667 EXPORT_SYMBOL(omap_free_lcd_dma);
668 EXPORT_SYMBOL(omap_start_lcd_dma);
669 EXPORT_SYMBOL(omap_stop_lcd_dma);
670 EXPORT_SYMBOL(omap_set_lcd_dma_b1);
671 EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);