/*
 * linux/arch/arm/mach-omap/dma.c
 *
 * Copyright (C) 2003 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 *
 * Support functions for the OMAP internal DMA channels.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include <asm/dma.h>
#include <asm/io.h>

#define OMAP_DMA_ACTIVE         0x01

#define OMAP_DMA_CCR_EN         (1 << 7)

#define OMAP_FUNC_MUX_ARM_BASE  (0xfffe1000 + 0xec)

static int enable_1510_mode = 0;

struct omap_dma_lch {
        int next_lch;
        int dev_id;
        u16 saved_csr;
        u16 enabled_irqs;
        const char *dev_name;
        void (* callback)(int lch, u16 ch_status, void *data);
        void *data;
        long flags;
};

static int dma_chan_count;

static spinlock_t dma_chan_lock;
static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];

static const u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
        INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
        INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
        INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
        INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
        INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
};

/*
 * Each 32-bit GDMA mapping register holds five 6-bit entries; requests and
 * device IDs are numbered from 1 but stored minus one.
 */
static inline int get_gdma_dev(int req)
{
        u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
        int shift = ((req - 1) % 5) * 6;

        return ((omap_readl(reg) >> shift) & 0x3f) + 1;
}

static inline void set_gdma_dev(int req, int dev)
{
        u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
        int shift = ((req - 1) % 5) * 6;
        u32 l;

        l = omap_readl(reg);
        l &= ~(0x3f << shift);
        l |= (dev - 1) << shift;
        omap_writel(l, reg);
}
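
/*
 * Illustrative only (not used by this file): for a hypothetical request
 * number 7, the arithmetic above selects the second mapping register and
 * the 6-bit field starting at bit 6:
 *
 *      reg   = OMAP_FUNC_MUX_ARM_BASE + ((7 - 1) / 5) * 4;    // base + 4
 *      shift = ((7 - 1) % 5) * 6;                              // 6
 *
 * so set_gdma_dev(7, dev) rewrites bits 11..6 of that register with
 * (dev - 1).
 */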

/* Zero the first 0x2c bytes of the channel's register block (0x40 bytes per channel). */
static void clear_lch_regs(int lch)
{
        int i;
        u32 lch_base = OMAP_DMA_BASE + lch * 0x40;

        for (i = 0; i < 0x2c; i += 2)
                omap_writew(0, lch_base + i);
}

void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
                                  int frame_count, int sync_mode)
{
        u16 w;

        w = omap_readw(OMAP_DMA_CSDP_REG(lch));
        w &= ~0x03;
        w |= data_type;
        omap_writew(w, OMAP_DMA_CSDP_REG(lch));

        w = omap_readw(OMAP_DMA_CCR_REG(lch));
        w &= ~(1 << 5);
        if (sync_mode == OMAP_DMA_SYNC_FRAME)
                w |= 1 << 5;
        omap_writew(w, OMAP_DMA_CCR_REG(lch));

        w = omap_readw(OMAP_DMA_CCR2_REG(lch));
        w &= ~(1 << 2);
        if (sync_mode == OMAP_DMA_SYNC_BLOCK)
                w |= 1 << 2;
        omap_writew(w, OMAP_DMA_CCR2_REG(lch));

        omap_writew(elem_count, OMAP_DMA_CEN_REG(lch));
        omap_writew(frame_count, OMAP_DMA_CFN_REG(lch));
}

void omap_set_dma_src_params(int lch, int src_port, int src_amode,
                             unsigned long src_start)
{
        u16 w;

        w = omap_readw(OMAP_DMA_CSDP_REG(lch));
        w &= ~(0x1f << 2);
        w |= src_port << 2;
        omap_writew(w, OMAP_DMA_CSDP_REG(lch));

        w = omap_readw(OMAP_DMA_CCR_REG(lch));
        w &= ~(0x03 << 12);
        w |= src_amode << 12;
        omap_writew(w, OMAP_DMA_CCR_REG(lch));

        omap_writew(src_start >> 16, OMAP_DMA_CSSA_U_REG(lch));
        omap_writew(src_start, OMAP_DMA_CSSA_L_REG(lch));
}

void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
                              unsigned long dest_start)
{
        u16 w;

        w = omap_readw(OMAP_DMA_CSDP_REG(lch));
        w &= ~(0x1f << 9);
        w |= dest_port << 9;
        omap_writew(w, OMAP_DMA_CSDP_REG(lch));

        w = omap_readw(OMAP_DMA_CCR_REG(lch));
        w &= ~(0x03 << 14);
        w |= dest_amode << 14;
        omap_writew(w, OMAP_DMA_CCR_REG(lch));

        omap_writew(dest_start >> 16, OMAP_DMA_CDSA_U_REG(lch));
        omap_writew(dest_start, OMAP_DMA_CDSA_L_REG(lch));
}

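/*
 * Minimal usage sketch (illustrative only, not part of this file): a 16-bit,
 * frame-synchronized transfer from a hypothetical buffer at buf_phys to a
 * hypothetical device FIFO at fifo_phys on channel lch. The port and
 * addressing-mode constants are assumed to come from the OMAP DMA header.
 *
 *      omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16,
 *                                   frame_len, nr_frames,
 *                                   OMAP_DMA_SYNC_FRAME);
 *      omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
 *                              OMAP_DMA_AMODE_POST_INC, buf_phys);
 *      omap_set_dma_dest_params(lch, OMAP_DMA_PORT_TIPB,
 *                               OMAP_DMA_AMODE_CONSTANT, fifo_phys);
 *      omap_start_dma(lch);
 */
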
void omap_start_dma(int lch)
{
        u16 w;

        if (!omap_dma_in_1510_mode()) {
                int next_lch;

                next_lch = dma_chan[lch].next_lch;

                /* Enable the queue, if needed. */
                if (next_lch != -1) {
                        /* Clear the STOP_LNK bits */
                        w = omap_readw(OMAP_DMA_CLNK_CTRL_REG(lch));
                        w &= ~(1 << 14);
                        omap_writew(w, OMAP_DMA_CLNK_CTRL_REG(lch));
                        w = omap_readw(OMAP_DMA_CLNK_CTRL_REG(next_lch));
                        w &= ~(1 << 14);
                        omap_writew(w, OMAP_DMA_CLNK_CTRL_REG(next_lch));

                        /* And set the ENABLE_LNK bits */
                        omap_writew(next_lch | (1 << 15),
                                    OMAP_DMA_CLNK_CTRL_REG(lch));
                        /* Close the loop if the two channels link back to each other */
                        if (dma_chan[next_lch].next_lch == lch)
                                omap_writew(lch | (1 << 15),
                                            OMAP_DMA_CLNK_CTRL_REG(next_lch));

                        /* Read CSR to make sure it's cleared. */
                        w = omap_readw(OMAP_DMA_CSR_REG(next_lch));
                        /* Enable the requested interrupts. */
                        omap_writew(dma_chan[next_lch].enabled_irqs,
                                    OMAP_DMA_CICR_REG(next_lch));
                        dma_chan[next_lch].flags |= OMAP_DMA_ACTIVE;
                }
        }

        /* Read CSR to make sure it's cleared. */
        w = omap_readw(OMAP_DMA_CSR_REG(lch));
        /* Enable the requested interrupts. */
        omap_writew(dma_chan[lch].enabled_irqs, OMAP_DMA_CICR_REG(lch));

        w = omap_readw(OMAP_DMA_CCR_REG(lch));
        w |= OMAP_DMA_CCR_EN;
        omap_writew(w, OMAP_DMA_CCR_REG(lch));
        dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}

void omap_stop_dma(int lch)
{
        u16 w;
        int next_lch;

        /* Disable all interrupts on the channel */
        omap_writew(0, OMAP_DMA_CICR_REG(lch));

        if (omap_dma_in_1510_mode()) {
                w = omap_readw(OMAP_DMA_CCR_REG(lch));
                w &= ~OMAP_DMA_CCR_EN;
                omap_writew(w, OMAP_DMA_CCR_REG(lch));
                dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
                return;
        }

        next_lch = dma_chan[lch].next_lch;

        /*
         * According to the HW spec, enabling the STOP_LNK bit
         * resets the CCR_EN bit at the same time.
         */
        w = omap_readw(OMAP_DMA_CLNK_CTRL_REG(lch));
        w |= (1 << 14);
        omap_writew(w, OMAP_DMA_CLNK_CTRL_REG(lch));
        dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;

        if (next_lch != -1) {
                omap_writew(0, OMAP_DMA_CICR_REG(next_lch));
                w = omap_readw(OMAP_DMA_CLNK_CTRL_REG(next_lch));
                w |= (1 << 14);
                omap_writew(w, OMAP_DMA_CLNK_CTRL_REG(next_lch));
                dma_chan[next_lch].flags &= ~OMAP_DMA_ACTIVE;
        }
}

void omap_enable_dma_irq(int lch, u16 bits)
{
        dma_chan[lch].enabled_irqs |= bits;
}

void omap_disable_dma_irq(int lch, u16 bits)
{
        dma_chan[lch].enabled_irqs &= ~bits;
}

static int dma_handle_ch(int ch)
{
        u16 csr;

        /*
         * In 1510 mode channels 6..8 have no IRQ line of their own: their
         * status arrives in the upper bits of the CSR of channel (ch - 6)
         * and is stashed into saved_csr below.
         */
        if (enable_1510_mode && ch >= 6) {
                csr = dma_chan[ch].saved_csr;
                dma_chan[ch].saved_csr = 0;
        } else
                csr = omap_readw(OMAP_DMA_CSR_REG(ch));
        if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
                dma_chan[ch + 6].saved_csr = csr >> 7;
                csr &= 0x7f;
        }
        if (!csr)
                return 0;
        if (unlikely(dma_chan[ch].dev_id == -1)) {
                printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n",
                       ch, csr);
                return 0;
        }
        if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
                printk(KERN_WARNING "DMA timeout with device %d\n", dma_chan[ch].dev_id);
        if (unlikely(csr & OMAP_DMA_DROP_IRQ))
                printk(KERN_WARNING "DMA synchronization event drop occurred with device %d\n",
                       dma_chan[ch].dev_id);
        if (likely(csr & OMAP_DMA_BLOCK_IRQ))
                dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
        if (likely(dma_chan[ch].callback != NULL))
                dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
        return 1;
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        int ch = ((int) dev_id) - 1;
        int handled = 0;

        /* Drain all pending events; in 1510 mode this line may also carry
         * events for channel ch + 6. */
        for (;;) {
                int handled_now = 0;

                handled_now += dma_handle_ch(ch);
                if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
                        handled_now += dma_handle_ch(ch + 6);
                if (!handled_now)
                        break;
                handled += handled_now;
        }

        return handled ? IRQ_HANDLED : IRQ_NONE;
}

int omap_request_dma(int dev_id, const char *dev_name,
                     void (* callback)(int lch, u16 ch_status, void *data),
                     void *data, int *dma_ch_out)
{
        int ch, free_ch = -1;
        unsigned long flags;
        struct omap_dma_lch *chan;

        spin_lock_irqsave(&dma_chan_lock, flags);
        for (ch = 0; ch < dma_chan_count; ch++) {
                if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
                        free_ch = ch;
                        if (dev_id == 0)
                                break;
                }
                if (dev_id != 0 && dma_chan[ch].dev_id == dev_id) {
                        spin_unlock_irqrestore(&dma_chan_lock, flags);
                        return -EAGAIN;
                }
        }
        if (free_ch == -1) {
                spin_unlock_irqrestore(&dma_chan_lock, flags);
                return -EBUSY;
        }
        chan = dma_chan + free_ch;
        /* Reserve the channel before dropping the lock. */
        chan->dev_id = dev_id;
        clear_lch_regs(free_ch);
        spin_unlock_irqrestore(&dma_chan_lock, flags);

        chan->dev_name = dev_name;
        chan->callback = callback;
        chan->data = data;
        chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

        if (cpu_is_omap1610() || cpu_is_omap5912()) {
                /* If the sync device is set, configure it dynamically. */
                if (dev_id != 0) {
                        set_gdma_dev(free_ch + 1, dev_id);
                        dev_id = free_ch + 1;
                }
                /* Disable the 1510 compatibility mode and set the sync device
                 * id. */
                omap_writew(dev_id | (1 << 10), OMAP_DMA_CCR_REG(free_ch));
        } else {
                omap_writew(dev_id, OMAP_DMA_CCR_REG(free_ch));
        }
        *dma_ch_out = free_ch;

        return 0;
}
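
/*
 * Minimal usage sketch (illustrative only): requesting a channel for a
 * hypothetical device ID my_dev_id, with a callback that signals a
 * hypothetical completion (assuming <linux/completion.h>). Error bits such
 * as OMAP_DMA_TOUT_IRQ are already logged by dma_handle_ch() above.
 *
 *      static void my_dma_callback(int lch, u16 ch_status, void *data)
 *      {
 *              if (ch_status & OMAP_DMA_BLOCK_IRQ)
 *                      complete((struct completion *) data);
 *      }
 *
 *      err = omap_request_dma(my_dev_id, "mydev", my_dma_callback,
 *                             &my_completion, &lch);
 *      if (err)
 *              return err;
 */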

void omap_free_dma(int ch)
{
        unsigned long flags;

        spin_lock_irqsave(&dma_chan_lock, flags);
        if (dma_chan[ch].dev_id == -1) {
                printk(KERN_ERR "omap_dma: trying to free nonallocated DMA channel %d\n", ch);
                spin_unlock_irqrestore(&dma_chan_lock, flags);
                return;
        }
        dma_chan[ch].dev_id = -1;
        spin_unlock_irqrestore(&dma_chan_lock, flags);

        /* Disable all DMA interrupts for the channel. */
        omap_writew(0, OMAP_DMA_CICR_REG(ch));
        /* Make sure the DMA transfer is stopped. */
        omap_writew(0, OMAP_DMA_CCR_REG(ch));
}

int omap_dma_in_1510_mode(void)
{
        return enable_1510_mode;
}

/*
 * The transfer on lch_queue starts as soon as the one on lch_head finishes.
 * For the link to take effect you still need to start the first channel
 * (see omap_start_dma); that fires up the whole queue.
 */
void omap_dma_link_lch(int lch_head, int lch_queue)
{
        if (omap_dma_in_1510_mode()) {
                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
                BUG();
                return;
        }

        if ((dma_chan[lch_head].dev_id == -1) ||
            (dma_chan[lch_queue].dev_id == -1)) {
                printk(KERN_ERR "omap_dma: trying to link non-requested channels\n");
                dump_stack();
        }

        dma_chan[lch_head].next_lch = lch_queue;
}
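
/*
 * Minimal usage sketch (illustrative only): chaining two already requested
 * channels lch_a and lch_b so that lch_b runs after lch_a. Linking lch_b
 * back to lch_a as well would make the pair loop until omap_stop_dma() is
 * called.
 *
 *      omap_dma_link_lch(lch_a, lch_b);
 *      omap_start_dma(lch_a);          // lch_b is started by the HW link
 *      ...
 *      omap_stop_dma(lch_a);
 *      omap_dma_unlink_lch(lch_a, lch_b);
 */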

/*
 * Once the DMA queue is stopped, we can destroy it.
 */
void omap_dma_unlink_lch(int lch_head, int lch_queue)
{
        if (omap_dma_in_1510_mode()) {
                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
                BUG();
                return;
        }

        if (dma_chan[lch_head].next_lch != lch_queue ||
            dma_chan[lch_head].next_lch == -1) {
                printk(KERN_ERR "omap_dma: trying to unlink non-linked channels\n");
                dump_stack();
        }

        if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
            (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
                printk(KERN_ERR "omap_dma: You need to stop the DMA channels before unlinking\n");
                dump_stack();
        }

        dma_chan[lch_head].next_lch = -1;
        dma_chan[lch_queue].next_lch = -1;
}

static struct lcd_dma_info {
        spinlock_t lock;
        int reserved;
        void (* callback)(u16 status, void *data);
        void *cb_data;

        unsigned long addr, size;
        int rotate, data_type, xres, yres;
} lcd_dma;

void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
                         int data_type)
{
        lcd_dma.addr = addr;
        lcd_dma.data_type = data_type;
        lcd_dma.xres = fb_xres;
        lcd_dma.yres = fb_yres;
}

static void set_b1_regs(void)
{
        unsigned long top, bottom;
        int es;
        u16 w, en, fn;
        s16 ei;
        s32 fi;
        u32 l;

        switch (lcd_dma.data_type) {
        case OMAP_DMA_DATA_TYPE_S8:
                es = 1;
                break;
        case OMAP_DMA_DATA_TYPE_S16:
                es = 2;
                break;
        case OMAP_DMA_DATA_TYPE_S32:
                es = 4;
                break;
        default:
                BUG();
                return;
        }

        if (lcd_dma.rotate == 0) {
                top = lcd_dma.addr;
                bottom = lcd_dma.addr + (lcd_dma.xres * lcd_dma.yres - 1) * es;
                /* 1510 DMA requires the bottom address to be 2 more than the
                 * actual last memory access location. */
                if (omap_dma_in_1510_mode() &&
                    lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
                        bottom += 2;
                en = lcd_dma.xres;
                fn = lcd_dma.yres;
                ei = 0;
                fi = 0;
        } else {
                top = lcd_dma.addr + (lcd_dma.xres - 1) * es;
                bottom = lcd_dma.addr + (lcd_dma.yres - 1) * lcd_dma.xres * es;
                en = lcd_dma.yres;
                fn = lcd_dma.xres;
                ei = (lcd_dma.xres - 1) * es + 1;
                fi = -(lcd_dma.xres * (lcd_dma.yres - 1) + 2) * 2 + 1;
        }

        if (omap_dma_in_1510_mode()) {
                omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
                omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
                omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
                omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);

                return;
        }

        /* 1610 regs */
        omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
        omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
        omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
        omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);

        omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
        omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);

        w = omap_readw(OMAP1610_DMA_LCD_CSDP);
        w &= ~0x03;
        w |= lcd_dma.data_type;
        omap_writew(w, OMAP1610_DMA_LCD_CSDP);

        if (!lcd_dma.rotate)
                return;

        /* Rotation: program the double-indexed addressing parameters */
        l = omap_readw(OMAP1610_DMA_LCD_CSDP);
        /* Disable burst access */
        l &= ~(0x03 << 7);
        omap_writew(l, OMAP1610_DMA_LCD_CSDP);

        l = omap_readw(OMAP1610_DMA_LCD_CCR);
        /* Set the double-indexed addressing mode */
        l |= (0x03 << 12);
        omap_writew(l, OMAP1610_DMA_LCD_CCR);

        omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
        omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
        omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
}

void omap_set_lcd_dma_b1_rotation(int rotate)
{
        if (omap_dma_in_1510_mode()) {
                printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
                BUG();
                return;
        }
        lcd_dma.rotate = rotate;
}

int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
                         void *data)
{
        spin_lock_irq(&lcd_dma.lock);
        if (lcd_dma.reserved) {
                spin_unlock_irq(&lcd_dma.lock);
                printk(KERN_ERR "LCD DMA channel already reserved\n");
                BUG();
                return -EBUSY;
        }
        lcd_dma.reserved = 1;
        spin_unlock_irq(&lcd_dma.lock);
        lcd_dma.callback = callback;
        lcd_dma.cb_data = data;

        return 0;
}

void omap_free_lcd_dma(void)
{
        spin_lock(&lcd_dma.lock);
        if (!lcd_dma.reserved) {
                spin_unlock(&lcd_dma.lock);
                printk(KERN_ERR "LCD DMA is not reserved\n");
                BUG();
                return;
        }
        if (!enable_1510_mode)
                omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
        lcd_dma.reserved = 0;
        spin_unlock(&lcd_dma.lock);
}

void omap_start_lcd_dma(void)
{
        if (!enable_1510_mode) {
                /* Set some reasonable defaults */
                omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
                omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
                omap_writew(0x5740, OMAP1610_DMA_LCD_CCR);
        }
        set_b1_regs();
        if (!enable_1510_mode)
                omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) | 1, OMAP1610_DMA_LCD_CCR);
}

void omap_stop_lcd_dma(void)
{
        if (!enable_1510_mode)
                omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
}

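/*
 * Minimal LCD DMA usage sketch (illustrative only), for a hypothetical frame
 * buffer at fb_phys of fb_xres x fb_yres 16-bit pixels; my_frame_done and
 * my_data name a hypothetical callback and cookie stored by
 * omap_request_lcd_dma():
 *
 *      err = omap_request_lcd_dma(my_frame_done, my_data);
 *      if (err)
 *              return err;
 *      omap_set_lcd_dma_b1(fb_phys, fb_xres, fb_yres, OMAP_DMA_DATA_TYPE_S16);
 *      omap_set_lcd_dma_b1_rotation(0);        // not available in 1510 mode
 *      omap_start_lcd_dma();
 *      ...
 *      omap_stop_lcd_dma();
 *      omap_free_lcd_dma();
 */
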
static int __init omap_init_dma(void)
{
        int ch, r;

        if (cpu_is_omap1510()) {
                printk(KERN_INFO "DMA support for OMAP1510 initialized\n");
                dma_chan_count = 9;
                enable_1510_mode = 1;
        } else if (cpu_is_omap1610() || cpu_is_omap5912()) {
                printk(KERN_INFO "OMAP DMA hardware version %d\n",
                       omap_readw(OMAP_DMA_HW_ID_REG));
                printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
                       (omap_readw(OMAP_DMA_CAPS_0_U_REG) << 16) | omap_readw(OMAP_DMA_CAPS_0_L_REG),
                       (omap_readw(OMAP_DMA_CAPS_1_U_REG) << 16) | omap_readw(OMAP_DMA_CAPS_1_L_REG),
                       omap_readw(OMAP_DMA_CAPS_2_REG), omap_readw(OMAP_DMA_CAPS_3_REG),
                       omap_readw(OMAP_DMA_CAPS_4_REG));
                if (!enable_1510_mode) {
                        u16 w;

                        /* Disable OMAP 3.0/3.1 compatibility mode. */
                        w = omap_readw(OMAP_DMA_GSCR_REG);
                        w |= 1 << 3;
                        omap_writew(w, OMAP_DMA_GSCR_REG);
                        dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT;
                } else
                        dma_chan_count = 9;
        } else {
                dma_chan_count = 0;
                return 0;
        }

        memset(&lcd_dma, 0, sizeof(lcd_dma));
        spin_lock_init(&lcd_dma.lock);
        spin_lock_init(&dma_chan_lock);
        memset(&dma_chan, 0, sizeof(dma_chan));

        for (ch = 0; ch < dma_chan_count; ch++) {
                dma_chan[ch].dev_id = -1;
                dma_chan[ch].next_lch = -1;

                if (ch >= 6 && enable_1510_mode)
                        continue;

                /* request_irq() doesn't like dev_id (i.e. ch) being zero,
                 * so we have to kludge around this. */
                r = request_irq(dma_irq[ch], dma_irq_handler, 0, "DMA",
                                (void *) (ch + 1));
                if (r != 0) {
                        int i;

                        printk(KERN_ERR "unable to request IRQ %d for DMA (error %d)\n",
                               dma_irq[ch], r);
                        for (i = 0; i < ch; i++)
                                free_irq(dma_irq[i], (void *) (i + 1));
                        return r;
                }
        }

        return 0;
}
arch_initcall(omap_init_dma);

EXPORT_SYMBOL(omap_request_dma);
EXPORT_SYMBOL(omap_free_dma);
EXPORT_SYMBOL(omap_start_dma);
EXPORT_SYMBOL(omap_stop_dma);
EXPORT_SYMBOL(omap_set_dma_transfer_params);
EXPORT_SYMBOL(omap_set_dma_src_params);
EXPORT_SYMBOL(omap_set_dma_dest_params);
EXPORT_SYMBOL(omap_dma_link_lch);
EXPORT_SYMBOL(omap_dma_unlink_lch);

EXPORT_SYMBOL(omap_request_lcd_dma);
EXPORT_SYMBOL(omap_free_lcd_dma);
EXPORT_SYMBOL(omap_start_lcd_dma);
EXPORT_SYMBOL(omap_stop_lcd_dma);
EXPORT_SYMBOL(omap_set_lcd_dma_b1);
EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);