1 Support functions for the Intel 80310 DMA channels
2 ==================================================
4 Dave Jiang <dave.jiang@intel.com>
5 Last updated: 09/18/2001
7 The Intel 80310 XScale chipset provides 3 DMA channels via the 80312 I/O
8 companion chip. Two of them reside on the primary PCI bus and one on the
11 The DMA API provided is not compatible with the generic interface in the
12 ARM tree unfortunately due to how the 80312 DMACs work. Hopefully some time
13 in the near future a software interface can be done to bridge the differences.
14 The DMA API has been modeled after Nicolas Pitre's SA11x0 DMA API, therefore
15 they will look somewhat similar.
21 int dma_request(dmach_t channel, const char *device_id);
23 This function will attempt to allocate the channel depending on what the
26 IOP310_DMA_P0: PCI Primary 1
27 IOP310_DMA_P1: PCI Primary 2
28 IOP310_DMA_S0: PCI Secondary 1
31 Once the user allocates the DMA channel it is owned until released. Other
32 users can also use the same DMA channel, but no new resources will be
33 allocated. The function will return the allocated channel number if successful.
35 int dma_queue_buffer(dmach_t channel, dma_sghead_t *listhead);
37 The user will construct a SGL in the form of below:
39 * Scattered Gather DMA List for user
41 typedef struct _dma_desc
43 u32 NDAR; /* next descriptor address [READONLY] */
44 u32 PDAR; /* PCI address */
45 u32 PUADR; /* upper PCI address */
46 u32 LADR; /* local address */
47 u32 BC; /* byte count */
48 u32 DC; /* descriptor control */
51 typedef struct _dma_sgl
53 dma_desc_t dma_desc; /* DMA descriptor */
54 u32 status; /* descriptor status [READONLY] */
55 u32 data; /* user defined data */
56 struct _dma_sgl *next; /* next descriptor [READONLY] */
60 typedef struct _dma_head
62 u32 total; /* total elements in SGL */
63 u32 status; /* status of sgl */
64 u32 mode; /* read or write mode */
65 dma_sgl_t *list; /* pointer to list */
66 dma_callback_t callback; /* callback function */
70 The user shall allocate user SGL elements by calling the function:
71 dma_get_buffer(). This function will give the user an SGL element. The user
72 is responsible for creating the SGL head however. The user is also
73 responsible for allocating the memory for DMA data. The following code segment
74 shows how a DMA operation can be performed:
76 #include <asm/arch/iop310-dma.h>
80 char dev_id[] = "Primary 0";
81 dma_head_t *sgl_head = NULL;
82 dma_sgl_t *sgl = NULL;
86 DECLARE_WAIT_QUEUE_HEAD(wait_q);
89 *(IOP310_ATUCR) = (IOP310_ATUCR_PRIM_OUT_ENAB |
90 IOP310_ATUCR_DIR_ADDR_ENAB);
92 channel = dma_request(IOP310_DMA_P0, dev_id);
94 sgl_head = (dma_head_t *)kmalloc(sizeof(dma_head_t), GFP_KERNEL);
95 sgl_head->callback = NULL; /* no callback created */
96 sgl_head->total = 2; /* allocating 2 DMA descriptors */
97 sgl_head->mode = (DMA_MOD_WRITE);
100 /* now we get the two descriptors */
101 sgl = dma_get_buffer(channel, 2);
103 /* we set the header to point to the list we allocated */
104 sgl_head->list = sgl;
106 /* allocate 1k of DMA data */
107 sgl->data = (u32)kmalloc(1024, GFP_KERNEL);
109 /* Local address is physical */
110 sgl->dma_desc.LADR = (u32)virt_to_phys(sgl->data);
112 /* write to arbitrary location over the PCI bus */
113 sgl->dma_desc.PDAR = 0x00600000;
114 sgl->dma_desc.PUADR = 0;
115 sgl->dma_desc.BC = 1024;
117 /* set write & invalidate PCI command */
118 sgl->dma_desc.DC = DMA_DCR_PCI_MWI;
122 memset(sgl->data, 0xFF, 1024);
124 /* User's responsibility to keep buffers cached coherent */
125 cpu_dcache_clean(sgl->data, sgl->data + 1024);
129 sgl->data = (u32)kmalloc(1024, GFP_KERNEL);
130 sgl->dma_desc.LADR = (u32)virt_to_phys(sgl->data);
131 sgl->dma_desc.PDAR = 0x00610000;
132 sgl->dma_desc.PUADR = 0;
133 sgl->dma_desc.BC = 1024;
135 /* second descriptor has interrupt flag enabled */
136 sgl->dma_desc.DC = (DMA_DCR_PCI_MWI | DMA_DCR_IE);
138 /* must set end of chain flag */
139 sgl->status = DMA_END_CHAIN; /* DO NOT FORGET THIS!!!! */
141 memset(sgl->data, 0x0f, 1024);
142 /* User's responsibility to keep buffers cached coherent */
143 cpu_dcache_clean(sgl->data, sgl->data + 1024);
145 /* queuing the buffer, this function will sleep since no callback */
146 err = dma_queue_buffer(channel, sgl_head);
148 /* now we are woken from DMA complete */
150 /* do data operations here */
152 /* free DMA data if necessary */
154 /* return the descriptors */
155 dma_return_buffer(channel, sgl_head->list);
160 kfree((void *)sgl_head);
164 dma_sgl_t * dma_get_buffer(dmach_t channel, int buf_num);
166 This call allocates DMA descriptors for the user.
169 void dma_return_buffer(dmach_t channel, dma_sgl_t *list);
171 This call returns the allocated descriptors back to the API.
174 int dma_suspend(dmach_t channel);
176 This call suspends any DMA transfer on the given channel.
180 int dma_resume(dmach_t channel);
182 This call resumes a DMA transfer which would have been stopped through
186 int dma_flush_all(dmach_t channel);
188 This completely flushes all queued buffers and on-going DMA transfers on a
189 given channel. This is called when DMA channel errors have occurred.
192 void dma_free(dmach_t channel);
194 This clears all activities on a given DMA channel and releases it for future
201 It is the user's responsibility to allocate, free, and keep track of the
202 allocated DMA data memory. Upon calling dma_queue_buffer() the user must
203 relinquish the control of the buffers to the kernel and not change the
204 state of the buffers that it has passed to the kernel. The user will regain
205 the control of the buffers when it has been woken up by the bottom half of
206 the DMA interrupt handler. The user can allocate cached or non-cached buffers
207 via pci_alloc_consistent(). It is the user's responsibility to ensure that
208 the data is cache coherent.
211 The user is responsible for ensuring the ATU is set up properly for DMA transfers.
213 All Disclaimers apply. Use this at your own discretion. Neither Intel nor I
214 will be responsible if anything goes wrong.