2 * Copyright (c) by Jaroslav Kysela <perex@suse.cz>
3 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
4 * Copyright (c) by Scott McNab <sdm@fractalgraphics.com.au>
6 * Trident 4DWave-NX memory page allocation (TLB area)
7 * Trident chip can handle only 16MByte of the memory at the same time.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 #include <sound/driver.h>
28 #include <linux/time.h>
29 #include <sound/core.h>
30 #include <sound/trident.h>
32 /* page arguments of these two macros are Trident page (4096 bytes), not like
33 * aligned pages in others
/* NOTE(review): the opening `#if PAGE_SIZE == 4096` guard is not visible in
 * this excerpt; this section is the 4KB-host-page variant paired with the
 * #elif / generic branches below. */
/* Store one TLB entry: the bus address goes into the hardware-visible
 * little-endian table, the kernel virtual pointer into the shadow table
 * used for CPU-side lookups. */
35 #define __set_tlb_bus(trident,page,ptr,addr) \
36 do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
37 (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
/* kernel virtual address previously stored for this Trident page */
38 #define __tlb_to_ptr(trident,page) \
39 (void*)((trident)->tlb.shadow_entries[page])
/* bus (DMA) address stored for this Trident page.
 * NOTE(review): the alignment mask is applied to the little-endian value
 * *before* le32_to_cpu; that is a no-op on little-endian hosts (stored
 * addresses are already page-aligned) but looks wrong on big-endian ones
 * -- confirm intended byte order. */
40 #define __tlb_to_addr(trident,page) \
41 (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
44 /* page size == SNDRV_TRIDENT_PAGE_SIZE: aligned page == Trident page */
45 #define ALIGN_PAGE_SIZE PAGE_SIZE /* minimum page size for allocation */
46 #define MAX_ALIGN_PAGES SNDRV_TRIDENT_MAX_PAGES /* maximum aligned pages */
47 /* fill TLB entry(s) corresponding to page with ptr */
48 #define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
49 /* fill TLB entry(s) corresponding to page with silence pointer */
50 #define set_silent_tlb(trident,page) __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
51 /* get aligned page from offset address */
52 #define get_aligned_page(offset) ((offset) >> 12)
53 /* get offset address from aligned page */
54 #define aligned_page_offset(page) ((page) << 12)
55 /* get buffer address from aligned page */
56 #define page_to_ptr(trident,page) __tlb_to_ptr(trident, page)
57 /* get PCI physical address from aligned page */
58 #define page_to_addr(trident,page) __tlb_to_addr(trident, page)
60 #elif PAGE_SIZE == 8192
61 /* page size == SNDRV_TRIDENT_PAGE_SIZE x 2: one aligned (8KB) page
62 * covers two consecutive Trident (4KB) pages */
62 #define ALIGN_PAGE_SIZE PAGE_SIZE
63 #define MAX_ALIGN_PAGES (SNDRV_TRIDENT_MAX_PAGES / 2)
64 #define get_aligned_page(offset) ((offset) >> 13)
65 #define aligned_page_offset(page) ((page) << 13)
/* one aligned page maps to two TLB slots, hence the << 1 */
66 #define page_to_ptr(trident,page) __tlb_to_ptr(trident, (page) << 1)
67 #define page_to_addr(trident,page) __tlb_to_addr(trident, (page) << 1)
69 /* fill TLB entries -- we need to fill two entries */
/* NOTE(review): the function braces and any aligned-page -> Trident-page
 * index conversion between lines 70 and 73 fall outside this excerpt --
 * confirm against the full source before relying on the page units here. */
70 static inline void set_tlb_bus(trident_t *trident, int page, unsigned long ptr, dma_addr_t addr)
73 __set_tlb_bus(trident, page, ptr, addr);
74 __set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
/* point both halves of the aligned page at the shared silent page */
76 static inline void set_silent_tlb(trident_t *trident, int page)
79 __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
80 __set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
/* generic fallback: host PAGE_SIZE is some multiple of the Trident page */
85 #define UNIT_PAGES (PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
86 #define ALIGN_PAGE_SIZE (SNDRV_TRIDENT_PAGE_SIZE * UNIT_PAGES)
87 #define MAX_ALIGN_PAGES (SNDRV_TRIDENT_MAX_PAGES / UNIT_PAGES)
88 /* Note: if alignment doesn't match to the maximum size, the last few blocks
89 * become unusable. To use such blocks, you'll need to check the validity
90 * of accessing page in set_tlb_bus and set_silent_tlb. search_empty()
91 * should also check it, too.
93 #define get_aligned_page(offset) ((offset) / ALIGN_PAGE_SIZE)
94 #define aligned_page_offset(page) ((page) * ALIGN_PAGE_SIZE)
/* one aligned page spans UNIT_PAGES TLB slots */
95 #define page_to_ptr(trident,page) __tlb_to_ptr(trident, (page) * UNIT_PAGES)
96 #define page_to_addr(trident,page) __tlb_to_addr(trident, (page) * UNIT_PAGES)
98 /* fill TLB entries -- UNIT_PAGES entries must be filled */
/* walks UNIT_PAGES consecutive Trident pages, advancing ptr/addr by one
 * Trident page per step (braces and declarations fall outside this excerpt) */
99 static inline void set_tlb_bus(trident_t *trident, int page, unsigned long ptr, dma_addr_t addr)
103 for (i = 0; i < UNIT_PAGES; i++, page++) {
104 __set_tlb_bus(trident, page, ptr, addr);
105 ptr += SNDRV_TRIDENT_PAGE_SIZE;
106 addr += SNDRV_TRIDENT_PAGE_SIZE;
/* map every covered Trident page to the shared silent page */
109 static inline void set_silent_tlb(trident_t *trident, int page)
113 for (i = 0; i < UNIT_PAGES; i++, page++)
114 __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
117 #endif /* PAGE_SIZE */
119 /* calculate buffer pointer from offset address */
/* Returns the kernel virtual address for a byte offset within the
 * TLB-mapped area: page lookup through the shadow table plus the in-page
 * remainder.  (Local declaration and return statement fall outside this
 * excerpt.) */
120 inline static void *offset_ptr(trident_t *trident, int offset)
123 ptr = page_to_ptr(trident, get_aligned_page(offset));
124 ptr += offset % ALIGN_PAGE_SIZE;
128 /* first and last (aligned) pages of memory block */
/* Accessors into the per-block private argument area
 * (snd_trident_memblk_arg_t), which records the range of aligned pages a
 * block occupies; used both as lvalues (assignment) and rvalues below. */
129 #define firstpg(blk) (((snd_trident_memblk_arg_t*)snd_util_memblk_argptr(blk))->first_page)
130 #define lastpg(blk) (((snd_trident_memblk_arg_t*)snd_util_memblk_argptr(blk))->last_page)
133 * search empty pages which may contain given size
/*
 * Find a run of free aligned pages large enough for @size bytes and
 * create a new memory block covering it.  Scans the address-ordered
 * block list for the first gap of at least psize pages; the request
 * fails if the candidate range would extend past MAX_ALIGN_PAGES.
 * Callers in this file take hdr->block_mutex before calling, so the
 * list walk is protected by that lock.  Several lines (declarations,
 * the found-gap branch target and the return statements) fall outside
 * this excerpt.
 */
135 static snd_util_memblk_t *
136 search_empty(snd_util_memhdr_t *hdr, int size)
138 snd_util_memblk_t *blk, *prev;
/* round the byte size up to whole aligned pages */
142 psize = get_aligned_page(size + ALIGN_PAGE_SIZE -1);
145 list_for_each(p, &hdr->block) {
146 blk = list_entry(p, snd_util_memblk_t, list);
/* is the gap before this block big enough? */
147 if (page + psize <= firstpg(blk))
/* otherwise continue searching after this block */
149 page = lastpg(blk) + 1;
/* no interior gap found; the tail region must still fit */
151 if (page + psize > MAX_ALIGN_PAGES)
155 /* create a new memory block */
156 blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
159 blk->offset = aligned_page_offset(page); /* set aligned offset */
161 lastpg(blk) = page + psize - 1;
167 * check if the given pointer is valid for pages
/*
 * Validate a DMA address before programming it into the TLB: it must fit
 * below 1GB (presumably a chip addressing limit -- confirm against the
 * hardware documentation) and be aligned to the Trident page size.
 * Callers treat a nonzero result as "valid" (the return statements fall
 * outside this excerpt).  NOTE(review): the snd_printk() calls carry no
 * KERN_ level prefix.
 */
169 static int is_valid_page(unsigned long ptr)
/* any bit above bit 29 set -> address not reachable by the chip */
171 if (ptr & ~0x3fffffffUL) {
172 snd_printk("max memory size is 1GB!!\n");
/* low bits set -> not aligned to a Trident page */
175 if (ptr & (SNDRV_TRIDENT_PAGE_SIZE-1)) {
176 snd_printk("page is not aligned\n");
183 * page allocation for DMA (Scatter-Gather version)
/*
 * Map an SG-buffered PCM substream into the Trident TLB: reserve a range
 * of aligned pages via search_empty(), then program one TLB entry per
 * page from the substream's scatter-gather table.  All failure paths
 * free the block and drop hdr->block_mutex before returning.  Some lines
 * (declarations, NULL checks and returns) fall outside this excerpt.
 */
185 static snd_util_memblk_t *
186 snd_trident_alloc_sg_pages(trident_t *trident, snd_pcm_substream_t *substream)
188 snd_util_memhdr_t *hdr;
189 snd_util_memblk_t *blk;
190 snd_pcm_runtime_t *runtime = substream->runtime;
192 struct snd_sg_buf *sgbuf = runtime->dma_private;
/* buffer must fit within what the chip's 16MB TLB window can map */
194 snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes <= SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE, return NULL);
195 hdr = trident->tlb.memhdr;
196 snd_assert(hdr != NULL, return NULL);
200 down(&hdr->block_mutex);
201 blk = search_empty(hdr, runtime->dma_bytes);
203 up(&hdr->block_mutex);
/* sanity check: the reserved page range must not need more pages than
 * the SG buffer actually provides */
206 if (lastpg(blk) - firstpg(blk) >= sgbuf->pages) {
207 snd_printk(KERN_ERR "page calculation doesn't match: allocated pages = %d, trident = %d/%d\n", sgbuf->pages, firstpg(blk), lastpg(blk));
208 __snd_util_mem_free(hdr, blk);
209 up(&hdr->block_mutex);
213 /* set TLB entries */
215 for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
216 dma_addr_t addr = sgbuf->table[idx].addr;
217 unsigned long ptr = (unsigned long)sgbuf->table[idx].buf;
/* reject addresses the chip cannot reach; unwind on failure */
218 if (! is_valid_page(addr)) {
219 __snd_util_mem_free(hdr, blk);
220 up(&hdr->block_mutex);
223 set_tlb_bus(trident, page, ptr, addr);
225 up(&hdr->block_mutex);
230 * page allocation for DMA (contiguous version)
/*
 * Map a physically contiguous PCM buffer into the Trident TLB: reserve a
 * range of aligned pages, then fill one TLB entry per page by stepping
 * ptr/addr through the buffer one Trident page at a time.  Some lines
 * (declarations, NULL checks and returns) fall outside this excerpt.
 */
232 static snd_util_memblk_t *
233 snd_trident_alloc_cont_pages(trident_t *trident, snd_pcm_substream_t *substream)
235 snd_util_memhdr_t *hdr;
236 snd_util_memblk_t *blk;
238 snd_pcm_runtime_t *runtime = substream->runtime;
/* buffer must fit within what the chip's 16MB TLB window can map */
242 snd_assert(runtime->dma_bytes> 0 && runtime->dma_bytes <= SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE, return NULL);
243 hdr = trident->tlb.memhdr;
244 snd_assert(hdr != NULL, return NULL);
246 down(&hdr->block_mutex);
247 blk = search_empty(hdr, runtime->dma_bytes);
249 up(&hdr->block_mutex);
253 /* set TLB entries */
254 addr = runtime->dma_addr;
255 ptr = (unsigned long)runtime->dma_area;
256 for (page = firstpg(blk); page <= lastpg(blk); page++,
257 ptr += SNDRV_TRIDENT_PAGE_SIZE, addr += SNDRV_TRIDENT_PAGE_SIZE) {
/* reject addresses the chip cannot reach; unwind on failure */
258 if (! is_valid_page(addr)) {
259 __snd_util_mem_free(hdr, blk);
260 up(&hdr->block_mutex);
263 set_tlb_bus(trident, page, ptr, addr);
265 up(&hdr->block_mutex);
270 * page allocation for DMA
/*
 * Public entry point: dispatch to the scatter-gather or contiguous
 * mapper depending on the substream's DMA buffer type.  (The return-type
 * line of the definition falls outside this excerpt.)
 */
273 snd_trident_alloc_pages(trident_t *trident, snd_pcm_substream_t *substream)
275 snd_assert(trident != NULL, return NULL);
276 snd_assert(substream != NULL, return NULL);
277 if (substream->dma_device.type == SNDRV_DMA_TYPE_DEV_SG)
278 return snd_trident_alloc_sg_pages(trident, substream);
280 return snd_trident_alloc_cont_pages(trident, substream);
285 * release DMA buffer from page table
/*
 * Undo snd_trident_alloc_pages(): point every TLB entry of the block
 * back at the silent page, then release the block itself.  Both steps
 * happen under hdr->block_mutex.  (Declarations and the return fall
 * outside this excerpt.)
 */
287 int snd_trident_free_pages(trident_t *trident, snd_util_memblk_t *blk)
289 snd_util_memhdr_t *hdr;
292 snd_assert(trident != NULL, return -EINVAL);
293 snd_assert(blk != NULL, return -EINVAL);
295 hdr = trident->tlb.memhdr;
296 down(&hdr->block_mutex);
297 /* reset TLB entries */
298 for (page = firstpg(blk); page <= lastpg(blk); page++)
299 set_silent_tlb(trident, page);
300 /* free memory block */
301 __snd_util_mem_free(hdr, blk);
302 up(&hdr->block_mutex);
307 /*----------------------------------------------------------------
308 * memory allocation using multiple pages (for synth)
309 *----------------------------------------------------------------
310 * Unlike the DMA allocation above, non-contiguous pages are
312 *----------------------------------------------------------------*/
/* forward declarations for the per-page helpers used by the synth
 * allocator below */
316 static int synth_alloc_pages(trident_t *hw, snd_util_memblk_t *blk);
317 static int synth_free_pages(trident_t *hw, snd_util_memblk_t *blk);
320 * allocate a synth sample area
/*
 * Allocate @size bytes from the TLB-managed heap for synth samples and
 * back the new block with kernel pages via synth_alloc_pages().  The
 * whole operation runs under hdr->block_mutex; on backing failure the
 * block is freed again.  (The return-type line, NULL checks and return
 * statements fall outside this excerpt.)
 */
323 snd_trident_synth_alloc(trident_t *hw, unsigned int size)
325 snd_util_memblk_t *blk;
326 snd_util_memhdr_t *hdr = hw->tlb.memhdr;
328 down(&hdr->block_mutex);
329 blk = __snd_util_mem_alloc(hdr, size);
331 up(&hdr->block_mutex);
/* back the block with real pages; roll back on failure */
334 if (synth_alloc_pages(hw, blk)) {
335 __snd_util_mem_free(hdr, blk);
336 up(&hdr->block_mutex);
339 up(&hdr->block_mutex);
345 * free a synth sample area
/*
 * Release a synth sample block: free its backing pages first, then the
 * block itself, all under hdr->block_mutex.  (The return-type line and
 * return statement fall outside this excerpt.)
 */
348 snd_trident_synth_free(trident_t *hw, snd_util_memblk_t *blk)
350 snd_util_memhdr_t *hdr = hw->tlb.memhdr;
352 down(&hdr->block_mutex);
353 synth_free_pages(hw, blk);
354 __snd_util_mem_free(hdr, blk);
355 up(&hdr->block_mutex);
361 * reset TLB entry and free kernel page
/*
 * Detach one aligned page from the TLB (re-pointing it at the silent
 * page) and return its backing kernel page via snd_dma_free_pages().
 * The lines that populate dmab.area/dmab.addr from the saved ptr/addr
 * fall outside this excerpt.
 */
363 static void clear_tlb(trident_t *trident, int page)
/* capture the mapping before it is overwritten by set_silent_tlb() */
365 void *ptr = page_to_ptr(trident, page);
366 dma_addr_t addr = page_to_addr(trident, page);
367 set_silent_tlb(trident, page);
369 struct snd_dma_buffer dmab;
372 dmab.bytes = ALIGN_PAGE_SIZE;
373 snd_dma_free_pages(&trident->dma_dev, &dmab);
377 /* check new allocation range */
/*
 * Compute the range of aligned pages that belong exclusively to @blk.
 * Adjacent blocks may share a boundary page (a block need not start or
 * end on a page boundary); if the previous block already owns the first
 * page, or the next block already owns the last, the range is shrunk so
 * those shared pages are neither allocated nor freed twice.
 */
378 static void get_single_page_range(snd_util_memhdr_t *hdr, snd_util_memblk_t *blk, int *first_page_ret, int *last_page_ret)
381 snd_util_memblk_t *q;
382 int first_page, last_page;
383 first_page = firstpg(blk);
384 if ((p = blk->list.prev) != &hdr->block) {
385 q = list_entry(p, snd_util_memblk_t, list);
386 if (lastpg(q) == first_page)
387 first_page++; /* first page was already allocated */
389 last_page = lastpg(blk);
390 if ((p = blk->list.next) != &hdr->block) {
391 q = list_entry(p, snd_util_memblk_t, list);
392 if (firstpg(q) == last_page)
393 last_page--; /* last page was already allocated */
395 *first_page_ret = first_page;
396 *last_page_ret = last_page;
400 * allocate kernel pages and assign them to TLB
/*
 * Back a synth memory block with kernel pages: record the block's
 * aligned-page range, narrow it to pages not shared with neighbours
 * (get_single_page_range), then allocate one DMA page per aligned page
 * and program it into the TLB.  On any failure, pages allocated so far
 * are released again.  (Braces, the error-label and return statements
 * fall outside this excerpt.)
 */
402 static int synth_alloc_pages(trident_t *hw, snd_util_memblk_t *blk)
404 int page, first_page, last_page;
405 struct snd_dma_buffer dmab;
407 firstpg(blk) = get_aligned_page(blk->offset);
408 lastpg(blk) = get_aligned_page(blk->offset + blk->size - 1);
409 get_single_page_range(hw->tlb.memhdr, blk, &first_page, &last_page);
411 /* allocate a kernel page for each Trident page -
412 * fortunately Trident page size and kernel PAGE_SIZE is identical!
414 for (page = first_page; page <= last_page; page++) {
415 if (snd_dma_alloc_pages(&hw->dma_dev, ALIGN_PAGE_SIZE, &dmab) < 0)
/* page allocated but unreachable by the chip: give it back */
417 if (! is_valid_page(dmab.addr)) {
418 snd_dma_free_pages(&hw->dma_dev, &dmab);
421 set_tlb_bus(hw, page, (unsigned long)dmab.area, dmab.addr);
426 /* release allocated pages */
/* unwind: free every page successfully installed before the failure */
427 last_page = page - 1;
428 for (page = first_page; page <= last_page; page++)
/*
 * Free the kernel pages backing a synth block: clear_tlb() each aligned
 * page the block exclusively owns (shared boundary pages are excluded by
 * get_single_page_range).  (Braces and the return statement fall outside
 * this excerpt.)
 */
437 static int synth_free_pages(trident_t *trident, snd_util_memblk_t *blk)
439 int page, first_page, last_page;
441 get_single_page_range(trident->tlb.memhdr, blk, &first_page, &last_page);
442 for (page = first_page; page <= last_page; page++)
443 clear_tlb(trident, page);
449 * bzero(blk + offset, size)
/*
 * Zero @size bytes starting at @offset within block @blk.  Because the
 * backing pages are not virtually contiguous, the region is cleared one
 * aligned page at a time: each iteration memsets from the current offset
 * up to the next page boundary (or the end of the region, whichever is
 * nearer).  Several loop lines (the do-statement head, the temp/temp1
 * clamp and the offset advance) fall outside this excerpt.
 */
451 int snd_trident_synth_bzero(trident_t *trident, snd_util_memblk_t *blk, int offset, int size)
453 int page, nextofs, end_offset, temp, temp1;
/* translate block-relative offset to an absolute heap offset */
455 offset += blk->offset;
456 end_offset = offset + size;
457 page = get_aligned_page(offset) + 1;
459 nextofs = aligned_page_offset(page);
/* temp: bytes to the next page boundary; temp1: bytes remaining */
460 temp = nextofs - offset;
461 temp1 = end_offset - offset;
464 memset(offset_ptr(trident, offset), 0, temp);
467 } while (offset < end_offset);
472 * copy_from_user(blk + offset, data, size)
474 int snd_trident_synth_copy_from_user(trident_t *trident, snd_util_memblk_t *blk, int offset, const char *data, int size)
476 int page, nextofs, end_offset, temp, temp1;
478 offset += blk->offset;
479 end_offset = offset + size;
480 page = get_aligned_page(offset) + 1;
482 nextofs = aligned_page_offset(page);
483 temp = nextofs - offset;
484 temp1 = end_offset - offset;
487 if (copy_from_user(offset_ptr(trident, offset), data, temp))
492 } while (offset < end_offset);