/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <sound/driver.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/emu10k1.h>
/* the page arguments of these two macros are in Emu pages (4096 bytes),
 * not in the aligned host pages used elsewhere in this file
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
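/*
 * Each PTB entry is the 4 kB page's DMA address shifted left by one,
 * OR-ed with the entry's own index in the low bits.  Judging from this
 * macro alone (note that even the silent-page entries below carry the
 * index), the chip appears to expect the index there, presumably as a
 * consistency tag for its address translation.
 */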
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
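/*
 * For instance, with 8 kB host pages UNIT_PAGES is 2 and MAX_ALIGN_PAGES
 * is half of MAXPAGES: one aligned page then covers two consecutive
 * 4 kB PTB entries.
 */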
#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to the page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to the page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else /* PAGE_SIZE > EMUPAGESIZE */
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(emu10k1_t *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}

static inline void set_silent_ptb(emu10k1_t *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the silent page address */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
static int synth_alloc_pages(emu10k1_t *hw, emu10k1_memblk_t *blk);
static int synth_free_pages(emu10k1_t *hw, emu10k1_memblk_t *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, emu10k1_memblk_t, member)
/* initialize the emu10k1-specific part of the memory block */
static void emu10k1_memblk_init(emu10k1_memblk_t *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
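/*
 * Example: on a 4 kB-page host, a block with mem.offset 0x1800 and
 * mem.size 0x2000 spans aligned pages 1..3, i.e. first_page = 1,
 * last_page = 3 and pages = 3.
 */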
/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the next
 * mapped block in nextp; if not found, return a negative error code.
 */
static int search_empty_map_area(emu10k1_t *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		emu10k1_memblk_t *blk = get_emu10k1_memblk(pos, mapped_link);
		snd_assert(blk->mapped_page >= 0, continue);
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
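/*
 * Note the strategy above: an exact fit is taken immediately; otherwise
 * the largest hole seen (including the tail region after the last mapped
 * block) is used, so leftover space tends to stay in one large,
 * still-usable piece.
 */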
/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
/*
 * unmap the block and return the size of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	emu10k1_memblk_t *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove the links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* point the PTB entries at the silent page */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
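/*
 * The return value counts the whole hole left behind, i.e. the unmapped
 * block's pages merged with any free space adjacent to it; the eviction
 * loop in snd_emu10k1_memblk_map() compares it against the pages needed.
 */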
/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc, the memory block is aligned to the page start
 */
static emu10k1_memblk_t *
search_empty(emu10k1_t *emu, int size)
{
	struct list_head *p;
	emu10k1_memblk_t *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (emu10k1_memblk_t *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set the aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
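/*
 * This is a simple first-fit scan: memhdr->block is kept sorted by
 * offset, and the first inter-block gap that can hold psize aligned
 * pages (or the tail up to max_cache_pages) is taken.
 */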
/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(emu10k1_t *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk("max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		snd_printk("page is not aligned\n");
		return 0;
	}
	return 1;
}
/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order only.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	emu10k1_memblk_t *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty region is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}
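/*
 * mapped_order_link_head therefore behaves as an LRU list: mapping or
 * re-touching a block appends it at the tail, and the eviction loop
 * walks from the head, unmapping the least recently used blocks first
 * (unless they are map_locked).
 */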
/*
 * page allocation for DMA
 */
snd_util_memblk_t *
snd_emu10k1_alloc_pages(emu10k1_t *emu, snd_pcm_substream_t *substream)
{
	snd_pcm_runtime_t *runtime = substream->runtime;
	struct snd_sg_buf *sgbuf = runtime->dma_private;
	snd_util_memhdr_t *hdr;
	emu10k1_memblk_t *blk;
	int page, err, idx;

	snd_assert(emu, return NULL);
	snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes < MAXPAGES * EMUPAGESIZE, return NULL);
	hdr = emu->memhdr;
	snd_assert(hdr, return NULL);

	down(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL) {
		up(&hdr->block_mutex);
		return NULL;
	}
	/* fill the buffer addresses, but do not store the kernel pointers,
	 * so that snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		dma_addr_t addr;
#ifdef CONFIG_SND_DEBUG
		if (idx >= sgbuf->pages) {
			printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
			       blk->first_page, blk->last_page, sgbuf->pages);
			up(&hdr->block_mutex);
			return NULL;
		}
#endif
		addr = sgbuf->table[idx].addr;
		if (! is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			up(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (snd_util_memblk_t *)blk);
		up(&hdr->block_mutex);
		return NULL;
	}
	up(&hdr->block_mutex);
	return (snd_util_memblk_t *)blk;
}
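/*
 * A rough usage sketch, not the driver's actual callback: a PCM
 * hw_params handler would allocate the scatter-gather buffer first and
 * then map it through the PTB ("epcm" and its fields stand in for the
 * caller's own bookkeeping):
 *
 *	static int sketch_hw_params(snd_pcm_substream_t *substream,
 *				    snd_pcm_hw_params_t *hw_params)
 *	{
 *		emu10k1_pcm_t *epcm = substream->runtime->private_data;
 *		int err;
 *
 *		err = snd_pcm_lib_malloc_pages(substream,
 *					       params_buffer_bytes(hw_params));
 *		if (err < 0)
 *			return err;
 *		epcm->memblk = snd_emu10k1_alloc_pages(epcm->emu, substream);
 *		return epcm->memblk ? 0 : -ENOMEM;
 *	}
 */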
/*
 * release a DMA buffer from the page table
 */
int snd_emu10k1_free_pages(emu10k1_t *emu, snd_util_memblk_t *blk)
{
	snd_assert(emu && blk, return -EINVAL);
	return snd_emu10k1_synth_free(emu, blk);
}
/*
 * memory allocation using multiple pages (for synth)
 * unlike the DMA allocation above, non-contiguous pages are assigned
 */

/*
 * allocate a synth sample area
 */
snd_util_memblk_t *
snd_emu10k1_synth_alloc(emu10k1_t *hw, unsigned int size)
{
	emu10k1_memblk_t *blk;
	snd_util_memhdr_t *hdr = hw->memhdr;

	down(&hdr->block_mutex);
	blk = (emu10k1_memblk_t *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		up(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (snd_util_memblk_t *)blk);
		up(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	up(&hdr->block_mutex);
	return (snd_util_memblk_t *)blk;
}
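/*
 * A minimal sketch of loading a sample through this API ("data" and
 * "len" are hypothetical caller variables):
 *
 *	snd_util_memblk_t *blk = snd_emu10k1_synth_alloc(emu, len);
 *	if (blk == NULL)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, data, len) < 0) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 */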
/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(emu10k1_t *emu, snd_util_memblk_t *memblk)
{
	snd_util_memhdr_t *hdr = emu->memhdr;
	emu10k1_memblk_t *blk = (emu10k1_memblk_t *)memblk;
	unsigned long flags;

	down(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	up(&hdr->block_mutex);
	return 0;
}
/* get the page range actually owned by this block; boundary pages
 * shared with a neighboring block are excluded
 */
static void get_single_page_range(snd_util_memhdr_t *hdr, emu10k1_memblk_t *blk, int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	emu10k1_memblk_t *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;	/* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--;	/* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
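/*
 * Example: synth blocks are not page aligned, so two neighboring blocks
 * can share a boundary page.  If the previous block already owns this
 * block's first aligned page, only first_page+1 .. last_page still need
 * backing memory here.
 */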
/*
 * allocate kernel pages
 */
static int synth_alloc_pages(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_dma_alloc_pages(&emu->dma_dev, PAGE_SIZE, &dmab) < 0)
			goto __fail;
		if (! is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&emu->dma_dev, &dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release the already allocated pages */
	last_page = page - 1;
	for (page = first_page; page <= last_page; page++) {
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];
		dmab.bytes = PAGE_SIZE;
		snd_dma_free_pages(&emu->dma_dev, &dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
	return -ENOMEM;
}
/*
 * free the allocated kernel pages
 */
static int synth_free_pages(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];
		dmab.bytes = PAGE_SIZE;
		snd_dma_free_pages(&emu->dma_dev, &dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
	return 0;
}
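/*
 * Pages whose page_ptr_table entry is NULL are skipped: those belong to
 * a PCM scatter-gather buffer, for which snd_emu10k1_alloc_pages() above
 * stored only the bus address and which is freed by the PCM layer, not
 * here.
 */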
/* calculate the buffer pointer from the offset address */
inline static void *offset_ptr(emu10k1_t *emu, int page, int offset)
{
	char *ptr;

	snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
	ptr = emu->page_ptr_table[page];
	if (! ptr) {
		printk("emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	emu10k1_memblk_t *p = (emu10k1_memblk_t *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}
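/*
 * The loop touches at most one page per iteration ("temp" is clipped at
 * the page boundary) because the backing pages are separate kernel
 * allocations and need not be contiguous in the kernel address space.
 */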
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	emu10k1_memblk_t *p = (emu10k1_memblk_t *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}