sound/pci/emu10k1/memory.c
/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/emu10k1.h>

/* the page argument of this macro is an Emu page (4096 bytes), not an
 * aligned kernel page as used elsewhere in this file
 */
#define __set_ptb_entry(emu,page,addr) \
        (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
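/* Layout of a PTB entry, as written by __set_ptb_entry() above: a
 * little-endian 32-bit word containing the page's DMA address shifted
 * left by one bit, OR'ed with the Emu page index.  (The low bits appear
 * to let the hardware cross-check the page number; this is an
 * interpretation of the macro, not taken from the datasheet.)
 */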

#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES         (MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)
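/* Example (illustrative, not part of the original source): with 4 kB
 * kernel pages, UNIT_PAGES == 1 and an aligned page corresponds to exactly
 * one PTB entry; on an architecture with 8 kB pages, UNIT_PAGES == 2 and
 * every aligned page covers two consecutive Emu pages / PTB entries.
 */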

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to the page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to the page with the silent page pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(emu10k1_t *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                addr += EMUPAGESIZE;
        }
}
static inline void set_silent_ptb(emu10k1_t *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                /* the silent page address is not incremented */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */


/*
 */
static int synth_alloc_pages(emu10k1_t *hw, emu10k1_memblk_t *blk);
static int synth_free_pages(emu10k1_t *hw, emu10k1_memblk_t *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, emu10k1_memblk_t, member)


/* initialize the emu10k1-specific part of a memory block */
static void emu10k1_memblk_init(emu10k1_memblk_t *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size (in pages)
 *
 * if an empty region is found, return its start page and store the next
 * mapped block in nextp;
 * if none is found, return a negative error code.
 */
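/* (How the search below behaves: an interior hole of exactly the requested
 * size is taken immediately; otherwise the largest interior hole bigger
 * than the request is remembered as a candidate, and the free area after
 * the last mapped block is used whenever it is at least as large as that
 * candidate or, failing any candidate, as the request itself.)
 */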
static int search_empty_map_area(emu10k1_t *emu, int npages, struct list_head **nextp)
{
        int page = 0, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each (pos, &emu->mapped_link_head) {
                emu10k1_memblk_t *blk = get_emu10k1_memblk(pos, mapped_link);
                snd_assert(blk->mapped_page >= 0, continue);
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = MAX_ALIGN_PAGES - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        /* insert this block at the proper position of the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size (in pages) of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        emu10k1_memblk_t *q;

        /* calculate the expected size of the empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 0;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = MAX_ALIGN_PAGES;

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages large enough for the given size, and create a
 * memory block there
 *
 * unlike synth_alloc, the memory block is aligned to the page start
 */
static emu10k1_memblk_t *
search_empty(emu10k1_t *emu, int size)
{
        struct list_head *p;
        emu10k1_memblk_t *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (emu10k1_memblk_t *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}


/*
 * check if the given DMA address is valid as a page address
 */
static int is_valid_page(emu10k1_t *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                snd_printk("max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE-1)) {
                snd_printk("page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
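/* (Victim selection in the retry path below: blocks are unmapped starting
 * from the oldest entry on mapped_order_link_head, skipping blocks marked
 * map_locked, until the freed region is large enough.)
 */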
int snd_emu10k1_memblk_map(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        emu10k1_memblk_t *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_del(&blk->mapped_order_link);
                list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks */
                /* starting from the oldest block */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* the empty region is now large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

/*
 * page allocation for DMA
 */
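/* (Context, inferred from the code below rather than stated in it: the
 * substream's buffer is a scatter-gather buffer (struct snd_sg_buf); the
 * DMA address of each of its pages is entered into the page table so the
 * hardware sees one contiguous buffer.)
 */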
snd_util_memblk_t *
snd_emu10k1_alloc_pages(emu10k1_t *emu, snd_pcm_substream_t *substream)
{
        snd_pcm_runtime_t *runtime = substream->runtime;
        struct snd_sg_buf *sgbuf = runtime->dma_private;
        snd_util_memhdr_t *hdr;
        emu10k1_memblk_t *blk;
        int page, err, idx;

        snd_assert(emu, return NULL);
        snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes < MAXPAGES * EMUPAGESIZE, return NULL);
        hdr = emu->memhdr;
        snd_assert(hdr, return NULL);

        down(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes);
        if (blk == NULL) {
                up(&hdr->block_mutex);
                return NULL;
        }
        /* fill buffer addresses, but the page pointers are not stored so that
         * snd_free_pci_page() is not called in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                dma_addr_t addr;
#ifdef CONFIG_SND_DEBUG
                if (idx >= sgbuf->pages) {
                        printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
                               blk->first_page, blk->last_page, sgbuf->pages);
                        up(&hdr->block_mutex);
                        return NULL;
                }
#endif
                addr = sgbuf->table[idx].addr;
                if (! is_valid_page(emu, addr)) {
                        printk(KERN_ERR "emu: failure page = %d\n", idx);
                        up(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (snd_util_memblk_t *)blk);
                up(&hdr->block_mutex);
                return NULL;
        }
        up(&hdr->block_mutex);
        return (snd_util_memblk_t *)blk;
}


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(emu10k1_t *emu, snd_util_memblk_t *blk)
{
        snd_assert(emu && blk, return -EINVAL);
        return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */
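/* Rough usage sketch (illustrative only; the real caller is typically the
 * emux/SoundFont sample-loading code, not shown here, and the variable
 * names below are hypothetical):
 *
 *      snd_util_memblk_t *blk;
 *
 *      blk = snd_emu10k1_synth_alloc(emu, sample_size);
 *      if (blk == NULL)
 *              return -ENOMEM;
 *      if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, user_buf, sample_size)) {
 *              snd_emu10k1_synth_free(emu, blk);
 *              return -EFAULT;
 *      }
 *      ...
 *      snd_emu10k1_synth_free(emu, blk);
 */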

/*
 * allocate a synth sample area
 */
snd_util_memblk_t *
snd_emu10k1_synth_alloc(emu10k1_t *hw, unsigned int size)
{
        emu10k1_memblk_t *blk;
        snd_util_memhdr_t *hdr = hw->memhdr;

        down(&hdr->block_mutex);
        blk = (emu10k1_memblk_t *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                up(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (snd_util_memblk_t *)blk);
                up(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        up(&hdr->block_mutex);
        return (snd_util_memblk_t *)blk;
}


/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(emu10k1_t *emu, snd_util_memblk_t *memblk)
{
        snd_util_memhdr_t *hdr = emu->memhdr;
        emu10k1_memblk_t *blk = (emu10k1_memblk_t *)memblk;
        unsigned long flags;

        down(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        up(&hdr->block_mutex);
        return 0;
}


/* get the range of pages that need a fresh allocation; pages shared with
 * the neighboring blocks are already allocated and are excluded
 */
static void get_single_page_range(snd_util_memhdr_t *hdr, emu10k1_memblk_t *blk, int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        emu10k1_memblk_t *q;
        int first_page, last_page;
        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--; /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                if (snd_dma_alloc_pages(&emu->dma_dev, PAGE_SIZE, &dmab) < 0)
                        goto __fail;
                if (! is_valid_page(emu, dmab.addr)) {
                        snd_dma_free_pages(&emu->dma_dev, &dmab);
                        goto __fail;
                }
                emu->page_addr_table[page] = dmab.addr;
                emu->page_ptr_table[page] = dmab.area;
        }
        return 0;

__fail:
        /* release allocated pages */
        last_page = page - 1;
        for (page = first_page; page <= last_page; page++) {
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];
                dmab.bytes = PAGE_SIZE;
                snd_dma_free_pages(&emu->dma_dev, &dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }

        return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        for (page = first_page; page <= last_page; page++) {
                if (emu->page_ptr_table[page] == NULL)
                        continue;
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];
                dmab.bytes = PAGE_SIZE;
                snd_dma_free_pages(&emu->dma_dev, &dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }

        return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(emu10k1_t *emu, int page, int offset)
{
        char *ptr;
        snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
        ptr = emu->page_ptr_table[page];
        if (! ptr) {
                printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void*)ptr;
}

/*
 * bzero(blk + offset, size)
 */
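/* (Both this function and the copy_from_user variant below walk the block
 * page by page, since the underlying synth pages are not virtually
 * contiguous; each chunk is clipped to the end of the current page.)
 */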
int snd_emu10k1_synth_bzero(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        emu10k1_memblk_t *p = (emu10k1_memblk_t *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        emu10k1_memblk_t *p = (emu10k1_memblk_t *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}