2 * Copyright (c) by Jaroslav Kysela <perex@suse.cz>
3 * Takashi Iwai <tiwai@suse.de>
5 * Generic memory allocators
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/proc_fs.h>
27 #include <linux/init.h>
28 #include <linux/pci.h>
29 #include <linux/slab.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/moduleparam.h>
33 #include <asm/semaphore.h>
34 #include <sound/memalloc.h>
40 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@suse.cz>");
41 MODULE_DESCRIPTION("Memory allocator for ALSA system.");
42 MODULE_LICENSE("GPL");
/* Per-card enable flags for buffer preallocation; all cards enabled by
 * default.  Read-only module parameter (mode 0444). */
48 static int enable[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1};
/* NOTE(review): `boot_devs` (the array-count variable) is not declared in the
 * visible portion of this file -- confirm it is defined above. */
50 module_param_array(enable, bool, boot_devs, 0444);
51 MODULE_PARM_DESC(enable, "Enable cards to allocate buffers.");
/* Forward declarations of the scatter-gather buffer helpers; presumably
 * implemented in a sibling file (e.g. sgbuf.c) -- TODO confirm. */
56 void *snd_malloc_sgbuf_pages(const struct snd_dma_device *dev,
57 size_t size, struct snd_dma_buffer *dmab,
59 int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);
/* Serializes all access to the reserved-buffer list below. */
64 static DECLARE_MUTEX(list_mutex);
65 static LIST_HEAD(mem_list_head);
67 /* buffer preservation list */
/* One entry per preserved buffer: the device it belongs to, the buffer
 * itself, and the list linkage into mem_list_head. */
69 struct snd_dma_device dev;
70 struct snd_dma_buffer buffer;
72 struct list_head list;
75 /* id for pre-allocated buffers */
/* NOTE(review): the cast expression is unparenthesized; usages in this file
 * only compare it with ==/!=, where that is harmless. */
76 #define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
/* snd_assert(expr, action): in debug builds, if `expr` is false, log the
 * failed expression and caller address, then run `action` (typically a
 * `return`).  In non-debug builds it compiles to nothing. */
78 #ifdef CONFIG_SND_DEBUG
79 #define __ASTRING__(x) #x
80 #define snd_assert(expr, args...) do {\
82 printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
87 #define snd_assert(expr, args...) /**/
94 #if defined(__i386__) || defined(__ppc__) || defined(__x86_64__)
96 * A hack to allocate large buffers via dma_alloc_coherent()
98 * since dma_alloc_coherent always tries GFP_DMA when the requested
99 * pci memory region is below 32bit, it happens quite often that even
100 * 2 order of pages cannot be allocated.
102 * so in the following, we allocate at first without dma_mask, so that
103 * allocation will be done without GFP_DMA. if the area doesn't match
104 * with the requested region, then reallocate with the original dma_mask
107 * Really, we want to move this type of thing into dma_alloc_coherent()
108 * so dma_mask doesn't have to be messed with.
111 static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
112 dma_addr_t *dma_handle, int flags)
115 u64 dma_mask, coherent_dma_mask;
/* No device or no mask: nothing to work around, delegate directly. */
117 if (dev == NULL || !dev->dma_mask)
118 return dma_alloc_coherent(dev, size, dma_handle, flags);
/* Save the real masks, then widen both to 32 bits so the first
 * allocation attempt avoids the scarce GFP_DMA zone. */
119 dma_mask = *dev->dma_mask;
120 coherent_dma_mask = dev->coherent_dma_mask;
121 *dev->dma_mask = 0xffffffff; /* do without masking */
122 dev->coherent_dma_mask = 0xffffffff; /* do without masking */
123 ret = dma_alloc_coherent(dev, size, dma_handle, flags);
124 *dev->dma_mask = dma_mask; /* restore */
125 dev->coherent_dma_mask = coherent_dma_mask; /* restore */
127 /* obtained address is out of range? */
128 if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
129 /* reallocate with the proper mask */
130 dma_free_coherent(dev, size, ret, *dma_handle);
131 ret = dma_alloc_coherent(dev, size, dma_handle, flags);
134 /* wish to succeed now with the proper mask... */
135 if (dma_mask != 0xffffffffUL) {
136 /* allocation with GFP_ATOMIC to avoid the long stall */
137 flags &= ~GFP_KERNEL;
139 ret = dma_alloc_coherent(dev, size, dma_handle, flags);
145 /* redefine dma_alloc_coherent for some architectures */
146 #undef dma_alloc_coherent
147 #define dma_alloc_coherent snd_dma_hack_alloc_coherent
153 * Generic memory allocators
/* Global page counter, incremented by mark_pages() and decremented by
 * unmark_pages(); reported via /proc and leak-checked at module exit. */
157 static long snd_allocated_pages; /* holding the number of allocated pages */
/* Mark every page of a 2^order allocation as reserved (so the pages can be
 * mmapped) and account them in snd_allocated_pages. */
159 static void mark_pages(void *res, int order)
161 struct page *page = virt_to_page(res);
162 struct page *last_page = page + (1 << order);
163 while (page < last_page)
164 SetPageReserved(page++);
165 snd_allocated_pages += 1 << order;
/* Inverse of mark_pages(): clear the reserved bit on each page and
 * subtract the pages from the global counter. */
168 static void unmark_pages(void *res, int order)
170 struct page *page = virt_to_page(res);
171 struct page *last_page = page + (1 << order);
172 while (page < last_page)
173 ClearPageReserved(page++);
174 snd_allocated_pages -= 1 << order;
178 * snd_malloc_pages - allocate pages with the given size
179 * @size: the size to allocate in bytes
180 * @gfp_flags: the allocation conditions, GFP_XXX
182 * Allocates the physically contiguous pages with the given size.
184 * Returns the pointer of the buffer, or NULL if not enough memory.
186 void *snd_malloc_pages(size_t size, unsigned int gfp_flags)
191 snd_assert(size > 0, return NULL);
192 snd_assert(gfp_flags != 0, return NULL);
/* Find the smallest page order whose span covers `size`. */
193 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
194 if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) {
201 * snd_malloc_pages_fallback - allocate pages with the given size with fallback
202 * @size: the requested size to allocate in bytes
203 * @gfp_flags: the allocation conditions, GFP_XXX
204 * @res_size: the pointer to store the size of buffer actually allocated
206 * Allocates the physically contiguous pages with the given request
207 * size. When no space is left, this function reduces the size and
208 * tries to allocate again. The size actually allocated is stored in
211 * Returns the pointer of the buffer, or NULL if not enough memory.
213 void *snd_malloc_pages_fallback(size_t size, unsigned int gfp_flags, size_t *res_size)
217 snd_assert(size > 0, return NULL);
218 snd_assert(res_size != NULL, return NULL);
/* Keep halving (presumably) the request until it fits or drops below
 * one page -- exact reduction step not visible here. */
220 if ((res = snd_malloc_pages(size, gfp_flags)) != NULL) {
225 } while (size >= PAGE_SIZE);
230 * snd_free_pages - release the pages
231 * @ptr: the buffer pointer to release
232 * @size: the allocated buffer size
234 * Releases the buffer allocated via snd_malloc_pages().
236 void snd_free_pages(void *ptr, size_t size)
/* Recompute the same page order used at allocation time. */
242 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
243 unmark_pages(ptr, pg);
244 free_pages((unsigned long) ptr, pg);
249 * Bus-specific memory allocators
/* Allocate coherent DMA pages for a generic device.  Returns the kernel
 * virtual address and stores the bus address in *dma. */
253 static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
257 unsigned int gfp_flags;
259 snd_assert(size > 0, return NULL);
260 snd_assert(dma != NULL, return NULL);
261 pg = get_order(size);
262 gfp_flags = GFP_KERNEL;
/* Suppress allocation-failure warnings; callers may retry smaller. */
264 gfp_flags |= __GFP_NOWARN;
265 res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
/* Like snd_malloc_dev_pages() but retries with reduced sizes until the
 * allocation succeeds or falls below one page; actual size -> *res_size. */
272 static void *snd_malloc_dev_pages_fallback(struct device *dev, size_t size,
273 dma_addr_t *dma, size_t *res_size)
277 snd_assert(res_size != NULL, return NULL);
279 if ((res = snd_malloc_dev_pages(dev, size, dma)) != NULL) {
284 } while (size >= PAGE_SIZE);
/* Release pages obtained via snd_malloc_dev_pages(). */
288 static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
295 pg = get_order(size);
296 unmark_pages(ptr, pg);
297 dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
/* SBUS (SPARC) variant: `dev` is really a struct sbus_dev; allocate
 * consistent DMA memory through the SBUS API. */
302 static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
303 dma_addr_t *dma_addr)
305 struct sbus_dev *sdev = (struct sbus_dev *)dev;
309 snd_assert(size > 0, return NULL);
310 snd_assert(dma_addr != NULL, return NULL);
311 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
312 res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
/* SBUS variant with size-reduction fallback; actual size -> *res_size. */
319 static void *snd_malloc_sbus_pages_fallback(struct device *dev, size_t size,
320 dma_addr_t *dma_addr, size_t *res_size)
324 snd_assert(res_size != NULL, return NULL);
326 if ((res = snd_malloc_sbus_pages(dev, size, dma_addr)) != NULL) {
331 } while (size >= PAGE_SIZE);
/* Release pages obtained via snd_malloc_sbus_pages(). */
335 static void snd_free_sbus_pages(struct device *dev, size_t size,
336 void *ptr, dma_addr_t dma_addr)
338 struct sbus_dev *sdev = (struct sbus_dev *)dev;
343 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
344 unmark_pages(ptr, pg);
345 sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr);
348 #endif /* CONFIG_SBUS */
352 * ALSA generic memory management
358 * compare the two devices
359 * returns non-zero if matched.
/* When allow_unused is set, an id of SNDRV_DMA_DEVICE_UNUSED on either
 * side acts as a wildcard for the id comparison. */
361 static int compare_device(const struct snd_dma_device *a, const struct snd_dma_device *b, int allow_unused)
363 if (a->type != b->type)
365 if (a->id != b->id) {
366 if (! allow_unused || (a->id != SNDRV_DMA_DEVICE_UNUSED && b->id != SNDRV_DMA_DEVICE_UNUSED))
369 return a->dev == b->dev;
373 * snd_dma_alloc_pages - allocate the buffer area according to the given type
374 * @dev: the buffer device info
375 * @size: the buffer size to allocate
376 * @dmab: buffer allocation record to store the allocated data
378 * Calls the memory-allocator function for the corresponding
381 * Returns zero if the buffer with the given size is allocated successfully,
382 * otherwise a negative value at error.
384 int snd_dma_alloc_pages(const struct snd_dma_device *dev, size_t size,
385 struct snd_dma_buffer *dmab)
387 snd_assert(dev != NULL, return -ENXIO);
388 snd_assert(size > 0, return -ENXIO);
389 snd_assert(dmab != NULL, return -ENXIO);
/* Dispatch on the buffer type.  For CONTINUOUS the dev pointer carries
 * the GFP flags (cast back to an integer), not a real device. */
393 case SNDRV_DMA_TYPE_CONTINUOUS:
394 dmab->area = snd_malloc_pages(size, (unsigned long)dev->dev);
398 case SNDRV_DMA_TYPE_SBUS:
399 dmab->area = snd_malloc_sbus_pages(dev->dev, size, &dmab->addr);
402 case SNDRV_DMA_TYPE_DEV:
403 dmab->area = snd_malloc_dev_pages(dev->dev, size, &dmab->addr);
405 case SNDRV_DMA_TYPE_DEV_SG:
406 snd_malloc_sgbuf_pages(dev, size, dmab, NULL);
409 printk(KERN_ERR "snd-malloc: invalid device type %d\n", dev->type);
421 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
422 * @dev: the buffer device info
423 * @size: the buffer size to allocate
424 * @dmab: buffer allocation record to store the allocated data
426 * Calls the memory-allocator function for the corresponding
427 * buffer type. When no space is left, this function reduces the size and
428 * tries to allocate again. The size actually allocated is stored in
431 * Returns zero if the buffer with the given size is allocated successfully,
432 * otherwise a negative value at error.
434 int snd_dma_alloc_pages_fallback(const struct snd_dma_device *dev, size_t size,
435 struct snd_dma_buffer *dmab)
437 snd_assert(dev != NULL, return -ENXIO);
438 snd_assert(size > 0, return -ENXIO);
439 snd_assert(dmab != NULL, return -ENXIO);
/* Same dispatch as snd_dma_alloc_pages(), but using the *_fallback
 * allocators which store the actually-allocated size in dmab->bytes. */
443 case SNDRV_DMA_TYPE_CONTINUOUS:
444 dmab->area = snd_malloc_pages_fallback(size, (unsigned long)dev->dev, &dmab->bytes);
448 case SNDRV_DMA_TYPE_SBUS:
449 dmab->area = snd_malloc_sbus_pages_fallback(dev->dev, size, &dmab->addr, &dmab->bytes);
452 case SNDRV_DMA_TYPE_DEV:
453 dmab->area = snd_malloc_dev_pages_fallback(dev->dev, size, &dmab->addr, &dmab->bytes);
455 case SNDRV_DMA_TYPE_DEV_SG:
456 snd_malloc_sgbuf_pages(dev, size, dmab, &dmab->bytes);
459 printk(KERN_ERR "snd-malloc: invalid device type %d\n", dev->type);
471 * snd_dma_free_pages - release the allocated buffer
472 * @dev: the buffer device info
473 * @dmab: the buffer allocation record to release
475 * Releases the allocated buffer via snd_dma_alloc_pages().
477 void snd_dma_free_pages(const struct snd_dma_device *dev, struct snd_dma_buffer *dmab)
/* Dispatch to the matching type-specific free routine. */
480 case SNDRV_DMA_TYPE_CONTINUOUS:
481 snd_free_pages(dmab->area, dmab->bytes);
484 case SNDRV_DMA_TYPE_SBUS:
485 snd_free_sbus_pages(dev->dev, dmab->bytes, dmab->area, dmab->addr);
488 case SNDRV_DMA_TYPE_DEV:
489 snd_free_dev_pages(dev->dev, dmab->bytes, dmab->area, dmab->addr);
491 case SNDRV_DMA_TYPE_DEV_SG:
492 snd_free_sgbuf_pages(dmab);
495 printk(KERN_ERR "snd-malloc: invalid device type %d\n", dev->type);
501 * search for the device
/* Walk the reserved-buffer list for an entry matching `dev`.  With
 * search_empty set, in-use entries are skipped and the UNUSED id acts as
 * a wildcard (see compare_device()).  Caller must hold list_mutex --
 * presumably; locking is not visible here. */
503 static struct snd_mem_list *mem_list_find(const struct snd_dma_device *dev, int search_empty)
506 struct snd_mem_list *mem;
508 list_for_each(p, &mem_list_head) {
509 mem = list_entry(p, struct snd_mem_list, list);
510 if (mem->used && search_empty)
512 if (compare_device(&mem->dev, dev, search_empty))
519 * snd_dma_get_reserved - get the reserved buffer for the given device
520 * @dev: the buffer device info
521 * @dmab: the buffer allocation record to store
523 * Looks for the reserved-buffer list and re-uses if the same buffer
524 * is found in the list. When the buffer is found, it's marked as used.
525 * For unmarking the buffer, call snd_dma_free_reserved().
527 * Returns the size of buffer if the buffer is found, or zero if not found.
529 size_t snd_dma_get_reserved(const struct snd_dma_device *dev, struct snd_dma_buffer *dmab)
531 struct snd_mem_list *mem;
533 snd_assert(dev && dmab, return 0);
/* search_empty=1: only free entries, with UNUSED-id wildcard matching. */
536 mem = mem_list_find(dev, 1);
549 * snd_dma_free_reserved - unmark the reserved buffer
550 * @dev: the buffer device info
552 * Looks for the matching reserved buffer and erases the mark on it
557 int snd_dma_free_reserved(const struct snd_dma_device *dev)
559 struct snd_mem_list *mem;
561 snd_assert(dev, return -EINVAL);
/* search_empty=0: exact match, in-use entries included. */
563 mem = mem_list_find(dev, 0);
571 * snd_dma_set_reserved - reserve the buffer
572 * @dev: the buffer device info
573 * @dmab: the buffer to reserve
575 * Reserves the given buffer as a reserved buffer.
576 * When an old reserved buffer already exists, the old one is released
577 * and replaced with the new one.
579 * When NULL buffer pointer or zero buffer size is given, the existing
580 * buffer is released and the entry is removed.
582 * Returns zero if successful, or a negative code at error.
584 int snd_dma_set_reserved(const struct snd_dma_device *dev, struct snd_dma_buffer *dmab)
586 struct snd_mem_list *mem;
588 snd_assert(dev, return -EINVAL);
590 mem = mem_list_find(dev, 0);
/* NOTE(review): the message below has an unbalanced '(' -- it should
 * probably end "id=0x%x)\n"; left untouched here as it is runtime text. */
593 printk(KERN_WARNING "snd-page-alloc: releasing the used block (type=%d, id=0x%x\n", mem->dev.type, mem->dev.id);
594 snd_dma_free_pages(dev, &mem->buffer);
595 if (! dmab || ! dmab->bytes) {
596 /* remove the entry */
597 list_del(&mem->list);
/* No existing entry and nothing to store: nothing to do. */
603 if (! dmab || ! dmab->bytes) {
/* Create a fresh list entry for the new reservation. */
607 mem = kmalloc(sizeof(*mem), GFP_KERNEL);
613 list_add_tail(&mem->list, &mem_list_head);
615 /* store the entry */
623 * purge all reserved buffers
/* Called at module exit: pop every entry off the preservation list and
 * release its buffer. */
625 static void free_all_reserved_pages(void)
628 struct snd_mem_list *mem;
631 while (! list_empty(&mem_list_head)) {
632 p = mem_list_head.next;
633 mem = list_entry(p, struct snd_mem_list, list);
635 snd_dma_free_pages(&mem->dev, &mem->buffer);
644 * allocation of buffers for pre-defined devices
648 /* FIXME: for pci only - other bus? */
/* Table entry describing a PCI device that should get buffers
 * preallocated at module load (matched by vendor/device id). */
649 struct prealloc_dev {
650 unsigned short vendor;
651 unsigned short device;
652 unsigned long dma_mask;
654 unsigned int buffers;
/* 16k frames * 4 bytes * (26 channels + 1) -- RME Hammerfall DMA area. */
657 #define HAMMERFALL_BUFFER_SIZE (16*1024*4*(26+1))
659 static struct prealloc_dev prealloc_devices[] __initdata = {
664 .dma_mask = 0xffffffff,
665 .size = HAMMERFALL_BUFFER_SIZE,
672 .dma_mask = 0xffffffff,
673 .size = HAMMERFALL_BUFFER_SIZE,
676 { }, /* terminator */
680 * compose a snd_dma_device struct for the PCI device
/* Fill `dev` to describe coherent-DMA buffers for the given PCI device;
 * `id` distinguishes multiple buffers (SNDRV_DMA_DEVICE_UNUSED for
 * preallocated, not-yet-claimed ones). */
682 static inline void snd_dma_device_pci(struct snd_dma_device *dev, struct pci_dev *pci, unsigned int id)
684 memset(dev, 0, sizeof(*dev));
685 dev->type = SNDRV_DMA_TYPE_DEV;
686 dev->dev = snd_dma_pci_data(pci);
/* Scan all PCI devices at module init; for each device present in
 * prealloc_devices[] (and enabled via the `enable` parameter), set its DMA
 * masks and preallocate its buffers onto the reserved list. */
690 static void __init preallocate_cards(void)
692 struct pci_dev *pci = NULL;
697 while ((pci = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci)) != NULL) {
698 struct prealloc_dev *dev;
700 if (card >= SNDRV_CARDS)
702 for (dev = prealloc_devices; dev->vendor; dev++) {
703 if (dev->vendor == pci->vendor && dev->device == pci->device)
708 if (! enable[card++]) {
709 printk(KERN_DEBUG "snd-page-alloc: skipping card %d, device %04x:%04x\n", card, pci->vendor, pci->device);
713 if (pci_set_dma_mask(pci, dev->dma_mask) < 0 ||
714 pci_set_consistent_dma_mask(pci, dev->dma_mask) < 0) {
715 printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", dev->dma_mask, dev->vendor, dev->device);
718 for (i = 0; i < dev->buffers; i++) {
719 struct snd_mem_list *mem;
720 mem = kmalloc(sizeof(*mem), GFP_KERNEL);
722 printk(KERN_WARNING "snd-page-alloc: can't malloc memlist\n");
725 memset(mem, 0, sizeof(*mem));
726 snd_dma_device_pci(&mem->dev, pci, SNDRV_DMA_DEVICE_UNUSED);
727 if (snd_dma_alloc_pages(&mem->dev, dev->size, &mem->buffer) < 0) {
728 printk(KERN_WARNING "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", dev->size);
732 list_add_tail(&mem->list, &mem_list_head);
/* Without PCI support, preallocation is a no-op. */
739 #define preallocate_cards() /* NOP */
743 #ifdef CONFIG_PROC_FS
745 * proc file interface
/* Renders /proc/driver/snd-page-alloc: total pages allocated, then one
 * line per reserved buffer with its id, type, address, size and state. */
747 static int snd_mem_proc_read(char *page, char **start, off_t off,
748 int count, int *eof, void *data)
/* Normalize the count to 4kB pages regardless of PAGE_SHIFT. */
751 long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
753 struct snd_mem_list *mem;
757 len += sprintf(page + len, "pages : %li bytes (%li pages per %likB)\n",
758 pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
760 list_for_each(p, &mem_list_head) {
761 mem = list_entry(p, struct snd_mem_list, list);
763 len += sprintf(page + len, "buffer %d : ", devno);
764 if (mem->dev.id == SNDRV_DMA_DEVICE_UNUSED)
765 len += sprintf(page + len, "UNUSED");
767 len += sprintf(page + len, "ID %08x", mem->dev.id);
768 len += sprintf(page + len, " : type ");
769 switch (mem->dev.type) {
770 case SNDRV_DMA_TYPE_CONTINUOUS:
771 len += sprintf(page + len, "CONT [%p]", mem->dev.dev);
774 case SNDRV_DMA_TYPE_SBUS:
776 struct sbus_dev *sdev = (struct sbus_dev *)(mem->dev.dev);
777 len += sprintf(page + len, "SBUS [%x]", sdev->slot);
781 case SNDRV_DMA_TYPE_DEV:
782 case SNDRV_DMA_TYPE_DEV_SG:
784 len += sprintf(page + len, "%s [%s]",
785 mem->dev.type == SNDRV_DMA_TYPE_DEV_SG ? "DEV-SG" : "DEV",
786 mem->dev.dev->bus_id);
788 len += sprintf(page + len, "ISA");
791 len += sprintf(page + len, "UNKNOWN");
794 len += sprintf(page + len, "\n addr = 0x%lx, size = %d bytes, used = %s\n",
795 (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes,
796 mem->used ? "yes" : "no");
801 #endif /* CONFIG_PROC_FS */
/* Module init: register the proc entry (and, per the NOP macro above,
 * presumably run preallocate_cards() -- the call is not visible here). */
807 static int __init snd_mem_init(void)
809 #ifdef CONFIG_PROC_FS
810 create_proc_read_entry("driver/snd-page-alloc", 0, 0, snd_mem_proc_read, NULL);
/* Module exit: remove the proc entry, release all reserved buffers, and
 * warn if the page counter indicates a leak. */
816 static void __exit snd_mem_exit(void)
818 remove_proc_entry("driver/snd-page-alloc", NULL);
819 free_all_reserved_pages();
820 if (snd_allocated_pages > 0)
821 printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
825 module_init(snd_mem_init)
826 module_exit(snd_mem_exit)
/* Public API exported to other ALSA modules. */
832 EXPORT_SYMBOL(snd_dma_alloc_pages);
833 EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
834 EXPORT_SYMBOL(snd_dma_free_pages);
836 EXPORT_SYMBOL(snd_dma_get_reserved);
837 EXPORT_SYMBOL(snd_dma_free_reserved);
838 EXPORT_SYMBOL(snd_dma_set_reserved);
840 EXPORT_SYMBOL(snd_malloc_pages);
841 EXPORT_SYMBOL(snd_malloc_pages_fallback);
842 EXPORT_SYMBOL(snd_free_pages);