X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sound%2Fcore%2Fmemalloc.c;h=3fc6f97075ed87ae962562bfae0cf3ad14a8e9b6;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=344a83fd7c2e6b884cc318cad9518dfa733def8e;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 344a83fd7..3fc6f9707 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -28,9 +28,10 @@ #include #include #include +#include #include #include -#include +#include #include #ifdef CONFIG_SBUS #include @@ -42,17 +43,6 @@ MODULE_DESCRIPTION("Memory allocator for ALSA system."); MODULE_LICENSE("GPL"); -#ifndef SNDRV_CARDS -#define SNDRV_CARDS 8 -#endif - -/* FIXME: so far only some PCI devices have the preallocation table */ -#ifdef CONFIG_PCI -static int enable[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1}; -module_param_array(enable, bool, NULL, 0444); -MODULE_PARM_DESC(enable, "Enable cards to allocate buffers."); -#endif - /* */ @@ -64,7 +54,7 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab); /* */ -static DECLARE_MUTEX(list_mutex); +static DEFINE_MUTEX(list_mutex); static LIST_HEAD(mem_list_head); /* buffer preservation list */ @@ -93,7 +83,7 @@ struct snd_mem_list { * Hacks */ -#if defined(__i386__) || defined(__ppc__) || defined(__x86_64__) +#if defined(__i386__) /* * A hack to allocate large buffers via dma_alloc_coherent() * @@ -111,7 +101,8 @@ struct snd_mem_list { */ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flags) + dma_addr_t *dma_handle, + gfp_t flags) { void *ret; u64 dma_mask, coherent_dma_mask; @@ -150,10 +141,6 @@ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, #endif /* arch */ -#if ! defined(__arm__) -#define NEED_RESERVE_PAGES -#endif - /* * * Generic memory allocators @@ -172,20 +159,6 @@ static inline void dec_snd_pages(int order) snd_allocated_pages -= 1 << order; } -static void mark_pages(struct page *page, int order) -{ - struct page *last_page = page + (1 << order); - while (page < last_page) - SetPageReserved(page++); -} - -static void unmark_pages(struct page *page, int order) -{ - struct page *last_page = page + (1 << order); - while (page < last_page) - ClearPageReserved(page++); -} - /** * snd_malloc_pages - allocate pages with the given size * @size: the size to allocate in bytes @@ -195,18 +168,17 @@ static void unmark_pages(struct page *page, int order) * * Returns the pointer of the buffer, or NULL if no enoguh memory. 
*/ -void *snd_malloc_pages(size_t size, unsigned int gfp_flags) +void *snd_malloc_pages(size_t size, gfp_t gfp_flags) { int pg; void *res; snd_assert(size > 0, return NULL); snd_assert(gfp_flags != 0, return NULL); + gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */ pg = get_order(size); - if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) { - mark_pages(virt_to_page(res), pg); + if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) inc_snd_pages(pg); - } return res; } @@ -225,7 +197,6 @@ void snd_free_pages(void *ptr, size_t size) return; pg = get_order(size); dec_snd_pages(pg); - unmark_pages(virt_to_page(ptr), pg); free_pages((unsigned long) ptr, pg); } @@ -240,21 +211,18 @@ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *d { int pg; void *res; - unsigned int gfp_flags; + gfp_t gfp_flags; snd_assert(size > 0, return NULL); snd_assert(dma != NULL, return NULL); pg = get_order(size); gfp_flags = GFP_KERNEL + | __GFP_COMP /* compound page lets parts be mapped */ | __GFP_NORETRY /* don't trigger OOM-killer */ | __GFP_NOWARN; /* no stack trace print - this call is non-critical */ res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags); - if (res != NULL) { -#ifdef NEED_RESERVE_PAGES - mark_pages(virt_to_page(res), pg); /* should be dma_to_page() */ -#endif + if (res != NULL) inc_snd_pages(pg); - } return res; } @@ -269,9 +237,6 @@ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr, return; pg = get_order(size); dec_snd_pages(pg); -#ifdef NEED_RESERVE_PAGES - unmark_pages(virt_to_page(ptr), pg); /* should be dma_to_page() */ -#endif dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma); } @@ -447,19 +412,23 @@ size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id) snd_assert(dmab, return 0); - down(&list_mutex); + mutex_lock(&list_mutex); list_for_each(p, &mem_list_head) { mem = list_entry(p, struct snd_mem_list, list); if (mem->id == id && - ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev))) { + (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL || + ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) { + struct device *dev = dmab->dev.dev; list_del(p); *dmab = mem->buffer; + if (dmab->dev.dev == NULL) + dmab->dev.dev = dev; kfree(mem); - up(&list_mutex); + mutex_unlock(&list_mutex); return dmab->bytes; } } - up(&list_mutex); + mutex_unlock(&list_mutex); return 0; } @@ -480,11 +449,11 @@ int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id) mem = kmalloc(sizeof(*mem), GFP_KERNEL); if (! mem) return -ENOMEM; - down(&list_mutex); + mutex_lock(&list_mutex); mem->buffer = *dmab; mem->id = id; list_add_tail(&mem->list, &mem_list_head); - up(&list_mutex); + mutex_unlock(&list_mutex); return 0; } @@ -496,7 +465,7 @@ static void free_all_reserved_pages(void) struct list_head *p; struct snd_mem_list *mem; - down(&list_mutex); + mutex_lock(&list_mutex); while (! list_empty(&mem_list_head)) { p = mem_list_head.next; mem = list_entry(p, struct snd_mem_list, list); @@ -504,95 +473,17 @@ static void free_all_reserved_pages(void) snd_dma_free_pages(&mem->buffer); kfree(mem); } - up(&list_mutex); -} - - - -/* - * allocation of buffers for pre-defined devices - */ - -#ifdef CONFIG_PCI -/* FIXME: for pci only - other bus? 
*/ -struct prealloc_dev { - unsigned short vendor; - unsigned short device; - unsigned long dma_mask; - unsigned int size; - unsigned int buffers; -}; - -#define HAMMERFALL_BUFFER_SIZE (16*1024*4*(26+1)+0x10000) - -static struct prealloc_dev prealloc_devices[] __initdata = { - { - /* hammerfall */ - .vendor = 0x10ee, - .device = 0x3fc4, - .dma_mask = 0xffffffff, - .size = HAMMERFALL_BUFFER_SIZE, - .buffers = 2 - }, - { - /* HDSP */ - .vendor = 0x10ee, - .device = 0x3fc5, - .dma_mask = 0xffffffff, - .size = HAMMERFALL_BUFFER_SIZE, - .buffers = 2 - }, - { }, /* terminator */ -}; - -static void __init preallocate_cards(void) -{ - struct pci_dev *pci = NULL; - int card; - - card = 0; - - while ((pci = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci)) != NULL) { - struct prealloc_dev *dev; - unsigned int i; - if (card >= SNDRV_CARDS) - break; - for (dev = prealloc_devices; dev->vendor; dev++) { - if (dev->vendor == pci->vendor && dev->device == pci->device) - break; - } - if (! dev->vendor) - continue; - if (! enable[card++]) { - printk(KERN_DEBUG "snd-page-alloc: skipping card %d, device %04x:%04x\n", card, pci->vendor, pci->device); - continue; - } - - if (pci_set_dma_mask(pci, dev->dma_mask) < 0 || - pci_set_consistent_dma_mask(pci, dev->dma_mask) < 0) { - printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", dev->dma_mask, dev->vendor, dev->device); - continue; - } - for (i = 0; i < dev->buffers; i++) { - struct snd_dma_buffer dmab; - memset(&dmab, 0, sizeof(dmab)); - if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), - dev->size, &dmab) < 0) - printk(KERN_WARNING "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", dev->size); - else - snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci)); - } - } + mutex_unlock(&list_mutex); } -#else -#define preallocate_cards() /* NOP */ -#endif #ifdef CONFIG_PROC_FS /* * proc file interface */ +#define SND_MEM_PROC_FILE "driver/snd-page-alloc" +static struct proc_dir_entry *snd_mem_proc; + static int snd_mem_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -603,7 +494,7 @@ static int snd_mem_proc_read(char *page, char **start, off_t off, int devno; static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" }; - down(&list_mutex); + mutex_lock(&list_mutex); len += snprintf(page + len, count - len, "pages : %li bytes (%li pages per %likB)\n", pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); @@ -618,9 +509,101 @@ static int snd_mem_proc_read(char *page, char **start, off_t off, " addr = 0x%lx, size = %d bytes\n", (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes); } - up(&list_mutex); + mutex_unlock(&list_mutex); return len; } + +/* FIXME: for pci only - other bus? */ +#ifdef CONFIG_PCI +#define gettoken(bufp) strsep(bufp, " \t\n") + +static int snd_mem_proc_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + char buf[128]; + char *token, *p; + + if (count > ARRAY_SIZE(buf) - 1) + count = ARRAY_SIZE(buf) - 1; + if (copy_from_user(buf, buffer, count)) + return -EFAULT; + buf[ARRAY_SIZE(buf) - 1] = '\0'; + + p = buf; + token = gettoken(&p); + if (! 
token || *token == '#') + return (int)count; + if (strcmp(token, "add") == 0) { + char *endp; + int vendor, device, size, buffers; + long mask; + int i, alloced; + struct pci_dev *pci; + + if ((token = gettoken(&p)) == NULL || + (vendor = simple_strtol(token, NULL, 0)) <= 0 || + (token = gettoken(&p)) == NULL || + (device = simple_strtol(token, NULL, 0)) <= 0 || + (token = gettoken(&p)) == NULL || + (mask = simple_strtol(token, NULL, 0)) < 0 || + (token = gettoken(&p)) == NULL || + (size = memparse(token, &endp)) < 64*1024 || + size > 16*1024*1024 /* too big */ || + (token = gettoken(&p)) == NULL || + (buffers = simple_strtol(token, NULL, 0)) <= 0 || + buffers > 4) { + printk(KERN_ERR "snd-page-alloc: invalid proc write format\n"); + return (int)count; + } + vendor &= 0xffff; + device &= 0xffff; + + alloced = 0; + pci = NULL; + while ((pci = pci_get_device(vendor, device, pci)) != NULL) { + if (mask > 0 && mask < 0xffffffff) { + if (pci_set_dma_mask(pci, mask) < 0 || + pci_set_consistent_dma_mask(pci, mask) < 0) { + printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device); + return (int)count; + } + } + for (i = 0; i < buffers; i++) { + struct snd_dma_buffer dmab; + memset(&dmab, 0, sizeof(dmab)); + if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), + size, &dmab) < 0) { + printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); + pci_dev_put(pci); + return (int)count; + } + snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci)); + } + alloced++; + } + if (! alloced) { + for (i = 0; i < buffers; i++) { + struct snd_dma_buffer dmab; + memset(&dmab, 0, sizeof(dmab)); + /* FIXME: We can allocate only in ZONE_DMA + * without a device pointer! + */ + if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL, + size, &dmab) < 0) { + printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); + break; + } + snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device)); + } + } + } else if (strcmp(token, "erase") == 0) + /* FIXME: need for releasing each buffer chunk? */ + free_all_reserved_pages(); + else + printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n"); + return (int)count; +} +#endif /* CONFIG_PCI */ #endif /* CONFIG_PROC_FS */ /* @@ -630,15 +613,20 @@ static int snd_mem_proc_read(char *page, char **start, off_t off, static int __init snd_mem_init(void) { #ifdef CONFIG_PROC_FS - create_proc_read_entry("driver/snd-page-alloc", 0, NULL, snd_mem_proc_read, NULL); + snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL); + if (snd_mem_proc) { + snd_mem_proc->read_proc = snd_mem_proc_read; +#ifdef CONFIG_PCI + snd_mem_proc->write_proc = snd_mem_proc_write; +#endif + } #endif - preallocate_cards(); return 0; } static void __exit snd_mem_exit(void) { - remove_proc_entry("driver/snd-page-alloc", NULL); + remove_proc_entry(SND_MEM_PROC_FILE, NULL); free_all_reserved_pages(); if (snd_allocated_pages > 0) printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
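
Usage note, not part of the patch itself: the change above drops the hard-coded Hammerfall/HDSP preallocation table and the per-card "enable" module parameter, and instead makes /proc/driver/snd-page-alloc writable. Writing a line such as "add 0x10ee 0x3fc4 0 1835008 2" reserves two buffers of HAMMERFALL_BUFFER_SIZE bytes for a Hammerfall card, roughly what the removed preallocate_cards() table used to do (a dma_mask argument of 0 simply skips the pci_set_dma_mask() step), and writing "erase" releases every reserved buffer via free_all_reserved_pages(). The values shown are illustrative. The sketch below is a hypothetical driver-side counterpart built only from the helpers visible in this file (snd_dma_get_reserved_buf(), snd_dma_alloc_pages(), snd_dma_reserve_buf()); the example_* names are illustrative and not part of the ALSA API.

/*
 * Hypothetical sketch (not from this patch): pick up a buffer reserved via
 * /proc/driver/snd-page-alloc or kept by a previous driver instance, falling
 * back to a fresh coherent allocation when nothing was reserved.
 */
#include <linux/string.h>
#include <linux/pci.h>
#include <sound/memalloc.h>

static int example_get_buffer(struct pci_dev *pci, size_t size,
			      struct snd_dma_buffer *dmab)
{
	memset(dmab, 0, sizeof(*dmab));
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	dmab->dev.dev = snd_dma_pci_data(pci);

	/* A reserved buffer keyed to this PCI device is handed back as-is;
	 * with the change above, a buffer that was reserved without a device
	 * pointer (dev == NULL) also matches and gets this device filled in. */
	if (snd_dma_get_reserved_buf(dmab, snd_dma_pci_buf_id(pci)))
		return 0;

	/* Nothing reserved -- allocate coherent pages now. */
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
				   size, dmab);
}

static void example_put_buffer(struct pci_dev *pci, struct snd_dma_buffer *dmab)
{
	/* Park the buffer on the preservation list instead of freeing it,
	 * so the next probe (or a module reload) can reclaim it. */
	snd_dma_reserve_buf(dmab, snd_dma_pci_buf_id(pci));
}

Keeping the buffer on the mem_list_head preservation list (now guarded by the list_mutex mutex rather than a semaphore) is what lets a large DMA area survive a driver reload without being re-requested from a possibly fragmented allocator.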