X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sound%2Fcore%2Fmemalloc.c;h=3fc6f97075ed87ae962562bfae0cf3ad14a8e9b6;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=c5f6bc1d7ee70edf88221b1be423e5cd4ea4b921;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index c5f6bc1d7..3fc6f9707 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -28,8 +28,10 @@ #include #include #include +#include #include -#include +#include +#include #include #ifdef CONFIG_SBUS #include @@ -41,17 +43,10 @@ MODULE_DESCRIPTION("Memory allocator for ALSA system."); MODULE_LICENSE("GPL"); -#ifndef SNDRV_CARDS -#define SNDRV_CARDS 8 -#endif -static int enable[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1}; -MODULE_PARM(enable, "1-" __MODULE_STRING(SNDRV_CARDS) "i"); -MODULE_PARM_DESC(enable, "Enable cards to allocate buffers."); - /* */ -void *snd_malloc_sgbuf_pages(const struct snd_dma_device *dev, +void *snd_malloc_sgbuf_pages(struct device *device, size_t size, struct snd_dma_buffer *dmab, size_t *res_size); int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab); @@ -59,14 +54,13 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab); /* */ -static DECLARE_MUTEX(list_mutex); +static DEFINE_MUTEX(list_mutex); static LIST_HEAD(mem_list_head); /* buffer preservation list */ struct snd_mem_list { - struct snd_dma_device dev; struct snd_dma_buffer buffer; - int used; + unsigned int id; struct list_head list; }; @@ -89,32 +83,7 @@ struct snd_mem_list { * Hacks */ -static void *snd_dma_alloc_coherent1(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flags) -{ - if (dev) - return dma_alloc_coherent(dev, size, dma_handle, flags); - else /* FIXME: dma_alloc_coherent does't always accept dev=NULL */ - return pci_alloc_consistent(NULL, size, dma_handle); -} - -static void snd_dma_free_coherent1(struct device *dev, size_t size, void *dma_addr, - dma_addr_t dma_handle) -{ - if (dev) - return dma_free_coherent(dev, size, dma_addr, dma_handle); - else - return pci_free_consistent(NULL, size, dma_addr, dma_handle); -} - -#undef dma_alloc_coherent -#define dma_alloc_coherent snd_dma_alloc_coherent1 -#undef dma_free_coherent -#define dma_free_coherent snd_dma_free_coherent1 - - -#if defined(__i386__) || defined(__ppc__) || defined(__x86_64__) - +#if defined(__i386__) /* * A hack to allocate large buffers via dma_alloc_coherent() * @@ -132,17 +101,21 @@ static void snd_dma_free_coherent1(struct device *dev, size_t size, void *dma_ad */ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flags) + dma_addr_t *dma_handle, + gfp_t flags) { void *ret; - u64 dma_mask; + u64 dma_mask, coherent_dma_mask; if (dev == NULL || !dev->dma_mask) return dma_alloc_coherent(dev, size, dma_handle, flags); dma_mask = *dev->dma_mask; + coherent_dma_mask = dev->coherent_dma_mask; *dev->dma_mask = 0xffffffff; /* do without masking */ + dev->coherent_dma_mask = 0xffffffff; /* do without masking */ ret = dma_alloc_coherent(dev, size, dma_handle, flags); *dev->dma_mask = dma_mask; /* restore */ + dev->coherent_dma_mask = coherent_dma_mask; /* restore */ if (ret) { /* obtained address is out of range? */ if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) { @@ -152,8 +125,12 @@ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, } } else { /* wish to success now with the proper mask... 
*/ - if (dma_mask != 0xffffffffUL) + if (dma_mask != 0xffffffffUL) { + /* allocation with GFP_ATOMIC to avoid the long stall */ + flags &= ~GFP_KERNEL; + flags |= GFP_ATOMIC; ret = dma_alloc_coherent(dev, size, dma_handle, flags); + } } return ret; } @@ -172,21 +149,13 @@ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, static long snd_allocated_pages; /* holding the number of allocated pages */ -static void mark_pages(void *res, int order) +static inline void inc_snd_pages(int order) { - struct page *page = virt_to_page(res); - struct page *last_page = page + (1 << order); - while (page < last_page) - SetPageReserved(page++); snd_allocated_pages += 1 << order; } -static void unmark_pages(void *res, int order) +static inline void dec_snd_pages(int order) { - struct page *page = virt_to_page(res); - struct page *last_page = page + (1 << order); - while (page < last_page) - ClearPageReserved(page++); snd_allocated_pages -= 1 << order; } @@ -199,49 +168,20 @@ static void unmark_pages(void *res, int order) * * Returns the pointer of the buffer, or NULL if no enoguh memory. */ -void *snd_malloc_pages(size_t size, unsigned int gfp_flags) +void *snd_malloc_pages(size_t size, gfp_t gfp_flags) { int pg; void *res; snd_assert(size > 0, return NULL); snd_assert(gfp_flags != 0, return NULL); - for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++); - if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) { - mark_pages(res, pg); - } + gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */ + pg = get_order(size); + if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) + inc_snd_pages(pg); return res; } -/** - * snd_malloc_pages_fallback - allocate pages with the given size with fallback - * @size: the requested size to allocate in bytes - * @gfp_flags: the allocation conditions, GFP_XXX - * @res_size: the pointer to store the size of buffer actually allocated - * - * Allocates the physically contiguous pages with the given request - * size. When no space is left, this function reduces the size and - * tries to allocate again. The size actually allocated is stored in - * res_size argument. - * - * Returns the pointer of the buffer, or NULL if no enoguh memory. 
- */ -void *snd_malloc_pages_fallback(size_t size, unsigned int gfp_flags, size_t *res_size) -{ - void *res; - - snd_assert(size > 0, return NULL); - snd_assert(res_size != NULL, return NULL); - do { - if ((res = snd_malloc_pages(size, gfp_flags)) != NULL) { - *res_size = size; - return res; - } - size >>= 1; - } while (size >= PAGE_SIZE); - return NULL; -} - /** * snd_free_pages - release the pages * @ptr: the buffer pointer to release @@ -255,8 +195,8 @@ void snd_free_pages(void *ptr, size_t size) if (ptr == NULL) return; - for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++); - unmark_pages(ptr, pg); + pg = get_order(size); + dec_snd_pages(pg); free_pages((unsigned long) ptr, pg); } @@ -266,41 +206,28 @@ void snd_free_pages(void *ptr, size_t size) * */ +/* allocate the coherent DMA pages */ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma) { int pg; void *res; - unsigned int gfp_flags; + gfp_t gfp_flags; snd_assert(size > 0, return NULL); snd_assert(dma != NULL, return NULL); pg = get_order(size); - gfp_flags = GFP_KERNEL; - if (pg > 0) - gfp_flags |= __GFP_NOWARN; + gfp_flags = GFP_KERNEL + | __GFP_COMP /* compound page lets parts be mapped */ + | __GFP_NORETRY /* don't trigger OOM-killer */ + | __GFP_NOWARN; /* no stack trace print - this call is non-critical */ res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags); if (res != NULL) - mark_pages(res, pg); + inc_snd_pages(pg); return res; } -static void *snd_malloc_dev_pages_fallback(struct device *dev, size_t size, - dma_addr_t *dma, size_t *res_size) -{ - void *res; - - snd_assert(res_size != NULL, return NULL); - do { - if ((res = snd_malloc_dev_pages(dev, size, dma)) != NULL) { - *res_size = size; - return res; - } - size >>= 1; - } while (size >= PAGE_SIZE); - return NULL; -} - +/* free the coherent DMA pages */ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr, dma_addr_t dma) { @@ -309,7 +236,7 @@ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr, if (ptr == NULL) return; pg = get_order(size); - unmark_pages(ptr, pg); + dec_snd_pages(pg); dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma); } @@ -324,30 +251,13 @@ static void *snd_malloc_sbus_pages(struct device *dev, size_t size, snd_assert(size > 0, return NULL); snd_assert(dma_addr != NULL, return NULL); - for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++); + pg = get_order(size); res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr); - if (res != NULL) { - mark_pages(res, pg); - } + if (res != NULL) + inc_snd_pages(pg); return res; } -static void *snd_malloc_sbus_pages_fallback(struct device *dev, size_t size, - dma_addr_t *dma_addr, size_t *res_size) -{ - void *res; - - snd_assert(res_size != NULL, return NULL); - do { - if ((res = snd_malloc_sbus_pages(dev, size, dma_addr)) != NULL) { - *res_size = size; - return res; - } - size >>= 1; - } while (size >= PAGE_SIZE); - return NULL; -} - static void snd_free_sbus_pages(struct device *dev, size_t size, void *ptr, dma_addr_t dma_addr) { @@ -356,8 +266,8 @@ static void snd_free_sbus_pages(struct device *dev, size_t size, if (ptr == NULL) return; - for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++); - unmark_pages(ptr, pg); + pg = get_order(size); + dec_snd_pages(pg); sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr); } @@ -370,24 +280,10 @@ static void snd_free_sbus_pages(struct device *dev, size_t size, */ -/* - * compare the two devices - * returns non-zero if matched. 
- */ -static int compare_device(const struct snd_dma_device *a, const struct snd_dma_device *b, int allow_unused) -{ - if (a->type != b->type) - return 0; - if (a->id != b->id) { - if (! allow_unused || (a->id != SNDRV_DMA_DEVICE_UNUSED && b->id != SNDRV_DMA_DEVICE_UNUSED)) - return 0; - } - return a->dev == b->dev; -} - /** * snd_dma_alloc_pages - allocate the buffer area according to the given type - * @dev: the buffer device info + * @type: the DMA buffer type + * @device: the device pointer * @size: the buffer size to allocate * @dmab: buffer allocation record to store the allocated data * @@ -397,32 +293,33 @@ static int compare_device(const struct snd_dma_device *a, const struct snd_dma_d * Returns zero if the buffer with the given size is allocated successfuly, * other a negative value at error. */ -int snd_dma_alloc_pages(const struct snd_dma_device *dev, size_t size, +int snd_dma_alloc_pages(int type, struct device *device, size_t size, struct snd_dma_buffer *dmab) { - snd_assert(dev != NULL, return -ENXIO); snd_assert(size > 0, return -ENXIO); snd_assert(dmab != NULL, return -ENXIO); + dmab->dev.type = type; + dmab->dev.dev = device; dmab->bytes = 0; - switch (dev->type) { + switch (type) { case SNDRV_DMA_TYPE_CONTINUOUS: - dmab->area = snd_malloc_pages(size, (unsigned long)dev->dev); + dmab->area = snd_malloc_pages(size, (unsigned long)device); dmab->addr = 0; break; #ifdef CONFIG_SBUS case SNDRV_DMA_TYPE_SBUS: - dmab->area = snd_malloc_sbus_pages(dev->dev, size, &dmab->addr); + dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr); break; #endif case SNDRV_DMA_TYPE_DEV: - dmab->area = snd_malloc_dev_pages(dev->dev, size, &dmab->addr); + dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr); break; case SNDRV_DMA_TYPE_DEV_SG: - snd_malloc_sgbuf_pages(dev, size, dmab, NULL); + snd_malloc_sgbuf_pages(device, size, dmab, NULL); break; default: - printk(KERN_ERR "snd-malloc: invalid device type %d\n", dev->type); + printk(KERN_ERR "snd-malloc: invalid device type %d\n", type); dmab->area = NULL; dmab->addr = 0; return -ENXIO; @@ -435,7 +332,8 @@ int snd_dma_alloc_pages(const struct snd_dma_device *dev, size_t size, /** * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback - * @dev: the buffer device info + * @type: the DMA buffer type + * @device: the device pointer * @size: the buffer size to allocate * @dmab: buffer allocation record to store the allocated data * @@ -447,35 +345,20 @@ int snd_dma_alloc_pages(const struct snd_dma_device *dev, size_t size, * Returns zero if the buffer with the given size is allocated successfuly, * other a negative value at error. 
*/ -int snd_dma_alloc_pages_fallback(const struct snd_dma_device *dev, size_t size, +int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size, struct snd_dma_buffer *dmab) { - snd_assert(dev != NULL, return -ENXIO); + int err; + snd_assert(size > 0, return -ENXIO); snd_assert(dmab != NULL, return -ENXIO); - dmab->bytes = 0; - switch (dev->type) { - case SNDRV_DMA_TYPE_CONTINUOUS: - dmab->area = snd_malloc_pages_fallback(size, (unsigned long)dev->dev, &dmab->bytes); - dmab->addr = 0; - break; -#ifdef CONFIG_SBUS - case SNDRV_DMA_TYPE_SBUS: - dmab->area = snd_malloc_sbus_pages_fallback(dev->dev, size, &dmab->addr, &dmab->bytes); - break; -#endif - case SNDRV_DMA_TYPE_DEV: - dmab->area = snd_malloc_dev_pages_fallback(dev->dev, size, &dmab->addr, &dmab->bytes); - break; - case SNDRV_DMA_TYPE_DEV_SG: - snd_malloc_sgbuf_pages(dev, size, dmab, &dmab->bytes); - break; - default: - printk(KERN_ERR "snd-malloc: invalid device type %d\n", dev->type); - dmab->area = NULL; - dmab->addr = 0; - return -ENXIO; + while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) { + if (err != -ENOMEM) + return err; + size >>= 1; + if (size <= PAGE_SIZE) + return -ENOMEM; } if (! dmab->area) return -ENOMEM; @@ -485,153 +368,92 @@ int snd_dma_alloc_pages_fallback(const struct snd_dma_device *dev, size_t size, /** * snd_dma_free_pages - release the allocated buffer - * @dev: the buffer device info - * @dmbab: the buffer allocation record to release + * @dmab: the buffer allocation record to release * * Releases the allocated buffer via snd_dma_alloc_pages(). */ -void snd_dma_free_pages(const struct snd_dma_device *dev, struct snd_dma_buffer *dmab) +void snd_dma_free_pages(struct snd_dma_buffer *dmab) { - switch (dev->type) { + switch (dmab->dev.type) { case SNDRV_DMA_TYPE_CONTINUOUS: snd_free_pages(dmab->area, dmab->bytes); break; #ifdef CONFIG_SBUS case SNDRV_DMA_TYPE_SBUS: - snd_free_sbus_pages(dev->dev, dmab->bytes, dmab->area, dmab->addr); + snd_free_sbus_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); break; #endif case SNDRV_DMA_TYPE_DEV: - snd_free_dev_pages(dev->dev, dmab->bytes, dmab->area, dmab->addr); + snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); break; case SNDRV_DMA_TYPE_DEV_SG: snd_free_sgbuf_pages(dmab); break; default: - printk(KERN_ERR "snd-malloc: invalid device type %d\n", dev->type); + printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type); } } -/* - * search for the device - */ -static struct snd_mem_list *mem_list_find(const struct snd_dma_device *dev, int search_empty) -{ - struct list_head *p; - struct snd_mem_list *mem; - - list_for_each(p, &mem_list_head) { - mem = list_entry(p, struct snd_mem_list, list); - if (mem->used && search_empty) - continue; - if (compare_device(&mem->dev, dev, search_empty)) - return mem; - } - return NULL; -} - /** * snd_dma_get_reserved - get the reserved buffer for the given device - * @dev: the buffer device info * @dmab: the buffer allocation record to store + * @id: the buffer id * * Looks for the reserved-buffer list and re-uses if the same buffer - * is found in the list. When the buffer is found, it's marked as used. - * For unmarking the buffer, call snd_dma_free_reserved(). + * is found in the list. When the buffer is found, it's removed from the free list. * * Returns the size of buffer if the buffer is found, or zero if not found. 
*/ -size_t snd_dma_get_reserved(const struct snd_dma_device *dev, struct snd_dma_buffer *dmab) +size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id) { + struct list_head *p; struct snd_mem_list *mem; - snd_assert(dev && dmab, return 0); + snd_assert(dmab, return 0); - down(&list_mutex); - mem = mem_list_find(dev, 1); - if (mem) { - mem->used = 1; - mem->dev = *dev; - *dmab = mem->buffer; - up(&list_mutex); - return dmab->bytes; + mutex_lock(&list_mutex); + list_for_each(p, &mem_list_head) { + mem = list_entry(p, struct snd_mem_list, list); + if (mem->id == id && + (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL || + ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) { + struct device *dev = dmab->dev.dev; + list_del(p); + *dmab = mem->buffer; + if (dmab->dev.dev == NULL) + dmab->dev.dev = dev; + kfree(mem); + mutex_unlock(&list_mutex); + return dmab->bytes; + } } - up(&list_mutex); + mutex_unlock(&list_mutex); return 0; } /** - * snd_dma_free_reserved - unmark the reserved buffer - * @dev: the buffer device info - * - * Looks for the matching reserved buffer and erases the mark on it - * if found. - * - * Returns zero. - */ -int snd_dma_free_reserved(const struct snd_dma_device *dev) -{ - struct snd_mem_list *mem; - - snd_assert(dev, return -EINVAL); - down(&list_mutex); - mem = mem_list_find(dev, 0); - if (mem) - mem->used = 0; - up(&list_mutex); - return 0; -} - -/** - * snd_dma_set_reserved - reserve the buffer - * @dev: the buffer device info + * snd_dma_reserve_buf - reserve the buffer * @dmab: the buffer to reserve + * @id: the buffer id * * Reserves the given buffer as a reserved buffer. - * When an old reserved buffer already exists, the old one is released - * and replaced with the new one. - * - * When NULL buffer pointer or zero buffer size is given, the existing - * buffer is released and the entry is removed. * * Returns zero if successful, or a negative code at error. */ -int snd_dma_set_reserved(const struct snd_dma_device *dev, struct snd_dma_buffer *dmab) +int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id) { struct snd_mem_list *mem; - snd_assert(dev, return -EINVAL); - down(&list_mutex); - mem = mem_list_find(dev, 0); - if (mem) { - if (mem->used) - printk(KERN_WARNING "snd-page-alloc: releasing the used block (type=%d, id=0x%x\n", mem->dev.type, mem->dev.id); - snd_dma_free_pages(dev, &mem->buffer); - if (! dmab || ! dmab->bytes) { - /* remove the entry */ - list_del(&mem->list); - kfree(mem); - up(&list_mutex); - return 0; - } - } else { - if (! dmab || ! dmab->bytes) { - up(&list_mutex); - return 0; - } - mem = kmalloc(sizeof(*mem), GFP_KERNEL); - if (! mem) { - up(&list_mutex); - return -ENOMEM; - } - mem->dev = *dev; - list_add_tail(&mem->list, &mem_list_head); - } - /* store the entry */ - mem->used = 1; + snd_assert(dmab, return -EINVAL); + mem = kmalloc(sizeof(*mem), GFP_KERNEL); + if (! mem) + return -ENOMEM; + mutex_lock(&list_mutex); mem->buffer = *dmab; - up(&list_mutex); + mem->id = id; + list_add_tail(&mem->list, &mem_list_head); + mutex_unlock(&list_mutex); return 0; } @@ -643,123 +465,25 @@ static void free_all_reserved_pages(void) struct list_head *p; struct snd_mem_list *mem; - down(&list_mutex); + mutex_lock(&list_mutex); while (! 
list_empty(&mem_list_head)) { p = mem_list_head.next; mem = list_entry(p, struct snd_mem_list, list); list_del(p); - snd_dma_free_pages(&mem->dev, &mem->buffer); + snd_dma_free_pages(&mem->buffer); kfree(mem); } - up(&list_mutex); -} - - - -/* - * allocation of buffers for pre-defined devices - */ - -#ifdef CONFIG_PCI -/* FIXME: for pci only - other bus? */ -struct prealloc_dev { - unsigned short vendor; - unsigned short device; - unsigned long dma_mask; - unsigned int size; - unsigned int buffers; -}; - -#define HAMMERFALL_BUFFER_SIZE (16*1024*4*(26+1)) - -static struct prealloc_dev prealloc_devices[] __initdata = { - { - /* hammerfall */ - .vendor = 0x10ee, - .device = 0x3fc4, - .dma_mask = 0xffffffff, - .size = HAMMERFALL_BUFFER_SIZE, - .buffers = 2 - }, - { - /* HDSP */ - .vendor = 0x10ee, - .device = 0x3fc5, - .dma_mask = 0xffffffff, - .size = HAMMERFALL_BUFFER_SIZE, - .buffers = 2 - }, - { }, /* terminator */ -}; - -/* - * compose a snd_dma_device struct for the PCI device - */ -static inline void snd_dma_device_pci(struct snd_dma_device *dev, struct pci_dev *pci, unsigned int id) -{ - memset(dev, 0, sizeof(*dev)); - dev->type = SNDRV_DMA_TYPE_DEV; - dev->dev = snd_dma_pci_data(pci); - dev->id = id; -} - -static void __init preallocate_cards(void) -{ - struct pci_dev *pci = NULL; - int card; - - card = 0; - - while ((pci = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci)) != NULL) { - struct prealloc_dev *dev; - unsigned int i; - if (card >= SNDRV_CARDS) - break; - for (dev = prealloc_devices; dev->vendor; dev++) { - if (dev->vendor == pci->vendor && dev->device == pci->device) - break; - } - if (! dev->vendor) - continue; - if (! enable[card++]) { - printk(KERN_DEBUG "snd-page-alloc: skipping card %d, device %04x:%04x\n", card, pci->vendor, pci->device); - continue; - } - - if (pci_set_dma_mask(pci, dev->dma_mask) < 0 || - pci_set_consistent_dma_mask(pci, dev->dma_mask) < 0) { - printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", dev->dma_mask, dev->vendor, dev->device); - continue; - } - for (i = 0; i < dev->buffers; i++) { - struct snd_mem_list *mem; - mem = kmalloc(sizeof(*mem), GFP_KERNEL); - if (! 
mem) { - printk(KERN_WARNING "snd-page-alloc: can't malloc memlist\n"); - break; - } - memset(mem, 0, sizeof(*mem)); - snd_dma_device_pci(&mem->dev, pci, SNDRV_DMA_DEVICE_UNUSED); - if (snd_dma_alloc_pages(&mem->dev, dev->size, &mem->buffer) < 0) { - printk(KERN_WARNING "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", dev->size); - kfree(mem); - } else { - down(&list_mutex); - list_add_tail(&mem->list, &mem_list_head); - up(&list_mutex); - } - } - } + mutex_unlock(&list_mutex); } -#else -#define preallocate_cards() /* NOP */ -#endif #ifdef CONFIG_PROC_FS /* * proc file interface */ +#define SND_MEM_PROC_FILE "driver/snd-page-alloc" +static struct proc_dir_entry *snd_mem_proc; + static int snd_mem_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -768,52 +492,118 @@ static int snd_mem_proc_read(char *page, char **start, off_t off, struct list_head *p; struct snd_mem_list *mem; int devno; + static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" }; - down(&list_mutex); - len += sprintf(page + len, "pages : %li bytes (%li pages per %likB)\n", - pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); + mutex_lock(&list_mutex); + len += snprintf(page + len, count - len, + "pages : %li bytes (%li pages per %likB)\n", + pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); devno = 0; list_for_each(p, &mem_list_head) { mem = list_entry(p, struct snd_mem_list, list); devno++; - len += sprintf(page + len, "buffer %d : ", devno); - if (mem->dev.id == SNDRV_DMA_DEVICE_UNUSED) - len += sprintf(page + len, "UNUSED"); - else - len += sprintf(page + len, "ID %08x", mem->dev.id); - len += sprintf(page + len, " : type "); - switch (mem->dev.type) { - case SNDRV_DMA_TYPE_CONTINUOUS: - len += sprintf(page + len, "CONT [%p]", mem->dev.dev); - break; -#ifdef CONFIG_SBUS - case SNDRV_DMA_TYPE_SBUS: - { - struct sbus_dev *sdev = (struct sbus_dev *)(mem->dev.dev); - len += sprintf(page + len, "SBUS [%x]", sdev->slot); - } - break; -#endif - case SNDRV_DMA_TYPE_DEV: - case SNDRV_DMA_TYPE_DEV_SG: - if (mem->dev.dev) { - len += sprintf(page + len, "%s [%s]", - mem->dev.type == SNDRV_DMA_TYPE_DEV_SG ? "DEV-SG" : "DEV", - mem->dev.dev->bus_id); - } else - len += sprintf(page + len, "ISA"); - break; - default: - len += sprintf(page + len, "UNKNOWN"); - break; - } - len += sprintf(page + len, "\n addr = 0x%lx, size = %d bytes, used = %s\n", - (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes, - mem->used ? "yes" : "no"); + len += snprintf(page + len, count - len, + "buffer %d : ID %08x : type %s\n", + devno, mem->id, types[mem->buffer.dev.type]); + len += snprintf(page + len, count - len, + " addr = 0x%lx, size = %d bytes\n", + (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes); } - up(&list_mutex); + mutex_unlock(&list_mutex); return len; } + +/* FIXME: for pci only - other bus? */ +#ifdef CONFIG_PCI +#define gettoken(bufp) strsep(bufp, " \t\n") + +static int snd_mem_proc_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + char buf[128]; + char *token, *p; + + if (count > ARRAY_SIZE(buf) - 1) + count = ARRAY_SIZE(buf) - 1; + if (copy_from_user(buf, buffer, count)) + return -EFAULT; + buf[ARRAY_SIZE(buf) - 1] = '\0'; + + p = buf; + token = gettoken(&p); + if (! 
token || *token == '#') + return (int)count; + if (strcmp(token, "add") == 0) { + char *endp; + int vendor, device, size, buffers; + long mask; + int i, alloced; + struct pci_dev *pci; + + if ((token = gettoken(&p)) == NULL || + (vendor = simple_strtol(token, NULL, 0)) <= 0 || + (token = gettoken(&p)) == NULL || + (device = simple_strtol(token, NULL, 0)) <= 0 || + (token = gettoken(&p)) == NULL || + (mask = simple_strtol(token, NULL, 0)) < 0 || + (token = gettoken(&p)) == NULL || + (size = memparse(token, &endp)) < 64*1024 || + size > 16*1024*1024 /* too big */ || + (token = gettoken(&p)) == NULL || + (buffers = simple_strtol(token, NULL, 0)) <= 0 || + buffers > 4) { + printk(KERN_ERR "snd-page-alloc: invalid proc write format\n"); + return (int)count; + } + vendor &= 0xffff; + device &= 0xffff; + + alloced = 0; + pci = NULL; + while ((pci = pci_get_device(vendor, device, pci)) != NULL) { + if (mask > 0 && mask < 0xffffffff) { + if (pci_set_dma_mask(pci, mask) < 0 || + pci_set_consistent_dma_mask(pci, mask) < 0) { + printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device); + return (int)count; + } + } + for (i = 0; i < buffers; i++) { + struct snd_dma_buffer dmab; + memset(&dmab, 0, sizeof(dmab)); + if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), + size, &dmab) < 0) { + printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); + pci_dev_put(pci); + return (int)count; + } + snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci)); + } + alloced++; + } + if (! alloced) { + for (i = 0; i < buffers; i++) { + struct snd_dma_buffer dmab; + memset(&dmab, 0, sizeof(dmab)); + /* FIXME: We can allocate only in ZONE_DMA + * without a device pointer! + */ + if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL, + size, &dmab) < 0) { + printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); + break; + } + snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device)); + } + } + } else if (strcmp(token, "erase") == 0) + /* FIXME: need for releasing each buffer chunk? */ + free_all_reserved_pages(); + else + printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n"); + return (int)count; +} +#endif /* CONFIG_PCI */ #endif /* CONFIG_PROC_FS */ /* @@ -823,15 +613,20 @@ static int snd_mem_proc_read(char *page, char **start, off_t off, static int __init snd_mem_init(void) { #ifdef CONFIG_PROC_FS - create_proc_read_entry("driver/snd-page-alloc", 0, 0, snd_mem_proc_read, NULL); + snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL); + if (snd_mem_proc) { + snd_mem_proc->read_proc = snd_mem_proc_read; +#ifdef CONFIG_PCI + snd_mem_proc->write_proc = snd_mem_proc_write; +#endif + } #endif - preallocate_cards(); return 0; } static void __exit snd_mem_exit(void) { - remove_proc_entry("driver/snd-page-alloc", NULL); + remove_proc_entry(SND_MEM_PROC_FILE, NULL); free_all_reserved_pages(); if (snd_allocated_pages > 0) printk(KERN_ERR "snd-malloc: Memory leak? 
pages not freed = %li\n", snd_allocated_pages); @@ -842,25 +637,6 @@ module_init(snd_mem_init) module_exit(snd_mem_exit) -#ifndef MODULE - -/* format is: snd-page-alloc=enable */ - -static int __init snd_mem_setup(char *str) -{ - static unsigned __initdata nr_dev = 0; - - if (nr_dev >= SNDRV_CARDS) - return 0; - (void)(get_option(&str,&enable[nr_dev]) == 2); - nr_dev++; - return 1; -} - -__setup("snd-page-alloc=", snd_mem_setup); - -#endif - /* * exports */ @@ -868,10 +644,8 @@ EXPORT_SYMBOL(snd_dma_alloc_pages); EXPORT_SYMBOL(snd_dma_alloc_pages_fallback); EXPORT_SYMBOL(snd_dma_free_pages); -EXPORT_SYMBOL(snd_dma_get_reserved); -EXPORT_SYMBOL(snd_dma_free_reserved); -EXPORT_SYMBOL(snd_dma_set_reserved); +EXPORT_SYMBOL(snd_dma_get_reserved_buf); +EXPORT_SYMBOL(snd_dma_reserve_buf); EXPORT_SYMBOL(snd_malloc_pages); -EXPORT_SYMBOL(snd_malloc_pages_fallback); EXPORT_SYMBOL(snd_free_pages);
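
The hunks above replace the snd_dma_device descriptor with an explicit (type, device) pair and turn the buffer-preservation list into an id-keyed free list. For reference, a minimal driver-side sketch built only from the signatures in this patch follows; the example_* names and the include lines are illustrative assumptions, not code taken from the tree.

#include <linux/pci.h>
#include <linux/string.h>
#include <sound/memalloc.h>	/* assumed location of the snd_dma_* API */

/* Re-use a buffer reserved across module reloads, or allocate a new one. */
static int example_alloc_buffer(struct pci_dev *pci, size_t size,
				struct snd_dma_buffer *dmab)
{
	memset(dmab, 0, sizeof(*dmab));
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	dmab->dev.dev = snd_dma_pci_data(pci);

	/* snd_dma_get_reserved_buf() returns the buffer size on a hit. */
	if (snd_dma_get_reserved_buf(dmab, snd_dma_pci_buf_id(pci)))
		return 0;

	/* The fallback variant halves the size on -ENOMEM until it fits. */
	return snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
					    snd_dma_pci_data(pci),
					    size, dmab);
}

/* Park the buffer on the reserved list, or free it if that fails. */
static void example_release_buffer(struct pci_dev *pci,
				   struct snd_dma_buffer *dmab)
{
	if (snd_dma_reserve_buf(dmab, snd_dma_pci_buf_id(pci)) < 0)
		snd_dma_free_pages(dmab);
}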
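
The writable proc entry added above takes over from the removed compiled-in preallocation table and the snd-page-alloc=enable boot option. Per snd_mem_proc_write(), the "add" command expects a PCI vendor id, device id, DMA mask, buffer size (parsed by memparse, accepted range 64kB to 16MB) and buffer count (1 to 4); "erase" drops all reserved buffers, and a line starting with '#' is ignored. As an illustration only, the Hammerfall entry from the removed prealloc_devices table would now be requested from user space with something like

	echo "add 0x10ee 0x3fc4 0 1769472 2" > /proc/driver/snd-page-alloc

where 1769472 bytes is the old HAMMERFALL_BUFFER_SIZE and a mask of 0 leaves the device's DMA mask as it is.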