X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fvmalloc.c;fp=mm%2Fvmalloc.c;h=266162d2ba28254a9c8ca4366a8262d81d032f3c;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=c0504f1e34ebdbd2003ba8aaa63287f366b100cb;hpb=4e76c8a9fa413ccc09d3f7f664183dcce3555d57;p=linux-2.6.git

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c0504f1e3..266162d2b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -256,6 +256,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
 }
 
+/* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+	struct vm_struct *tmp;
+
+	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+		if (tmp->addr == addr)
+			break;
+	}
+
+	return tmp;
+}
+
 /* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
@@ -317,6 +330,8 @@ void __vunmap(void *addr, int deallocate_pages)
 		return;
 	}
 
+	debug_check_no_locks_freed(addr, area->size);
+
 	if (deallocate_pages) {
 		int i;
 
@@ -325,7 +340,7 @@ void __vunmap(void *addr, int deallocate_pages)
 			__free_page(area->pages[i]);
 		}
 
-		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
+		if (area->flags & VM_VPAGES)
 			vfree(area->pages);
 		else
 			kfree(area->pages);
@@ -412,9 +427,10 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE)
+	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
-	else
+		area->flags |= VM_VPAGES;
+	} else
 		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
@@ -498,10 +514,34 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ * vmalloc_user - allocate virtually contiguous memory which has
+ *		  been zeroed so it can be mapped to userspace without
+ *		  leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
 /**
  * vmalloc_node - allocate memory on a specific node
  *
@@ -516,7 +556,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -556,6 +596,30 @@ void *vmalloc_32(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32);
 
+/**
+ * vmalloc_32_user - allocate virtually contiguous memory (32bit
+ *		     addressable) which is zeroed so it can be
+ *		     mapped to userspace without leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
 long vread(char *buf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
@@ -630,3 +694,64 @@ finished:
 	read_unlock(&vmlist_lock);
 	return buf - buf_start;
 }
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ *
+ * @vma:	vma to cover (map full range of vma)
+ * @addr:	vmalloc memory
+ * @pgoff:	number of pages into addr before first page to map
+ * @returns:	0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area and
+ * that it is big enough to cover the vma. Returns failure if
+ * those criteria aren't met.
+ *
+ * Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+						unsigned long pgoff)
+{
+	struct vm_struct *area;
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if ((PAGE_SIZE-1) & (unsigned long)addr)
+		return -EINVAL;
+
+	read_lock(&vmlist_lock);
+	area = __find_vm_area(addr);
+	if (!area)
+		goto out_einval_locked;
+
+	if (!(area->flags & VM_USERMAP))
+		goto out_einval_locked;
+
+	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+		goto out_einval_locked;
+	read_unlock(&vmlist_lock);
+
+	addr += pgoff << PAGE_SHIFT;
+	do {
+		struct page *page = vmalloc_to_page(addr);
+		ret = vm_insert_page(vma, uaddr, page);
+		if (ret)
+			return ret;
+
+		uaddr += PAGE_SIZE;
+		addr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	/* Prevent "things" like memory migration? VM flags need a cleanup... */
+	vma->vm_flags |= VM_RESERVED;
+
+	return ret;
+
+out_einval_locked:
+	read_unlock(&vmlist_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
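Usage note (illustrative, not part of the patch above): the vmalloc_user()/remap_vmalloc_range() pair introduced here is aimed at drivers that want to hand a zeroed, virtually contiguous kernel buffer to userspace from their mmap handler. The sketch below shows one plausible wiring under that assumption; the names my_buf, my_buf_init, my_mmap and the MY_BUF_PAGES size are hypothetical, and only vmalloc_user() and remap_vmalloc_range() come from the patch itself.

/*
 * Sketch of a driver mmap handler built on the API added above.
 * Hypothetical names: MY_BUF_PAGES, my_buf, my_buf_init, my_mmap.
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>

#define MY_BUF_PAGES	16	/* hypothetical buffer size, in pages */

static void *my_buf;

static int my_buf_init(void)
{
	/*
	 * vmalloc_user() returns zeroed memory and tags the area
	 * VM_USERMAP, so remap_vmalloc_range() will accept it later.
	 */
	my_buf = vmalloc_user(MY_BUF_PAGES * PAGE_SIZE);
	return my_buf ? 0 : -ENOMEM;
}

/* file_operations::mmap - expose the buffer to the caller's vma */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * remap_vmalloc_range() itself re-checks, under vmlist_lock,
	 * that the area is VM_USERMAP and big enough to cover the
	 * vma, returning -EINVAL otherwise, so no further validation
	 * is needed here; vm_pgoff selects the starting page.
	 */
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}

Because the size and VM_USERMAP checks live inside remap_vmalloc_range(), a handler in this style can stay minimal and still reject mappings that would run past the allocation.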