X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;ds=sidebyside;f=mm%2Fvmalloc.c;h=c0504f1e34ebdbd2003ba8aaa63287f366b100cb;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=266162d2ba28254a9c8ca4366a8262d81d032f3c;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 266162d2b..c0504f1e3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -256,19 +256,6 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
 }
 
-/* Caller must hold vmlist_lock */
-static struct vm_struct *__find_vm_area(void *addr)
-{
-	struct vm_struct *tmp;
-
-	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
-		if (tmp->addr == addr)
-			break;
-	}
-
-	return tmp;
-}
-
 /* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
@@ -330,8 +317,6 @@ void __vunmap(void *addr, int deallocate_pages)
 		return;
 	}
 
-	debug_check_no_locks_freed(addr, area->size);
-
 	if (deallocate_pages) {
 		int i;
 
@@ -340,7 +325,7 @@ void __vunmap(void *addr, int deallocate_pages)
 			__free_page(area->pages[i]);
 	}
 
-	if (area->flags & VM_VPAGES)
+	if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
 		vfree(area->pages);
 	else
 		kfree(area->pages);
@@ -427,10 +412,9 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE) {
+	if (array_size > PAGE_SIZE)
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
-		area->flags |= VM_VPAGES;
-	} else
+	else
 		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
@@ -514,32 +498,10 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
 
-/**
- * vmalloc_user - allocate virtually contiguous memory which has
- *		been zeroed so it can be mapped to userspace without
- *		leaking data.
- *
- * @size: allocation size
- */
-void *vmalloc_user(unsigned long size)
-{
-	struct vm_struct *area;
-	void *ret;
-
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(vmalloc_user);
-
 /**
  * vmalloc_node - allocate memory on a specific node
  *
@@ -554,7 +516,7 @@ EXPORT_SYMBOL(vmalloc_user);
  */
void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -594,28 +556,6 @@ void *vmalloc_32(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32);
 
-/**
- * vmalloc_32_user - allocate virtually contiguous memory (32bit
- *		addressable) which is zeroed so it can be
- *		mapped to userspace without leaking data.
- * - * @size: allocation size - */ -void *vmalloc_32_user(unsigned long size) -{ - struct vm_struct *area; - void *ret; - - ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); - write_lock(&vmlist_lock); - area = __find_vm_area(ret); - area->flags |= VM_USERMAP; - write_unlock(&vmlist_lock); - - return ret; -} -EXPORT_SYMBOL(vmalloc_32_user); - long vread(char *buf, char *addr, unsigned long count) { struct vm_struct *tmp; @@ -690,64 +630,3 @@ finished: read_unlock(&vmlist_lock); return buf - buf_start; } - -/** - * remap_vmalloc_range - map vmalloc pages to userspace - * - * @vma: vma to cover (map full range of vma) - * @addr: vmalloc memory - * @pgoff: number of pages into addr before first page to map - * @returns: 0 for success, -Exxx on failure - * - * This function checks that addr is a valid vmalloc'ed area, and - * that it is big enough to cover the vma. Will return failure if - * that criteria isn't met. - * - * Similar to remap_pfn_range (see mm/memory.c) - */ -int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, - unsigned long pgoff) -{ - struct vm_struct *area; - unsigned long uaddr = vma->vm_start; - unsigned long usize = vma->vm_end - vma->vm_start; - int ret; - - if ((PAGE_SIZE-1) & (unsigned long)addr) - return -EINVAL; - - read_lock(&vmlist_lock); - area = __find_vm_area(addr); - if (!area) - goto out_einval_locked; - - if (!(area->flags & VM_USERMAP)) - goto out_einval_locked; - - if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE) - goto out_einval_locked; - read_unlock(&vmlist_lock); - - addr += pgoff << PAGE_SHIFT; - do { - struct page *page = vmalloc_to_page(addr); - ret = vm_insert_page(vma, uaddr, page); - if (ret) - return ret; - - uaddr += PAGE_SIZE; - addr += PAGE_SIZE; - usize -= PAGE_SIZE; - } while (usize > 0); - - /* Prevent "things" like memory migration? VM_flags need a cleanup... */ - vma->vm_flags |= VM_RESERVED; - - return ret; - -out_einval_locked: - read_unlock(&vmlist_lock); - return -EINVAL; -} -EXPORT_SYMBOL(remap_vmalloc_range); -
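
Two of the hunks above are linked: __vmalloc_area_node() stops tagging the area with VM_VPAGES when the pages[] array is itself vmalloc'ed, and __vunmap() instead recomputes the allocation-time test from nr_pages. The two tests agree because array_size equals nr_pages * sizeof(struct page *), so array_size > PAGE_SIZE holds exactly when nr_pages > PAGE_SIZE / sizeof(struct page *); with 4 KiB pages and 8-byte pointers that means arrays of more than 512 page pointers (allocations over 2 MiB) take the vfree() path. A minimal userspace sketch of that equivalence, where PAGE_SZ and PTR_SZ are stand-ins for the kernel's PAGE_SIZE and sizeof(struct page *) (assumed here to divide the page size evenly):

#include <assert.h>
#include <stdio.h>

#define PAGE_SZ	4096UL			/* stand-in for PAGE_SIZE */
#define PTR_SZ	sizeof(void *)		/* stand-in for sizeof(struct page *) */

int main(void)
{
	unsigned long nr_pages;

	for (nr_pages = 1; nr_pages <= 4 * PAGE_SZ; nr_pages++) {
		unsigned long array_size = nr_pages * PTR_SZ;

		/*
		 * The alloc-side test (__vmalloc_area_node) must match the
		 * free-side test (__vunmap) so vfree()/kfree() pair up with
		 * __vmalloc_node()/kmalloc_node().
		 */
		assert((array_size > PAGE_SZ) ==
		       (nr_pages > PAGE_SZ / PTR_SZ));
	}
	printf("arrays above %lu page pointers are vmalloc'ed\n",
	       (unsigned long)(PAGE_SZ / PTR_SZ));
	return 0;
}

Recomputing the condition in the free path keeps struct vm_struct free of a bookkeeping flag, at the cost of repeating the arithmetic and requiring that the two sites never drift apart.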
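The deleted vmalloc_user(), vmalloc_32_user() and remap_vmalloc_range() formed a small API for handing a zeroed, vmalloc'ed buffer to userspace: the allocators tagged the area VM_USERMAP, and remap_vmalloc_range() refused to insert pages from an area without that tag, or from one too small to cover the vma. For context only, a hedged sketch of how a driver's mmap handler used the pair on trees that still carry this API; mydev_buf and mydev_mmap are hypothetical names, not part of this patch, and locking is omitted:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *mydev_buf;	/* zeroed, VM_USERMAP-tagged vmalloc area */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (!mydev_buf) {
		/*
		 * vmalloc_user() zeroes the pages and sets VM_USERMAP,
		 * which remap_vmalloc_range() checks before mapping.
		 */
		mydev_buf = vmalloc_user(size);
		if (!mydev_buf)
			return -ENOMEM;
	}

	/* insert each page of the buffer into the user vma, pgoff 0 */
	return remap_vmalloc_range(vma, mydev_buf, 0);
}

The buffer is released with vfree() at teardown; on a tree with this patch applied, none of these symbols exist and a driver must fall back to per-page vm_insert_page() over a plain vmalloc'ed area.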