X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fkernel%2Fvdso.c;h=256faa779694d4ed39a651c1f2f56452b32f410e;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=1d135e93d376549ed03d67c4c9bb609ebbbd6bde;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1d135e93d..256faa779 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -8,6 +8,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include
 #include
 #include
 #include
@@ -224,7 +225,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	struct vm_area_struct *vma;
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
-	int rc;
 
 #ifdef CONFIG_PPC64
 	if (test_thread_flag(TIF_32BIT)) {
@@ -239,13 +239,20 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		vdso_base = VDSO32_MBASE;
 #endif
 
-	current->mm->context.vdso_base = 0;
+	current->thread.vdso_base = 0;
 
 	/* vDSO has a problem and was disabled, just don't "enable" it for the
 	 * process
 	 */
 	if (vdso_pages == 0)
 		return 0;
+
+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (vma == NULL)
+		return -ENOMEM;
+
+	memset(vma, 0, sizeof(*vma));
+
 	/* Add a page to the vdso size for the data page */
 	vdso_pages ++;
 
@@ -254,23 +261,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * at vdso_base which is the "natural" base for it, but we might fail
 	 * and end up putting it elsewhere.
 	 */
-	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      vdso_pages << PAGE_SHIFT, 0, 0);
-	if (IS_ERR_VALUE(vdso_base)) {
-		rc = vdso_base;
-		goto fail_mmapsem;
+	if (vdso_base & ~PAGE_MASK) {
+		kmem_cache_free(vm_area_cachep, vma);
+		return (int)vdso_base;
 	}
+	current->thread.vdso_base = vdso_base;
 
-	/* Allocate a VMA structure and fill it up */
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
-	if (vma == NULL) {
-		rc = -ENOMEM;
-		goto fail_mmapsem;
-	}
 	vma->vm_mm = mm;
-	vma->vm_start = vdso_base;
+	vma->vm_start = current->thread.vdso_base;
 	vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
 
 	/*
@@ -283,38 +284,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * It's fine to use that for setting breakpoints in the vDSO code
 	 * pages though
 	 */
-	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
 	vma->vm_ops = &vdso_vmops;
 
-	/* Insert new VMA */
-	rc = insert_vm_struct(mm, vma);
-	if (rc)
-		goto fail_vma;
-
-	/* Put vDSO base into mm struct and account for memory usage */
-	current->mm->context.vdso_base = vdso_base;
+	down_write(&mm->mmap_sem);
+	if (insert_vm_struct(mm, vma)) {
+		up_write(&mm->mmap_sem);
+		kmem_cache_free(vm_area_cachep, vma);
+		return -ENOMEM;
+	}
 	vx_vmpages_add(mm, (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
 	up_write(&mm->mmap_sem);
-	return 0;
 
- fail_vma:
-	kmem_cache_free(vm_area_cachep, vma);
- fail_mmapsem:
-	up_write(&mm->mmap_sem);
-	return rc;
-}
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
-		return "[vdso]";
-	return NULL;
+	return 0;
 }
-
-
 static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
 				    unsigned long *size)
 {
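For readability, here is a sketch of arch_setup_additional_pages() as it reads after this change, assembled from the added and surrounding context lines of the hunks above. It is not a verbatim copy of the resulting file: code between hunks is elided behind comments, the stripped #include targets are not reproduced, and the "mm" local plus the second function parameter are assumptions taken from kernels of this era rather than from the diff itself. The point of the change is the ordering: the VMA is allocated and zeroed up front, a failed get_unmapped_area() frees it and returns, and mmap_sem is held only around insert_vm_struct() and the vx_vmpages_add() accounting.

int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)	/* 2nd parameter assumed, not shown in the diff */
{
	struct mm_struct *mm = current->mm;	/* assumed: "mm" is used by the hunks but declared outside them */
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;

	/* ... 32-bit vs. 64-bit selection of vdso_pages and vdso_base (unchanged, elided) ... */

	current->thread.vdso_base = 0;

	/* vDSO has a problem and was disabled, just don't "enable" it for the process */
	if (vdso_pages == 0)
		return 0;

	/* Allocate and zero the VMA up front so every later failure has a single cleanup path */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma == NULL)
		return -ENOMEM;
	memset(vma, 0, sizeof(*vma));

	/* Add a page to the vdso size for the data page */
	vdso_pages++;

	vdso_base = get_unmapped_area(NULL, vdso_base,
				      vdso_pages << PAGE_SHIFT, 0, 0);
	if (vdso_base & ~PAGE_MASK) {		/* not page aligned, so it is an error code */
		kmem_cache_free(vm_area_cachep, vma);
		return (int)vdso_base;
	}
	current->thread.vdso_base = vdso_base;

	vma->vm_mm = mm;
	vma->vm_start = current->thread.vdso_base;
	vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_flags |= mm->def_flags;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
	vma->vm_ops = &vdso_vmops;

	/* mmap_sem is taken only around the insertion and the per-mm page accounting */
	down_write(&mm->mmap_sem);
	if (insert_vm_struct(mm, vma)) {
		up_write(&mm->mmap_sem);
		kmem_cache_free(vm_area_cachep, vma);
		return -ENOMEM;
	}
	vx_vmpages_add(mm, (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	up_write(&mm->mmap_sem);

	return 0;
}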