vserver 1.9.5.x5

diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 679e68a..3941e04 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -35,17 +35,20 @@ extern void ia64_elf32_init (struct pt_regs *regs);
 
 static void elf32_set_personality (void);
 
-#define setup_arg_pages(bprm,exec)             ia32_setup_arg_pages(bprm,exec)
+#define setup_arg_pages(bprm,tos,exec)         ia32_setup_arg_pages(bprm,exec)
 #define elf_map                                elf32_map
 
 #undef SET_PERSONALITY
 #define SET_PERSONALITY(ex, ibcs2)     elf32_set_personality()
 
+#define elf_read_implies_exec(ex, have_pt_gnu_stack)   (!(have_pt_gnu_stack))
+
 /* Ugly but avoids duplication */
 #include "../../../fs/binfmt_elf.c"
 
 extern struct page *ia32_shared_page[];
 extern unsigned long *ia32_gdt;
+extern struct page *ia32_gate_page;
 
 struct page *
 ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int *type)
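
For context on the hunk above: the new elf_read_implies_exec() override makes the shared loader (included from fs/binfmt_elf.c) treat any IA-32 binary that lacks a PT_GNU_STACK program header as one that needs legacy executable-memory semantics. A minimal sketch of how a loader-side check can consume such an override; apart from elf_read_implies_exec() and the READ_IMPLIES_EXEC personality bit, the names below are assumptions for illustration, not the exact fs/binfmt_elf.c code.

/*
 * Sketch only: consult the per-arch override when loading an IA-32
 * binary.  With the definition above, have_pt_gnu_stack == 0 (no
 * PT_GNU_STACK header) forces READ_IMPLIES_EXEC.
 */
static void apply_ia32_exec_policy(struct elfhdr ex, int have_pt_gnu_stack)
{
	if (elf_read_implies_exec(ex, have_pt_gnu_stack))
		current->personality |= READ_IMPLIES_EXEC;
}
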
@@ -57,10 +60,25 @@ ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int
        return pg;
 }
 
+struct page *
+ia32_install_gate_page (struct vm_area_struct *vma, unsigned long address, int *type)
+{
+       struct page *pg = ia32_gate_page;
+       get_page(pg);
+       if (type)
+               *type = VM_FAULT_MINOR;
+       return pg;
+}
+
+
 static struct vm_operations_struct ia32_shared_page_vm_ops = {
        .nopage = ia32_install_shared_page
 };
 
+static struct vm_operations_struct ia32_gate_page_vm_ops = {
+       .nopage = ia32_install_gate_page
+};
+
 void
 ia64_elf32_init (struct pt_regs *regs)
 {
@@ -73,18 +91,47 @@ ia64_elf32_init (struct pt_regs *regs)
         */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (vma) {
+               memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = IA32_GDT_OFFSET;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_page_prot = PAGE_SHARED;
-               vma->vm_flags = VM_READ|VM_MAYREAD;
+               vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
                vma->vm_ops = &ia32_shared_page_vm_ops;
-               vma->vm_pgoff = 0;
-               vma->vm_file = NULL;
-               vma->vm_private_data = NULL;
                down_write(&current->mm->mmap_sem);
                {
-                       insert_vm_struct(current->mm, vma);
+                       if (insert_vm_struct(current->mm, vma)) {
+                               kmem_cache_free(vm_area_cachep, vma);
+                               up_write(&current->mm->mmap_sem);
+                               BUG();
+                       }
+               }
+               up_write(&current->mm->mmap_sem);
+       }
+
+       /*
+        * When the user stack is not executable, pushing the sigreturn code
+        * onto the stack makes the return to the kernel fault.  The sigreturn
+        * code is therefore kept in a dedicated gate page, which pretcode
+        * points at when the frame is built in setup_frame_ia32().
+        */
+       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       if (vma) {
+               memset(vma, 0, sizeof(*vma));
+               vma->vm_mm = current->mm;
+               vma->vm_start = IA32_GATE_OFFSET;
+               vma->vm_end = vma->vm_start + PAGE_SIZE;
+               vma->vm_page_prot = PAGE_COPY_EXEC;
+               vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
+                               | VM_MAYEXEC | VM_RESERVED;
+               vma->vm_ops = &ia32_gate_page_vm_ops;
+               down_write(&current->mm->mmap_sem);
+               {
+                       if (insert_vm_struct(current->mm, vma)) {
+                               kmem_cache_free(vm_area_cachep, vma);
+                               up_write(&current->mm->mmap_sem);
+                               BUG();
+                       }
                }
                up_write(&current->mm->mmap_sem);
        }
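
The comment in this hunk gives the rationale: with a non-executable user stack, copying the sigreturn trampoline onto the stack faults as soon as the handler returns through it, so the trampoline lives in a fixed, executable gate page instead. A rough sketch of the signal-delivery side of that idea; the frame layout, the helper name, and GATE_SIGRETURN_OFFSET are assumptions for illustration, not the actual ia32 signal code.

/* Illustrative frame layout: only the return-address slot matters here. */
struct sigframe_ia32_sketch {
	u32 pretcode;			/* 32-bit user return address */
	/* ... signal number, sigcontext, saved fpstate, ... */
};

#define GATE_SIGRETURN_OFFSET	0	/* assumed offset of the stub in the gate page */

/*
 * Point the handler's return address at the sigreturn stub inside the
 * gate page mapped above at IA32_GATE_OFFSET, instead of copying the
 * stub onto the (possibly non-executable) stack.
 */
static void point_pretcode_at_gate(struct sigframe_ia32_sketch *frame)
{
	frame->pretcode = IA32_GATE_OFFSET + GATE_SIGRETURN_OFFSET;
}
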
@@ -95,18 +142,19 @@ ia64_elf32_init (struct pt_regs *regs)
         */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (vma) {
+               memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = IA32_LDT_OFFSET;
                vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
                vma->vm_page_prot = PAGE_SHARED;
                vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
-               vma->vm_ops = NULL;
-               vma->vm_pgoff = 0;
-               vma->vm_file = NULL;
-               vma->vm_private_data = NULL;
                down_write(&current->mm->mmap_sem);
                {
-                       insert_vm_struct(current->mm, vma);
+                       if (insert_vm_struct(current->mm, vma)) {
+                               kmem_cache_free(vm_area_cachep, vma);
+                               up_write(&current->mm->mmap_sem);
+                               BUG();
+                       }
                }
                up_write(&current->mm->mmap_sem);
        }
@@ -151,10 +199,10 @@ ia64_elf32_init (struct pt_regs *regs)
 int
 ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 {
-       unsigned long stack_base;
+       unsigned long stack_base, grow;
        struct vm_area_struct *mpnt;
        struct mm_struct *mm = current->mm;
-       int i;
+       int i, ret;
 
        stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
        mm->arg_start = bprm->p + stack_base;
@@ -168,11 +216,16 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
        if (!mpnt)
                return -ENOMEM;
 
-       if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))>>PAGE_SHIFT)) {
+       grow = (IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))
+               >> PAGE_SHIFT;
+       if (security_vm_enough_memory(grow) ||
+               !vx_vmpages_avail(mm, grow)) {
                kmem_cache_free(vm_area_cachep, mpnt);
                return -ENOMEM;
        }
 
+       memset(mpnt, 0, sizeof(*mpnt));
+
        down_write(&current->mm->mmap_sem);
        {
                mpnt->vm_mm = current->mm;
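
The check added in this hunk pairs the usual overcommit test with a Linux-VServer one: the stack growth is also refused when it would exceed the owning context's allowance of VM pages. A simplified sketch of what such a per-context availability test can look like; the vx_info field names, plain (non-atomic) counters, and the use of RLIMIT_AS as the limit index are assumptions for the example, not the actual vserver implementation.

/*
 * Illustrative per-context check in the spirit of vx_vmpages_avail():
 * succeed when the context either has no limit or still has room for
 * "pages" more pages of address space.
 */
static inline int example_vmpages_avail(struct mm_struct *mm, unsigned long pages)
{
	struct vx_info *vxi = mm->mm_vx_info;	/* assumed field name */

	if (!vxi)
		return 1;	/* no context attached: nothing to enforce */
	/* assumed limit layout; the real code uses atomic counters */
	return vxi->limit.rcur[RLIMIT_AS] + pages
		<= vxi->limit.rlim[RLIMIT_AS];
}
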
@@ -186,24 +239,30 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
                        mpnt->vm_flags = VM_STACK_FLAGS;
                mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
                                        PAGE_COPY_EXEC: PAGE_COPY;
-               mpnt->vm_ops = NULL;
-               mpnt->vm_pgoff = 0;
-               mpnt->vm_file = NULL;
-               mpnt->vm_private_data = 0;
-               insert_vm_struct(current->mm, mpnt);
-               current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+               if ((ret = insert_vm_struct(current->mm, mpnt))) {
+                       up_write(&current->mm->mmap_sem);
+                       kmem_cache_free(vm_area_cachep, mpnt);
+                       return ret;
+               }
+               /* current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt); */
+               vx_vmpages_sub(current->mm, current->mm->total_vm - vma_pages(mpnt));
+               current->mm->stack_vm = current->mm->total_vm;
        }
 
        for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
                struct page *page = bprm->page[i];
                if (page) {
                        bprm->page[i] = NULL;
-                       put_dirty_page(current, page, stack_base, mpnt->vm_page_prot);
+                       install_arg_page(mpnt, page, stack_base);
                }
                stack_base += PAGE_SIZE;
        }
        up_write(&current->mm->mmap_sem);
 
+       /* This can't be done in ia64_elf32_init(); it has to happen before
+          the calls to elf32_map(). */
+       current->thread.ppl = ia32_init_pp_list();
+
        return 0;
 }
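
One more note on the accounting change above: the old code assigned current->mm->total_vm directly, while the new code subtracts the difference (total_vm - vma_pages(mpnt)) through the vserver accounting helper, which leaves total_vm with the same value but keeps the per-context counter in step. A sketch of that intent; the helper below is an illustrative stand-in, not the real vx_vmpages_sub().

/*
 * Illustrative stand-in for the accounting helper: adjust the mm's own
 * counter and, in the real patch, the owning context's counter as well.
 * After vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt)), total_vm
 * ends up equal to vma_pages(mpnt), matching the old direct assignment.
 */
static inline void example_vmpages_sub(struct mm_struct *mm, unsigned long pages)
{
	mm->total_vm -= pages;
	/* vserver: also decrement the per-context VM-page counter here */
}
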
 
@@ -213,7 +272,6 @@ elf32_set_personality (void)
        set_personality(PER_LINUX32);
        current->thread.map_base  = IA32_PAGE_OFFSET/3;
        current->thread.task_size = IA32_PAGE_OFFSET;   /* use what Linux/x86 uses... */
-       current->thread.flags |= IA64_THREAD_XSTACK;    /* data must be executable */
        set_fs(USER_DS);                                /* set addr limit for new TASK_SIZE */
 }
 
@@ -225,3 +283,16 @@ elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int p
        return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type,
                            eppnt->p_offset - pgoff);
 }
+
+#define cpu_uses_ia32el()      (local_cpu_data->family > 0x1f)
+
+static int __init check_elf32_binfmt(void)
+{
+       if (cpu_uses_ia32el()) {
+               printk("Please use IA-32 EL for executing IA-32 binaries\n");
+               return unregister_binfmt(&elf_format);
+       }
+       return 0;
+}
+
+module_init(check_elf32_binfmt)