Revert to Fedora kernel-2.6.17-1.2187_FC5 patched with vs2.0.2.1; there are too many...
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index abb113e..479ee5f 100644
@@ -37,7 +37,9 @@
 #include <linux/pagemap.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/random.h>
 #include <linux/vs_memory.h>
+#include <linux/vs_cvirt.h>
 
 #include <asm/uaccess.h>
 #include <asm/param.h>
@@ -58,7 +60,7 @@ extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
  * If we don't support core dumping, then supply a NULL so we
  * don't even try.
  */
-#ifdef USE_ELF_CORE_DUMP
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
 #else
 #define elf_core_dump  NULL
@@ -70,6 +72,10 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
 # define ELF_MIN_ALIGN PAGE_SIZE
 #endif
 
+#ifndef ELF_CORE_EFLAGS
+#define ELF_CORE_EFLAGS        0
+#endif
+
 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
@@ -82,14 +88,17 @@ static struct linux_binfmt elf_format = {
                .min_coredump   = ELF_EXEC_PAGESIZE
 };
 
-#define BAD_ADDR(x)    ((unsigned long)(x) > TASK_SIZE)
+#define BAD_ADDR(x)    ((unsigned long)(x) >= PAGE_MASK)
 
 static int set_brk(unsigned long start, unsigned long end)
 {
        start = ELF_PAGEALIGN(start);
        end = ELF_PAGEALIGN(end);
        if (end > start) {
-               unsigned long addr = do_brk(start, end - start);
+               unsigned long addr;
+               down_write(&current->mm->mmap_sem);
+               addr = do_brk(start, end - start);
+               up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(addr))
                        return addr;
        }
@@ -104,15 +113,17 @@ static int set_brk(unsigned long start, unsigned long end)
    be in memory */
 
 
-static void padzero(unsigned long elf_bss)
+static int padzero(unsigned long elf_bss)
 {
        unsigned long nbyte;
 
        nbyte = ELF_PAGEOFFSET(elf_bss);
        if (nbyte) {
                nbyte = ELF_MIN_ALIGN - nbyte;
-               clear_user((void __user *) elf_bss, nbyte);
+               if (clear_user((void __user *) elf_bss, nbyte))
+                       return -EFAULT;
        }
+       return 0;
 }
 
 /* Let's use some macros to make this stack manipulation a litle clearer */
@@ -128,7 +139,7 @@ static void padzero(unsigned long elf_bss)
 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
 #endif
 
-static void
+static int
 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
                int interp_aout, unsigned long load_addr,
                unsigned long interp_load_addr)
@@ -157,11 +168,17 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
        if (k_platform) {
                size_t len = strlen(k_platform) + 1;
 
-#ifdef __HAVE_ARCH_ALIGN_STACK
-               p = (unsigned long)arch_align_stack((unsigned long)p);
-#endif
+               /*
+                * In some cases (e.g. Hyper-Threading), we want to avoid L1
+                * evictions by the processes running on the same package. One
+                * thing we can do is to shuffle the initial stack for them.
+                */
+        
+               p = arch_align_stack(p);
+
                u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
-               __copy_to_user(u_platform, k_platform, len);
+               if (__copy_to_user(u_platform, k_platform, len))
+                       return -EFAULT;
        }
 
        /* Create the ELF interpreter info */
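The comment in the hunk above describes the small per-exec shuffle that arch_align_stack() applies to the initial stack pointer so sibling hyper-threads do not all land on the same L1 cache sets. A minimal userspace model of that idea follows; the 8 KiB bound and 16-byte alignment are assumptions for the sketch, not necessarily the kernel's exact values.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative model of an arch_align_stack()-style shuffle:
 * subtract a small random offset, then re-align the pointer. */
static uintptr_t align_stack_model(uintptr_t sp, int randomize)
{
	if (randomize)
		sp -= (uintptr_t)(rand() % 8192);	/* assumed bound */
	return sp & ~(uintptr_t)0xf;			/* assumed alignment */
}

int main(void)
{
	uintptr_t top = 0xbffff000UL;	/* hypothetical initial stack pointer */

	srand(1);	/* fixed seed keeps the demo reproducible */
	printf("unrandomized: %#lx\n", (unsigned long)align_stack_model(top, 0));
	printf("randomized:   %#lx\n", (unsigned long)align_stack_model(top, 1));
	return 0;
}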
@@ -223,7 +240,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
 #endif
 
        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
-       __put_user(argc, sp++);
+       if (__put_user(argc, sp++))
+               return -EFAULT;
        if (interp_aout) {
                argv = sp + 2;
                envp = argv + argc + 1;
@@ -235,31 +253,35 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
        }
 
        /* Populate argv and envp */
-       p = current->mm->arg_start;
+       p = current->mm->arg_end = current->mm->arg_start;
        while (argc-- > 0) {
                size_t len;
                __put_user((elf_addr_t)p, argv++);
                len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
-                       return;
+                       return 0;
                p += len;
        }
-       __put_user(0, argv);
+       if (__put_user(0, argv))
+               return -EFAULT;
        current->mm->arg_end = current->mm->env_start = p;
        while (envc-- > 0) {
                size_t len;
                __put_user((elf_addr_t)p, envp++);
                len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
-                       return;
+                       return 0;
                p += len;
        }
-       __put_user(0, envp);
+       if (__put_user(0, envp))
+               return -EFAULT;
        current->mm->env_end = p;
 
        /* Put the elf_info on the stack in the right place.  */
        sp = (elf_addr_t __user *)envp + 1;
-       copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
+       if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
+               return -EFAULT;
+       return 0;
 }
 
 #ifndef elf_map
@@ -275,6 +297,11 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        addr = ELF_PAGESTART(addr);
        size = ELF_PAGEALIGN(size);
 
+       /* mmap() will return -EINVAL if given a zero size, but a
+        * segment with zero filesize is perfectly valid */
+       if (!size)
+               return addr;
+
        down_write(&current->mm->mmap_sem);
 
        /*
@@ -292,7 +319,7 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
                        do_munmap(current->mm, map_addr+size, total_size-size);
        } else
                map_addr = do_mmap(filep, addr, size, prot, type, off);
-               
+
        up_write(&current->mm->mmap_sem);
 
        return map_addr;
@@ -318,6 +345,7 @@ static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
                                ELF_PAGESTART(cmds[first_idx].p_vaddr);
 }
 
+
 /* This is much more generalized than the library routine read function,
    so we keep this separate.  Technically the library read function
    is only provided so that we can read a.out libraries that have
@@ -325,7 +353,7 @@ static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
 
 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                                     struct file * interpreter,
-                                    unsigned long *interp_load_addr,
+                                    unsigned long *interp_map_addr,
                                     unsigned long no_base)
 {
        struct elf_phdr *elf_phdata;
@@ -396,6 +424,8 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
 
            map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
            total_size = 0;
+           if (!*interp_map_addr)
+               *interp_map_addr = map_addr;
            error = map_addr;
            if (BAD_ADDR(map_addr))
                goto out_close;
@@ -411,7 +441,7 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
             * <= p_memsize so it is only necessary to check p_memsz.
             */
            k = load_addr + eppnt->p_vaddr;
-           if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
+           if (BAD_ADDR(k) || eppnt->p_filesz > eppnt->p_memsz ||
                eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
                error = -ENOMEM;
                goto out_close;
@@ -441,18 +471,23 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
         * that there are zero-mapped pages up to and including the 
         * last bss page.
         */
-       padzero(elf_bss);
+       if (padzero(elf_bss)) {
+               error = -EFAULT;
+               goto out_close;
+       }
+
        elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);   /* What we have mapped so far */
 
        /* Map the last of the bss segment */
        if (last_bss > elf_bss) {
+               down_write(&current->mm->mmap_sem);
                error = do_brk(elf_bss, last_bss - elf_bss);
+               up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(error))
                        goto out_close;
        }
 
-       *interp_load_addr = load_addr;
-       error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
+       error = load_addr;
 
 out_close:
        kfree(elf_phdata);
@@ -486,7 +521,9 @@ static unsigned long load_aout_interp(struct exec * interp_ex,
                goto out;
        }
 
+       down_write(&current->mm->mmap_sem);     
        do_brk(0, text_data);
+       up_write(&current->mm->mmap_sem);
        if (!interpreter->f_op || !interpreter->f_op->read)
                goto out;
        if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
@@ -494,8 +531,11 @@ static unsigned long load_aout_interp(struct exec * interp_ex,
        flush_icache_range((unsigned long)addr,
                           (unsigned long)addr + text_data);
 
+
+       down_write(&current->mm->mmap_sem);     
        do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
                interp_ex->a_bss);
+       up_write(&current->mm->mmap_sem);
        elf_entry = interp_ex->a_entry;
 
 out:
@@ -511,6 +551,24 @@ out:
 #define INTERPRETER_AOUT 1
 #define INTERPRETER_ELF 2
 
+#ifndef STACK_RND_MASK
+#define STACK_RND_MASK 0x7ff           /* with 4K pages 8MB of VA */
+#endif
+
+static unsigned long randomize_stack_top(unsigned long stack_top)
+{
+       unsigned int random_variable = 0;
+
+       if (current->flags & PF_RANDOMIZE) {
+               random_variable = get_random_int() & STACK_RND_MASK;
+               random_variable <<= PAGE_SHIFT;
+       }
+#ifdef CONFIG_STACK_GROWSUP
+       return PAGE_ALIGN(stack_top) + random_variable;
+#else
+       return PAGE_ALIGN(stack_top) - random_variable;
+#endif
+}
 
 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 {
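The randomize_stack_top() helper added above bounds the randomization to STACK_RND_MASK pages: with 4 KiB pages and the default mask of 0x7ff, the page-aligned STACK_TOP can shift by up to roughly 8 MiB downward (or upward when CONFIG_STACK_GROWSUP is set). A minimal userspace sketch of the same arithmetic for a downward-growing stack; the page size and STACK_TOP value are assumptions for the sketch.

#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SHIFT     12				/* assume 4 KiB pages */
#define MODEL_PAGE_SIZE      (1UL << MODEL_PAGE_SHIFT)
#define MODEL_PAGE_ALIGN(x)  (((x) + MODEL_PAGE_SIZE - 1) & ~(MODEL_PAGE_SIZE - 1))
#define MODEL_STACK_RND_MASK 0x7ffUL			/* default mask from the patch */

/* Mirror of the randomize_stack_top() arithmetic for a stack that
 * grows down: pick up to STACK_RND_MASK pages of randomness and
 * subtract it from the page-aligned stack top. */
static unsigned long model_randomize_stack_top(unsigned long stack_top, int randomize)
{
	unsigned long random_variable = 0;

	if (randomize) {
		random_variable = (unsigned long)rand() & MODEL_STACK_RND_MASK;
		random_variable <<= MODEL_PAGE_SHIFT;
	}
	return MODEL_PAGE_ALIGN(stack_top) - random_variable;
}

int main(void)
{
	unsigned long top = 0xc0000000UL;	/* hypothetical STACK_TOP */

	srand(1);				/* fixed seed for a reproducible demo */
	printf("fixed stack top:      %#lx\n", model_randomize_stack_top(top, 0));
	printf("randomized stack top: %#lx\n", model_randomize_stack_top(top, 1));
	printf("maximum offset:       %#lx bytes\n",
	       MODEL_STACK_RND_MASK << MODEL_PAGE_SHIFT);
	return 0;
}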
@@ -526,12 +584,12 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
        int elf_exec_fileno;
        int retval, i;
        unsigned int size;
-       unsigned long elf_entry, interp_load_addr = 0;
+       unsigned long elf_entry, interp_load_addr = 0, interp_map_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long reloc_func_desc = 0;
        char passed_fileno[6];
        struct files_struct *files;
-       int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
+       int have_pt_gnu_stack, executable_stack;
        unsigned long def_flags = 0;
        struct {
                struct elfhdr elf_ex;
@@ -620,7 +678,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
                                goto out_free_file;
 
                        retval = -ENOMEM;
-                       elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
+                       elf_interpreter = kmalloc(elf_ppnt->p_filesz,
                                                           GFP_KERNEL);
                        if (!elf_interpreter)
                                goto out_free_file;
@@ -699,22 +757,9 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
                }
        have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
 
-       relocexec = 0;
-
-       if (current->personality == PER_LINUX)
-       switch (exec_shield) {
-       case 1:
-               if (executable_stack == EXSTACK_DISABLE_X) {
-                       current->flags |= PF_RELOCEXEC;
-                       relocexec = PF_RELOCEXEC;
-               }
-               break;
-
-       case 2:
+       if (current->personality == PER_LINUX && (exec_shield & 2)) {
                executable_stack = EXSTACK_DISABLE_X;
-               current->flags |= PF_RELOCEXEC;
-               relocexec = PF_RELOCEXEC;
-               break;
+               current->flags |= PF_RANDOMIZE;
        }
 
        /* Some simple consistency checks for the interpreter */
@@ -769,7 +814,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
        retval = flush_old_exec(bprm);
        if (retval)
                goto out_free_dentry;
-       current->flags |= relocexec;
 
 #ifdef __i386__
        /*
@@ -798,18 +842,20 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
        SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
-       if (exec_shield != 2 &&
-                       elf_read_implies_exec(loc->elf_ex, have_pt_gnu_stack))
+       if (!(exec_shield & 2) &&
+                       elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;
 
+       if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+               current->flags |= PF_RANDOMIZE;
        arch_pick_mmap_layout(current->mm);
 
        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
-       // current->mm->rss = 0;
-       vx_rsspages_sub(current->mm, current->mm->rss);
        current->mm->free_area_cache = current->mm->mmap_base;
-       retval = setup_arg_pages(bprm, executable_stack);
+       current->mm->cached_hole_size = 0;
+       retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
+                                executable_stack);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
@@ -846,7 +892,14 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
                                nbyte = ELF_MIN_ALIGN - nbyte;
                                if (nbyte > elf_brk - elf_bss)
                                        nbyte = elf_brk - elf_bss;
-                               clear_user((void __user *) elf_bss + load_bias, nbyte);
+                               if (clear_user((void __user *)elf_bss +
+                                                       load_bias, nbyte)) {
+                                       /*
+                                        * This bss-zeroing can fail if the ELF
+                                        * file specifies odd protections.  So
+                                        * we don't check the return value
+                                        */
+                               }
                        }
                }
 
@@ -891,7 +944,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
                 * allowed task size. Note that p_filesz must always be
                 * <= p_memsz so it is only necessary to check p_memsz.
                 */
-               if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+               if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
                    elf_ppnt->p_memsz > TASK_SIZE ||
                    TASK_SIZE - elf_ppnt->p_memsz < k) {
                        /* set_brk can never work.  Avoid overflows.  */
@@ -930,22 +983,31 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }
-       padzero(elf_bss);
+       if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+               send_sig(SIGSEGV, current, 0);
+               retval = -EFAULT; /* Nobody gets to see this, but.. */
+               goto out_free_dentry;
+       }
 
        if (elf_interpreter) {
                if (interpreter_type == INTERPRETER_AOUT)
                        elf_entry = load_aout_interp(&loc->interp_ex,
                                                     interpreter);
-               else
+               else {
                        elf_entry = load_elf_interp(&loc->interp_elf_ex,
                                                    interpreter,
-                                                   &interp_load_addr,
+                                                   &interp_map_addr,
                                                    load_bias);
+                       if (!BAD_ADDR(elf_entry)) {
+                               /* load_elf_interp() returns relocation adjustment */
+                               interp_load_addr = elf_entry;
+                               elf_entry += loc->interp_elf_ex.e_entry;
+                       }
+               }
                if (BAD_ADDR(elf_entry)) {
-                       printk(KERN_ERR "Unable to load interpreter %.128s\n",
-                               elf_interpreter);
                        force_sig(SIGSEGV, current);
-                       retval = -ENOEXEC; /* Nobody gets to see this, but.. */
+                       retval = IS_ERR((void *)elf_entry) ?
+                                       (int)elf_entry : -EINVAL;
                        goto out_free_dentry;
                }
                reloc_func_desc = interp_load_addr;
@@ -955,22 +1017,28 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
                kfree(elf_interpreter);
        } else {
                elf_entry = loc->elf_ex.e_entry;
+               if (BAD_ADDR(elf_entry)) {
+                       force_sig(SIGSEGV, current);
+                       retval = -EINVAL;
+                       goto out_free_dentry;
+               }
        }
 
-       kfree(elf_phdata);
-
        if (interpreter_type != INTERPRETER_AOUT)
                sys_close(elf_exec_fileno);
 
        set_binfmt(&elf_format);
 
-       /*
-        * Map the vsyscall trampoline. This address is then passed via
-        * AT_SYSINFO.
-        */
-#ifdef __HAVE_ARCH_VSYSCALL
-       map_vsyscall();
-#endif
+#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
+       retval = arch_setup_additional_pages(bprm, executable_stack,
+                       start_code, interp_map_addr);
+       if (retval < 0) {
+               send_sig(SIGKILL, current, 0);
+               goto out_free_fh;
+       }
+#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
+
+       kfree(elf_phdata);
 
        compute_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
@@ -986,7 +1054,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
        current->mm->start_stack = bprm->p;
 
 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
-       if (current->flags & PF_RELOCEXEC)
+       if (current->flags & PF_RANDOMIZE)
                randomize_brk(elf_brk);
 #endif
        if (current->personality & MMAP_PAGE_ZERO) {
@@ -1033,8 +1101,7 @@ out_free_dentry:
        if (interpreter)
                fput(interpreter);
 out_free_interp:
-       if (elf_interpreter)
-               kfree(elf_interpreter);
+       kfree(elf_interpreter);
 out_free_file:
        sys_close(elf_exec_fileno);
 out_free_fh:
@@ -1044,8 +1111,6 @@ out_free_fh:
        }
 out_free_ph:
        kfree(elf_phdata);
-       current->flags &= ~PF_RELOCEXEC;
-       current->flags |= old_relocexec;
        goto out;
 }
 
@@ -1103,7 +1168,7 @@ static int load_elf_library(struct file *file)
        error = do_mmap(file,
                        ELF_PAGESTART(eppnt->p_vaddr),
                        (eppnt->p_filesz +
-                         ELF_PAGEOFFSET(eppnt->p_vaddr)),
+                        ELF_PAGEOFFSET(eppnt->p_vaddr)),
                        PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
                        (eppnt->p_offset -
@@ -1113,12 +1178,18 @@ static int load_elf_library(struct file *file)
                goto out_free_ph;
 
        elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
-       padzero(elf_bss);
+       if (padzero(elf_bss)) {
+               error = -EFAULT;
+               goto out_free_ph;
+       }
 
        len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
        bss = eppnt->p_memsz + eppnt->p_vaddr;
-       if (bss > len)
+       if (bss > len) {
+               down_write(&current->mm->mmap_sem);
                do_brk(len, bss - len);
+               up_write(&current->mm->mmap_sem);
+       }
        error = 0;
 
 out_free_ph:
@@ -1131,7 +1202,7 @@ out:
  * Note that some platforms still use traditional core dumps and not
  * the ELF core dump.  Each platform can select it as appropriate.
  */
-#ifdef USE_ELF_CORE_DUMP
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
 
 /*
  * ELF core dumper
@@ -1148,7 +1219,7 @@ static int dump_write(struct file *file, const void *addr, int nr)
        return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
 }
 
-static int dump_seek(struct file *file, off_t off)
+static int dump_seek(struct file *file, loff_t off)
 {
        if (file->f_op->llseek) {
                if (file->f_op->llseek(file, off, 0) != off)
@@ -1167,10 +1238,17 @@ static int dump_seek(struct file *file, off_t off)
  */
 static int maydump(struct vm_area_struct *vma)
 {
-       /* Do not dump I/O mapped devices, shared memory, or special mappings */
-       if (vma->vm_flags & (VM_IO | VM_SHARED | VM_RESERVED))
+       /* Do not dump I/O mapped devices or special mappings */
+       if (vma->vm_flags & (VM_IO | VM_RESERVED))
                return 0;
 
+       if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO.  */
+               return 1;
+
+       /* Dump shared memory only if mapped from an anonymous file.  */
+       if (vma->vm_flags & VM_SHARED)
+               return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
+
        /* If it hasn't been written to, don't write it out */
        if (!vma->anon_vma)
                return 0;
@@ -1232,7 +1310,7 @@ static int writenote(struct memelfnote *men, struct file *file)
        if (!dump_seek(file, (off))) \
                goto end_coredump;
 
-static inline void fill_elf_header(struct elfhdr *elf, int segs)
+static void fill_elf_header(struct elfhdr *elf, int segs)
 {
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
@@ -1247,7 +1325,7 @@ static inline void fill_elf_header(struct elfhdr *elf, int segs)
        elf->e_entry = 0;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_shoff = 0;
-       elf->e_flags = 0;
+       elf->e_flags = ELF_CORE_EFLAGS;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;
@@ -1257,7 +1335,7 @@ static inline void fill_elf_header(struct elfhdr *elf, int segs)
        return;
 }
 
-static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
+static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
 {
        phdr->p_type = PT_NOTE;
        phdr->p_offset = offset;
@@ -1294,7 +1372,7 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
        prstatus->pr_ppid = p->parent->pid;
        prstatus->pr_pgrp = process_group(p);
        prstatus->pr_sid = p->signal->session;
-       if (p->pid == p->tgid) {
+       if (thread_group_leader(p)) {
                /*
                 * This is the record for the group leader.  Add in the
                 * cumulative times of previous dead threads.  This total
@@ -1305,22 +1383,22 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
                 * this and each other thread to finish dying after the
                 * core dump synchronization phase.
                 */
-               jiffies_to_timeval(p->utime + p->signal->utime,
+               cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
                                   &prstatus->pr_utime);
-               jiffies_to_timeval(p->stime + p->signal->stime,
+               cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
                                   &prstatus->pr_stime);
        } else {
-               jiffies_to_timeval(p->utime, &prstatus->pr_utime);
-               jiffies_to_timeval(p->stime, &prstatus->pr_stime);
+               cputime_to_timeval(p->utime, &prstatus->pr_utime);
+               cputime_to_timeval(p->stime, &prstatus->pr_stime);
        }
-       jiffies_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
-       jiffies_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+       cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
+       cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
 }
 
-static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
-                       struct mm_struct *mm)
+static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
+                      struct mm_struct *mm)
 {
-       int i, len;
+       unsigned int i, len;
        
        /* first copy the parameters from user space */
        memset(psinfo, 0, sizeof(struct elf_prpsinfo));
@@ -1328,8 +1406,9 @@ static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
        len = mm->arg_end - mm->arg_start;
        if (len >= ELF_PRARGSZ)
                len = ELF_PRARGSZ-1;
-       copy_from_user(&psinfo->pr_psargs,
-                      (const char __user *)mm->arg_start, len);
+       if (copy_from_user(&psinfo->pr_psargs,
+                          (const char __user *)mm->arg_start, len))
+               return -EFAULT;
        for(i = 0; i < len; i++)
                if (psinfo->pr_psargs[i] == 0)
                        psinfo->pr_psargs[i] = ' ';
@@ -1342,7 +1421,7 @@ static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
 
        i = p->state ? ffz(~p->state) + 1 : 0;
        psinfo->pr_state = i;
-       psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
+       psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
@@ -1350,7 +1429,7 @@ static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
        SET_GID(psinfo->pr_gid, p->gid);
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
        
-       return;
+       return 0;
 }
 
 /* Here is the structure in which status of each thread is captured. */
@@ -1473,12 +1552,11 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
                read_lock(&tasklist_lock);
                do_each_thread(g,p)
                        if (current->mm == p->mm && current != p) {
-                               tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
+                               tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
                                if (!tmp) {
                                        read_unlock(&tasklist_lock);
                                        goto cleanup;
                                }
-                               memset(tmp, 0, sizeof(*tmp));
                                INIT_LIST_HEAD(&tmp->list);
                                tmp->thread = p;
                                list_add(&tmp->list, &thread_list);
@@ -1520,9 +1598,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
        fill_psinfo(psinfo, current->group_leader, current->mm);
        fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
        
-       fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
-  
-       numnote = 3;
+       numnote = 2;
 
        auxv = (elf_addr_t *) current->mm->saved_auxv;
 
@@ -1629,7 +1705,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
                                        DUMP_SEEK (file->f_pos + PAGE_SIZE);
                                } else {
                                        void *kaddr;
-                                       flush_cache_page(vma, addr);
+                                       flush_cache_page(vma, addr, page_to_pfn(page));
                                        kaddr = kmap(page);
                                        if ((size += PAGE_SIZE) > limit ||
                                            !dump_write(file, kaddr,
@@ -1649,17 +1725,17 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
        ELF_CORE_WRITE_EXTRA_DATA;
 #endif
 
-       if ((off_t) file->f_pos != offset) {
+       if ((off_t)file->f_pos != offset) {
                /* Sanity check */
-               printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
-                      (off_t) file->f_pos, offset);
+               printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
+                      (off_t)file->f_pos, offset);
        }
 
 end_coredump:
        set_fs(fs);
 
 cleanup:
-       while(!list_empty(&thread_list)) {
+       while (!list_empty(&thread_list)) {
                struct list_head *tmp = thread_list.next;
                list_del(tmp);
                kfree(list_entry(tmp, struct elf_thread_status, list));