#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/random.h>
+#include <linux/vs_memory.h>
+#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/param.h>
+#include <asm/page.h>
#include <linux/elf.h>
* If we don't support core dumping, then supply a NULL so we
* don't even try.
*/
-#ifdef USE_ELF_CORE_DUMP
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump NULL
# define ELF_MIN_ALIGN PAGE_SIZE
#endif
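+/*
+ * Architectures may define ELF_CORE_EFLAGS to fill in the e_flags
+ * field of core dump headers; default to 0 if they don't.
+ */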
+#ifndef ELF_CORE_EFLAGS
+#define ELF_CORE_EFLAGS 0
+#endif
+
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
.min_coredump = ELF_EXEC_PAGESIZE
};
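+/*
+ * As an unsigned long, any value at or above PAGE_MASK lies in the
+ * topmost page of the address space, which is where -errno returns
+ * from do_mmap()/do_brk() end up, so treat it as a bad address.
+ */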
-#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
+#define BAD_ADDR(x) ((unsigned long)(x) >= PAGE_MASK)
static int set_brk(unsigned long start, unsigned long end)
{
start = ELF_PAGEALIGN(start);
end = ELF_PAGEALIGN(end);
if (end > start) {
- unsigned long addr = do_brk(start, end - start);
+ unsigned long addr;
+ down_write(&current->mm->mmap_sem);
+ addr = do_brk(start, end - start);
+ up_write(&current->mm->mmap_sem);
if (BAD_ADDR(addr))
return addr;
}
be in memory */
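+/*
+ * Zero out the unused part of the last page; returns -EFAULT if the
+ * page could not be written, so callers can fail the exec cleanly.
+ */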
-static void padzero(unsigned long elf_bss)
+static int padzero(unsigned long elf_bss)
{
unsigned long nbyte;
nbyte = ELF_PAGEOFFSET(elf_bss);
if (nbyte) {
nbyte = ELF_MIN_ALIGN - nbyte;
- clear_user((void __user *) elf_bss, nbyte);
+ if (clear_user((void __user *) elf_bss, nbyte))
+ return -EFAULT;
}
+ return 0;
}
/* Let's use some macros to make this stack manipulation a little clearer */
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
-static void
+static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
int interp_aout, unsigned long load_addr,
unsigned long interp_load_addr)
if (k_platform) {
size_t len = strlen(k_platform) + 1;
-#ifdef __HAVE_ARCH_ALIGN_STACK
- p = (unsigned long)arch_align_stack((unsigned long)p);
-#endif
+ /*
+ * In some cases (e.g. Hyper-Threading), we want to avoid L1
+ * evictions by the processes running on the same package. One
+ * thing we can do is to shuffle the initial stack for them.
+ */
+
+ p = arch_align_stack(p);
+
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
- __copy_to_user(u_platform, k_platform, len);
+ if (__copy_to_user(u_platform, k_platform, len))
+ return -EFAULT;
}
/* Create the ELF interpreter info */
NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
if (k_platform) {
- NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
+ NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
}
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
#endif
/* Now, let's put argc (and argv, envp if appropriate) on the stack */
- __put_user(argc, sp++);
+ if (__put_user(argc, sp++))
+ return -EFAULT;
if (interp_aout) {
argv = sp + 2;
envp = argv + argc + 1;
- __put_user((elf_addr_t)(long)argv, sp++);
- __put_user((elf_addr_t)(long)envp, sp++);
+ __put_user((elf_addr_t)(unsigned long)argv, sp++);
+ __put_user((elf_addr_t)(unsigned long)envp, sp++);
} else {
argv = sp;
envp = argv + argc + 1;
}
/* Populate argv and envp */
- p = current->mm->arg_start;
+ p = current->mm->arg_end = current->mm->arg_start;
while (argc-- > 0) {
size_t len;
__put_user((elf_addr_t)p, argv++);
len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
- return;
+ return 0;
p += len;
}
- __put_user(0, argv);
+ if (__put_user(0, argv))
+ return -EFAULT;
current->mm->arg_end = current->mm->env_start = p;
while (envc-- > 0) {
size_t len;
__put_user((elf_addr_t)p, envp++);
len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
- return;
+ return 0;
p += len;
}
- __put_user(0, envp);
+ if (__put_user(0, envp))
+ return -EFAULT;
current->mm->env_end = p;
/* Put the elf_info on the stack in the right place. */
sp = (elf_addr_t __user *)envp + 1;
- copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
+ if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
+ return -EFAULT;
+ return 0;
}
#ifndef elf_map
addr = ELF_PAGESTART(addr);
size = ELF_PAGEALIGN(size);
+ /* mmap() will return -EINVAL if given a zero size, but a
+ * segment with zero filesize is perfectly valid */
+ if (!size)
+ return addr;
+
down_write(&current->mm->mmap_sem);
/*
do_munmap(current->mm, map_addr+size, total_size-size);
} else
map_addr = do_mmap(filep, addr, size, prot, type, off);
-
+
up_write(&current->mm->mmap_sem);
return map_addr;
ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
+
/* This is much more generalized than the library routine read function,
so we keep this separate. Technically the library read function
is only provided so that we can read a.out libraries that have
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
struct file * interpreter,
- unsigned long *interp_load_addr,
+ unsigned long *interp_map_addr,
unsigned long no_base)
{
struct elf_phdr *elf_phdata;
*/
if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
goto out;
- if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
+ if (interp_elf_ex->e_phnum < 1 ||
+ interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
goto out;
/* Now read in all of the header information */
goto out;
retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
- error = retval;
- if (retval < 0)
+ error = -EIO;
+ if (retval != size) {
+ if (retval < 0)
+ error = retval;
goto out_close;
+ }
total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
if (!total_size)
map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
total_size = 0;
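+ /* Record where the interpreter's first segment landed so it can
+  * be handed to arch_setup_additional_pages() later. */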
+ if (!*interp_map_addr)
+ *interp_map_addr = map_addr;
error = map_addr;
if (BAD_ADDR(map_addr))
goto out_close;
* <= p_memsz so it is only necessary to check p_memsz.
*/
k = load_addr + eppnt->p_vaddr;
- if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
+ if (BAD_ADDR(k) || eppnt->p_filesz > eppnt->p_memsz ||
eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
error = -ENOMEM;
goto out_close;
* that there are zero-mapped pages up to and including the
* last bss page.
*/
- padzero(elf_bss);
+ if (padzero(elf_bss)) {
+ error = -EFAULT;
+ goto out_close;
+ }
+
elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
/* Map the last of the bss segment */
if (last_bss > elf_bss) {
+ down_write(&current->mm->mmap_sem);
error = do_brk(elf_bss, last_bss - elf_bss);
+ up_write(&current->mm->mmap_sem);
if (BAD_ADDR(error))
goto out_close;
}
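+ /* On success, return the relocation amount; the caller adds the
+  * interpreter's e_entry itself. */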
- *interp_load_addr = load_addr;
- error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
+ error = load_addr;
out_close:
kfree(elf_phdata);
goto out;
}
+ down_write(&current->mm->mmap_sem);
do_brk(0, text_data);
+ up_write(&current->mm->mmap_sem);
if (!interpreter->f_op || !interpreter->f_op->read)
goto out;
if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
flush_icache_range((unsigned long)addr,
(unsigned long)addr + text_data);
+
+ down_write(&current->mm->mmap_sem);
do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
interp_ex->a_bss);
+ up_write(&current->mm->mmap_sem);
elf_entry = interp_ex->a_entry;
out:
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
+#ifndef STACK_RND_MASK
+#define STACK_RND_MASK 0x7ff /* with 4K pages 8MB of VA */
+#endif
+
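+/*
+ * Shift the stack top by a random amount of up to STACK_RND_MASK
+ * pages when randomization is enabled for this task; grows-up
+ * stacks move up, grows-down stacks move down.
+ */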
+static unsigned long randomize_stack_top(unsigned long stack_top)
+{
+ unsigned int random_variable = 0;
+
+ if (current->flags & PF_RANDOMIZE) {
+ random_variable = get_random_int() & STACK_RND_MASK;
+ random_variable <<= PAGE_SHIFT;
+ }
+#ifdef CONFIG_STACK_GROWSUP
+ return PAGE_ALIGN(stack_top) + random_variable;
+#else
+ return PAGE_ALIGN(stack_top) - random_variable;
+#endif
+}
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
int elf_exec_fileno;
int retval, i;
unsigned int size;
- unsigned long elf_entry, interp_load_addr = 0;
+ unsigned long elf_entry, interp_load_addr = 0, interp_map_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc = 0;
- struct elfhdr elf_ex;
- struct elfhdr interp_elf_ex;
- struct exec interp_ex;
char passed_fileno[6];
struct files_struct *files;
- int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
+ int have_pt_gnu_stack, executable_stack;
unsigned long def_flags = 0;
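+ /*
+  * These three headers are too big to keep on the kernel stack,
+  * so they live in a single kmalloc()ed block instead.
+  */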
+ struct {
+ struct elfhdr elf_ex;
+ struct elfhdr interp_elf_ex;
+ struct exec interp_ex;
+ } *loc;
+
+ loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+ if (!loc) {
+ retval = -ENOMEM;
+ goto out_ret;
+ }
/* Get the exec-header */
- elf_ex = *((struct elfhdr *) bprm->buf);
+ loc->elf_ex = *((struct elfhdr *) bprm->buf);
retval = -ENOEXEC;
/* First of all, some simple consistency checks */
- if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+ if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
goto out;
- if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
+ if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
goto out;
- if (!elf_check_arch(&elf_ex))
+ if (!elf_check_arch(&loc->elf_ex))
goto out;
if (!bprm->file->f_op||!bprm->file->f_op->mmap)
goto out;
/* Now read in all of the header information */
- retval = -ENOMEM;
- if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
+ if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
goto out;
- if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
+ if (loc->elf_ex.e_phnum < 1 ||
+ loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
goto out;
- size = elf_ex.e_phnum * sizeof(struct elf_phdr);
+ size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
+ retval = -ENOMEM;
elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
if (!elf_phdata)
goto out;
- retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
- if (retval < 0)
+ retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
+ if (retval != size) {
+ if (retval >= 0)
+ retval = -EIO;
goto out_free_ph;
+ }
files = current->files; /* Refcounted so ok */
retval = unshare_files();
start_data = 0;
end_data = 0;
- for (i = 0; i < elf_ex.e_phnum; i++) {
+ for (i = 0; i < loc->elf_ex.e_phnum; i++) {
if (elf_ppnt->p_type == PT_INTERP) {
/* This is the program interpreter used for
* shared libraries - for now assume that this
* is an a.out format binary
*/
- retval = -ENOMEM;
- if (elf_ppnt->p_filesz > PATH_MAX)
+ retval = -ENOEXEC;
+ if (elf_ppnt->p_filesz > PATH_MAX ||
+ elf_ppnt->p_filesz < 2)
goto out_free_file;
- elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
+
+ retval = -ENOMEM;
+ elf_interpreter = kmalloc(elf_ppnt->p_filesz,
GFP_KERNEL);
if (!elf_interpreter)
goto out_free_file;
retval = kernel_read(bprm->file, elf_ppnt->p_offset,
elf_interpreter,
elf_ppnt->p_filesz);
- if (retval < 0)
+ if (retval != elf_ppnt->p_filesz) {
+ if (retval >= 0)
+ retval = -EIO;
goto out_free_interp;
+ }
+ /* make sure path is NULL terminated */
+ retval = -ENOEXEC;
+ if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
+ goto out_free_interp;
+
/* If the program interpreter is one of these two,
* then assume an iBCS2 image. Otherwise assume
* a native linux image.
* switch really is going to happen - do this in
* flush_thread(). - akpm
*/
- SET_PERSONALITY(elf_ex, ibcs2_interpreter);
+ SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
interpreter = open_exec(elf_interpreter);
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
goto out_free_interp;
retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
- if (retval < 0)
+ if (retval != BINPRM_BUF_SIZE) {
+ if (retval >= 0)
+ retval = -EIO;
goto out_free_dentry;
+ }
/* Get the exec headers */
- interp_ex = *((struct exec *) bprm->buf);
- interp_elf_ex = *((struct elfhdr *) bprm->buf);
+ loc->interp_ex = *((struct exec *) bprm->buf);
+ loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
break;
}
elf_ppnt++;
elf_ppnt = elf_phdata;
executable_stack = EXSTACK_DEFAULT;
- for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
+ for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
if (elf_ppnt->p_type == PT_GNU_STACK) {
if (elf_ppnt->p_flags & PF_X)
executable_stack = EXSTACK_ENABLE_X;
executable_stack = EXSTACK_DISABLE_X;
break;
}
- if (i == elf_ex.e_phnum)
- def_flags |= VM_EXEC | VM_MAYEXEC;
-
- relocexec = 0;
+ have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
- if (current->personality == PER_LINUX)
- switch (exec_shield) {
- case 1:
- if (executable_stack != EXSTACK_DEFAULT) {
- current->flags |= PF_RELOCEXEC;
- relocexec = PF_RELOCEXEC;
- }
- break;
-
- case 2:
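+ /* exec-shield level 2: force a non-executable stack and a
+  * randomized layout for all PER_LINUX binaries */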
+ if (current->personality == PER_LINUX && (exec_shield & 2)) {
executable_stack = EXSTACK_DISABLE_X;
- current->flags |= PF_RELOCEXEC;
- relocexec = PF_RELOCEXEC;
- break;
+ current->flags |= PF_RANDOMIZE;
}
/* Some simple consistency checks for the interpreter */
interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
/* Now figure out which format our binary is */
- if ((N_MAGIC(interp_ex) != OMAGIC) &&
- (N_MAGIC(interp_ex) != ZMAGIC) &&
- (N_MAGIC(interp_ex) != QMAGIC))
+ if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
+ (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
+ (N_MAGIC(loc->interp_ex) != QMAGIC))
interpreter_type = INTERPRETER_ELF;
- if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+ if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
interpreter_type &= ~INTERPRETER_ELF;
retval = -ELIBBAD;
}
/* Verify the interpreter has a valid arch */
if ((interpreter_type == INTERPRETER_ELF) &&
- !elf_check_arch(&interp_elf_ex))
+ !elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
} else {
/* Executables without an interpreter also need a personality */
- SET_PERSONALITY(elf_ex, ibcs2_interpreter);
+ SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
}
/* OK, we are done with that, now set up the arg stuff,
retval = flush_old_exec(bprm);
if (retval)
goto out_free_dentry;
- current->flags |= relocexec;
#ifdef __i386__
/*
* Turn off the CS limit completely if exec-shield disabled or
* NX active:
*/
- if (!exec_shield)
+ if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
arch_add_exec_range(current->mm, -1);
#endif
current->mm->end_data = 0;
current->mm->end_code = 0;
current->mm->mmap = NULL;
-#ifdef __HAVE_ARCH_MMAP_TOP
- current->mm->mmap_top = mmap_top();
-#endif
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
- SET_PERSONALITY(elf_ex, ibcs2_interpreter);
+ SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
+ if (!(exec_shield & 2) &&
+ elf_read_implies_exec(loc->elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
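+ /* Decide whether this process gets a randomized address space,
+  * honouring both the personality bit and the global sysctl. */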
+ if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ current->flags |= PF_RANDOMIZE;
+ arch_pick_mmap_layout(current->mm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
- // current->mm->rss = 0;
- vx_rsspages_sub(current->mm, current->mm->rss);
- current->mm->free_area_cache = TASK_UNMAPPED_BASE;
- current->mm->non_executable_cache = current->mm->mmap_top;
- retval = setup_arg_pages(bprm, executable_stack);
+ current->mm->free_area_cache = current->mm->mmap_base;
+ current->mm->cached_hole_size = 0;
+ retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
+ executable_stack);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
the correct location in memory.
*/
- for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
+ for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
nbyte = ELF_MIN_ALIGN - nbyte;
if (nbyte > elf_brk - elf_bss)
nbyte = elf_brk - elf_bss;
- clear_user((void __user *) elf_bss + load_bias, nbyte);
+ if (clear_user((void __user *)elf_bss +
+ load_bias, nbyte)) {
+ /*
+ * This bss-zeroing can fail if the ELF
+ * file specifies odd protections. So
+ * we don't check the return value
+ */
+ }
}
}
elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
- if (elf_ex.e_type == ET_EXEC || load_addr_set)
+ if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
elf_flags |= MAP_FIXED;
- else if (elf_ex.e_type == ET_DYN)
+ else if (loc->elf_ex.e_type == ET_DYN)
#ifdef __i386__
load_bias = 0;
#else
#endif
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
- if (BAD_ADDR(error))
- continue;
+ if (BAD_ADDR(error)) {
+ send_sig(SIGKILL, current, 0);
+ goto out_free_dentry;
+ }
if (!load_addr_set) {
load_addr_set = 1;
load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
- if (elf_ex.e_type == ET_DYN) {
+ if (loc->elf_ex.e_type == ET_DYN) {
load_bias += error -
ELF_PAGESTART(load_bias + vaddr);
load_addr += load_bias;
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
- if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+ if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
elf_ppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - elf_ppnt->p_memsz < k) {
/* set_brk can never work. Avoid overflows. */
elf_brk = k;
}
- elf_ex.e_entry += load_bias;
+ loc->elf_ex.e_entry += load_bias;
elf_bss += load_bias;
elf_brk += load_bias;
start_code += load_bias;
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
- padzero(elf_bss);
+ if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+ send_sig(SIGSEGV, current, 0);
+ retval = -EFAULT; /* Nobody gets to see this, but.. */
+ goto out_free_dentry;
+ }
if (elf_interpreter) {
if (interpreter_type == INTERPRETER_AOUT)
- elf_entry = load_aout_interp(&interp_ex,
+ elf_entry = load_aout_interp(&loc->interp_ex,
interpreter);
- else
- elf_entry = load_elf_interp(&interp_elf_ex,
+ else {
+ elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
- &interp_load_addr,
+ &interp_map_addr,
load_bias);
+ if (!BAD_ADDR(elf_entry)) {
+ /* load_elf_interp() returns relocation adjustment */
+ interp_load_addr = elf_entry;
+ elf_entry += loc->interp_elf_ex.e_entry;
+ }
+ }
if (BAD_ADDR(elf_entry)) {
- printk(KERN_ERR "Unable to load interpreter\n");
- send_sig(SIGSEGV, current, 0);
- retval = -ENOEXEC; /* Nobody gets to see this, but.. */
+ force_sig(SIGSEGV, current);
+ retval = IS_ERR((void *)elf_entry) ?
+ (int)elf_entry : -EINVAL;
goto out_free_dentry;
}
reloc_func_desc = interp_load_addr;
fput(interpreter);
kfree(elf_interpreter);
} else {
- elf_entry = elf_ex.e_entry;
+ elf_entry = loc->elf_ex.e_entry;
+ if (BAD_ADDR(elf_entry)) {
+ force_sig(SIGSEGV, current);
+ retval = -EINVAL;
+ goto out_free_dentry;
+ }
}
- kfree(elf_phdata);
-
if (interpreter_type != INTERPRETER_AOUT)
sys_close(elf_exec_fileno);
set_binfmt(&elf_format);
- /*
- * Map the vsyscall trampoline. This address is then passed via
- * AT_SYSINFO.
- */
-#ifdef __HAVE_ARCH_VSYSCALL
- map_vsyscall();
-#endif
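+/*
+ * Give the architecture a chance to map extra pages, such as a vDSO
+ * or vsyscall trampoline, into the new image.
+ */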
+#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
+ retval = arch_setup_additional_pages(bprm, executable_stack,
+ start_code, interp_map_addr);
+ if (retval < 0) {
+ send_sig(SIGKILL, current, 0);
+ goto out_free_fh;
+ }
+#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
+
+ kfree(elf_phdata);
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
- create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
+ create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
load_addr, interp_load_addr);
/* N.B. passed_fileno might not be initialized? */
if (interpreter_type == INTERPRETER_AOUT)
current->mm->start_stack = bprm->p;
#ifdef __HAVE_ARCH_RANDOMIZE_BRK
- if (current->flags & PF_RELOCEXEC)
+ if (current->flags & PF_RANDOMIZE)
randomize_brk(elf_brk);
#endif
if (current->personality & MMAP_PAGE_ZERO) {
}
retval = 0;
out:
+ kfree(loc);
+out_ret:
return retval;
/* error cleanup */
if (interpreter)
fput(interpreter);
out_free_interp:
- if (elf_interpreter)
- kfree(elf_interpreter);
+ kfree(elf_interpreter);
out_free_file:
sys_close(elf_exec_fileno);
out_free_fh:
}
out_free_ph:
kfree(elf_phdata);
- current->flags &= ~PF_RELOCEXEC;
- current->flags |= old_relocexec;
goto out;
}
static int load_elf_library(struct file *file)
{
struct elf_phdr *elf_phdata;
+ struct elf_phdr *eppnt;
unsigned long elf_bss, bss, len;
int retval, error, i, j;
struct elfhdr elf_ex;
/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
error = -ENOMEM;
- elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
+ elf_phdata = kmalloc(j, GFP_KERNEL);
if (!elf_phdata)
goto out;
+ eppnt = elf_phdata;
error = -ENOEXEC;
- retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
+ retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
if (retval != j)
goto out_free_ph;
for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
- if ((elf_phdata + i)->p_type == PT_LOAD) j++;
+ if ((eppnt + i)->p_type == PT_LOAD)
+ j++;
if (j != 1)
goto out_free_ph;
- while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
+ while (eppnt->p_type != PT_LOAD)
+ eppnt++;
/* Now use mmap to map the library into memory. */
down_write(&current->mm->mmap_sem);
error = do_mmap(file,
- ELF_PAGESTART(elf_phdata->p_vaddr),
- (elf_phdata->p_filesz +
- ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
+ ELF_PAGESTART(eppnt->p_vaddr),
+ (eppnt->p_filesz +
+ ELF_PAGEOFFSET(eppnt->p_vaddr)),
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
- (elf_phdata->p_offset -
- ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
+ (eppnt->p_offset -
+ ELF_PAGEOFFSET(eppnt->p_vaddr)));
up_write(&current->mm->mmap_sem);
- if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
+ if (error != ELF_PAGESTART(eppnt->p_vaddr))
goto out_free_ph;
- elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
- padzero(elf_bss);
+ elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
+ if (padzero(elf_bss)) {
+ error = -EFAULT;
+ goto out_free_ph;
+ }
- len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
- bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
- if (bss > len)
+ len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
+ bss = eppnt->p_memsz + eppnt->p_vaddr;
+ if (bss > len) {
+ down_write(&current->mm->mmap_sem);
do_brk(len, bss - len);
+ up_write(&current->mm->mmap_sem);
+ }
error = 0;
out_free_ph:
* Note that some platforms still use traditional core dumps and not
* the ELF core dump. Each platform can select it as appropriate.
*/
-#ifdef USE_ELF_CORE_DUMP
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
/*
* ELF core dumper
return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
-static int dump_seek(struct file *file, off_t off)
+static int dump_seek(struct file *file, loff_t off)
{
if (file->f_op->llseek) {
if (file->f_op->llseek(file, off, 0) != off)
*/
static int maydump(struct vm_area_struct *vma)
{
- /*
- * If we may not read the contents, don't allow us to dump
- * them either. "dump_write()" can't handle it anyway.
- */
- if (!(vma->vm_flags & VM_READ))
+ /* Do not dump I/O mapped devices or special mappings */
+ if (vma->vm_flags & (VM_IO | VM_RESERVED))
return 0;
- /* Do not dump I/O mapped devices! -DaveM */
- if (vma->vm_flags & VM_IO)
- return 0;
-#if 1
- if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
+ if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO. */
return 1;
- if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
+
+ /* Dump shared memory only if mapped from an anonymous file. */
+ if (vma->vm_flags & VM_SHARED)
+ return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
+
+ /* If it hasn't been written to, don't write it out */
+ if (!vma->anon_vma)
return 0;
-#endif
+
return 1;
}
if (!dump_seek(file, (off))) \
goto end_coredump;
-static inline void fill_elf_header(struct elfhdr *elf, int segs)
+static void fill_elf_header(struct elfhdr *elf, int segs)
{
memcpy(elf->e_ident, ELFMAG, SELFMAG);
elf->e_ident[EI_CLASS] = ELF_CLASS;
elf->e_entry = 0;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_shoff = 0;
- elf->e_flags = 0;
+ elf->e_flags = ELF_CORE_EFLAGS;
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = segs;
return;
}
-static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
+static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
phdr->p_type = PT_NOTE;
phdr->p_offset = offset;
prstatus->pr_ppid = p->parent->pid;
prstatus->pr_pgrp = process_group(p);
prstatus->pr_sid = p->signal->session;
- jiffies_to_timeval(p->utime, &prstatus->pr_utime);
- jiffies_to_timeval(p->stime, &prstatus->pr_stime);
- jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
- jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
+ if (thread_group_leader(p)) {
+ /*
+ * This is the record for the group leader. Add in the
+ * cumulative times of previous dead threads. This total
+ * won't include the time of each live thread whose state
+ * is included in the core dump. The final total reported
+ * to our parent process when it calls wait4 will include
+ * those sums as well as the little bit more time it takes
+ * this and each other thread to finish dying after the
+ * core dump synchronization phase.
+ */
+ cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
+ &prstatus->pr_utime);
+ cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
+ &prstatus->pr_stime);
+ } else {
+ cputime_to_timeval(p->utime, &prstatus->pr_utime);
+ cputime_to_timeval(p->stime, &prstatus->pr_stime);
+ }
+ cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
+ cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
-static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
- struct mm_struct *mm)
+static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
+ struct mm_struct *mm)
{
- int i, len;
+ unsigned int i, len;
/* first copy the parameters from user space */
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
len = mm->arg_end - mm->arg_start;
if (len >= ELF_PRARGSZ)
len = ELF_PRARGSZ-1;
- copy_from_user(&psinfo->pr_psargs,
- (const char __user *)mm->arg_start, len);
+ if (copy_from_user(&psinfo->pr_psargs,
+ (const char __user *)mm->arg_start, len))
+ return -EFAULT;
for(i = 0; i < len; i++)
if (psinfo->pr_psargs[i] == 0)
psinfo->pr_psargs[i] = ' ';
i = p->state ? ffz(~p->state) + 1 : 0;
psinfo->pr_state = i;
- psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
+ psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
psinfo->pr_zomb = psinfo->pr_sname == 'Z';
psinfo->pr_nice = task_nice(p);
psinfo->pr_flag = p->flags;
SET_GID(psinfo->pr_gid, p->gid);
strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
- return;
+ return 0;
}
/* Here is the structure in which status of each thread is captured. */
struct list_head list;
struct elf_prstatus prstatus; /* NT_PRSTATUS */
elf_fpregset_t fpu; /* NT_PRFPREG */
+ struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
elf_fpxregset_t xfpu; /* NT_PRXFPREG */
#endif
* we need to keep a linked list of every thread's pr_status and then
* create a single section for them in the final core file.
*/
-static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
+static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
-
- struct elf_thread_status *t;
int sz = 0;
-
- t = kmalloc(sizeof(*t), GFP_ATOMIC);
- if (!t)
- return 0;
- memset(t, 0, sizeof(*t));
-
- INIT_LIST_HEAD(&t->list);
+ struct task_struct *p = t->thread;
t->num_notes = 0;
fill_prstatus(&t->prstatus, p, signr);
sz += notesize(&t->notes[2]);
}
#endif
- list_add(&t->list, thread_list);
return sz;
}
struct vm_area_struct *vma;
struct elfhdr *elf = NULL;
off_t offset = 0, dataoff;
- unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
+ unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
int numnote;
struct memelfnote *notes = NULL;
struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
goto cleanup;
#endif
- /* capture the status of all other threads */
if (signr) {
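+ /*
+  * Only allocate and queue a record per thread while holding
+  * tasklist_lock; the status itself is filled in after the
+  * lock is dropped.
+  */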
+ struct elf_thread_status *tmp;
read_lock(&tasklist_lock);
do_each_thread(g,p)
if (current->mm == p->mm && current != p) {
- int sz = elf_dump_thread_status(signr, p, &thread_list);
- if (!sz) {
+ tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (!tmp) {
read_unlock(&tasklist_lock);
goto cleanup;
- } else
- thread_status_size += sz;
+ }
+ INIT_LIST_HEAD(&tmp->list);
+ tmp->thread = p;
+ list_add(&tmp->list, &thread_list);
}
while_each_thread(g,p);
read_unlock(&tasklist_lock);
- }
+ list_for_each(t, &thread_list) {
+ struct elf_thread_status *tmp;
+ int sz;
+ tmp = list_entry(t, struct elf_thread_status, list);
+ sz = elf_dump_thread_status(signr, tmp);
+ thread_status_size += sz;
+ }
+ }
/* now collect the dump for the current */
memset(prstatus, 0, sizeof(*prstatus));
fill_prstatus(prstatus, current, signr);
fill_psinfo(psinfo, current->group_leader, current->mm);
fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
- fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
-
- numnote = 3;
+ numnote = 2;
auxv = (elf_addr_t *) current->mm->saved_auxv;
DUMP_SEEK (file->f_pos + PAGE_SIZE);
} else {
void *kaddr;
- flush_cache_page(vma, addr);
+ flush_cache_page(vma, addr, page_to_pfn(page));
kaddr = kmap(page);
if ((size += PAGE_SIZE) > limit ||
!dump_write(file, kaddr,
ELF_CORE_WRITE_EXTRA_DATA;
#endif
- if ((off_t) file->f_pos != offset) {
+ if ((off_t)file->f_pos != offset) {
/* Sanity check */
- printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
- (off_t) file->f_pos, offset);
+ printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
+ (off_t)file->f_pos, offset);
}
end_coredump:
set_fs(fs);
cleanup:
- while(!list_empty(&thread_list)) {
+ while (!list_empty(&thread_list)) {
struct list_head *tmp = thread_list.next;
list_del(tmp);
kfree(list_entry(tmp, struct elf_thread_status, list));