2 * arch/sh/kernel/vsyscall.c
4 * Copyright (C) 2006 Paul Mundt
7 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
14 #include <linux/slab.h>
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/gfp.h>
18 #include <linux/module.h>
19 #include <linux/elf.h>
20 #include <linux/vs_memory.h>
23 * Should the kernel map a VDSO page into processes and pass its
24 * address down to glibc upon exec()?
/* Boot-time/runtime switch; non-zero means map the VDSO page at exec(). */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);
/*
 * Parse the "vdso=" kernel command-line option: any numeric value,
 * with 0 disabling the VDSO mapping.
 */
static int __init vdso_setup(char *s)
	vdso_enabled = simple_strtoul(s, NULL, 0);
__setup("vdso=", vdso_setup);
37 * These symbols are defined by vsyscall.o to mark the bounds
38 * of the ELF DSO images included therein.
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
/* Kernel page holding the syscall (trapa) stub, filled by vsyscall_init(). */
static void *syscall_page;
/*
 * Boot-time setup: allocate the single backing page and copy the
 * trapa-based syscall entry stub (bounded by vsyscall_trapa_start/end,
 * linked in from vsyscall.o) into it.
 */
int __init vsyscall_init(void)
	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);

	/*
	 * XXX: Map this page to a fixmap entry if we get around
	 * to adding the page to ELF core dumps
	 */

	/* Copy the stub into the freshly zeroed page. */
		&vsyscall_trapa_start,
		&vsyscall_trapa_end - &vsyscall_trapa_start);
/*
 * ->nopage handler for the vsyscall VMA: translate a faulting user
 * address inside the VMA into the kernel page backing the stub.
 */
static struct page *syscall_vma_nopage(struct vm_area_struct *vma,
				       unsigned long address, int *type)
	/* Offset of the fault within the VMA (VMA is a single page). */
	unsigned long offset = address - vma->vm_start;

	/* Reject faults outside the VMA bounds. */
	if (address < vma->vm_start || address > vma->vm_end)

	page = virt_to_page(syscall_page + offset);
/*
 * Prevent VMA merging: giving the VMA a ->close op keeps the mm layer
 * from merging it with adjacent VMAs; the body is intentionally empty.
 */
static void syscall_vma_close(struct vm_area_struct *vma)
/* VM operations for the vsyscall page mapping. */
static struct vm_operations_struct syscall_vm_ops = {
	.nopage = syscall_vma_nopage,
	.close = syscall_vma_close,
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm,
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;

	/* Serialize against other address-space modifications. */
	down_write(&mm->mmap_sem);
	/* Let the kernel pick any free PAGE_SIZE-sized slot. */
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);

	vma->vm_start = addr;
	vma->vm_end = addr + PAGE_SIZE;
	/* MAYWRITE to allow gdb to COW and set breakpoints */
	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
	vma->vm_flags |= mm->def_flags;
	/* Derive page protections from the low VM_{READ,WRITE,EXEC} bits. */
	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
	vma->vm_ops = &syscall_vm_ops;

	ret = insert_vm_struct(mm, vma);
		/* Insertion failed: release the unused VMA object. */
		kmem_cache_free(vm_area_cachep, vma);

	/* Record the address so the ELF loader can pass it to userspace. */
	current->mm->context.vdso = (void *)addr;

	up_write(&mm->mmap_sem);
/*
 * Name the vsyscall mapping for /proc/<pid>/maps: match the VMA whose
 * start equals the recorded vdso address (presumably returning "[vdso]"
 * — return value not visible here, confirm against the full file).
 */
const char *arch_vma_name(struct vm_area_struct *vma)
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
/*
 * Gate-area hook; body not visible in this chunk — presumably a stub
 * returning NULL since SH uses a real VMA, not a gate page. Confirm.
 */
struct vm_area_struct *get_gate_vma(struct task_struct *task)
/* Gate-area query; body not visible — presumably always 0 on SH. Confirm. */
int in_gate_area(struct task_struct *task, unsigned long address)
/* Task-less gate-area query; body not visible — presumably 0. Confirm. */
int in_gate_area_no_task(unsigned long address)