+/* Offset of the sysenter return point within the vDSO image (a linker-resolved symbol from the vDSO assembly). */
+extern void SYSENTER_RETURN_OFFSET;
+
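+/* Runtime knob: whether to map the vDSO into new processes (typically wired up as a sysctl). */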
+unsigned int vdso_enabled = 1;
+
+/*
+ * This is called from binfmt_elf: we create the special vma for the
+ * vDSO and insert it into the mm's vma tree.
+ */
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+		int executable_stack, unsigned long start_code,
+		unsigned long interp_map_address)
+{
+	struct thread_info *ti = current_thread_info();
+	unsigned long addr = 0, len;
+	unsigned flags = MAP_PRIVATE;
+	int err;
+
+	current->mm->context.vdso = NULL;
+	if (unlikely(!vdso_enabled) || unlikely(!sysenter_pages[0]))
+		return 0;
+
+	/*
+	 * Map the vDSO (it will be randomized):
+	 */
+	down_write(&current->mm->mmap_sem);
+	len = PAGE_SIZE > ELF_EXEC_PAGESIZE ? PAGE_SIZE : ELF_EXEC_PAGESIZE;
+	if (!exec_shield) { /* off; %cs limit off */
+		addr = STACK_TOP; /* minimal interference with anybody */
+		flags = MAP_PRIVATE | MAP_FIXED;
+	} else if (exec_shield & (3 << 2)) { /* vDSO just below .text */
+		addr = ((exec_shield & (2 << 2)) && interp_map_address) ?
+			interp_map_address : start_code;
+		/* leave 1MB for vm86, plus 64K for vm86 himem */
+		if (addr >= 0x110000 + len)
+			addr = (addr & PAGE_MASK) - len;
+		else /* start_code is too low */
+			addr = 0;
+	}
+	addr = get_unmapped_area_prot(NULL, addr, len, 0,
+				      flags, PROT_READ | PROT_EXEC);
+	if (unlikely(addr & ~PAGE_MASK)) {
+		up_write(&current->mm->mmap_sem);
+		return addr;
+	}
+	err = install_special_mapping(current->mm, addr, len,
+				      VM_DONTEXPAND | VM_READ | VM_EXEC |
+				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+				      PAGE_READONLY_EXEC,
+				      sysenter_pages);
+	if (likely(err == 0)) {
+		current->mm->context.vdso = (void *)addr;
+		/* resolve SYSENTER_RETURN relative to where the vDSO landed: */
+		ti->sysenter_return = &SYSENTER_RETURN_OFFSET + addr;
+	}
+	up_write(&current->mm->mmap_sem);
+	return err;
+}
+
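+/*
+ * The vDSO is an ordinary vma now (installed above), so there is no
+ * fixed gate area; the helpers below reflect that.
+ */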
+int in_gate_area_no_task(unsigned long addr)
+{
+	return 0;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+	return 0;
+}
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+	return NULL;
+}
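
For context: the address of the vma installed above is typically exported to userspace through the ELF auxiliary vector. A minimal userspace sketch to observe the mapping, assuming a glibc that provides getauxval() (glibc 2.16+); AT_SYSINFO_EHDR is the auxv tag carrying the vDSO base:

	#include <stdio.h>
	#include <sys/auxv.h>

	int main(void)
	{
		/* Base of the vDSO image the kernel mapped for this process
		 * (0 if no vDSO mapping was set up). */
		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

		if (vdso)
			printf("vDSO mapped at %#lx\n", vdso);
		else
			printf("no vDSO mapping\n");
		return 0;
	}

With the randomized placement above active, two consecutive runs should report different addresses.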