-/* RED-PEN: This knows too much about high level VM */
-/* Alternative would be to generate a vma with appropriate backing options
- and let it be handled by generic VM */
-int map_syscall32(struct mm_struct *mm, unsigned long address)
-{
-	pte_t *pte;
-	pmd_t *pmd;
-	int err = 0;
-
-	down_read(&mm->mmap_sem);
-	spin_lock(&mm->page_table_lock);
-	pmd = pmd_alloc(mm, pgd_offset(mm, address), address);
-	if (pmd && (pte = pte_alloc_map(mm, pmd, address)) != NULL) {
-		if (pte_none(*pte)) {
-			set_pte(pte,
-				mk_pte(virt_to_page(syscall32_page),
-				       PAGE_KERNEL_VSYSCALL));
-		}
-		/* Flush only the local CPU. Other CPUs taking a fault
-		   will just end up here again */
-		__flush_tlb_one(address);
-	} else
-		err = -ENOMEM;
-	spin_unlock(&mm->page_table_lock);
-	up_read(&mm->mmap_sem);
-	return err;
-}
+static struct page *
+syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
+{
+	struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
+	get_page(p);
+	return p;
+}
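For context, this handler is driven by the generic fault path: in kernels of this era, do_no_page() in mm/memory.c calls ->nopage for the faulting address and builds a PTE from whatever page comes back, so the reference taken by get_page() above is handed to the new mapping. A simplified sketch of that consumer (abbreviated, not the exact source):

	struct page *new_page;
	int ret;

	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
	if (new_page == NOPAGE_SIGBUS)		/* NULL: no page, raise SIGBUS */
		return VM_FAULT_SIGBUS;
	if (new_page == NOPAGE_OOM)
		return VM_FAULT_OOM;
	/* ... generic code then installs mk_pte(new_page, vma->vm_page_prot) ... */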
+
+/* Prevent VMA merging */
+static void syscall32_vma_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct syscall32_vm_ops = {
+	.close = syscall32_vma_close,
+	.nopage = syscall32_nopage,
+};
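The empty ->close handler is what actually prevents merging: is_mergeable_vma() in mm/mmap.c refuses to merge any VMA whose operations include a close method, so this stays a distinct one-page mapping. Roughly (a sketch of the check, not the exact source):

	static inline int is_mergeable_vma(struct vm_area_struct *vma,
					   struct file *file,
					   unsigned long vm_flags)
	{
		if (vma->vm_flags != vm_flags)
			return 0;
		if (vma->vm_file != file)
			return 0;
		if (vma->vm_ops && vma->vm_ops->close)
			return 0;	/* the vsyscall VMA always trips this */
		return 1;
	}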
+
+struct linux_binprm;
+
+/* Setup a VMA at program startup for the vsyscall page */
+int syscall32_setup_pages(struct linux_binprm *bprm, int exstack,
+			  unsigned long start_code,
+			  unsigned long interp_map_address)
+{
+	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	int ret;
+
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	if (!vma)
+		return -ENOMEM;
+
+	memset(vma, 0, sizeof(struct vm_area_struct));
+	/* Could randomize here */
+	vma->vm_start = VSYSCALL32_BASE;
+	vma->vm_end = VSYSCALL32_END;
+	/* MAYWRITE to allow gdb to COW and set breakpoints */
+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+	/*
+	 * Make sure the vDSO gets into every core dump.
+	 * Dumping its contents makes post-mortem fully interpretable later
+	 * without matching up the same kernel and hardware config to see
+	 * what PC values meant.
+	 */
+	vma->vm_flags |= VM_ALWAYSDUMP;
+	vma->vm_flags |= mm->def_flags;
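+	/* protection_map[] is indexed by the VM_READ|VM_WRITE|VM_EXEC|
+	 * VM_SHARED bits; masking with 7 drops VM_SHARED, i.e. it picks
+	 * the private-mapping protection for the flags set above. */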
+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_ops = &syscall32_vm_ops;
+	vma->vm_mm = mm;
+
+	down_write(&mm->mmap_sem);
+	if ((ret = insert_vm_struct(mm, vma))) {
+		up_write(&mm->mmap_sem);
+		kmem_cache_free(vm_area_cachep, vma);
+		return ret;
+	}
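+	/* vx_vmpages_add() wraps the mm->total_vm += npages accounting
+	 * (a Linux-VServer helper in trees carrying that patch). */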
+	vx_vmpages_add(mm, npages);
+	up_write(&mm->mmap_sem);
+	return 0;
+}
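This function serves as the arch's arch_setup_additional_pages() hook: with ARCH_HAS_SETUP_ADDITIONAL_PAGES defined, the ELF loader calls it during execve() once the new mm exists. Schematically (simplified; the extra start_code/interp_map_address parameters above look like a tree-specific extension of the stock two-argument hook):

	/* Sketch of the call site in fs/binfmt_elf.c's load_elf_binary() */
	#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
		retval = arch_setup_additional_pages(bprm, executable_stack);
		if (retval < 0)
			goto out;
	#endif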
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_start == VSYSCALL32_BASE &&
+	    vma->vm_mm && vma->vm_mm->task_size == IA32_PAGE_OFFSET)
+		return "[vdso]";
+	return NULL;
+}
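arch_vma_name() is the hook consulted by fs/proc/task_mmu.c when printing /proc/<pid>/maps: a VMA with no backing file falls through to it, so for 32-bit tasks this mapping is labelled rather than shown anonymous. A sketch of that consumer:

	const char *name = arch_vma_name(vma);
	if (name)
		seq_puts(m, name);	/* prints "[vdso]" for this VMA */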