diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index 64b7a59..399ff49 100644
@@ -28,32 +28,51 @@ extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
 extern int sysctl_vsyscall32;
 
 char *syscall32_page; 
-static int use_sysenter __initdata = -1;
+static int use_sysenter = -1;
 
-/* RED-PEN: This knows too much about high level VM */ 
-/* Alternative would be to generate a vma with appropriate backing options
-   and let it be handled by generic VM */ 
-int map_syscall32(struct mm_struct *mm, unsigned long address) 
+/*
+ * Map the 32bit vsyscall page on demand.
+ *
+ * RED-PEN: This knows too much about high level VM.
+ *
+ * Alternative would be to generate a vma with appropriate backing options
+ * and let it be handled by generic VM.
+ */
+int __map_syscall32(struct mm_struct *mm, unsigned long address)
 { 
+       pgd_t *pgd;
+       pud_t *pud;
        pte_t *pte;
        pmd_t *pmd;
-       int err = 0;
+       int err = -ENOMEM;
 
-       down_read(&mm->mmap_sem);
        spin_lock(&mm->page_table_lock); 
-       pmd = pmd_alloc(mm, pgd_offset(mm, address), address); 
-       if (pmd && (pte = pte_alloc_map(mm, pmd, address)) != NULL) { 
-               if (pte_none(*pte)) { 
-                       set_pte(pte, 
-                               mk_pte(virt_to_page(syscall32_page), 
-                                      PAGE_KERNEL_VSYSCALL)); 
+       pgd = pgd_offset(mm, address);
+       pud = pud_alloc(mm, pgd, address);
+       if (pud) {
+               pmd = pmd_alloc(mm, pud, address);
+               if (pmd && (pte = pte_alloc_map(mm, pmd, address)) != NULL) {
+                       if (pte_none(*pte)) {
+                               set_pte(pte,
+                                       mk_pte(virt_to_page(syscall32_page),
+                                              PAGE_KERNEL_VSYSCALL32));
+                       }
+                       /* Flush only the local CPU. Other CPUs taking a fault
+                          will just end up here again.
+                          This is probably not needed, just paranoia. */
+                       __flush_tlb_one(address);
+                       err = 0;
                }
-               /* Flush only the local CPU. Other CPUs taking a fault
-                  will just end up here again */
-               __flush_tlb_one(address); 
-       } else
-               err = -ENOMEM; 
+       }
        spin_unlock(&mm->page_table_lock);
+       return err;
+}
+
+int map_syscall32(struct mm_struct *mm, unsigned long address)
+{
+       int err;
+       down_read(&mm->mmap_sem);
+       err = __map_syscall32(mm, address);
        up_read(&mm->mmap_sem);
        return err;
 }
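
The hunk above splits the mapping in two: __map_syscall32() expects the
caller to already hold mmap_sem for read, while map_syscall32() stays the
self-locking entry point. As a minimal sketch of why that split is useful,
here is how a page-fault path that already holds mmap_sem could populate
the 32-bit vsyscall page on demand. VSYSCALL32_BASE follows the asm-x86_64
headers of this era; maybe_map_vsyscall32() and its dispatch logic are
hypothetical, not the kernel's actual do_page_fault().

/*
 * Illustrative sketch only.  Returns 1 if the fault was for the 32-bit
 * vsyscall page and has been handled, 0 to fall through to normal
 * fault handling.
 */
static int maybe_map_vsyscall32(struct mm_struct *mm, unsigned long address)
{
	if (address < VSYSCALL32_BASE ||
	    address >= VSYSCALL32_BASE + PAGE_SIZE)
		return 0;	/* not the vsyscall page */

	/* The fault path holds mmap_sem for read already, so call the
	   variant that assumes it instead of the map_syscall32() wrapper. */
	return __map_syscall32(mm, address) == 0;
}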
@@ -76,16 +95,17 @@ static int __init init_syscall32(void)
        
 __initcall(init_syscall32); 
 
-void __init syscall32_cpu_init(void)
+/* May not be __init: called during resume */
+void syscall32_cpu_init(void)
 {
        if (use_sysenter < 0)
                use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
 
        /* Load these always in case some future AMD CPU supports
           SYSENTER from compat mode too. */
-       wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-       wrmsr(MSR_IA32_SYSENTER_ESP, 0, 0);
-       wrmsrl(MSR_IA32_SYSENTER_EIP, ia32_sysenter_target);
+       checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+       checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
+       checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 
        wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
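
The MSR writes above move from wrmsr()/wrmsrl() to checking_wrmsrl(), which
reports a faulting write instead of oopsing. That matters here because, per
the comments, the function now also runs outside early boot (during resume)
and the SYSENTER MSRs are loaded unconditionally, even on CPUs whose
compat-mode SYSENTER support is uncertain. A sketch of the idea, assuming
the era's wrmsr_safe() helper; the macro body is a reconstruction for
illustration, not quoted from the tree:

/*
 * Checked 64-bit MSR write: wrmsr_safe() wraps the wrmsr instruction in
 * an exception-table fixup and returns non-zero if the write #GPs,
 * rather than taking the machine down.  (Reconstructed for illustration.)
 */
#define checking_wrmsrl(msr, val) \
	wrmsr_safe((msr), (u32)(val), (u32)((u64)(val) >> 32))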