fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] / arch/i386/mm/fault.c
index 0c77e0b..88c55fe 100644
 #include <linux/vt_kern.h>             /* For unblank_screen() */
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/hardirq.h>
 #include <asm/desc.h>
+#include <asm/kdebug.h>
+#include <asm/segment.h>
 
 extern void die(const char *,struct pt_regs *,long);
 
+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+       vmalloc_sync_all();
+       return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+                       struct pt_regs *regs, long err, int trap, int sig)
+{
+       struct die_args args = {
+               .regs = regs,
+               .str = str,
+               .err = err,
+               .trapnr = trap,
+               .signr = sig
+       };
+       return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+
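/*
 * A minimal sketch (not part of this patch; names are hypothetical) of how a
 * client such as kprobes might hook the new notifier chain.  Returning
 * NOTIFY_STOP from the callback makes do_page_fault() bail out early, as the
 * hunks further down show; NOTIFY_DONE lets normal fault handling continue.
 */
static int example_pf_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		printk(KERN_DEBUG "fault: trap %d err %ld eip %08lx\n",
		       args->trapnr, args->err, args->regs->eip);
	return NOTIFY_DONE;
}

static struct notifier_block example_pf_nb = {
	.notifier_call = example_pf_notify,
};

/*
 * A client would pass &example_pf_nb to register_page_fault_notifier() at
 * init time.  Note that registration calls vmalloc_sync_all() first, so a
 * handler living in module (vmalloc) space is mapped in every page table
 * before notifications start arriving.
 */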
 /*
  * Unlock any spinlocks which will prevent us from getting the
  * message out 
@@ -77,15 +106,18 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
        unsigned seg = regs->xcs & 0xffff;
        u32 seg_ar, seg_limit, base, *desc;
 
-       /* The standard kernel/user address space limit. */
-       *eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg;
-
        /* Unlikely, but must come before segment checks. */
-       if (unlikely((regs->eflags & VM_MASK) != 0))
-               return eip + (seg << 4);
+       if (unlikely(regs->eflags & VM_MASK)) {
+               base = seg << 4;
+               *eip_limit = base + 0xffff;
+               return base + (eip & 0xffff);
+       }
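/*
 * To make the vm86 arithmetic above concrete (example values only):
 * cs = 0xb800 gives base = 0xb800 << 4 = 0xb8000 and
 * *eip_limit = 0xb8000 + 0xffff = 0xc7fff; an eip of 0x10123 is masked to
 * its low 16 bits, so the linear eip returned is 0xb8000 + 0x0123 = 0xb8123.
 */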
+
+       /* The standard kernel/user address space limit. */
+       *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
        
        /* By far the most common cases. */
-       if (likely(seg == __USER_CS || seg == __KERNEL_CS))
+       if (likely(SEGMENT_IS_FLAT_CODE(seg)))
                return eip;
 
        /* Check the segment exists, is within the current LDT/GDT size,
@@ -108,14 +140,12 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
                desc = (void *)desc + (seg & ~7);
        } else {
                /* Must disable preemption while reading the GDT. */
-               desc = (u32 *)&cpu_gdt_table[get_cpu()];
+               desc = (u32 *)get_cpu_gdt_table(get_cpu());
                desc = (void *)desc + (seg & ~7);
        }
 
        /* Decode the code segment base from the descriptor */
-       base =   (desc[0] >> 16) |
-               ((desc[1] & 0xff) << 16) |
-                (desc[1] & 0xff000000);
+       base = get_desc_base((unsigned long *)desc);
 
        if (seg & (1<<2)) { 
                up(&current->mm->context.sem);
@@ -137,7 +167,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
 { 
        unsigned long limit;
-       unsigned long instr = get_segment_eip (regs, &limit);
+       unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
        int scan_more = 1;
        int prefetch = 0; 
        int i;
@@ -147,9 +177,9 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
                unsigned char instr_hi;
                unsigned char instr_lo;
 
-               if (instr > limit)
+               if (instr > (unsigned char *)limit)
                        break;
-               if (__get_user(opcode, (unsigned char *) instr))
+               if (probe_kernel_address(instr, opcode))
                        break; 
 
                instr_hi = opcode & 0xf0; 
@@ -174,9 +204,9 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
                case 0x00:
                        /* Prefetch instruction is 0x0F0D or 0x0F18 */
                        scan_more = 0;
-                       if (instr > limit)
+                       if (instr > (unsigned char *)limit)
                                break;
-                       if (__get_user(opcode, (unsigned char *) instr)) 
+                       if (probe_kernel_address(instr, opcode))
                                break;
                        prefetch = (instr_lo == 0xF) &&
                                (opcode == 0x0D || opcode == 0x18);
@@ -189,15 +219,94 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
        return prefetch;
 }
 
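/*
 * Why probe_kernel_address() replaces __get_user() here: the eip being
 * decoded may itself point at an unmapped kernel address, and
 * probe_kernel_address() (from the newly included <linux/uaccess.h>) does
 * the read with page faults disabled, returning nonzero instead of
 * recursing into the fault handler.  The helper below is an illustrative
 * sketch, not part of the patch.
 */
static int peek_kernel_byte(const void *addr, unsigned char *out)
{
	unsigned char byte;

	if (probe_kernel_address((unsigned char *)addr, byte))
		return -EFAULT;		/* not mapped / not readable */
	*out = byte;
	return 0;
}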
-static inline int is_prefetch(struct pt_regs *regs, unsigned long addr)
+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+                             unsigned long error_code)
 {
        if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-                    boot_cpu_data.x86 >= 6))
+                    boot_cpu_data.x86 >= 6)) {
+               /* Catch an obscure case of prefetch inside an NX page. */
+               if (nx_enabled && (error_code & 16))
+                       return 0;
                return __is_prefetch(regs, addr);
+       }
        return 0;
 } 
 
-asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
+static noinline void force_sig_info_fault(int si_signo, int si_code,
+       unsigned long address, struct task_struct *tsk)
+{
+       siginfo_t info;
+
+       info.si_signo = si_signo;
+       info.si_errno = 0;
+       info.si_code = si_code;
+       info.si_addr = (void __user *)address;
+       force_sig_info(si_signo, &info, tsk);
+}
+
+fastcall void do_invalid_op(struct pt_regs *, unsigned long);
+
+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+{
+       unsigned index = pgd_index(address);
+       pgd_t *pgd_k;
+       pud_t *pud, *pud_k;
+       pmd_t *pmd, *pmd_k;
+
+       pgd += index;
+       pgd_k = init_mm.pgd + index;
+
+       if (!pgd_present(*pgd_k))
+               return NULL;
+
+       /*
+        * set_pgd(pgd, *pgd_k); here would be useless on PAE
+        * and redundant with the set_pmd() on non-PAE. As would
+        * set_pud.
+        */
+
+       pud = pud_offset(pgd, address);
+       pud_k = pud_offset(pgd_k, address);
+       if (!pud_present(*pud_k))
+               return NULL;
+
+       pmd = pmd_offset(pud, address);
+       pmd_k = pmd_offset(pud_k, address);
+       if (!pmd_present(*pmd_k))
+               return NULL;
+       if (!pmd_present(*pmd))
+               set_pmd(pmd, *pmd_k);
+       else
+               BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+       return pmd_k;
+}
+
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ *
+ * This assumes no large pages in there.
+ */
+static inline int vmalloc_fault(unsigned long address)
+{
+       unsigned long pgd_paddr;
+       pmd_t *pmd_k;
+       pte_t *pte_k;
+       /*
+        * Synchronize this task's top level page-table
+        * with the 'reference' page table.
+        *
+        * Do _not_ use "current" here. We might be inside
+        * an interrupt in the middle of a task switch..
+        */
+       pgd_paddr = read_cr3();
+       pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+       if (!pmd_k)
+               return -1;
+       pte_k = pte_offset_kernel(pmd_k, address);
+       if (!pte_present(*pte_k))
+               return -1;
+       return 0;
+}
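/*
 * Worked example of the lazy sync (illustrative numbers): on a non-PAE
 * kernel, pgd_index(0xf8801000) = 0xf8801000 >> 22 = 0x3e2, so copying that
 * single entry from init_mm.pgd makes the whole 4MB window
 * 0xf8800000-0xf8bfffff visible to the faulting task; both page tables then
 * share one PTE page, so later PTE updates need no further syncing.
 */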
 
 /*
  * This routine handles page faults.  It determines the address,
@@ -208,27 +317,25 @@ asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
  *     bit 0 == 0 means no page found, 1 means protection fault
  *     bit 1 == 0 means read, 1 means write
  *     bit 2 == 0 means kernel, 1 means user-mode
+ *     bit 3 == 1 means use of reserved bit detected
+ *     bit 4 == 1 means fault was an instruction fetch
  */
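/*
 * The masks used throughout the handler follow from this bit layout; a tiny
 * illustrative decoder (this helper does not exist in the kernel):
 */
static void show_pf_error_code(unsigned long error_code)
{
	printk(KERN_DEBUG "pf err %lx: %s %s %s%s%s\n", error_code,
	       (error_code & 4) ? "user" : "kernel",
	       (error_code & 2) ? "write" : "read",
	       (error_code & 1) ? "protection" : "not-present",
	       (error_code & 8) ? ", reserved-bit" : "",
	       (error_code & 16) ? ", instr-fetch" : "");
}
/*
 * Hence the vmalloc shortcut below tests (error_code & 0x0000000d) == 0
 * (kernel mode, no protection or reserved-bit error) and the NX oops path
 * tests error_code & 16.
 */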
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+fastcall void __kprobes do_page_fault(struct pt_regs *regs,
+                                     unsigned long error_code)
 {
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long address;
        unsigned long page;
-       int write;
-       siginfo_t info;
+       int write, si_code;
 
        /* get the address */
-       __asm__("movl %%cr2,%0":"=r" (address));
-
-       /* It's safe to allow irq's after cr2 has been saved */
-       if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
-               local_irq_enable();
+       address = read_cr2();
 
        tsk = current;
 
-       info.si_code = SEGV_MAPERR;
+       si_code = SEGV_MAPERR;
 
        /*
         * We fault-in kernel-space virtual memory on-demand. The
@@ -241,17 +348,29 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
         *
         * This verifies that the fault happens in kernel space
         * (error_code & 4) == 0, and that the fault was not a
-        * protection error (error_code & 1) == 0.
+        * protection error (error_code & 9) == 0.
         */
-       if (unlikely(address >= TASK_SIZE)) { 
-               if (!(error_code & 5))
-                       goto vmalloc_fault;
-               /* 
+       if (unlikely(address >= TASK_SIZE)) {
+               if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
+                       return;
+               if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+                                               SIGSEGV) == NOTIFY_STOP)
+                       return;
+               /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock.
                 */
                goto bad_area_nosemaphore;
-       } 
+       }
+
+       if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+                                       SIGSEGV) == NOTIFY_STOP)
+               return;
+
+       /* It's safe to allow irq's after cr2 has been saved and the vmalloc
+          fault has been handled. */
+       if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+               local_irq_enable();
 
        mm = tsk->mm;
 
@@ -262,7 +381,27 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
-       down_read(&mm->mmap_sem);
+       /* When running in the kernel we expect faults to occur only to
+        * addresses in user space.  All other faults represent errors in the
+        * kernel and should generate an OOPS.  Unfortunately, in the case of an
+        * erroneous fault occurring in a code path which already holds mmap_sem
+        * we will deadlock attempting to validate the fault against the
+        * address space.  Luckily the kernel only validly references user
+        * space from well defined areas of code, which are listed in the
+        * exceptions table.
+        *
+        * As the vast majority of faults will be valid we will only perform
+        * the source reference check when there is a possibility of a deadlock.
+        * Attempt to lock the address space, if we cannot we then validate the
+        * source.  If this is invalid we can skip the address space check,
+        * thus avoiding the deadlock.
+        */
+       if (!down_read_trylock(&mm->mmap_sem)) {
+               if ((error_code & 4) == 0 &&
+                   !search_exception_tables(regs->eip))
+                       goto bad_area_nosemaphore;
+               down_read(&mm->mmap_sem);
+       }
 
        vma = find_vma(mm, address);
        if (!vma)
@@ -273,12 +412,12 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
                goto bad_area;
        if (error_code & 4) {
                /*
-                * accessing the stack below %esp is always a bug.
-                * The "+ 32" is there due to some instructions (like
-                * pusha) doing post-decrement on the stack and that
-                * doesn't show up until later..
+                * Accessing the stack below %esp is always a bug.
+                * The large cushion allows instructions like enter
+                * and pusha to work.  ("enter $65535,$31" pushes
+                * 32 pointers and then decrements %esp by 65535.)
                 */
-               if (address + 32 < regs->esp)
+               if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
                        goto bad_area;
        }
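/*
 * The enlarged cushion matches the comment above: "enter $65535,$31" can
 * push up to 32 frame pointers (32 * sizeof(unsigned long) = 128 bytes on
 * i386) and then drop %esp by 65535, so only accesses more than
 * 65536 + 128 bytes below %esp are rejected as bogus; the old "+ 32" only
 * covered pusha-style post-decrement.
 */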
        if (expand_stack(vma, address))
@@ -288,15 +427,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
  * we can handle it..
  */
 good_area:
-       info.si_code = SEGV_ACCERR;
+       si_code = SEGV_ACCERR;
        write = 0;
        switch (error_code & 3) {
                default:        /* 3: write, present */
-#ifdef TEST_VERIFY_AREA
-                       if (regs->cs == KERNEL_CS)
-                               printk("WP fault at %08lx\n", regs->eip);
-#endif
-                       /* fall through */
+                               /* fall through */
                case 2:         /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
@@ -305,7 +440,7 @@ good_area:
                case 1:         /* read, present */
                        goto bad_area;
                case 0:         /* read, not present */
-                       if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                       if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                                goto bad_area;
        }
 
@@ -355,18 +490,14 @@ bad_area_nosemaphore:
                 * Valid to do another page fault here because this one came 
                 * from user space.
                 */
-               if (is_prefetch(regs, address))
+               if (is_prefetch(regs, address, error_code))
                        return;
 
                tsk->thread.cr2 = address;
                /* Kernel addresses are always protection faults */
                tsk->thread.error_code = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no = 14;
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               /* info.si_code has been set above */
-               info.si_addr = (void *)address;
-               force_sig_info(SIGSEGV, &info, tsk);
+               force_sig_info_fault(SIGSEGV, si_code, address, tsk);
                return;
        }
 
@@ -396,7 +527,7 @@ no_context:
         * had been triggered by is_prefetch fixup_exception would have 
         * handled it.
         */
-       if (is_prefetch(regs, address))
+       if (is_prefetch(regs, address, error_code))
                return;
 
 /*
@@ -406,16 +537,31 @@ no_context:
 
        bust_spinlocks(1);
 
-       if (address < PAGE_SIZE)
-               printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-       else
-               printk(KERN_ALERT "Unable to handle kernel paging request");
-       printk(" at virtual address %08lx\n",address);
-       printk(KERN_ALERT " printing eip:\n");
-       printk("%08lx\n", regs->eip);
-       asm("movl %%cr3,%0":"=r" (page));
+       if (oops_may_print()) {
+       #ifdef CONFIG_X86_PAE
+               if (error_code & 16) {
+                       pte_t *pte = lookup_address(address);
+
+                       if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+                               printk(KERN_CRIT "kernel tried to execute "
+                                       "NX-protected page - exploit attempt? "
+                                       "(uid: %d)\n", current->uid);
+               }
+       #endif
+               if (address < PAGE_SIZE)
+                       printk(KERN_ALERT "BUG: unable to handle kernel NULL "
+                                       "pointer dereference");
+               else
+                       printk(KERN_ALERT "BUG: unable to handle kernel paging"
+                                       " request");
+               printk(" at virtual address %08lx\n",address);
+               printk(KERN_ALERT " printing eip:\n");
+               printk("%08lx\n", regs->eip);
+       }
+       page = read_cr3();
        page = ((unsigned long *) __va(page))[address >> 22];
-       printk(KERN_ALERT "*pde = %08lx\n", page);
+       if (oops_may_print())
+               printk(KERN_ALERT "*pde = %08lx\n", page);
        /*
         * We must not directly access the pte in the highpte
         * case, the page table might be allocated in highmem.
@@ -423,13 +569,16 @@ no_context:
         * it's allocated already.
         */
 #ifndef CONFIG_HIGHPTE
-       if (page & 1) {
+       if ((page & 1) && oops_may_print()) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }
 #endif
+       tsk->thread.cr2 = address;
+       tsk->thread.trap_no = 14;
+       tsk->thread.error_code = error_code;
        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);
@@ -440,12 +589,13 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (tsk->pid == 1) {
+       if (is_init(tsk)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
-       printk("VM: killing process %s\n", tsk->comm);
+       printk("VM: killing process %s(%d:#%u)\n",
+               tsk->comm, tsk->pid, tsk->xid);
        if (error_code & 4)
                do_exit(SIGKILL);
        goto no_context;
@@ -458,54 +608,48 @@ do_sigbus:
                goto no_context;
 
        /* User space => ok to do another page fault */
-       if (is_prefetch(regs, address))
+       if (is_prefetch(regs, address, error_code))
                return;
 
        tsk->thread.cr2 = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 14;
-       info.si_signo = SIGBUS;
-       info.si_errno = 0;
-       info.si_code = BUS_ADRERR;
-       info.si_addr = (void *)address;
-       force_sig_info(SIGBUS, &info, tsk);
-       return;
-
-vmalloc_fault:
-       {
-               /*
-                * Synchronize this task's top level page-table
-                * with the 'reference' page table.
-                *
-                * Do _not_ use "tsk" here. We might be inside
-                * an interrupt in the middle of a task switch..
-                */
-               int index = pgd_index(address);
-               pgd_t *pgd, *pgd_k;
-               pmd_t *pmd, *pmd_k;
-               pte_t *pte_k;
-
-               asm("movl %%cr3,%0":"=r" (pgd));
-               pgd = index + (pgd_t *)__va(pgd);
-               pgd_k = init_mm.pgd + index;
-
-               if (!pgd_present(*pgd_k))
-                       goto no_context;
-
-               /*
-                * set_pgd(pgd, *pgd_k); here would be useless on PAE
-                * and redundant with the set_pmd() on non-PAE.
-                */
+       force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
+}
 
-               pmd = pmd_offset(pgd, address);
-               pmd_k = pmd_offset(pgd_k, address);
-               if (!pmd_present(*pmd_k))
-                       goto no_context;
-               set_pmd(pmd, *pmd_k);
+#ifndef CONFIG_X86_PAE
+void vmalloc_sync_all(void)
+{
+       /*
+        * Note that races in the updates of insync and start aren't
+        * problematic: insync can only get set bits added, and updates to
+        * start are only improving performance (without affecting correctness
+        * if undone).
+        */
+       static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+       static unsigned long start = TASK_SIZE;
+       unsigned long address;
 
-               pte_k = pte_offset_kernel(pmd_k, address);
-               if (!pte_present(*pte_k))
-                       goto no_context;
-               return;
+       BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
+       for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+               if (!test_bit(pgd_index(address), insync)) {
+                       unsigned long flags;
+                       struct page *page;
+
+                       spin_lock_irqsave(&pgd_lock, flags);
+                       for (page = pgd_list; page; page =
+                                       (struct page *)page->index)
+                               if (!vmalloc_sync_one(page_address(page),
+                                                               address)) {
+                                       BUG_ON(page != pgd_list);
+                                       break;
+                               }
+                       spin_unlock_irqrestore(&pgd_lock, flags);
+                       if (!page)
+                               set_bit(pgd_index(address), insync);
+               }
+               if (address == start && test_bit(pgd_index(address), insync))
+                       start = address + PGDIR_SIZE;
        }
 }
+#endif