/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/kdebug.h>
extern void die(const char *,struct pt_regs *,long);

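/*
 * Per-CPU pointer to the top-level page directory currently loaded on
 * each processor; the vmalloc fault path and the oops dump below read
 * it instead of going to %cr3 directly.
 */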
DEFINE_PER_CPU(pgd_t *, cur_pgd);
/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out
 */
void bust_spinlocks(int yes)
{
        int loglevel_save = console_loglevel;

        if (yes) {
                oops_in_progress = 1;
                return;
        }
#ifdef CONFIG_VT
        unblank_screen();
#endif
        oops_in_progress = 0;
        /*
         * OK, the message is on the console.  Now we call printk()
         * without oops_in_progress set so that printk will give klogd
         * a poke.  Hold onto your hats...
         */
        console_loglevel = 15;          /* NMI oopser may have shut the console up */
        printk(" ");
        console_loglevel = loglevel_save;
}
/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
                                            unsigned long *eip_limit)
{
        unsigned long eip = regs->eip;
        unsigned seg = regs->xcs & 0xffff;
        u32 seg_ar, seg_limit, base, *desc;

        /* The standard kernel/user address space limit. */
        *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;

        /* Unlikely, but must come before segment checks. */
        if (unlikely((regs->eflags & VM_MASK) != 0))
                return eip + (seg << 4);

        /* By far the most common cases. */
        if (likely(seg == __USER_CS || seg == __KERNEL_CS))
                return eip;

        /* Check the segment exists, is within the current LDT/GDT size,
           that kernel/user (ring 0..3) has the appropriate privilege,
           that it's a code segment, and get the limit. */
        __asm__ ("larl %3,%0; lsll %3,%1"
                 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
        if ((~seg_ar & 0x9800) || eip > seg_limit) {
                *eip_limit = 0;
                return 1;        /* So that returned eip > *eip_limit. */
        }

        /* Get the GDT/LDT descriptor base.
           When you look for races in this code remember that
           LDT and other horrors are only used in user space. */
        if (seg & (1<<2)) {
                /* Must lock the LDT while reading it. */
                down(&current->mm->context.sem);
                desc = current->mm->context.ldt;
                desc = (void *)desc + (seg & ~7);
        } else {
                /* Must disable preemption while reading the GDT. */
                desc = (u32 *)get_cpu_gdt_table(get_cpu());
                desc = (void *)desc + (seg & ~7);
        }

        /* Decode the code segment base from the descriptor */
        base = get_desc_base((unsigned long *)desc);

        if (seg & (1<<2))
                up(&current->mm->context.sem);
        else
                put_cpu();
        /* Adjust EIP and segment limit, and clamp at the kernel limit.
           It's legitimate for segments to wrap at 0xffffffff. */
        seg_limit += base;
        if (seg_limit < *eip_limit && seg_limit >= base)
                *eip_limit = seg_limit;
        return eip + base;
}
/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
        unsigned long limit;
        unsigned long instr = get_segment_eip(regs, &limit);
        int scan_more = 1;
        int prefetch = 0;
        int i;
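        /*
         * Scan at most 15 bytes, the architectural maximum length of a
         * single x86 instruction, looking for prefix bytes followed by
         * a prefetch opcode.
         */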
        for (i = 0; scan_more && i < 15; i++) {
                unsigned char opcode;
                unsigned char instr_hi;
                unsigned char instr_lo;

                if (instr > limit)
                        break;
                if (__get_user(opcode, (unsigned char *) instr))
                        break;

                instr_hi = opcode & 0xf0;
                instr_lo = opcode & 0x0f;
                instr++;

                switch (instr_hi) {
                case 0x20:
                case 0x30:
                        /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
                        scan_more = ((instr_lo & 7) == 0x6);
                        break;

                case 0x60:
                        /* 0x64 thru 0x67 are valid prefixes in all modes. */
                        scan_more = (instr_lo & 0xC) == 0x4;
                        break;
                case 0xF0:
                        /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
                        scan_more = !instr_lo || (instr_lo>>1) == 1;
                        break;
                case 0x00:
                        /* Prefetch instruction is 0x0F0D or 0x0F18 */
                        scan_more = 0;
                        if (instr > limit)
                                break;
                        if (__get_user(opcode, (unsigned char *) instr))
                                break;
                        prefetch = (instr_lo == 0xF) &&
                                (opcode == 0x0D || opcode == 0x18);
                        break;
                default:
                        scan_more = 0;
                        break;
                }
        }
        return prefetch;
}
static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                              unsigned long error_code)
{
        if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
                     boot_cpu_data.x86 >= 6)) {
                /* Catch an obscure case of prefetch inside an NX page. */
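                /* error_code bit 4 (value 16) is the instruction-fetch
                   bit reported when NX is enabled; a genuine instruction
                   fetch from an NX page is never a prefetch we should
                   ignore. */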
                if (nx_enabled && (error_code & 16))
                        return 0;
                return __is_prefetch(regs, addr);
        }
        return 0;
}
fastcall void do_invalid_op(struct pt_regs *, unsigned long);
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *      bit 2 == 0 means kernel, 1 means user-mode
 */
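/*
 * For example, error_code == 6 (bits 1 and 2 set) is a user-mode write
 * to a not-present page, the common demand-allocation/COW case, while
 * error_code == 5 (bits 0 and 2 set) is a user-mode read denied by the
 * page protections.
 */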
fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code,
                            unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long page;
        int write;
        siginfo_t info;
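        /*
         * This tree appears to run the kernel outside ring 0 (note the
         * machine_to_phys() use further down), so the hardware-reported
         * "user mode" bit of error_code cannot be trusted as-is; it is
         * recomputed below from the saved CS RPL ((xcs & 2) << 1 yields
         * bit 2) and from the VM86 flag.
         */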
        /* Set the "privileged fault" bit to something sane. */
        error_code &= ~4;
        error_code |= (regs->xcs & 2) << 1;
        if (regs->eflags & X86_EFLAGS_VM)
                error_code |= 4;
        if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
                       SIGSEGV) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after cr2 has been saved */
        if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
                local_irq_enable();

        tsk = current;

        info.si_code = SEGV_MAPERR;
        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 1) == 0.
         */
        if (unlikely(address >= TASK_SIZE)) {
                if (!(error_code & 5))
                        goto vmalloc_fault;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock.
                 */
                goto bad_area_nosemaphore;
        }
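        /* (vmalloc_fault, at the bottom of this function, copies the
           missing kernel entry from the init_mm reference page table.) */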
        mm = tsk->mm;

        /*
         * If we're in an interrupt, have no user context or are running in an
         * atomic region then we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space, if we cannot we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((error_code & 4) == 0 &&
                    !search_exception_tables(regs->eip))
                        goto bad_area_nosemaphore;
                down_read(&mm->mmap_sem);
        }
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (error_code & 4) {
                /*
                 * accessing the stack below %esp is always a bug.
                 * The "+ 32" is there due to some instructions (like
                 * pusha) doing post-decrement on the stack and that
                 * doesn't show up until later..
                 */
                if (address + 32 < regs->esp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;
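        /* expand_stack() failing here means the VM_GROWSDOWN vma could
           not be grown down to 'address' (stack rlimit hit or out of
           memory), so the access is treated as a bad area after all. */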
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        info.si_code = SEGV_ACCERR;
        write = 0;
        switch (error_code & 3) {
                default:        /* 3: write, present */
#ifdef TEST_VERIFY_AREA
                        if (regs->cs == KERNEL_CS)
                                printk("WP fault at %08lx\n", regs->eip);
#endif
                        /* fall through */
                case 2:         /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                        write++;
                        break;
                case 1:         /* read, present */
                        goto bad_area;
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                                goto bad_area;
        }
 survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        switch (handle_mm_fault(mm, vma, address, write)) {
                case VM_FAULT_MINOR:
                        tsk->min_flt++;
                        break;
                case VM_FAULT_MAJOR:
                        tsk->maj_flt++;
                        break;
                case VM_FAULT_SIGBUS:
                        goto do_sigbus;
                case VM_FAULT_OOM:
                        goto out_of_memory;
                default:
                        BUG();
        }
        /*
         * Did it hit the DOS screen memory VA from vm86 mode?
         */
        if (regs->eflags & VM_MASK) {
                unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
                if (bit < 32)
                        tsk->thread.screen_bitmap |= 1 << bit;
        }
        up_read(&mm->mmap_sem);
        return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & 4) {
                /*
                 * Valid to do another page fault here because this one came
                 * from user space.
                 */
                if (is_prefetch(regs, address, error_code))
                        return;

                tsk->thread.cr2 = address;
                /* Kernel addresses are always protection faults */
                tsk->thread.error_code = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no = 14;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void __user *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }
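        /*
         * Background for the workaround below: the F00F erratum makes the
         * byte sequence F0 0F C7 C8 ("lock cmpxchg8b %eax") hang affected
         * Pentiums.  The kernel maps the IDT read-only on such CPUs, which
         * turns the lockup into a page fault on the IDT; each IDT entry is
         * 8 bytes, so a fault 6 entries in means vector 6 (invalid opcode),
         * and that handler is invoked by hand.
         */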
#ifdef CONFIG_X86_F00F_BUG
        /*
         * Pentium F0 0F C7 C8 bug workaround.
         */
        if (boot_cpu_data.f00f_bug) {
                unsigned long nr;

                nr = (address - idt_descr.address) >> 3;

                if (nr == 6) {
                        do_invalid_op(regs, 0);
                        return;
                }
        }
#endif
no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Valid to do another page fault here, because if this fault
         * had been triggered by is_prefetch fixup_exception would have
         * handled it.
         */
        if (is_prefetch(regs, address, error_code))
                return;
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        bust_spinlocks(1);

#ifdef CONFIG_X86_PAE
        if (error_code & 16) {
                pte_t *pte = lookup_address(address);

                if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
                        printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
        }
#endif
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n",address);
        printk(KERN_ALERT " printing eip:\n");
        printk("%08lx\n", regs->eip);
        page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
                [address >> 22];
        printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
               machine_to_phys(page));
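        /* The *pde dump prints both the machine address ("ma") and the
           pseudo-physical address ("pa") that machine_to_phys() recovers;
           the top-level slot is indexed by address >> 22, i.e. one entry
           per 4MB of virtual space in the non-PAE layout. */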
        /*
         * We must not directly access the pte in the highpte
         * case, the page table might be allocated in highmem.
         * And lets rather not kmap-atomic the pte, just in case
         * it's allocated already.
         */
#ifndef CONFIG_HIGHPTE
        if (page & 1) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = machine_to_phys(page);
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
                       machine_to_phys(page));
        }
#endif
        show_trace(NULL, (unsigned long *)&regs[1]);
        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (tsk->pid == 1) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (error_code & 4)
                do_exit(SIGKILL);
        goto no_context;
do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        if (!(error_code & 4))
                goto no_context;

        /* User space => ok to do another page fault */
        if (is_prefetch(regs, address, error_code))
                return;

        tsk->thread.cr2 = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 14;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGBUS, &info, tsk);
        return;
vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int index = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd = index + per_cpu(cur_pgd, smp_processor_id());
                pgd_k = init_mm.pgd + index;

                if (!pgd_present(*pgd_k))
                        goto no_context;

                /*
                 * set_pgd(pgd, *pgd_k); here would be useless on PAE
                 * and redundant with the set_pmd() on non-PAE. As would
                 * set_pud.
                 */

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;
                return;
        }
}
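/*
 * Note that only the pmd entry is copied above: once it points at the
 * kernel's page table, the individual ptes are shared with init_mm, so
 * the final pte_present() check is enough to know the fault is resolved.
 */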