/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
extern void die(const char *,struct pt_regs *,long);
/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out
 */
void bust_spinlocks(int yes)
{
	int loglevel_save = console_loglevel;

	if (yes) {
		oops_in_progress = 1;
		return;
	}
#ifdef CONFIG_VT
	unblank_screen();
#endif
	oops_in_progress = 0;
	/*
	 * OK, the message is on the console.  Now we call printk()
	 * without oops_in_progress set so that printk will give klogd
	 * a poke.  Hold onto your hats...
	 */
	console_loglevel = 15;		/* NMI oopser may have shut the console up */
	printk(" ");
	console_loglevel = loglevel_save;
}
/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long eip = regs->eip;
	unsigned seg = regs->xcs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;
	/* The standard kernel/user address space limit. */
	*eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg;

	/* Unlikely, but must come before segment checks. */
	if (unlikely((regs->eflags & VM_MASK) != 0))
		return eip + (seg << 4);

	/* By far the most common cases. */
	if (likely(seg == __USER_CS || seg == __KERNEL_CS))
		return eip;
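	/*
	 * Anything else is an uncommon selector: the two flat segments above
	 * have base 0, so only the remaining cases need the full descriptor
	 * lookup below.
	 */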
	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
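	/*
	 * LAR fills seg_ar with the descriptor's access-rights word.  The
	 * 0x9800 mask covers the present bit, the S (code/data) bit and the
	 * executable type bit, so the test below rejects any selector that
	 * is not a present, ordinary code segment.
	 */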
	if ((~seg_ar & 0x9800) || eip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned eip > *eip_limit. */
	}
	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		down(&current->mm->context.sem);
#if 1
		/* horrible hack for 4/4 disabled kernels.
		   I'm not quite sure what the TLB flush is good for,
		   it's mindlessly copied from the read_ldt code */
		__flush_tlb_global();
		desc = kmap(current->mm->context.ldt_pages[(seg&~7)/PAGE_SIZE]);
		desc = (void *)desc + ((seg & ~7) % PAGE_SIZE);
#else
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
#endif
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)&cpu_gdt_table[get_cpu()];
		desc = (void *)desc + (seg & ~7);
	}
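	/*
	 * desc now points at the raw 8-byte descriptor: base[15:0] sits in
	 * bits 31:16 of the first word, base[23:16] in bits 7:0 and
	 * base[31:24] in bits 31:24 of the second word.
	 */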
	/* Decode the code segment base from the descriptor */
	base = (desc[0] >> 16) |
	       ((desc[1] & 0xff) << 16) |
	       (desc[1] & 0xff000000);
	if (seg & (1<<2)) {
		kunmap((void *)((unsigned long)desc & PAGE_MASK));
		up(&current->mm->context.sem);
	} else
		put_cpu();
	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return eip + base;
}
/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned long instr = get_segment_eip (regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;
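	/*
	 * Walk at most 15 bytes (the maximum x86 instruction length),
	 * skipping over legal prefix bytes, and check whether the opcode is
	 * one of the prefetch instructions (0F 0D or 0F 18).
	 */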
	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > limit)
			break;
		if (__get_user(opcode, (unsigned char *) instr))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (instr > limit)
				break;
			if (__get_user(opcode, (unsigned char *) instr))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
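/*
 * Only AMD CPUs from the Athlon (family 6) onwards are known to raise
 * these spurious faults, so skip the instruction decode everywhere else.
 */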
static inline int is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6))
		return __is_prefetch(regs, addr);
	return 0;
}
asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	unsigned long page;
	int write;
	siginfo_t info;

	/* get the address */
	__asm__("movl %%cr2,%0":"=r" (address));
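	/* %cr2 is set by the hardware to the linear address that faulted. */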
	/* It's safe to allow irq's after cr2 has been saved */
	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	tsk = current;

	info.si_code = SEGV_MAPERR;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 1) == 0.
	 */
	/*
	 * On 4/4 all kernel faults are either bugs, vmalloc or prefetch
	 */
	/* If it's vm86 fall through */
	if (unlikely(!(regs->eflags & VM_MASK) && ((regs->xcs & 3) == 0))) {
		if (error_code & 5)
			goto bad_area_nosemaphore;
		goto vmalloc_fault;
	}

	if (unlikely(address >= TASK_SIZE)) {
		if (!(error_code & 5))
			goto vmalloc_fault;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}
	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
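	/*
	 * find_vma() returns the first vma whose end lies above the address;
	 * it may still start above it, hence the vm_start check below.
	 */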
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * accessing the stack below %esp is always a bug.
		 * The "+ 32" is there due to some instructions (like
		 * pusha) doing post-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if (address + 32 < regs->esp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
#ifdef TEST_VERIFY_AREA
			if (regs->cs == KERNEL_CS)
				printk("WP fault at %08lx\n", regs->eip);
#endif
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}
 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
		case VM_FAULT_MINOR:
			tsk->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			tsk->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			goto do_sigbus;
		case VM_FAULT_OOM:
			goto out_of_memory;
		default:
			BUG();
	}
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address))
			return;

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif
no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	bust_spinlocks(1);
#ifdef CONFIG_X86_PAE
	{
		pgd_t *pgd;
		pmd_t *pmd;

		pgd = init_mm.pgd + pgd_index(address);
		if (pgd_present(*pgd)) {
			pmd = pmd_offset(pgd, address);
			if (pmd_val(*pmd) & _PAGE_NX)
				printk(KERN_CRIT "kernel tried to access NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
		}
	}
#endif
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n",address);
	printk(" printing eip:\n");
	printk("%08lx\n", regs->eip);
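	/*
	 * Dump the page directory entry: %cr3 holds the physical base of the
	 * page directory and bits 31:22 of the faulting address select the
	 * PDE (two-level, non-PAE layout).
	 */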
	asm("movl %%cr3,%0":"=r" (page));
	page = ((unsigned long *) __va(page))[address >> 22];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	/*
	 * We must not directly access the pte in the highpte
	 * case, the page table might be allocated in highmem.
	 * And lets rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
#ifndef CONFIG_HIGHPTE
	if (page & 1) {
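		/*
		 * The PDE is present: strip its flag bits with PAGE_MASK to
		 * get the page table's physical address, then use bits 21:12
		 * of the faulting address (0x003ff000) as the PTE index.
		 */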
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
#endif
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (tsk->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		asm("movl %%cr3,%0":"=r" (pgd));
		pgd = index + (pgd_t *)__va(pgd);
		pgd_k = init_mm.pgd + index;
		if (!pgd_present(*pgd_k))
			goto no_context;

		/*
		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
		 * and redundant with the set_pmd() on non-PAE.
		 */

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}