diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index cd535c7..1b53da9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -599,6 +599,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	unsigned long address;
 	int write, si_code;
 	int fault;
+	int should_exit_no_context = 0;
 #ifdef CONFIG_X86_64
 	unsigned long flags;
 	int sig;
@@ -886,6 +887,9 @@ no_context:
 	oops_end(flags, regs, SIGKILL);
 #endif
 
+	if (should_exit_no_context)
+		return;
+
 /*
  * We ran out of memory, or some other thing happened to us that made
  * us unable to handle the page fault gracefully.
@@ -911,8 +915,11 @@ do_sigbus:
 	up_read(&mm->mmap_sem);
 
 	/* Kernel mode? Handle exceptions or die */
-	if (!(error_code & PF_USER))
+	if (!(error_code & PF_USER)) {
+		should_exit_no_context = 1;
 		goto no_context;
+	}
+
 #ifdef CONFIG_X86_32
 	/* User space => ok to do another page fault */
 	if (is_prefetch(regs, address, error_code))
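
For illustration, a standalone userspace sketch of the control-flow change in this file: do_sigbus sets a flag before jumping to the shared no_context label, so that label can return instead of falling through into the out-of-memory handling below it. The PF_USER value and the printf calls are stand-ins for the real fixup and signal paths; this is a toy model, not kernel code.

#include <stdio.h>

#define PF_USER 0x4	/* same bit the x86 fault code tests */

static void handle_fault(unsigned long error_code)
{
	int should_exit_no_context = 0;

	goto do_sigbus;		/* pretend the fault path chose SIGBUS */

no_context:
	printf("no_context: kernel-mode fixup/oops path\n");
	if (should_exit_no_context)
		return;		/* the flag keeps us out of the OOM path */
	printf("out_of_memory fall-through path\n");
	return;

do_sigbus:
	if (!(error_code & PF_USER)) {
		should_exit_no_context = 1;
		goto no_context;
	}
	printf("user mode: deliver SIGBUS\n");
}

int main(void)
{
	handle_fault(0);	/* kernel-mode fault: fixup, then return */
	handle_fault(PF_USER);	/* user-mode fault: SIGBUS as before */
	return 0;
}
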
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index d8be92a..0325100 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -205,6 +205,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 	struct file *file = vma->vm_file;
 	int flags = vma->vm_flags;
 	unsigned long ino = 0;
+	unsigned long start;
 	dev_t dev = 0;
 	int len;
 
@@ -214,8 +215,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 		ino = inode->i_ino;
 	}
 
+	/* We don't show the stack guard page in /proc/maps */
+	start = vma->vm_start;
+	if (vma->vm_flags & VM_GROWSDOWN)
+		start += PAGE_SIZE;
+
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
-			vma->vm_start,
+			start,
 			vma->vm_end,
 			flags & VM_READ ? 'r' : '-',
 			flags & VM_WRITE ? 'w' : '-',
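
A userspace sketch of what this hunk changes in the /proc/<pid>/maps output: for a VM_GROWSDOWN mapping the printed start address is bumped by one page, so the guard page never appears in the listing. The vma_sample struct and the address values are invented for the demo; only the start adjustment mirrors the patch.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define VM_GROWSDOWN	0x0100	/* value used by this kernel generation */

struct vma_sample {
	unsigned long vm_start, vm_end, vm_flags;
};

static void show_map_line(const struct vma_sample *vma)
{
	unsigned long start = vma->vm_start;

	/* Same logic as the patch: hide the guard page from the listing */
	if (vma->vm_flags & VM_GROWSDOWN)
		start += PAGE_SIZE;

	printf("%08lx-%08lx %s\n", start, vma->vm_end,
	       (vma->vm_flags & VM_GROWSDOWN) ? "[stack]" : "");
}

int main(void)
{
	struct vma_sample anon  = { 0x08048000, 0x08050000, 0 };
	struct vma_sample stack = { 0xbffeb000, 0xc0000000, VM_GROWSDOWN };

	show_map_line(&anon);
	show_map_line(&stack);	/* prints bffec000-c0000000 [stack] */
	return 0;
}
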
diff --git a/mm/memory.c b/mm/memory.c
index 1c1a375..2638c6f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2405,6 +2405,26 @@ out_nomap:
 }
 
 /*
+ * This is like a special single-page "expand_downwards()",
+ * except we must first make sure that 'address-PAGE_SIZE'
+ * doesn't hit another vma.
+ *
+ * The "find_vma()" will do the right thing even if we wrap
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+	address &= PAGE_MASK;
+	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+		address -= PAGE_SIZE;
+		if (find_vma(vma->vm_mm, address) != vma)
+			return -ENOMEM;
+
+		expand_stack(vma, address);
+	}
+	return 0;
+}
+
+/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2417,9 +2437,13 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	/* Allocate our own private page. */
-	pte_unmap(page_table);
+	pte_unmap(page_table);
+
+	/* Check if we need to add a guard page to the stack */
+	if (check_stack_guard_page(vma, address) < 0)
+		return VM_FAULT_SIGBUS;
 
+	/* Allocate our own private page. */
 	if (!vx_rss_avail(mm, 1))
 		goto oom;
 	if (unlikely(anon_vma_prepare(vma)))
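
To see the guard-page logic in isolation, here is a self-contained model of check_stack_guard_page() with toy vma structures and a toy find_vma()/expand_stack() standing in for the kernel's: a fault on the lowest page of a VM_GROWSDOWN vma grows the stack one page downwards, unless the page below already belongs to a neighbouring vma, in which case we fail with -ENOMEM. All addresses are made up for the demo.

#include <stdio.h>
#include <errno.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define VM_GROWSDOWN	0x0100

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *next;	/* list sorted by address */
};

/* Like the kernel's find_vma(): first vma with vm_end > addr */
static struct vma *find_vma(struct vma *list, unsigned long addr)
{
	for (; list; list = list->next)
		if (addr < list->vm_end)
			return list;
	return NULL;
}

static void expand_stack(struct vma *vma, unsigned long address)
{
	vma->vm_start = address;	/* grow down by one page */
}

static int check_stack_guard_page(struct vma *list, struct vma *vma,
				  unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		address -= PAGE_SIZE;
		/* Would the guard page collide with another mapping? */
		if (find_vma(list, address) != vma)
			return -ENOMEM;
		expand_stack(vma, address);
	}
	return 0;
}

int main(void)
{
	struct vma stack = { 0xbffeb000, 0xc0000000, VM_GROWSDOWN, NULL };
	struct vma lib   = { 0xb7f00000, 0xb7f21000, 0, &stack };

	/* Fault on the lowest stack page: expands by one guard page */
	if (check_stack_guard_page(&lib, &stack, 0xbffeb123) == 0)
		printf("stack now starts at %08lx\n", stack.vm_start);
	return 0;
}
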
diff --git a/mm/mmap.c b/mm/mmap.c
index 7201372..bef13a1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1573,7 +1573,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 	 * Overcommit.. This must be the final test, as it will
 	 * update security statistics.
 	 */
-	if (security_vm_enough_memory(grow))
+	if (security_vm_enough_memory_mm(mm, grow))
 		return -ENOMEM;
 
 	/* Ok, everything looks good - let it rip */
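
A minimal sketch of why this one-line change matters, with toy accounting in place of the real security hooks: a stack can be grown while servicing a fault against another process's mm (for example through get_user_pages()), so the overcommit charge must go to the mm that owns the growing stack, not to whatever current happens to be. The structs and numbers below are invented for the demo.

#include <stdio.h>

struct mm_struct {
	const char *owner;
	unsigned long total_vm;	/* pages accounted to this mm */
};

/* Old style: implicitly charges whatever "current" is */
static struct mm_struct *current_mm;

static int vm_enough_memory(long pages)
{
	current_mm->total_vm += pages;
	return 0;
}

/* New style: charges the mm that actually owns the growing stack */
static int vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	mm->total_vm += pages;
	return 0;
}

int main(void)
{
	struct mm_struct debugger = { "debugger", 100 };
	struct mm_struct target   = { "target",   200 };

	current_mm = &debugger;	/* fault serviced in debugger context */

	vm_enough_memory(1);			/* wrong: charges the debugger */
	vm_enough_memory_mm(&target, 1);	/* right: charges the target */

	printf("%s: %lu pages\n", debugger.owner, debugger.total_vm);
	printf("%s: %lu pages\n", target.owner, target.total_vm);
	return 0;
}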