--- /dev/null
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index cd535c7..1b53da9 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -599,6 +599,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
+ unsigned long address;
+ int write, si_code;
+ int fault;
++ int should_exit_no_context = 0;
+ #ifdef CONFIG_X86_64
+ unsigned long flags;
+ #endif
+@@ -886,6 +887,9 @@ no_context:
+ oops_end(flags, regs, SIGKILL);
+ #endif
+
++ if (should_exit_no_context)
++ return;
++
+ /*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+@@ -911,8 +915,11 @@ do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+- if (!(error_code & PF_USER))
++ if (!(error_code & PF_USER)) {
++ should_exit_no_context = 1;
+ goto no_context;
++ }
++
+ #ifdef CONFIG_X86_32
+ /* User space => ok to do another page fault */
+ if (is_prefetch(regs, address, error_code))
+diff --git a/mm/memory.c b/mm/memory.c
+index 1c1a375..9739ae4 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2405,6 +2405,26 @@ out_nomap:
+ }
+
+ /*
++ * This is like a special single-page "expand_downwards()",
++ * except we must first make sure that 'address-PAGE_SIZE'
++ * doesn't hit another vma.
++ *
++ * The "find_vma()" will do the right thing even if we wrap
++ */
++static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
++{
++	address &= PAGE_MASK;
++	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
++		address -= PAGE_SIZE;
++		if (find_vma(vma->vm_mm, address) != vma)
++			return -ENOMEM;
++		/* Propagate expansion failure so the caller raises VM_FAULT_SIGBUS */
++		return expand_stack(vma, address);
++	}
++	return 0;
++}
++
++/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2417,6 +2437,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ spinlock_t *ptl;
+ pte_t entry;
+
++ if (check_stack_guard_page(vma, address) < 0) {
++ pte_unmap(page_table);
++ return VM_FAULT_SIGBUS;
++ }
++
+ /* Allocate our own private page. */
+ pte_unmap(page_table);
+