diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index c45f415..3a6dbad 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
+#include <linux/mm_types.h>
 
 static inline void fill_ldt(struct desc_struct *desc,
 			    const struct user_desc *info)
@@ -94,6 +95,9 @@ static inline int desc_empty(const void *ptr)
 
 #define load_TLS(t, cpu)			native_load_tls(t, cpu)
 #define set_ldt	native_set_ldt
+#ifdef CONFIG_X86_32
+#define load_user_cs_desc	native_load_user_cs_desc
+#endif /*CONFIG_X86_32*/
 
 #define write_ldt_entry(dt, entry, desc)	\
 	native_write_ldt_entry(dt, entry, desc)
@@ -380,4 +384,25 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
 }
 
+#ifdef CONFIG_X86_32
+static inline void set_user_cs(struct desc_struct *desc, unsigned long limit)
+{
+	limit = (limit - 1) / PAGE_SIZE;
+	desc->a = limit & 0xffff;
+	desc->b = (limit & 0xf0000) | 0x00c0fb00;
+}
+
+static inline void native_load_user_cs_desc(int cpu, struct mm_struct *mm)
+{
+	get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs;
+}
+
+#define arch_add_exec_range arch_add_exec_range
+#define arch_remove_exec_range arch_remove_exec_range
+#define arch_flush_exec_range arch_flush_exec_range
+extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit);
+extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit);
+extern void arch_flush_exec_range(struct mm_struct *mm);
+#endif /* CONFIG_X86_32 */
 
 #endif /* _ASM_X86_DESC_H */
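
As a sanity check on the set_user_cs() arithmetic above, here is a stand-alone user-space sketch of the same computation (assuming 4K pages and the two-word a/b view of desc_struct; the helper name and sample limit are illustrative, not part of the patch). The 0x00c0fb00 constant encodes a present, DPL-3, 32-bit, page-granular code segment with base 0, so only the 20 limit bits vary with exec_limit:

#include <stdio.h>

/* Mirror of set_user_cs() from the hunk above, as a standalone sketch. */
static void set_user_cs_words(unsigned int *a, unsigned int *b,
			      unsigned long limit)
{
	limit = (limit - 1) / 4096;		/* PAGE_SIZE assumed 4K */
	*a = limit & 0xffff;			/* limit bits 0-15 */
	*b = (limit & 0xf0000) | 0x00c0fb00;	/* limit bits 16-19 + flags */
}

int main(void)
{
	unsigned int a, b;

	/* e.g. the highest executable mapping ends at 0x08048000 */
	set_user_cs_words(&a, &b, 0x08048000);
	printf("user_cs: %08x/%08x\n", a, b);	/* -> 00008047/00c0fb00 */
	return 0;
}
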
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 80a1dee..8314c66 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
 /*
  * The x86 doesn't have a mmu context, but
  * we put the segment information here.
  *
+ * exec_limit is used to track the range PROT_EXEC
+ * mappings span.
  */
+	struct desc_struct user_cs;
+	unsigned long exec_limit;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4fb37c8..d5cc31c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -139,6 +139,9 @@ struct pv_cpu_ops {
 	void (*store_gdt)(struct desc_ptr *);
 	void (*store_idt)(struct desc_ptr *);
 	void (*set_ldt)(const void *desc, unsigned entries);
+#ifdef CONFIG_X86_32
+	void (*load_user_cs_desc)(int cpu, struct mm_struct *mm);
+#endif /*CONFIG_X86_32*/
 	unsigned long (*store_tr)(void);
 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
 
@@ -955,6 +958,12 @@ static inline void set_ldt(const void *addr, unsigned entries)
 {
 	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
 }
+#ifdef CONFIG_X86_32
+static inline void load_user_cs_desc(unsigned int cpu, struct mm_struct *mm)
+{
+	PVOP_VCALL2(pv_cpu_ops.load_user_cs_desc, cpu, mm);
+}
+#endif /*CONFIG_X86_32*/
 static inline void store_gdt(struct desc_ptr *dtr)
 {
 	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
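
The PVOP_VCALL2() above compiles down to an indirect call through pv_cpu_ops, which the paravirt machinery later patches into a direct call. A minimal sketch of the equivalent dispatch (all names here are illustrative; the real macro emits patchable call sites rather than a plain pointer dereference):

#include <stdio.h>

struct mm_struct;	/* opaque, as in the kernel */

/* Hypothetical ops table mirroring pv_cpu_ops.load_user_cs_desc. */
struct cpu_ops {
	void (*load_user_cs_desc)(int cpu, struct mm_struct *mm);
};

static void native_load(int cpu, struct mm_struct *mm)
{
	(void)mm;
	printf("native reload of USER_CS on cpu %d\n", cpu);
}

static struct cpu_ops pv_cpu_ops_sketch = {
	.load_user_cs_desc = native_load,	/* Xen installs its own hook */
};

/* What the PVOP_VCALL2() wrapper amounts to, minus call-site patching: */
static void load_user_cs_desc_sketch(int cpu, struct mm_struct *mm)
{
	pv_cpu_ops_sketch.load_user_cs_desc(cpu, mm);
}

int main(void)
{
	load_user_cs_desc_sketch(0, NULL);
	return 0;
}
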
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c776826..fb6b579 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -160,6 +160,9 @@ static inline int hlt_works(int cpu)
 
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 
+#define __HAVE_ARCH_ALIGN_STACK
+extern unsigned long arch_align_stack(unsigned long sp);
+
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
 extern struct pt_regs *idle_regs(struct pt_regs *);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3ffdcfa..62cba96 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -804,6 +804,20 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	/* Filter out anything that depends on CPUID levels we don't have */
 	filter_cpuid_features(c, true);
 
+	/*
+	 * emulation of NX with segment limits unfortunately means
+	 * we have to disable the fast system calls, due to the way that
+	 * sysexit clears the segment limits on return.
+	 * If we have either disabled exec-shield on the boot command line,
+	 * or we have NX, then we don't need to do this.
+	 */
+	if (exec_shield != 0) {
+#ifdef CONFIG_X86_PAE
+		if (!test_cpu_cap(c, X86_FEATURE_NX))
+#endif
+			clear_cpu_cap(c, X86_FEATURE_SEP);
+	}
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 70ec9b9..d956b8c 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -369,6 +369,9 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.read_tscp = native_read_tscp,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
+#ifdef CONFIG_X86_32
+	.load_user_cs_desc = native_load_user_cs_desc,
+#endif /*CONFIG_X86_32*/
 	.load_gdt = native_load_gdt,
 	.load_idt = native_load_idt,
 	.store_gdt = native_store_gdt,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 59f4524..068e286 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -299,7 +299,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
+	int cpu;
+
 	set_user_gs(regs, 0);
 	regs->ds		= __USER_DS;
@@ -308,6 +311,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	regs->cs		= __USER_CS;
 	regs->ip		= new_ip;
 	regs->sp		= new_sp;
+
+	cpu = get_cpu();
+	load_user_cs_desc(cpu, current->mm);
+	put_cpu();
+
 	/*
 	 * Free the old FP and other extended state
 	 */
@@ -354,7 +362,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
 	__unlazy_fpu(prev_p);
+	load_user_cs_desc(cpu, next_p->mm);
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter > 5)
@@ -495,3 +504,40 @@ unsigned long get_wchan(struct task_struct *p)
 	return 0;
 }
 
+static void modify_cs(struct mm_struct *mm, unsigned long limit)
+{
+	mm->context.exec_limit = limit;
+	set_user_cs(&mm->context.user_cs, limit);
+	if (mm == current->mm) {
+		int cpu;
+
+		cpu = get_cpu();
+		load_user_cs_desc(cpu, mm);
+		put_cpu();
+	}
+}
+
+void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
+{
+	if (limit > mm->context.exec_limit)
+		modify_cs(mm, limit);
+}
+
+void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
+{
+	struct vm_area_struct *vma;
+	unsigned long limit = PAGE_SIZE;
+
+	if (old_end == mm->context.exec_limit) {
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
+				limit = vma->vm_end;
+		modify_cs(mm, limit);
+	}
+}
+
+void arch_flush_exec_range(struct mm_struct *mm)
+{
+	mm->context.exec_limit = 0;
+	set_user_cs(&mm->context.user_cs, 0);
+}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 07d60c8..41e9129 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -118,6 +118,76 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
 	if (!user_mode_vm(regs))
 		die(str, regs, err);
 }
 
+static inline int
+__compare_user_cs_desc(const struct desc_struct *desc1,
+		       const struct desc_struct *desc2)
+{
+	return ((desc1->limit0 != desc2->limit0) ||
+		(desc1->limit != desc2->limit) ||
+		(desc1->base0 != desc2->base0) ||
+		(desc1->base1 != desc2->base1) ||
+		(desc1->base2 != desc2->base2));
+}
+
+/*
+ * lazy-check for CS validity on exec-shield binaries:
+ *
+ * the original non-exec stack patch was written by
+ * Solar Designer <solar at openwall.com>. Thanks!
+ */
+static int
+check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
+{
+	struct desc_struct *desc1, *desc2;
+	struct vm_area_struct *vma;
+	unsigned long limit;
+
+	if (current->mm == NULL)
+		return 0;
+
+	limit = -1UL;
+	if (current->mm->context.exec_limit != -1UL) {
+		limit = PAGE_SIZE;
+		spin_lock(&current->mm->page_table_lock);
+		for (vma = current->mm->mmap; vma; vma = vma->vm_next)
+			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
+				limit = vma->vm_end;
+		vma = get_gate_vma(current);
+		if (vma && (vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
+			limit = vma->vm_end;
+		spin_unlock(&current->mm->page_table_lock);
+		if (limit >= TASK_SIZE)
+			limit = -1UL;
+		current->mm->context.exec_limit = limit;
+	}
+	set_user_cs(&current->mm->context.user_cs, limit);
+
+	desc1 = &current->mm->context.user_cs;
+	desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;
+
+	if (__compare_user_cs_desc(desc1, desc2)) {
+		/*
+		 * The CS was not in sync - reload it and retry the
+		 * instruction. If the instruction still faults then
+		 * we won't hit this branch next time around.
+		 */
+		if (print_fatal_signals >= 2) {
+			printk(KERN_ERR "#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n",
+				error_code, error_code/8, regs->ip,
+				smp_processor_id());
+			printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x, CPU_cs: %08x/%08x.\n",
+				current->mm->context.exec_limit,
+				desc1->a, desc1->b, desc2->a, desc2->b);
+		}
+		load_user_cs_desc(cpu, current->mm);
+		return 1;
+	}
+
+	return 0;
+}
+
 static void __kprobes
@@ -276,6 +346,29 @@ do_general_protection(struct pt_regs *regs, long error_code)
 	if (!user_mode(regs))
 		goto gp_in_kernel;
 
+#ifdef CONFIG_X86_32
+{
+	int cpu;
+	int ok;
+
+	cpu = get_cpu();
+	ok = check_lazy_exec_limit(cpu, regs, error_code);
+	put_cpu();
+
+	if (ok)
+		return;
+
+	if (print_fatal_signals) {
+		printk(KERN_ERR "#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n",
+			error_code, error_code/8, regs->ip, smp_processor_id());
+		printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x.\n",
+			current->mm->context.exec_limit,
+			current->mm->context.user_cs.a,
+			current->mm->context.user_cs.b);
+	}
+}
+#endif /*CONFIG_X86_32*/
+
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = 13;
@@ -885,19 +978,37 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 }
 
 #ifdef CONFIG_X86_32
+/*
+ * The fixup code for errors in iret jumps to here (iret_exc). It loses
+ * the original trap number and error code. The bogus trap 32 and error
+ * code 0 are what the vanilla kernel delivers via:
+ * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
+ *
+ * NOTE: Because of the final "1" in the macro we need to enable interrupts.
+ *
+ * In case of a general protection fault in the iret instruction, we
+ * need to check for a lazy CS update for exec-shield.
+ */
 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 {
-	siginfo_t info;
+	int ok;
+	int cpu;
+
 	local_irq_enable();
 
-	info.si_signo = SIGILL;
-	info.si_errno = 0;
-	info.si_code = ILL_BADSTK;
-	info.si_addr = NULL;
-	if (notify_die(DIE_TRAP, "iret exception",
-			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
-		return;
-	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
+	cpu = get_cpu();
+	ok = check_lazy_exec_limit(cpu, regs, error_code);
+	put_cpu();
+
+	if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
+			error_code, 32, SIGSEGV) != NOTIFY_STOP) {
+		siginfo_t info;
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = ILL_BADSTK;
+		info.si_addr = NULL;
+		do_trap(32, SIGSEGV, "iret exception", regs, error_code, &info);
+	}
 }
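
The net effect of the two call sites above: every user-mode #GP (and every iret fault) first asks check_lazy_exec_limit() whether the fault is merely a stale per-CPU copy of user_cs. A condensed user-space sketch of that retry decision (illustrative types and names, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct desc_words { unsigned int a, b; };

/* Sketch of the decision in check_lazy_exec_limit(): the fault is "ours"
 * iff the per-mm descriptor and the CPU's GDT copy disagree. */
static bool gp_was_lazy_cs(const struct desc_words *mm_cs,
			   struct desc_words *gdt_cs)
{
	if (memcmp(mm_cs, gdt_cs, sizeof(*mm_cs)) == 0)
		return false;	/* in sync: a real #GP, deliver the signal */
	*gdt_cs = *mm_cs;	/* reload the stale GDT entry ... */
	return true;		/* ... and retry the faulting instruction */
}

int main(void)
{
	struct desc_words mm_cs = { 0x8047, 0x00c0fb00 };
	struct desc_words gdt_cs = { 0xffff, 0x00cffb00 };	/* stale 4GB limit */

	printf("%d\n", gp_was_lazy_cs(&mm_cs, &gdt_cs));	/* 1: fixup + retry */
	printf("%d\n", gp_was_lazy_cs(&mm_cs, &gdt_cs));	/* 0: now in sync */
	return 0;
}
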
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 34c1bfb..32c3d8d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -228,6 +228,12 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	if (nx_enabled)
 		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+#ifdef CONFIG_X86_32
+	else
+	if (exec_shield)
+		printk(KERN_INFO "Using x86 segment limits to approximate "
+			"NX protection\n");
+#endif
 
 	/* Enable PSE if available */
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 949708d..c1373b6 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -587,6 +587,54 @@ void zap_low_mappings(void)
 
 pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
+#ifdef CONFIG_X86_PAE
+
+static int disable_nx __initdata;
+
+/*
+ * noexec = on|off
+ *
+ * Control non executable mappings.
+ *
+ * on      Enable
+ * off     Disable (disables exec-shield too)
+ */
+static int __init noexec_setup(char *str)
+{
+	if (!str || !strcmp(str, "on")) {
+		disable_nx = 0;
+		__supported_pte_mask |= _PAGE_NX;
+	} else if (!strcmp(str, "off")) {
+		disable_nx = 1;
+		__supported_pte_mask &= ~_PAGE_NX;
+		exec_shield = 0;
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+early_param("noexec", noexec_setup);
+
+void __init set_nx(void)
+{
+	unsigned int v[4], l, h;
+
+	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+
+		if ((v[3] & (1 << 20)) && !disable_nx) {
+			rdmsr(MSR_EFER, l, h);
+			l |= EFER_NX;
+			wrmsr(MSR_EFER, l, h);
+			nx_enabled = 1;
+			__supported_pte_mask |= _PAGE_NX;
+		}
+	}
+}
+#endif
+
 /* user-defined highmem size */
 static unsigned int highmem_pages = -1;
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1658296..72056cf 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -111,13 +111,16 @@ static unsigned long mmap_legacy_base(void)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
-	if (mmap_is_legacy()) {
+	if (!(2 & exec_shield) && mmap_is_legacy()) {
 		mm->mmap_base = mmap_legacy_base();
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		if (!(current->personality & READ_IMPLIES_EXEC)
+		    && !(__supported_pte_mask & _PAGE_NX))
+			mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
 		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e970..ea5a4c3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
 #include <linux/interrupt.h>
 #include <linux/module.h>
 
+#include <asm/desc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/apic.h>
@@ -129,6 +130,12 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	union smp_flush_state *f;
 
 	cpu = smp_processor_id();
+
+#ifdef CONFIG_X86_32
+	if (current->active_mm)
+		load_user_cs_desc(cpu, current->active_mm);
+#endif
+
 	/*
 	 * orig_rax contains the negated interrupt vector.
 	 * Use that to determine where the sender put the data.
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 58bc00f..1fdafb5 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		addr = VDSO_HIGH_BASE;
 	else {
-		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+		addr = get_unmapped_area_prot(NULL, 0, PAGE_SIZE, 0, 0, 1);
 		if (IS_ERR_VALUE(addr)) {
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0a1700a..37b8744 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -321,6 +321,24 @@ static void xen_set_ldt(const void *addr, unsigned entries)
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
+#ifdef CONFIG_X86_32
+static void xen_load_user_cs_desc(int cpu, struct mm_struct *mm)
+{
+	struct desc_struct *gdt;
+	xmaddr_t mgdt;
+	u64 descriptor;
+	struct desc_struct user_cs;
+
+	gdt = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];
+	mgdt = virt_to_machine(gdt);
+
+	user_cs = mm->context.user_cs;
+	descriptor = (u64) user_cs.a | ((u64) user_cs.b) << 32;
+
+	HYPERVISOR_update_descriptor(mgdt.maddr, descriptor);
+}
+#endif /*CONFIG_X86_32*/
+
 static void xen_load_gdt(const struct desc_ptr *dtr)
 {
 	unsigned long va = dtr->address;
@@ -886,6 +904,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	.load_tr_desc = paravirt_nop,
 	.set_ldt = xen_set_ldt,
+#ifdef CONFIG_X86_32
+	.load_user_cs_desc = xen_load_user_cs_desc,
+#endif /*CONFIG_X86_32*/
 	.load_gdt = xen_load_gdt,
 	.load_idt = xen_load_idt,
 	.load_tls = xen_load_tls,
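
Under Xen the GDT is write-protected, so the descriptor is handed to the hypervisor as one 64-bit quantity instead of being written in place. A stand-alone sketch of the packing used above (the sample descriptor words are an illustrative set_user_cs() result):

#include <stdint.h>
#include <stdio.h>

/* The two 32-bit descriptor words form one little-endian 64-bit GDT
 * entry; this is the value xen_load_user_cs_desc() hands to
 * HYPERVISOR_update_descriptor() above. */
int main(void)
{
	uint32_t a = 0x00008047, b = 0x00c0fb00;	/* sample user_cs */
	uint64_t descriptor = (uint64_t)a | ((uint64_t)b << 32);

	printf("GDT entry: %016llx\n", (unsigned long long)descriptor);
	return 0;
}
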
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 40381df..f856fab 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -73,7 +73,7 @@ static struct linux_binfmt elf_format = {
 };
 
-#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
+#define BAD_ADDR(x) IS_ERR_VALUE(x)
 
 static int set_brk(unsigned long start, unsigned long end)
 {
@@ -721,6 +721,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 		}
 
+	if (current->personality == PER_LINUX && (exec_shield & 2)) {
+		executable_stack = EXSTACK_DISABLE_X;
+		current->flags |= PF_RANDOMIZE;
+	}
+
 	/* Some simple consistency checks for the interpreter */
 	if (elf_interpreter) {
@@ -740,6 +745,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 			goto out_free_dentry;
 	}
 
+#ifdef CONFIG_X86_32
+	/*
+	 * Turn off the CS limit completely if exec-shield disabled or
+	 * NX active:
+	 */
+	if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
+		arch_add_exec_range(current->mm, -1);
+#endif
+
 	/* OK, This is the point of no return */
 	current->flags &= ~PF_FORKNOEXEC;
 	current->mm->def_flags = def_flags;
@@ -747,7 +761,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
 	   may depend on the personality.  */
 	SET_PERSONALITY(loc->elf_ex);
-	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+	if (!(exec_shield & 2) &&
+	    elf_read_implies_exec(loc->elf_ex, executable_stack))
 		current->personality |= READ_IMPLIES_EXEC;
 
 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
@@ -912,7 +927,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 
-		if (!IS_ERR((void *)elf_entry)) {
+		if (!BAD_ADDR(elf_entry)) {
 			/*
 			 * load_elf_interp() returns relocation
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ad613ed..08f08d0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1135,7 +1135,13 @@ extern int install_special_mapping(struct mm_struct *mm,
 				   unsigned long addr, unsigned long len,
 				   unsigned long flags, struct page **pages);
 
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
+
+static inline unsigned long get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+}
 
 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0e80e26..af904ea 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -198,6 +198,9 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
+	unsigned long (*get_unmapped_exec_area) (struct file *filp,
+				unsigned long addr, unsigned long len,
+				unsigned long pgoff, unsigned long flags);
 	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 	unsigned long mmap_base;		/* base of mmap area */
 	unsigned long task_size;		/* size of task vm space */
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 40fc7e6..68c2549 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -55,8 +55,11 @@ struct rlimit {
 /*
  * Limit the stack to some sane default: root can always
  * increase this limit if needed..  8MB seems reasonable.
+ * (2MB more to cover randomization effects.)
  */
-#define _STK_LIM	(8*1024*1024)
+#define _STK_LIM	(10*1024*1024)
+#define EXEC_STACK_BIAS	(2*1024*1024)
 
 /*
  * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
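
The two constants work as a pair: the soft limit grows from 8MB to 10MB, of which up to EXEC_STACK_BIAS is treated as slack for stack randomization. A user-space sketch of the resulting arithmetic (mirroring the over_stack_limit() helper added to mm/mmap.c later in this patch; the standalone main() and parameter are illustrative):

#include <stdio.h>

#define _STK_LIM	(10*1024*1024)
#define EXEC_STACK_BIAS	(2*1024*1024)

/* Growth is only refused once the stack exceeds rlim_cur plus the bias. */
static int over_stack_limit(unsigned long sz, unsigned long rlim_cur)
{
	if (sz < EXEC_STACK_BIAS)
		return 0;
	return (sz - EXEC_STACK_BIAS) > rlim_cur;
}

int main(void)
{
	/* with the default 10MB soft limit, growth stops past 12MB */
	printf("%d\n", over_stack_limit(11*1024*1024, _STK_LIM));	/* 0 */
	printf("%d\n", over_stack_limit(13*1024*1024, _STK_LIM));	/* 1 */
	return 0;
}
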
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4896fdf..3513e03 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,9 @@ struct fs_struct;
 struct perf_counter_context;
 
+extern int exec_shield;
+extern int print_fatal_signals;
+
 /*
  * List of flags we want to share for kernel threads,
  * if only because they are not used by them anyway.
@@ -359,6 +362,10 @@ extern int sysctl_max_map_count;
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 		       unsigned long, unsigned long);
+
+extern unsigned long
+arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
+			    unsigned long, unsigned long);
 extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			  unsigned long len, unsigned long pgoff,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ce664f9..1905e22 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -87,6 +87,26 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
 extern int sysctl_nr_trim_pages;
 #endif
 
+int exec_shield = (1<<0);
+/* exec_shield is a bitmask:
+ * 0: off; vdso at STACK_TOP, 1 page below TASK_SIZE
+ * (1<<0) 1: on [also on if !=0]
+ * (1<<1) 2: force noexecstack regardless of PT_GNU_STACK
+ * The old settings
+ * (1<<2) 4: vdso just below .text of main (unless too low)
+ * (1<<3) 8: vdso just below .text of PT_INTERP (unless too low)
+ * are ignored because the vdso is placed completely randomly
+ */
+
+static int __init setup_exec_shield(char *str)
+{
+	get_option(&str, &exec_shield);
+
+	return 1;
+}
+__setup("exec-shield=", setup_exec_shield);
+
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable;
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
@@ -382,6 +402,14 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "exec-shield",
+		.data		= &exec_shield,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
 		.ctl_name	= KERN_CORE_USES_PID,
 		.procname	= "core_uses_pid",
 		.data		= &core_uses_pid,
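
Since exec_shield is now tunable at runtime through /proc/sys/kernel/exec-shield, a small user-space decoder of the bitmask documented above can help when inspecting systems (a sketch; only bits 0 and 1 are honored by this version of the patch):

#include <stdio.h>

/* Decode a value read from /proc/sys/kernel/exec-shield. */
static void decode_exec_shield(int v)
{
	if (!v) {
		printf("off\n");
		return;
	}
	printf("on");	/* any non-zero value enables exec-shield */
	if (v & 2)
		printf(", forcing non-executable stacks (PT_GNU_STACK ignored)");
	printf("\n");
}

int main(void)
{
	decode_exec_shield(1);	/* default: on */
	decode_exec_shield(3);	/* on + forced noexecstack */
	return 0;
}
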
diff --git a/mm/mmap.c b/mm/mmap.c
index 34579b2..260bb3c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
 #include <linux/rmap.h>
 #include <linux/mmu_notifier.h>
 #include <linux/perf_counter.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 
 #define arch_rebalance_pgtables(addr, len)		(addr)
 #endif
 
+/* No sane architecture will #define these to anything else */
+#ifndef arch_add_exec_range
+#define arch_add_exec_range(mm, limit)	do { ; } while (0)
+#endif
+#ifndef arch_flush_exec_range
+#define arch_flush_exec_range(mm)	do { ; } while (0)
+#endif
+#ifndef arch_remove_exec_range
+#define arch_remove_exec_range(mm, limit)	do { ; } while (0)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -392,6 +405,8 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+	if (vma->vm_flags & VM_EXEC)
+		arch_add_exec_range(mm, vma->vm_end);
 	if (prev) {
 		vma->vm_next = prev->vm_next;
@@ -494,6 +509,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = prev;
+	if (vma->vm_flags & VM_EXEC)
+		arch_remove_exec_range(mm, vma->vm_end);
 }
 
@@ -803,6 +820,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 		} else					/* cases 2, 5, 7 */
 			vma_adjust(prev, prev->vm_start,
 				   end, prev->vm_pgoff, NULL);
+		if (prev->vm_flags & VM_EXEC)
+			arch_add_exec_range(mm, prev->vm_end);
 		return prev;
 	}
 
@@ -957,7 +976,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
-	addr = get_unmapped_area(file, addr, len, pgoff, flags);
+	addr = get_unmapped_area_prot(file, addr, len, pgoff, flags,
+		prot & PROT_EXEC);
 	if (addr & ~PAGE_MASK)
 		return addr;
@@ -1442,13 +1462,17 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
 }
 
 unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-		unsigned long pgoff, unsigned long flags)
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+		unsigned long pgoff, unsigned long flags, int exec)
 {
 	unsigned long (*get_area)(struct file *, unsigned long,
 				  unsigned long, unsigned long, unsigned long);
 
-	get_area = current->mm->get_unmapped_area;
+	if (exec && current->mm->get_unmapped_exec_area)
+		get_area = current->mm->get_unmapped_exec_area;
+	else
+		get_area = current->mm->get_unmapped_area;
 	if (file && file->f_op && file->f_op->get_unmapped_area)
 		get_area = file->f_op->get_unmapped_area;
 	addr = get_area(file, addr, len, pgoff, flags);
@@ -1462,8 +1486,76 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	return arch_rebalance_pgtables(addr, len);
 }
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+#define SHLIB_BASE	0x00110000
+
+unsigned long
+arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+		unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+	unsigned long addr = addr0, len = len0;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long tmp;
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	if (!addr)
+		addr = randomize_range(SHLIB_BASE, 0x01000000, len);
+
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	addr = SHLIB_BASE;
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point: (!vma || addr < vma->vm_end). */
+		if (TASK_SIZE - len < addr)
+			return -ENOMEM;
+
+		if (!vma || addr + len <= vma->vm_start) {
+			/*
+			 * Must not let a PROT_EXEC mapping get into the
+			 * brk area:
+			 */
+			if (addr + len > mm->brk)
+				goto failed;
+
+			/*
+			 * Up until the brk area we randomize addresses
+			 * as much as possible:
+			 */
+			if (addr >= 0x01000000) {
+				tmp = randomize_range(0x01000000,
+					PAGE_ALIGN(max(mm->start_brk,
+					(unsigned long)0x08000000)), len);
+				vma = find_vma(mm, tmp);
+				if (TASK_SIZE - len >= tmp &&
+				    (!vma || tmp + len <= vma->vm_start))
+					return tmp;
+			}
+			/*
+			 * Ok, randomization didn't work out - return
+			 * the result of the linear search:
+			 */
+			return addr;
+		}
+		addr = vma->vm_end;
+	}
+
+failed:
+	return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
+}
 
-EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
@@ -1538,6 +1630,14 @@ out:
 	return prev ? prev->vm_next : vma;
 }
 
+static int over_stack_limit(unsigned long sz)
+{
+	if (sz < EXEC_STACK_BIAS)
+		return 0;
+	return (sz - EXEC_STACK_BIAS) >
+			current->signal->rlim[RLIMIT_STACK].rlim_cur;
+}
+
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
@@ -1554,7 +1654,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 
 	/* Stack limit test */
-	if (size > rlim[RLIMIT_STACK].rlim_cur)
+	if (over_stack_limit(size))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -1864,10 +1964,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
 
-	if (new_below)
+	if (new_below) {
+		unsigned long old_end = vma->vm_end;
+
 		vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
 			((addr - new->vm_start) >> PAGE_SHIFT), new);
-	else
+		if (vma->vm_flags & VM_EXEC)
+			arch_remove_exec_range(mm, old_end);
+	} else
 		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
@@ -2116,6 +2220,7 @@ void exit_mmap(struct mm_struct *mm)
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
+	arch_flush_exec_range(mm);
 
 	/*
 	 * Walk the list again, actually closing and freeing it,
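
Taken together with arch_get_unmapped_exec_area() above, the placement policy for PROT_EXEC mappings falls into a few address bands. A user-space sketch that classifies an address the way the allocator treats it (the sample brk value and the strings are illustrative; addresses below 16MB are the "ASCII-armor" zone, where every pointer contains a NUL byte):

#include <stdio.h>

#define SHLIB_BASE	0x00110000UL

/* Classify an address against the bands used by the allocator above. */
static const char *classify_exec_addr(unsigned long addr, unsigned long brk)
{
	if (addr < SHLIB_BASE)
		return "below SHLIB_BASE: never chosen";
	if (addr < 0x01000000UL)
		return "ASCII-armor zone: first-choice random range";
	if (addr < brk)
		return "secondary random range, up to the brk base";
	return "brk area or above: refused, fall back to default area";
}

int main(void)
{
	unsigned long brk = 0x08000000UL;	/* sample brk base */

	printf("%s\n", classify_exec_addr(0x00200000UL, brk));
	printf("%s\n", classify_exec_addr(0x04000000UL, brk));
	printf("%s\n", classify_exec_addr(0x09000000UL, brk));
	return 0;
}
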
diff --git a/mm/mprotect.c b/mm/mprotect.c
index d80311b..032423d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
 #include <linux/perf_counter.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#ifndef arch_remove_exec_range
+#define arch_remove_exec_range(mm, limit)	do { ; } while (0)
+#endif
+
 #ifndef pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 {
@@ -139,7 +144,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long oldflags = vma->vm_flags;
 	long nrpages = (end - start) >> PAGE_SHIFT;
-	unsigned long charged = 0;
+	unsigned long charged = 0, old_end = vma->vm_end;
 	int dirty_accountable = 0;
@@ -204,6 +209,9 @@ success:
 		dirty_accountable = 1;
 	}
 
+	if (oldflags & VM_EXEC)
+		arch_remove_exec_range(current->mm, old_end);
+
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	if (is_vm_hugetlb_page(vma))
 		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
diff --git a/mm/mremap.c b/mm/mremap.c
index a39b7b9..6bebfde 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -400,8 +400,8 @@ unsigned long do_mremap(unsigned long addr,
 		if (vma->vm_flags & VM_MAYSHARE)
 			map_flags |= MAP_SHARED;
 
-		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
-					vma->vm_pgoff, map_flags);
+		new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
+					vma->vm_pgoff, map_flags, vma->vm_flags & VM_EXEC);
 		if (new_addr & ~PAGE_MASK) {