Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 75b2009..a995ce8 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -13,6 +13,7 @@
 
 #include <stdarg.h>
 
+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -28,7 +29,7 @@
 #include <linux/a.out.h>
 #include <linux/interrupt.h>
 #include <linux/config.h>
-#include <linux/version.h>
+#include <linux/utsname.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/init.h>
@@ -36,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
-#include <asm/irq.h>
 #include <asm/desc.h>
+#include <asm/vm86.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
 
-#include <linux/irq.h>
 #include <linux/err.h>
 
+#include <asm/tlbflush.h>
+#include <asm/cpu.h>
+
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-int hlt_counter;
+static int hlt_counter;
+
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);
 
 /*
  * Return saved PC of a blocked thread.
@@ -69,6 +76,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 void disable_hlt(void)
 {
@@ -90,14 +99,27 @@ EXPORT_SYMBOL(enable_hlt);
  */
 void default_idle(void)
 {
-       if (!hlt_counter && current_cpu_data.hlt_works_ok) {
-               local_irq_disable();
-               if (!need_resched())
-                       safe_halt();
-               else
-                       local_irq_enable();
+       local_irq_enable();
+
+       if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+               smp_mb__after_clear_bit();
+               while (!need_resched()) {
+                       local_irq_disable();
+                       if (!need_resched())
+                               safe_halt();
+                       else
+                               local_irq_enable();
+               }
+               set_thread_flag(TIF_POLLING_NRFLAG);
+       } else {
+               while (!need_resched())
+                       cpu_relax();
        }
 }
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif
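
The reshuffled default_idle() implements the idle half of the TIF_POLLING_NRFLAG protocol: the flag is cleared (with a barrier) before hlt is attempted, so a remote wakeup knows it must send a reschedule IPI, while the pure polling branch keeps the flag set so the IPI can be skipped. A rough sketch of the scheduler-side counterpart, loosely based on resched_task() in kernel/sched.c of this era (names and details are illustrative, not part of this patch):

static void resched_task_sketch(struct task_struct *p)
{
	int cpu;

	if (test_tsk_thread_flag(p, TIF_NEED_RESCHED))
		return;					/* already pending */

	set_tsk_thread_flag(p, TIF_NEED_RESCHED);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;					/* local CPU will notice */

	/* make NEED_RESCHED visible before we look at the polling flag */
	smp_mb();
	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
		smp_send_reschedule(cpu);	/* target sits in hlt/mwait, kick it */
}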
 
 /*
  * On SMP it's slightly faster (but much more power-consuming!)
@@ -106,30 +128,41 @@ void default_idle(void)
  */
 static void poll_idle (void)
 {
-       int oldval;
-
        local_irq_enable();
 
-       /*
-        * Deal with another CPU just having chosen a thread to
-        * run here:
-        */
-       oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+       asm volatile(
+               "2:"
+               "testl %0, %1;"
+               "rep; nop;"
+               "je 2b;"
+               : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+}
 
-       if (!oldval) {
-               set_thread_flag(TIF_POLLING_NRFLAG);
-               asm volatile(
-                       "2:"
-                       "testl %0, %1;"
-                       "rep; nop;"
-                       "je 2b;"
-                       : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+#ifdef CONFIG_HOTPLUG_CPU
+#include <asm/nmi.h>
+/* We don't actually take CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+       /* This must be done before dead CPU ack */
+       cpu_exit_clear();
+       wbinvd();
+       mb();
+       /* Ack it */
+       __get_cpu_var(cpu_state) = CPU_DEAD;
 
-               clear_thread_flag(TIF_POLLING_NRFLAG);
-       } else {
-               set_need_resched();
-       }
+       /*
+        * With physical CPU hotplug, we should halt the cpu
+        */
+       local_irq_disable();
+       while (1)
+               halt();
 }
+#else
+static inline void play_dead(void)
+{
+       BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
 
 /*
  * The idle thread. There's no useful work to be
@@ -137,22 +170,65 @@ static void poll_idle (void)
  * low exit latency (ie sit in a loop waiting for
  * somebody to say that they'd like to reschedule)
  */
-void cpu_idle (void)
+void cpu_idle(void)
 {
+       int cpu = smp_processor_id();
+
+       set_thread_flag(TIF_POLLING_NRFLAG);
+
        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched()) {
-                       void (*idle)(void) = pm_idle;
+                       void (*idle)(void);
+
+                       if (__get_cpu_var(cpu_idle_state))
+                               __get_cpu_var(cpu_idle_state) = 0;
+
+                       rmb();
+                       idle = pm_idle;
 
                        if (!idle)
                                idle = default_idle;
 
-                       irq_stat[smp_processor_id()].idle_timestamp = jiffies;
+                       if (cpu_is_offline(cpu))
+                               play_dead();
+
+                       __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        idle();
                }
+               preempt_enable_no_resched();
                schedule();
+               preempt_disable();
+       }
+}
+
+void cpu_idle_wait(void)
+{
+       unsigned int cpu, this_cpu = get_cpu();
+       cpumask_t map;
+
+       set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+       put_cpu();
+
+       cpus_clear(map);
+       for_each_online_cpu(cpu) {
+               per_cpu(cpu_idle_state, cpu) = 1;
+               cpu_set(cpu, map);
        }
+
+       __get_cpu_var(cpu_idle_state) = 0;
+
+       wmb();
+       do {
+               ssleep(1);
+               for_each_online_cpu(cpu) {
+                       if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+                               cpu_clear(cpu, map);
+               }
+               cpus_and(map, map, cpu_online_map);
+       } while (!cpus_empty(map));
 }
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
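
cpu_idle_wait() complements the cpu_idle_state bookkeeping added to cpu_idle() above: after swapping the pm_idle pointer, a caller can use it to guarantee that no CPU is still executing the old handler. A hedged sketch of that calling pattern, with a hypothetical driver-local handler (none of these names come from this file):

/* hypothetical replacement idle routine */
static void my_driver_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

static void (*saved_idle)(void);

static void my_driver_install(void)
{
	saved_idle = pm_idle;
	pm_idle = my_driver_idle;
}

static void my_driver_remove(void)
{
	pm_idle = saved_idle;
	/*
	 * Every online CPU must pass through cpu_idle()'s outer loop once
	 * before this returns, so my_driver_idle() can no longer be running.
	 */
	cpu_idle_wait();
}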
 
 /*
  * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
@@ -165,36 +241,28 @@ static void mwait_idle(void)
 {
        local_irq_enable();
 
-       if (!need_resched()) {
-               set_thread_flag(TIF_POLLING_NRFLAG);
-               do {
-                       __monitor((void *)&current_thread_info()->flags, 0, 0);
-                       if (need_resched())
-                               break;
-                       __mwait(0, 0);
-               } while (!need_resched());
-               clear_thread_flag(TIF_POLLING_NRFLAG);
+       while (!need_resched()) {
+               __monitor((void *)&current_thread_info()->flags, 0, 0);
+               smp_mb();
+               if (need_resched())
+                       break;
+               __mwait(0, 0);
        }
 }
 
-void __init select_idle_routine(const struct cpuinfo_x86 *c)
+void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                printk("monitor/mwait feature present.\n");
                /*
                 * Skip, if setup has overridden idle.
-                * Also, take care of system with asymmetric CPUs.
-                * Use, mwait_idle only if all cpus support it.
-                * If not, we fallback to default_idle()
+                * One CPU supports mwait => all CPUs support mwait.
                 */
                if (!pm_idle) {
                        printk("using mwait in idle threads.\n");
                        pm_idle = mwait_idle;
                }
-               return;
        }
-       pm_idle = default_idle;
-       return;
 }
 
 static int __init idle_setup (char *str)
@@ -211,6 +279,7 @@ static int __init idle_setup (char *str)
                pm_idle = default_idle;
        }
 
+       boot_option_idle_override = 1;
        return 1;
 }
 
@@ -225,9 +294,12 @@ void show_regs(struct pt_regs * regs)
        printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
        print_symbol("EIP is at %s\n", regs->eip);
 
-       if (regs->xcs & 3)
+       if (user_mode_vm(regs))
                printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-       printk(" EFLAGS: %08lx    %s  (%s)\n",regs->eflags, print_tainted(),UTS_RELEASE);
+       printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
+              regs->eflags, print_tainted(), system_utsname.release,
+              (int)strcspn(system_utsname.version, " "),
+              system_utsname.version);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
@@ -235,16 +307,10 @@ void show_regs(struct pt_regs * regs)
        printk(" DS: %04x ES: %04x\n",
                0xffff & regs->xds,0xffff & regs->xes);
 
-       __asm__("movl %%cr0, %0": "=r" (cr0));
-       __asm__("movl %%cr2, %0": "=r" (cr2));
-       __asm__("movl %%cr3, %0": "=r" (cr3));
-       /* This could fault if %cr4 does not exist */
-       __asm__("1: movl %%cr4, %0              \n"
-               "2:                             \n"
-               ".section __ex_table,\"a\"      \n"
-               ".long 1b,2b                    \n"
-               ".previous                      \n"
-               : "=r" (cr4): "0" (0));
+       cr0 = read_cr0();
+       cr2 = read_cr2();
+       cr3 = read_cr3();
+       cr4 = read_cr4_safe();
        printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
        show_trace(NULL, &regs->esp);
 }
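
The deleted inline assembly is folded into header accessors; read_cr4_safe() keeps the exception-table trick because %cr4 does not exist on a 386. A sketch of what that accessor amounts to, reconstructed from the code removed above rather than quoted from the 2.6.17 headers:

static inline unsigned long read_cr4_safe_sketch(void)
{
	unsigned long val;

	/* reading %cr4 faults on CPUs without it; the fixup just keeps 0 */
	asm volatile("1: movl %%cr4, %0		\n"
		     "2:			\n"
		     ".section __ex_table,\"a\"	\n"
		     ".long 1b,2b		\n"
		     ".previous			\n"
		     : "=r" (val) : "0" (0));
	return val;
}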
@@ -287,6 +353,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
+EXPORT_SYMBOL(kernel_thread);
 
 /*
  * Free current thread data structures etc..
@@ -294,13 +361,22 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 void exit_thread(void)
 {
        struct task_struct *tsk = current;
+       struct thread_struct *t = &tsk->thread;
 
        /* The process may have allocated an io port bitmap... nuke it. */
-       if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
+       if (unlikely(NULL != t->io_bitmap_ptr)) {
                int cpu = get_cpu();
-               struct tss_struct *tss = init_tss + cpu;
-               kfree(tsk->thread.io_bitmap_ptr);
-               tsk->thread.io_bitmap_ptr = NULL;
+               struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+               kfree(t->io_bitmap_ptr);
+               t->io_bitmap_ptr = NULL;
+               /*
+                * Careful, clear this in the TSS too:
+                */
+               memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
+               t->io_bitmap_max = 0;
+               tss->io_bitmap_owner = NULL;
+               tss->io_bitmap_max = 0;
                tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
@@ -316,23 +392,13 @@ void flush_thread(void)
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
-       tsk->used_math = 0;
+       clear_used_math();
 }
 
 void release_thread(struct task_struct *dead_task)
 {
-       if (dead_task->mm) {
-               // temporary debugging check
-               if (dead_task->mm->context.size) {
-                       printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-                                       dead_task->comm,
-                                       dead_task->mm->context.ldt,
-                                       dead_task->mm->context.size);
-                       BUG();
-               }
-       }
-
-       release_x86_irqs(dead_task);
+       BUG_ON(dead_task->mm);
+       release_vm86_irqs(dead_task);
 }
 
 /*
@@ -352,11 +418,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        struct task_struct *tsk;
        int err;
 
-       childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+       childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->eax = 0;
        childregs->esp = esp;
-       p->set_child_tid = p->clear_child_tid = NULL;
 
        p->thread.esp = (unsigned long) childregs;
        p->thread.esp0 = (unsigned long) (childregs+1);
@@ -369,8 +434,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        tsk = current;
        if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-               if (!p->thread.io_bitmap_ptr)
+               if (!p->thread.io_bitmap_ptr) {
+                       p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
+               }
                memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
                        IO_BITMAP_BYTES);
        }
@@ -401,8 +468,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 
        err = 0;
  out:
-       if (err && p->thread.io_bitmap_ptr)
+       if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
+               p->thread.io_bitmap_max = 0;
+       }
        return err;
 }
 
@@ -447,16 +516,14 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 
        dump->u_fpvalid = dump_fpu (regs, &dump->i387);
 }
+EXPORT_SYMBOL(dump_thread);
 
 /* 
  * Capture the user space registers if the task is not running (in user space)
  */
 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
-       struct pt_regs ptregs;
-       
-       ptregs = *(struct pt_regs *)
-               ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
+       struct pt_regs ptregs = *task_pt_regs(tsk);
        ptregs.xcs &= 0xffff;
        ptregs.xds &= 0xffff;
        ptregs.xes &= 0xffff;
@@ -467,13 +534,64 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
        return 1;
 }
 
+static inline void
+handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
+{
+       if (!next->io_bitmap_ptr) {
+               /*
+                * Disable the bitmap via an invalid offset. We still cache
+                * the previous bitmap owner and the IO bitmap contents:
+                */
+               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+               return;
+       }
+       if (likely(next == tss->io_bitmap_owner)) {
+               /*
+                * Previous owner of the bitmap (hence the bitmap content)
+                * matches the next task, we don't have to do anything but
+                * to set a valid offset in the TSS:
+                */
+               tss->io_bitmap_base = IO_BITMAP_OFFSET;
+               return;
+       }
+       /*
+        * Lazy TSS I/O bitmap copy. We set an invalid offset here and
+        * let the task take a GPF when it performs an I/O instruction.
+        * The GPF handler verifies that the faulting task has a valid
+        * I/O bitmap and, if so, does the real copy and restarts the
+        * instruction.  This saves us redundant copies when the
+        * currently switched task does not perform any I/O during its
+        * timeslice.
+        */
+       tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+}
+
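The lazy path above only defers work; the copy it skips happens in the #GP handler once the task actually touches a port. A rough sketch of that other half, loosely modelled on the do_general_protection() logic in arch/i386/kernel/traps.c of this era (the field names match the ones used above, the rest is from memory):

	struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
	struct thread_struct *thread = &current->thread;

	if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
	    thread->io_bitmap_ptr) {
		/* install the bitmap handle_io_bitmap() did not copy */
		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
		       thread->io_bitmap_max);
		/* pad the remainder with "access denied" bits */
		memset((char *)tss->io_bitmap + thread->io_bitmap_max, 0xff,
		       tss->io_bitmap_max - thread->io_bitmap_max);
		tss->io_bitmap_max = thread->io_bitmap_max;
		tss->io_bitmap_base = IO_BITMAP_OFFSET;
		tss->io_bitmap_owner = thread;
		put_cpu();
		return;		/* the faulting I/O instruction is retried */
	}
	put_cpu();
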
 /*
- * This special macro can be used to load a debugging register
+ * This function selects if the context switch from prev to next
+ * has to tweak the TSC disable bit in the cr4.
  */
-#define loaddebug(thread,register) \
-               __asm__("movl %0,%%db" #register  \
-                       : /* no output */ \
-                       :"r" (thread->debugreg[register]))
+static inline void disable_tsc(struct task_struct *prev_p,
+                              struct task_struct *next_p)
+{
+       struct thread_info *prev, *next;
+
+       /*
+        * gcc should eliminate the ->thread_info dereference if
+        * has_secure_computing returns 0 at compile time (SECCOMP=n).
+        */
+       prev = task_thread_info(prev_p);
+       next = task_thread_info(next_p);
+
+       if (has_secure_computing(prev) || has_secure_computing(next)) {
+               /* slow path here */
+               if (has_secure_computing(prev) &&
+                   !has_secure_computing(next)) {
+                       write_cr4(read_cr4() & ~X86_CR4_TSD);
+               } else if (!has_secure_computing(prev) &&
+                          has_secure_computing(next))
+                       write_cr4(read_cr4() | X86_CR4_TSD);
+       }
+}
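
The point of toggling X86_CR4_TSD is that, with the bit set, RDTSC becomes privileged: a seccomp task that tries to read the time-stamp counter takes a #GP (delivered as SIGSEGV) instead of getting a high-resolution timing source. A small user-space illustration of the instruction being fenced off (plain C, not part of the kernel patch):

#include <stdio.h>

/* read the TSC; under CR4.TSD this faults at CPL 3 instead of returning */
static unsigned long long rdtsc(void)
{
	unsigned int lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
	printf("tsc = %llu\n", rdtsc());
	return 0;
}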
 
 /*
  *     switch_to(x,y) should switch tasks from x to y.
@@ -507,72 +625,72 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        struct thread_struct *prev = &prev_p->thread,
                                 *next = &next_p->thread;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = init_tss + cpu;
+       struct tss_struct *tss = &per_cpu(init_tss, cpu);
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
        __unlazy_fpu(prev_p);
+       if (next_p->mm)
+               load_user_cs_desc(cpu, next_p->mm);
 
        /*
-        * Reload esp0, LDT and the page table pointer:
+        * Reload esp0.
         */
        load_esp0(tss, next);
 
        /*
-        * Load the per-thread Thread-Local Storage descriptor.
+        * Save away %fs and %gs. No need to save %es and %ds, as
+        * those are always kernel segments while inside the kernel.
+        * Doing this before setting the new TLS descriptors avoids
+        * the situation where we temporarily have non-reloadable
+        * segments in %fs and %gs.  This could be an issue if the
+        * NMI handler ever used %fs or %gs (it does not today), or
+        * if the kernel is running inside of a hypervisor layer.
         */
-       load_TLS(next, cpu);
+       savesegment(fs, prev->fs);
+       savesegment(gs, prev->gs);
 
        /*
-        * Save away %fs and %gs. No need to save %es and %ds, as
-        * those are always kernel segments while inside the kernel.
+        * Load the per-thread Thread-Local Storage descriptor.
         */
-       asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-       asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+       load_TLS(next, cpu);
 
        /*
         * Restore %fs and %gs if needed.
+        *
+        * Glibc normally makes %fs be zero, and %gs is one of
+        * the TLS segments.
         */
-       if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) {
+       if (unlikely(prev->fs | next->fs))
                loadsegment(fs, next->fs);
+
+       if (prev->gs | next->gs)
                loadsegment(gs, next->gs);
-       }
+
+       /*
+        * Restore IOPL if needed.
+        */
+       if (unlikely(prev->iopl != next->iopl))
+               set_iopl_mask(next->iopl);
 
        /*
         * Now maybe reload the debug registers
         */
        if (unlikely(next->debugreg[7])) {
-               loaddebug(next, 0);
-               loaddebug(next, 1);
-               loaddebug(next, 2);
-               loaddebug(next, 3);
+               set_debugreg(next->debugreg[0], 0);
+               set_debugreg(next->debugreg[1], 1);
+               set_debugreg(next->debugreg[2], 2);
+               set_debugreg(next->debugreg[3], 3);
                /* no 4 and 5 */
-               loaddebug(next, 6);
-               loaddebug(next, 7);
+               set_debugreg(next->debugreg[6], 6);
+               set_debugreg(next->debugreg[7], 7);
        }
 
-       if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-               if (next->io_bitmap_ptr) {
-                       /*
-                        * 4 cachelines copy ... not good, but not that
-                        * bad either. Anyone got something better?
-                        * This only affects processes which use ioperm().
-                        * [Putting the TSSs into 4k-tlb mapped regions
-                        * and playing VM tricks to switch the IO bitmap
-                        * is not really acceptable.]
-                        */
-                       memcpy(tss->io_bitmap, next->io_bitmap_ptr,
-                               IO_BITMAP_BYTES);
-                       tss->io_bitmap_base = IO_BITMAP_OFFSET;
-               } else
-                       /*
-                        * a bitmap offset pointing outside of the TSS limit
-                        * causes a nicely controllable SIGSEGV if a process
-                        * tries to use a port IO instruction. The first
-                        * sys_ioperm() call sets up the bitmap properly.
-                        */
-                       tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
-       }
+       if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
+               handle_io_bitmap(next, tss);
+
+       disable_tsc(prev_p, next_p);
+
        return prev_p;
 }
 
@@ -593,7 +711,7 @@ asmlinkage int sys_clone(struct pt_regs regs)
        child_tidptr = (int __user *)regs.edi;
        if (!newsp)
                newsp = regs.esp;
-       return do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0, parent_tidptr, child_tidptr);
+       return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
 }
 
 /*
@@ -628,7 +746,9 @@ asmlinkage int sys_execve(struct pt_regs regs)
                        (char __user * __user *) regs.edx,
                        &regs);
        if (error == 0) {
+               task_lock(current);
                current->ptrace &= ~PT_DTRACE;
+               task_unlock(current);
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
@@ -647,7 +767,7 @@ unsigned long get_wchan(struct task_struct *p)
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
-       stack_page = (unsigned long)p->thread_info;
+       stack_page = (unsigned long)task_stack_page(p);
        esp = p->thread.esp;
        if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
                return 0;
@@ -759,6 +879,8 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;
 
+       memset(&info, 0, sizeof(info));
+
        desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
 
        info.entry_number = idx;
@@ -776,3 +898,66 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
        return 0;
 }
 
+unsigned long arch_align_stack(unsigned long sp)
+{
+       if (randomize_va_space)
+               sp -= get_random_int() % 8192;
+       return sp & ~0xf;
+}
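
arch_align_stack() adds up to 8KB of downward jitter to the initial user stack pointer when randomize_va_space is enabled, then rounds down to a 16-byte boundary. A worked example with made-up numbers:

	/* assume sp == 0xbffff800 and get_random_int() % 8192 == 0x1234 */
	sp = 0xbffff800 - 0x1234;	/* 0xbfffe5cc */
	sp &= ~0xf;			/* 0xbfffe5c0, 16-byte aligned */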
+
+void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
+{
+       if (limit > mm->context.exec_limit) {
+               mm->context.exec_limit = limit;
+               set_user_cs(&mm->context.user_cs, limit);
+               if (mm == current->mm) {
+                       preempt_disable();
+                       load_user_cs_desc(smp_processor_id(), mm);
+                       preempt_enable();
+               }
+       }
+}
+
+void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
+{
+       struct vm_area_struct *vma;
+       unsigned long limit = PAGE_SIZE;
+
+       if (old_end == mm->context.exec_limit) {
+               for (vma = mm->mmap; vma; vma = vma->vm_next)
+                       if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
+                               limit = vma->vm_end;
+
+               mm->context.exec_limit = limit;
+               set_user_cs(&mm->context.user_cs, limit);
+               if (mm == current->mm) {
+                       preempt_disable();
+                       load_user_cs_desc(smp_processor_id(), mm);
+                       preempt_enable();
+               }
+       }
+}
+
+void arch_flush_exec_range(struct mm_struct *mm)
+{
+       mm->context.exec_limit = 0;
+       set_user_cs(&mm->context.user_cs, 0);
+}
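
The three arch_*_exec_range() helpers, together with the load_user_cs_desc() call added to __switch_to(), are the exec-shield plumbing that keeps the user %cs limit tracking the highest executable mapping. set_user_cs() and load_user_cs_desc() live in the mm/descriptor headers of the patched tree; the following is a heavily hedged sketch from memory of the exec-shield patch, so treat the names and the descriptor encoding as illustrative only:

/* shrink/grow the user code segment descriptor to cover [0, limit) */
static inline void set_user_cs_sketch(struct desc_struct *desc,
				      unsigned long limit)
{
	limit = (limit - 1) / PAGE_SIZE;	  /* granularity bit: 4K units */
	desc->a = limit & 0xffff;
	desc->b = (limit & 0xf0000) | 0x00c0fb00; /* 32-bit, DPL 3, code, G=1 */
}

/* install mm->context.user_cs into this CPU's GDT slot for user %cs */
#define load_user_cs_desc_sketch(cpu, mm) \
	(get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = \
		(mm)->context.user_cs)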
+
+/*
+ * Generate a random brk address in a 32MB window starting at 128MB
+ * (or at the current brk, if that is higher), if the layout allows it.
+ */
+void randomize_brk(unsigned long old_brk)
+{
+       unsigned long new_brk, range_start, range_end;
+
+       range_start = 0x08000000;
+       if (current->mm->brk >= range_start)
+               range_start = current->mm->brk;
+       range_end = range_start + 0x02000000;
+       new_brk = randomize_range(range_start, range_end, 0);
+       if (new_brk)
+               current->mm->brk = new_brk;
+}
+