/*
 * linux/arch/i386/kernel/process.c
 *
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/cpu_hotplug.h>

#include <linux/err.h>

#include <asm/tlbflush.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.esp)[3];
}
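
/*
 * Note: the [3] above assumes the stack layout left behind by
 * switch_to() in include/asm-i386/system.h (the saved %ebp plus the
 * frame that schedule() pushed before switching away); if that layout
 * changes, this offset has to change with it.
 */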

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
void xen_idle(void)
{
	local_irq_disable();

	if (need_resched())
		local_irq_enable();
	else {
		current_thread_info()->status &= ~TS_POLLING;
		smp_mb__after_clear_bit();
		safe_halt();
		current_thread_info()->status |= TS_POLLING;
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
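
/*
 * TS_POLLING protocol: while the flag is set, other CPUs know this CPU
 * is polling need_resched() and can wake it without an IPI.  The flag
 * is cleared (with a barrier) before blocking in safe_halt(), so a
 * remote resched_task() falls back to sending an interrupt.
 */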

#ifdef CONFIG_HOTPLUG_CPU
extern cpumask_t cpu_initialized;
static inline void play_dead(void)
{
	idle_task_exit();
	local_irq_disable();
	cpu_clear(smp_processor_id(), cpu_initialized);
	preempt_enable_no_resched();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
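
/*
 * VCPUOP_down asks the hypervisor to take this virtual CPU offline;
 * execution is expected to resume past that point only after a later
 * CPU-hotplug request brings the VCPU back up (VCPUOP_up), at which
 * point the idle loop simply continues.
 */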

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			xen_idle();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
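
/*
 * The handshake above: every online CPU gets its cpu_idle_state flag
 * set, the idle loop clears the flag on its next pass, and we poll
 * until all flags are clear.  Once that happens, every CPU has
 * demonstrably been through the idle loop at least once (and so has
 * picked up any newly installed pm_idle pointer).
 */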

/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
/* Always use xen_idle() instead. */
void __devinit select_idle_routine(const struct cpuinfo_x86 *c) {}

void show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
	printk("EIP: %04x:[<%08lx>] CPU: %d\n", 0xffff & regs->xcs, regs->eip, smp_processor_id());
	print_symbol("EIP is at %s\n", regs->eip);

	if (user_mode_vm(regs))
		printk(" ESP: %04x:%08lx", 0xffff & regs->xss, regs->esp);
	printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
	       regs->eflags, print_tainted(), system_utsname.release,
	       (int)strcspn(system_utsname.version, " "),
	       system_utsname.version);
	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
	       regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
	       regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x\n",
	       0xffff & regs->xds, 0xffff & regs->xes);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
	show_trace(NULL, regs, &regs->esp);
}

/*
 * This gets run with %ebx containing the
 * function to call, and %edx containing
 * the "args".
 */
extern void kernel_thread_helper(void);
__asm__(".section .text\n"
	".align 4\n"
	"kernel_thread_helper:\n\t"
	"movl %edx,%eax\n\t"
	"pushl %eax\n\t"
	"call *%ebx\n\t"
	"pushl %eax\n\t"
	"call do_exit\n"
	".previous");

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.ebx = (unsigned long) fn;
	regs.edx = (unsigned long) arg;

	regs.xds = __USER_DS;
	regs.xes = __USER_DS;
	regs.orig_eax = -1;
	regs.eip = (unsigned long) kernel_thread_helper;
	regs.xcs = GET_KERNEL_CS();
	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
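
/*
 * Hypothetical usage sketch (not from this file): run my_worker(data)
 * in a kernel thread that shares the kernel's address space:
 *
 *	static int my_worker(void *data)
 *	{
 *		do_useful_work(data);	// placeholder for real work
 *		return 0;		// becomes the do_exit() code
 *	}
 *
 *	pid_t pid = kernel_thread(my_worker, data, CLONE_FS | CLONE_FILES);
 */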

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;

		struct physdev_set_iobitmap set_iobitmap = { 0 };
		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
	}
}
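
/*
 * Ordering matters above: Xen is told to drop its reference to the I/O
 * bitmap (PHYSDEVOP_set_iobitmap with a zeroed argument) before the
 * kernel memory backing it is kfree()d, so the hypervisor never looks
 * at a stale pointer.
 */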

void flush_thread(void)
{
	struct task_struct *tsk = current;

	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state.
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->eax = 0;
	childregs->esp = esp;

	p->thread.esp = (unsigned long) childregs;
	p->thread.esp0 = (unsigned long) (childregs+1);

	p->thread.eip = (unsigned long) ret_from_fork;

	savesegment(fs,p->thread.fs);
	savesegment(gs,p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
		struct desc_struct *desc;
		struct user_desc info;
		int idx;

		err = -EFAULT;
		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
			goto out;
		err = -EINVAL;
		if (LDT_empty(&info))
			goto out;

		idx = info.entry_number;
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			goto out;

		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}

	p->thread.iopl = current->thread.iopl;

	err = 0;
 out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
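
/*
 * Note on CLONE_SETTLS above: the struct user_desc pointer is read
 * from childregs->esi because in the i386 clone() ABI the TLS argument
 * arrives in %esi, after the flags in %ebx, the new stack in %ecx and
 * the parent tid pointer in %edx.
 */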

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->thread.debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.ebx = regs->ebx;
	dump->regs.ecx = regs->ecx;
	dump->regs.edx = regs->edx;
	dump->regs.esi = regs->esi;
	dump->regs.edi = regs->edi;
	dump->regs.ebp = regs->ebp;
	dump->regs.eax = regs->eax;
	dump->regs.ds = regs->xds;
	dump->regs.es = regs->xes;
	savesegment(fs,dump->regs.fs);
	savesegment(gs,dump->regs.gs);
	dump->regs.orig_eax = regs->orig_eax;
	dump->regs.eip = regs->eip;
	dump->regs.cs = regs->xcs;
	dump->regs.eflags = regs->eflags;
	dump->regs.esp = regs->esp;
	dump->regs.ss = regs->xss;

	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs = *task_pt_regs(tsk);
	ptregs.xcs &= 0xffff;
	ptregs.xds &= 0xffff;
	ptregs.xes &= 0xffff;
	ptregs.xss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

/*
 * This function decides whether the context switch from prev to next
 * has to tweak the TSC-disable bit in %cr4.
 */
static inline void disable_tsc(struct task_struct *prev_p,
			       struct task_struct *next_p)
{
	struct thread_info *prev, *next;

	/*
	 * gcc should eliminate the ->thread_info dereference if
	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
	 */
	prev = task_thread_info(prev_p);
	next = task_thread_info(next_p);

	if (has_secure_computing(prev) || has_secure_computing(next)) {
		/* slow path here */
		if (has_secure_computing(prev) &&
		    !has_secure_computing(next)) {
			write_cr4(read_cr4() & ~X86_CR4_TSD);
		} else if (!has_secure_computing(prev) &&
			   has_secure_computing(next))
			write_cr4(read_cr4() | X86_CR4_TSD);
	}
}
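
/*
 * With CR4.TSD set, a user-mode rdtsc raises #GP, closing the cycle
 * counter as a timing channel for seccomp tasks; clearing it restores
 * direct rdtsc for unconfined tasks.  In effect:
 *
 *	seccomp task scheduled in  -> CR4 |=  X86_CR4_TSD
 *	seccomp task scheduled out -> CR4 &= ~X86_CR4_TSD
 */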

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
#endif
	struct physdev_set_iopl iopl_op;
	struct physdev_set_iobitmap iobmp_op;
	multicall_entry_t _mcl[8], *mcl = _mcl;

	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */

	/*
	 * This is basically '__unlazy_fpu', except that we queue a
	 * multicall to indicate FPU task switch, rather than
	 * synchronously trapping to Xen.
	 */
	if (prev_p->thread_info->status & TS_USEDFPU) {
		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
		mcl->op      = __HYPERVISOR_fpu_taskswitch;
		mcl->args[0] = 1;
		mcl++;
	}
#if 0 /* lazy fpu sanity check */
	else BUG_ON(!(read_cr0() & 8));
#endif

	if (next_p->mm)
		load_user_cs_desc(cpu, next_p->mm);

	/*
	 * Reload esp0.
	 * This is load_esp0(tss, next) with a multicall.
	 */
	mcl->op      = __HYPERVISOR_stack_switch;
	mcl->args[0] = __KERNEL_DS;
	mcl->args[1] = next->esp0;
	mcl++;

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 * This is load_TLS(next, cpu) with multicalls.
	 */
#define C(i) do {							\
	if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||	\
		     next->tls_array[i].b != prev->tls_array[i].b)) {	\
		mcl->op = __HYPERVISOR_update_descriptor;		\
		*(u64 *)&mcl->args[0] = virt_to_machine(		\
			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
		*(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i];	\
		mcl++;							\
	}								\
} while (0)
	C(0); C(1); C(2);
#undef C
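
	/*
	 * TLS updates go through the update_descriptor hypercall because
	 * a Xen guest's GDT pages are mapped read-only by the hypervisor;
	 * the kernel cannot simply write the descriptor words in place
	 * the way native i386 code would.
	 */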

	if (unlikely(prev->iopl != next->iopl)) {
		iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
		mcl->op      = __HYPERVISOR_physdev_op;
		mcl->args[0] = PHYSDEVOP_set_iopl;
		mcl->args[1] = (unsigned long)&iopl_op;
		mcl++;
	}

	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		iobmp_op.bitmap   = (char *)next->io_bitmap_ptr;
		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
		mcl->op      = __HYPERVISOR_physdev_op;
		mcl->args[0] = PHYSDEVOP_set_iobitmap;
		mcl->args[1] = (unsigned long)&iobmp_op;
		mcl++;
	}

	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
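
	/*
	 * Everything queued in _mcl (FPU switch, stack switch, TLS
	 * updates, iopl and I/O bitmap changes) is submitted to Xen in
	 * this single multicall, so a context switch pays for at most
	 * one hypervisor transition instead of one per operation.
	 */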

	/*
	 * Restore %fs and %gs if needed.
	 *
	 * Glibc normally makes %fs be zero, and %gs is one of
	 * the TLS segments.
	 */
	if (unlikely(next->fs))
		loadsegment(fs, next->fs);

	if (next->gs)
		loadsegment(gs, next->gs);

	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg[7])) {
		set_debugreg(next->debugreg[0], 0);
		set_debugreg(next->debugreg[1], 1);
		set_debugreg(next->debugreg[2], 2);
		set_debugreg(next->debugreg[3], 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg[6], 6);
		set_debugreg(next->debugreg[7], 7);
	}

	disable_tsc(prev_p, next_p);

	return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.ebx;
	newsp = regs.ecx;
	parent_tidptr = (int __user *)regs.edx;
	child_tidptr = (int __user *)regs.edi;
	if (!newsp)
		newsp = regs.esp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) regs.ebx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.ecx,
			(char __user * __user *) regs.edx,
			&regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}
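
/*
 * TIF_IRET forces the return to userspace through the full iret path
 * rather than sysexit: the freshly exec'd task needs all of its user
 * registers (notably the new %eip and %esp) reloaded from pt_regs,
 * which the fast sysenter/sysexit return path does not guarantee.
 */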

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ebp, esp, eip;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	esp = p->thread.esp;
	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
	ebp = *(unsigned long *) esp;
	do {
		if (ebp < stack_page || ebp > top_ebp+stack_page)
			return 0;
		eip = *(unsigned long *) (ebp+4);
		if (!in_sched_functions(eip))
			return eip;
		ebp = *(unsigned long *) ebp;
	} while (count++ < 16);
	return 0;
}
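
/*
 * The walk relies on the frame-pointer chain gcc emits: at each frame,
 * *ebp is the caller's saved %ebp and *(ebp + 4) is the return
 * address.  The first return address that lies outside the scheduler
 * functions is the blocked task's wait channel; the 16-frame cap
 * bounds the walk if the chain is corrupt.
 */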

/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(t->tls_array + idx))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
	struct thread_struct *t = &current->thread;
	struct user_desc info;
	struct desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	load_TLS(t, cpu);

	put_cpu();
	return 0;
}

/*
 * Get the current Thread-Local Storage area:
 */

#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b        & 0xff000000)   )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	 ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
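
/*
 * These pick apart the two 32-bit words of an x86 segment descriptor:
 *
 *	word a: limit[15:0] | base[15:0] << 16
 *	word b: base[23:16] | access/type bits | limit[19:16] << 16 |
 *	        flag bits (AVL, D/B, G) | base[31:24] << 24
 *
 * GET_BASE(), for instance, reassembles base[15:0], base[23:16] and
 * base[31:24] from the two words.
 */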

asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
	struct user_desc info;
	struct desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	memset(&info, 0, sizeof(info));

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
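
/*
 * The randomization shifts the initial user stack down by up to 8KB,
 * and the final mask keeps %esp 16-byte aligned as the i386 ABI
 * expects.
 */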

void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
{
	if (limit > mm->context.exec_limit) {
		mm->context.exec_limit = limit;
		set_user_cs(&mm->context.user_cs, limit);
		if (mm == current->mm) {
			preempt_disable();
			load_user_cs_desc(smp_processor_id(), mm);
			preempt_enable();
		}
	}
}

void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
{
	struct vm_area_struct *vma;
	unsigned long limit = PAGE_SIZE;

	if (old_end == mm->context.exec_limit) {
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
				limit = vma->vm_end;

		mm->context.exec_limit = limit;
		set_user_cs(&mm->context.user_cs, limit);
		if (mm == current->mm) {
			preempt_disable();
			load_user_cs_desc(smp_processor_id(), mm);
			preempt_enable();
		}
	}
}

void arch_flush_exec_range(struct mm_struct *mm)
{
	mm->context.exec_limit = 0;
	set_user_cs(&mm->context.user_cs, 0);
}
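
/*
 * These three helpers implement exec-shield's segment-limit scheme:
 * the per-mm user code segment (user_cs) is kept just large enough to
 * cover the highest VM_EXEC mapping, so that on CPUs without NX
 * support an instruction fetch beyond the last executable VMA faults
 * on the CS limit.
 */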

/*
 * Generate a random brk address between 128MB and 160MB (if the layout
 * allows it).
 */
void randomize_brk(unsigned long old_brk)
{
	unsigned long new_brk, range_start, range_end;

	range_start = 0x08000000;
	if (current->mm->brk >= range_start)
		range_start = current->mm->brk;
	range_end = range_start + 0x02000000;
	new_brk = randomize_range(range_start, range_end, 0);
	if (new_brk)
		current->mm->brk = new_brk;
}