/*
 *  linux/arch/i386/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/cpu_hotplug.h>

#include <linux/err.h>

#include <asm/tlbflush.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.esp)[3];
}
/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
void xen_idle(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();

	local_irq_disable();
	if (!need_resched())
		safe_halt();	/* enables interrupts racelessly */
	else
		local_irq_enable();
	current_thread_info()->status |= TS_POLLING;
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
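/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the wakeup path looks at TS_POLLING to decide whether it may skip the
 * resched IPI for an idle CPU, so the cleared flag must be globally
 * visible before need_resched() is sampled:
 *
 *	idle CPU				waking CPU
 *	--------				----------
 *	status &= ~TS_POLLING;
 *	smp_mb();				set_tsk_need_resched(idle_task);
 *	if (!need_resched())			if (!(status & TS_POLLING))
 *		safe_halt();				send resched IPI;
 *
 * Without the barrier the idle CPU could halt having missed both the
 * flag and the IPI.
 */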
#ifdef CONFIG_HOTPLUG_CPU
extern cpumask_t cpu_initialized;
static inline void play_dead(void)
{
	idle_task_exit();
	local_irq_disable();
	cpu_clear(smp_processor_id(), cpu_initialized);
	preempt_enable_no_resched();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			xen_idle();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
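/*
 * Hedged usage sketch (editor's addition): in the upstream i386 tree a
 * caller that swaps the pm_idle handler uses cpu_idle_wait() to make sure
 * no CPU is still executing the old handler before it goes away:
 *
 *	pm_idle = my_new_idle;		(my_new_idle is illustrative)
 *	wmb();
 *	cpu_idle_wait();
 *
 * Under Xen the idle loop always calls xen_idle(), so this matters less
 * here, but the synchronization pattern is the same.
 */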
/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
/* Always use xen_idle() instead. */
void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) {}

void __devinit select_idle_routine(const struct cpuinfo_x86 *c) {}
void show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

	printk("\n");
	printk("Pid: %d[#%u], comm: %20s\n",
		current->pid, current->xid, current->comm);
	printk("EIP: %04x:[<%08lx>] CPU: %d\n",
		0xffff & regs->xcs, regs->eip, smp_processor_id());
	print_symbol("EIP is at %s\n", regs->eip);

	if (user_mode_vm(regs))
		printk(" ESP: %04x:%08lx", 0xffff & regs->xss, regs->esp);
	printk(" EFLAGS: %08lx    %s (%s %.*s)\n",
	       regs->eflags, print_tainted(), init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
		regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x GS: %04x\n",
	       0xffff & regs->xds, 0xffff & regs->xes, 0xffff & regs->xgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
	show_trace(NULL, regs, &regs->esp);
}
/*
 * This gets run with %ebx containing the
 * function to call, and %edx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.ebx = (unsigned long) fn;
	regs.edx = (unsigned long) arg;

	regs.xds = __USER_DS;
	regs.xes = __USER_DS;
	regs.xgs = __KERNEL_PDA;
	regs.orig_eax = -1;
	regs.eip = (unsigned long) kernel_thread_helper;
	regs.xcs = __KERNEL_CS | get_kernel_rpl();
	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD,
		       0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
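/*
 * Hedged usage sketch (editor's addition, not part of the original file):
 * a typical in-kernel caller; my_worker and the flag choice are
 * illustrative only.
 */
#if 0
static int my_worker(void *arg)
{
	daemonize("my_worker");		/* detach from the spawning context */
	/* ... do the actual work ... */
	return 0;
}

static void example_spawn(void)
{
	int pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk(KERN_ERR "my_worker: kernel_thread failed: %d\n", pid);
}
#endif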
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;

		struct physdev_set_iobitmap set_iobitmap = { 0 };
		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
	}
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->eax = 0;
	childregs->esp = esp;

	p->thread.esp = (unsigned long) childregs;
	p->thread.esp0 = (unsigned long) (childregs+1);

	p->thread.eip = (unsigned long) ret_from_fork;

	savesegment(fs, p->thread.fs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
		struct desc_struct *desc;
		struct user_desc info;
		int idx;

		err = -EFAULT;
		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
			goto out;
		err = -EINVAL;
		if (LDT_empty(&info))
			goto out;

		idx = info.entry_number;
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			goto out;

		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}

	p->thread.iopl = current->thread.iopl;

	err = 0;
 out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

	/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->thread.debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.ebx = regs->ebx;
	dump->regs.ecx = regs->ecx;
	dump->regs.edx = regs->edx;
	dump->regs.esi = regs->esi;
	dump->regs.edi = regs->edi;
	dump->regs.ebp = regs->ebp;
	dump->regs.eax = regs->eax;
	dump->regs.ds = regs->xds;
	dump->regs.es = regs->xes;
	savesegment(fs, dump->regs.fs);
	dump->regs.gs = regs->xgs;
	dump->regs.orig_eax = regs->orig_eax;
	dump->regs.eip = regs->eip;
	dump->regs.cs = regs->xcs;
	dump->regs.eflags = regs->eflags;
	dump->regs.esp = regs->esp;
	dump->regs.ss = regs->xss;

	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs = *task_pt_regs(tsk);
	ptregs.xcs &= 0xffff;
	ptregs.xds &= 0xffff;
	ptregs.xes &= 0xffff;
	ptregs.xss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
static noinline void __switch_to_xtra(struct task_struct *next_p)
{
	struct thread_struct *next;

	next = &next_p->thread;

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg[0], 0);
		set_debugreg(next->debugreg[1], 1);
		set_debugreg(next->debugreg[2], 2);
		set_debugreg(next->debugreg[3], 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg[6], 6);
		set_debugreg(next->debugreg[7], 7);
	}
#ifndef CONFIG_XEN
	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, we don't have to do anything but
		 * to set a valid offset in the TSS:
		 */
		tss->io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}

	/*
	 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
	 * and we let the task get a GPF in case an I/O instruction
	 * is performed. The handler of the GPF will verify that the
	 * faulting task has a valid I/O bitmap and, if true, does the
	 * real copy and restarts the instruction. This will save us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
#endif /* !CONFIG_XEN */
}
/*
 * This function decides whether the context switch from prev to next
 * has to tweak the TSC disable bit in cr4.
 */
static inline void disable_tsc(struct task_struct *prev_p,
			       struct task_struct *next_p)
{
	struct thread_info *prev, *next;

	/*
	 * gcc should eliminate the ->thread_info dereference if
	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
	 */
	prev = task_thread_info(prev_p);
	next = task_thread_info(next_p);

	if (has_secure_computing(prev) || has_secure_computing(next)) {
		/* slow path here */
		if (has_secure_computing(prev) &&
		    !has_secure_computing(next)) {
			write_cr4(read_cr4() & ~X86_CR4_TSD);
		} else if (!has_secure_computing(prev) &&
			   has_secure_computing(next))
			write_cr4(read_cr4() | X86_CR4_TSD);
	}
}
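/*
 * Worked example (editor's addition): the four prev/next combinations and
 * the resulting CR4.TSD action. With TSD set, rdtsc at CPL > 0 faults,
 * which is how seccomp keeps a sandboxed task from reading the TSC
 * directly.
 *
 *	prev seccomp	next seccomp	action
 *	no		no		nothing (fast path, bit clear)
 *	yes		no		clear CR4.TSD (re-enable rdtsc)
 *	no		yes		set CR4.TSD (rdtsc now faults)
 *	yes		yes		nothing (bit already set)
 */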
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
#endif
	struct physdev_set_iobitmap iobmp_op;
	multicall_entry_t _mcl[8], *mcl = _mcl;

	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */

	/*
	 * This is basically '__unlazy_fpu', except that we queue a
	 * multicall to indicate FPU task switch, rather than
	 * synchronously trapping to Xen.
	 */
	if (prev_p->thread_info->status & TS_USEDFPU) {
		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
		mcl->op      = __HYPERVISOR_fpu_taskswitch;
		mcl->args[0] = 1;
		mcl++;
	}
#if 0 /* lazy fpu sanity check */
	else BUG_ON(!(read_cr0() & 8));
#endif

	if (next_p->mm)
		load_user_cs_desc(cpu, next_p->mm);

	/*
	 * Reload esp0.
	 * This is load_esp0(tss, next) with a multicall.
	 */
	mcl->op      = __HYPERVISOR_stack_switch;
	mcl->args[0] = __KERNEL_DS;
	mcl->args[1] = next->esp0;
	mcl++;

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 * This is load_TLS(next, cpu) with multicalls.
	 */
#define C(i) do {							\
	if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||	\
		     next->tls_array[i].b != prev->tls_array[i].b)) {	\
		mcl->op = __HYPERVISOR_update_descriptor;		\
		*(u64 *)&mcl->args[0] = virt_to_machine(		\
			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
		*(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i];	\
		mcl++;							\
	}								\
} while (0)
	C(0); C(1); C(2);
#undef C

	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		iobmp_op.bitmap   = (char *)next->io_bitmap_ptr;
		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
		mcl->op      = __HYPERVISOR_physdev_op;
		mcl->args[0] = PHYSDEVOP_set_iobitmap;
		mcl->args[1] = (unsigned long)&iobmp_op;
		mcl++;
	}

	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);

	/*
	 * Restore %fs if needed.
	 *
	 * Glibc normally makes %fs be zero.
	 */
	if (unlikely(next->fs))
		loadsegment(fs, next->fs);

	write_pda(pcurrent, next_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
		__switch_to_xtra(next_p);

	disable_tsc(prev_p, next_p);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	return prev_p;
}
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.ebx;
	newsp = regs.ecx;
	parent_tidptr = (int __user *)regs.edx;
	child_tidptr = (int __user *)regs.edi;
	if (!newsp)
		newsp = regs.esp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) regs.ebx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.ecx,
			(char __user * __user *) regs.edx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}
#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ebp, esp, eip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	esp = p->thread.esp;
	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
	ebp = *(unsigned long *) esp;
	do {
		if (ebp < stack_page || ebp > top_ebp+stack_page)
			return 0;
		eip = *(unsigned long *) (ebp+4);
		if (!in_sched_functions(eip))
			return eip;
		ebp = *(unsigned long *) ebp;
	} while (count++ < 16);
	return 0;
}
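/*
 * Illustrative note (editor's addition): the walk above relies on the
 * conventional i386 frame layout produced with frame pointers enabled:
 *
 *	[ebp + 4]	return address of the caller (the eip we test)
 *	[ebp + 0]	caller's saved ebp (the next frame in the chain)
 */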
/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(t->tls_array + idx))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}
/*
 * Set a given TLS descriptor:
 */
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
	struct thread_struct *t = &current->thread;
	struct user_desc info;
	struct desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	load_TLS(t, cpu);

	put_cpu();

	return 0;
}
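/*
 * Hedged usage sketch (editor's addition): from user space the call looks
 * roughly like this; entry_number = -1 asks the kernel to pick a free TLS
 * slot and write the chosen index back into the structure:
 *
 *	struct user_desc ud = {
 *		.entry_number   = -1,
 *		.base_addr      = (unsigned long)tls_block,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *		.useable        = 1,
 *	};
 *	syscall(SYS_set_thread_area, &ud);
 */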
/*
 * Get the current Thread-Local Storage area:
 */

#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b        & 0xff000000)   )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
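/*
 * Worked example (editor's addition): a descriptor packs the 32-bit base
 * as bits 16-31 of word 'a' plus bits 0-7 and 24-31 of word 'b'. For a
 * base of 0x12345678 (limit/flag bits shown as x):
 *
 *	desc->a == 0x5678xxxx  ->  (a >> 16) & 0x0000ffff == 0x00005678
 *	desc->b == 0x12xxxx34  ->  (b << 16) & 0x00ff0000 == 0x00340000
 *	                            b        & 0xff000000 == 0x12000000
 *
 *	GET_BASE(desc)         ==  0x12345678
 */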
asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
	struct user_desc info;
	struct desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	memset(&info, 0, sizeof(info));

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
{
	if (limit > mm->context.exec_limit) {
		mm->context.exec_limit = limit;
		set_user_cs(&mm->context.user_cs, limit);
		if (mm == current->mm) {
			preempt_disable();
			load_user_cs_desc(smp_processor_id(), mm);
			preempt_enable();
		}
	}
}

void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
{
	struct vm_area_struct *vma;
	unsigned long limit = PAGE_SIZE;

	if (old_end == mm->context.exec_limit) {
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
				limit = vma->vm_end;

		mm->context.exec_limit = limit;
		set_user_cs(&mm->context.user_cs, limit);
		if (mm == current->mm) {
			preempt_disable();
			load_user_cs_desc(smp_processor_id(), mm);
			preempt_enable();
		}
	}
}

void arch_flush_exec_range(struct mm_struct *mm)
{
	mm->context.exec_limit = 0;
	set_user_cs(&mm->context.user_cs, 0);
}
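/*
 * Background note (editor's addition): these exec-shield helpers track
 * the end of the highest executable mapping in mm->context.exec_limit and
 * shrink the user %cs segment limit to it (set_user_cs), so instruction
 * fetches above that address fault even on CPUs without NX hardware.
 * Data access through %ds/%ss is unaffected.
 */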
/*
 * Generate random brk address between 128MB and 160MB. (if the layout
 * allows it.)
 */
void randomize_brk(unsigned long old_brk)
{
	unsigned long new_brk, range_start, range_end;

	range_start = 0x08000000;
	if (current->mm->brk >= range_start)
		range_start = current->mm->brk;
	range_end = range_start + 0x02000000;
	new_brk = randomize_range(range_start, range_end, 0);
	if (new_brk)
		current->mm->brk = new_brk;
}
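/*
 * Worked arithmetic (editor's addition): 0x08000000 is 128 MiB and
 * 0x02000000 is 32 MiB, so the new brk is drawn from
 * [max(128 MiB, old brk), max(128 MiB, old brk) + 32 MiB).
 */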