 * linux/arch/i386/kernel/process.c
 * Copyright (C) 1995 Linus Torvalds
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 * This file handles the architecture-dependent parts of process handling..
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm-xen/xen-public/physdev.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/random.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

 * Return saved PC of a blocked thread.
unsigned long thread_saved_pc(struct task_struct *tsk)
        return ((unsigned long *)tsk->thread.esp)[3];
 * Power management idle function, if any..
void (*pm_idle)(void);
static cpumask_t cpu_idle_map;

void disable_hlt(void)
EXPORT_SYMBOL(disable_hlt);

EXPORT_SYMBOL(enable_hlt);
 * Exec-Shield randomisation. We do not support the segment tricks
 * until the hypervisor impact is understood, but this part is easy.
 * Both functions copied from arch/i386/kernel/process. - riel
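/*
 * arch_align_stack() below lowers the initial stack pointer by a random,
 * 16-byte-aligned offset: (get_random_int() % 65536) << 4 yields a value in
 * [0, 1 MiB), e.g. a random value of 0x1234 moves the stack down by 0x12340
 * bytes.
 */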
unsigned long arch_align_stack(unsigned long sp)
        if (randomize_va_space)
                sp -= ((get_random_int() % 65536) << 4);
void randomize_brk(unsigned long old_brk)
        unsigned long new_brk, range_start, range_end;

        range_start = 0x08000000;
        if (current->mm->brk >= range_start)
                range_start = current->mm->brk;
        range_end = range_start + 0x02000000;
        new_brk = randomize_range(range_start, range_end, 0);
        current->mm->brk = new_brk;
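        /*
         * The brk chosen above therefore lands in a 32 MiB (0x02000000 byte)
         * window starting at the larger of 0x08000000 and the current brk.
         */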
void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
        if (limit > mm->context.exec_limit) {
                mm->context.exec_limit = limit;
                set_user_cs(&mm->context.user_cs, limit);
                if (mm == current->mm)
                        load_user_cs_desc(smp_processor_id(), mm);
void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
        struct vm_area_struct *vma;
        unsigned long limit = 0;

        if (old_end == mm->context.exec_limit) {
                for (vma = mm->mmap; vma; vma = vma->vm_next)
                        if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
                mm->context.exec_limit = limit;
                set_user_cs(&mm->context.user_cs, limit);
                if (mm == current->mm)
                        load_user_cs_desc(smp_processor_id(), mm);
void arch_flush_exec_range(struct mm_struct *mm)
        mm->context.exec_limit = 0;
        set_user_cs(&mm->context.user_cs, 0);
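/*
 * As implemented above, exec_limit tracks the highest end address of any
 * VM_EXEC mapping in the mm; set_user_cs() folds that limit into the per-mm
 * user code segment descriptor, and load_user_cs_desc() reloads it on the
 * current CPU whenever the live mm changes its own limit.
 */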
/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
extern int set_timeout_timer(void);

        cpu = smp_processor_id();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, 0);

        if (need_resched()) {
        } else if (set_timeout_timer() == 0) {
                /* NB. Blocking reenable events in a race-free manner. */
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
        int cpu = _smp_processor_id();

        /* endless idle loop with no priority at all */
        while (!need_resched()) {
                if (cpu_isset(cpu, cpu_idle_map))
                        cpu_clear(cpu, cpu_idle_map);

                per_cpu(irq_stat, cpu).idle_timestamp = jiffies;
void cpu_idle_wait(void)
        for_each_online_cpu(cpu)
                cpu_set(cpu, cpu_idle_map);

                cpus_and(map, cpu_idle_map, cpu_online_map);
        } while (!cpus_empty(map));

EXPORT_SYMBOL_GPL(cpu_idle_wait);
/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
/* Always use xen_idle() instead. */
void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
void show_regs(struct pt_regs * regs)
        printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
        printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
        print_symbol("EIP is at %s\n", regs->eip);

        printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
        printk(" EFLAGS: %08lx %s (%s)\n",
               regs->eflags, print_tainted(), system_utsname.release);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
               regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
               regs->esi, regs->edi, regs->ebp);
        printk(" DS: %04x ES: %04x\n",
               0xffff & regs->xds,0xffff & regs->xes);

        show_trace(NULL, &regs->esp);
 * This gets run with %ebx containing the
 * function to call, and %edx containing
extern void kernel_thread_helper(void);
__asm__(".section .text\n"
        "kernel_thread_helper:\n\t"

 * Create a kernel thread
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
        memset(&regs, 0, sizeof(regs));

        regs.ebx = (unsigned long) fn;
        regs.edx = (unsigned long) arg;

        regs.xds = __USER_DS;
        regs.xes = __USER_DS;
        regs.eip = (unsigned long) kernel_thread_helper;
        regs.xcs = __KERNEL_CS;
        regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
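        /*
         * In the EFLAGS value above, 0x2 is the reserved bit that always
         * reads as one; X86_EFLAGS_IF starts the thread with interrupts
         * enabled, while SF and PF are ordinary arithmetic status flags.
         */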
        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 * Free current thread data structures etc..
void exit_thread(void)
        struct task_struct *tsk = current;
        struct thread_struct *t = &tsk->thread;

        /* The process may have allocated an io port bitmap... nuke it. */
        if (unlikely(NULL != t->io_bitmap_ptr)) {
                physdev_op_t op = { 0 };
                op.cmd = PHYSDEVOP_SET_IOBITMAP;
                HYPERVISOR_physdev_op(&op);
                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
void flush_thread(void)
        struct task_struct *tsk = current;

        memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
         * Forget coprocessor state..
void release_thread(struct task_struct *dead_task)
        // temporary debugging check
        if (dead_task->mm->context.size) {
                printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                       dead_task->mm->context.ldt,
                       dead_task->mm->context.size);

        release_vm86_irqs(dead_task);
 * This gets called before we allocate a new thread and copy
 * the current task into it.
void prepare_to_copy(struct task_struct *tsk)
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
        struct pt_regs * childregs;
        struct task_struct *tsk;

        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
        childregs->esp = esp;

        p->thread.esp = (unsigned long) childregs;
        p->thread.esp0 = (unsigned long) (childregs+1);

        p->thread.eip = (unsigned long) ret_from_fork;

        savesegment(fs,p->thread.fs);
        savesegment(gs,p->thread.gs);

        if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
         * Set a new TLS for the child thread?
        if (clone_flags & CLONE_SETTLS) {
                struct desc_struct *desc;
                struct user_desc info;

                if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
                if (LDT_empty(&info))

                idx = info.entry_number;
                if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)

                desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
                desc->a = LDT_entry_a(&info);
                desc->b = LDT_entry_b(&info);

        p->thread.io_pl = current->thread.io_pl;

        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
 * fill in the user structure for a core dump..
void dump_thread(struct pt_regs * regs, struct user * dump)
        /* changed the size calculations - should hopefully work better. lbt */
        dump->magic = CMAGIC;
        dump->start_code = 0;
        dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
        dump->u_dsize -= dump->u_tsize;

        for (i = 0; i < 8; i++)
                dump->u_debugreg[i] = current->thread.debugreg[i];

        if (dump->start_stack < TASK_SIZE)
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

        dump->regs.ebx = regs->ebx;
        dump->regs.ecx = regs->ecx;
        dump->regs.edx = regs->edx;
        dump->regs.esi = regs->esi;
        dump->regs.edi = regs->edi;
        dump->regs.ebp = regs->ebp;
        dump->regs.eax = regs->eax;
        dump->regs.ds = regs->xds;
        dump->regs.es = regs->xes;
        savesegment(fs,dump->regs.fs);
        savesegment(gs,dump->regs.gs);
        dump->regs.orig_eax = regs->orig_eax;
        dump->regs.eip = regs->eip;
        dump->regs.cs = regs->xcs;
        dump->regs.eflags = regs->eflags;
        dump->regs.esp = regs->esp;
        dump->regs.ss = regs->xss;

        dump->u_fpvalid = dump_fpu (regs, &dump->i387);
 * Capture the user space registers if the task is not running (in user space)
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
        struct pt_regs ptregs;

        ptregs = *(struct pt_regs *)
                ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
        ptregs.xcs &= 0xffff;
        ptregs.xds &= 0xffff;
        ptregs.xes &= 0xffff;
        ptregs.xss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        boot_option_idle_override = 1;
 * switch_to(x,y) should switch tasks from x to y.
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        physdev_op_t iopl_op, iobmp_op;
        multicall_entry_t _mcl[8], *mcl = _mcl;
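        /*
         * The switch below batches its hypervisor work (FPU task switch,
         * kernel stack switch, TLS descriptor updates, IOPL and I/O bitmap
         * changes) into the _mcl[] array and issues it as a single
         * HYPERVISOR_multicall, so one trap into Xen covers all of them.
         */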
        /* XEN NOTE: FS/GS saved in switch_mm(), not here. */

         * This is basically '__unlazy_fpu', except that we queue a
         * multicall to indicate FPU task switch, rather than
         * synchronously trapping to Xen.
        if (prev_p->thread_info->status & TS_USEDFPU) {
                __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
                mcl->op = __HYPERVISOR_fpu_taskswitch;
         * Reload esp0, LDT and the page table pointer:
         * This is load_esp0(tss, next) with a multicall.
        tss->esp0 = next->esp0;
        mcl->op = __HYPERVISOR_stack_switch;
        mcl->args[0] = tss->ss0;
        mcl->args[1] = tss->esp0;
         * Load the per-thread Thread-Local Storage descriptor.
         * This is load_TLS(next, cpu) with multicalls.
        if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
                     next->tls_array[i].b != prev->tls_array[i].b)) { \
                mcl->op = __HYPERVISOR_update_descriptor; \
                mcl->args[0] = virt_to_machine(&get_cpu_gdt_table(cpu) \
                                               [GDT_ENTRY_TLS_MIN + i]); \
                mcl->args[1] = ((u32 *)&next->tls_array[i])[0]; \
                mcl->args[2] = ((u32 *)&next->tls_array[i])[1]; \
        if (unlikely(prev->io_pl != next->io_pl)) {
                iopl_op.cmd = PHYSDEVOP_SET_IOPL;
                iopl_op.u.set_iopl.iopl = next->io_pl;
                mcl->op = __HYPERVISOR_physdev_op;
                mcl->args[0] = (unsigned long)&iopl_op;

        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                        PHYSDEVOP_SET_IOBITMAP;
                iobmp_op.u.set_iobitmap.bitmap =
                        (unsigned long)next->io_bitmap_ptr;
                iobmp_op.u.set_iobitmap.nr_ports =
                        next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
                mcl->op = __HYPERVISOR_physdev_op;
                mcl->args[0] = (unsigned long)&iobmp_op;

        (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
         * Restore %fs and %gs if needed.
        if (unlikely(next->fs | next->gs)) {
                loadsegment(fs, next->fs);
                loadsegment(gs, next->gs);

         * Exec-shield CS adjustment.
                load_user_cs_desc(cpu, next_p->mm);

         * Now maybe reload the debug registers
        if (unlikely(next->debugreg[7])) {
asmlinkage int sys_fork(struct pt_regs regs)
        return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
asmlinkage int sys_clone(struct pt_regs regs)
        unsigned long clone_flags;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.ebx;
        parent_tidptr = (int __user *)regs.edx;
        child_tidptr = (int __user *)regs.edi;

        return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
asmlinkage int sys_vfork(struct pt_regs regs)
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 * sys_execve() executes a new program.
asmlinkage int sys_execve(struct pt_regs regs)
        filename = getname((char __user *) regs.ebx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
        error = do_execve(filename,
                        (char __user * __user *) regs.ecx,
                        (char __user * __user *) regs.edx,
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
        unsigned long ebp, esp, eip;
        unsigned long stack_page;

        if (!p || p == current || p->state == TASK_RUNNING)
        stack_page = (unsigned long)p->thread_info;
        if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
        /* include/asm-i386/system.h:switch_to() pushes ebp last. */
        ebp = *(unsigned long *) esp;
                if (ebp < stack_page || ebp > top_ebp+stack_page)
                eip = *(unsigned long *) (ebp+4);
                if (!in_sched_functions(eip))
                ebp = *(unsigned long *) ebp;
        } while (count++ < 16);
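        /*
         * The loop above follows the blocked task's frame-pointer chain:
         * each saved %ebp points at the caller's frame, the return address
         * sits at ebp+4, and the walk stops at the first return address
         * outside the scheduler (that address is the wchan) or after 16
         * frames.
         */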
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
static int get_free_idx(void)
        struct thread_struct *t = &current->thread;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (desc_empty(t->tls_array + idx))
                        return idx + GDT_ENTRY_TLS_MIN;
 * Set a given TLS descriptor:
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
        struct thread_struct *t = &current->thread;
        struct user_desc info;
        struct desc_struct *desc;

        if (copy_from_user(&info, u_info, sizeof(info)))
        idx = info.entry_number;

         * index -1 means the kernel should try to find and
         * allocate an empty descriptor:
                idx = get_free_idx();
                if (put_user(idx, &u_info->entry_number))

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)

        desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

         * We must not get preempted while modifying the TLS.
        if (LDT_empty(&info)) {

        desc->a = LDT_entry_a(&info);
        desc->b = LDT_entry_b(&info);
 * Get the current Thread-Local Storage area:

#define GET_BASE(desc) ( \
        (((desc)->a >> 16) & 0x0000ffff) | \
        (((desc)->b << 16) & 0x00ff0000) | \
        ( (desc)->b & 0xff000000) )

#define GET_LIMIT(desc) ( \
        ((desc)->a & 0x0ffff) | \
        ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)         (((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)      (((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)      (((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)   (((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)       (((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)       (((desc)->b >> 20) & 1)
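/*
 * Worked example for GET_BASE() above (values are illustrative): for a
 * segment base of 0x12345678, bits 16-31 of 'a' hold 0x5678, bits 0-7 of 'b'
 * hold 0x34 and bits 24-31 of 'b' hold 0x12, so the macro reassembles
 * 0x00005678 | 0x00340000 | 0x12000000 == 0x12345678.
 */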
asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
        struct user_desc info;
        struct desc_struct *desc;

        if (get_user(idx, &u_info->entry_number))
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)

        desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

        info.entry_number = idx;
        info.base_addr = GET_BASE(desc);
        info.limit = GET_LIMIT(desc);
        info.seg_32bit = GET_32BIT(desc);
        info.contents = GET_CONTENTS(desc);
        info.read_exec_only = !GET_WRITABLE(desc);
        info.limit_in_pages = GET_LIMIT_PAGES(desc);
        info.seg_not_present = !GET_PRESENT(desc);
        info.useable = GET_USEABLE(desc);

        if (copy_to_user(u_info, &info, sizeof(info)))