X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fi386%2Fkernel%2Fprocess.c;h=abf49a36473f756db5357c209e21e50f970c12d5;hb=refs%2Fheads%2Fvserver;hp=0095fa1dda6f6452f7a51fa08333458f0f75a6d0;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 0095fa1dd..abf49a364 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -13,6 +13,7 @@
 #include
+#include
 #include
 #include
 #include
@@ -27,8 +28,7 @@
 #include
 #include
 #include
-#include
-#include
+#include
 #include
 #include
 #include
@@ -36,6 +36,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
@@ -44,18 +46,24 @@
 #include
 #include
 #include
-#include
 #include
+#include

 #ifdef CONFIG_MATH_EMULATION
 #include
 #endif

-#include
 #include
+#include
+#include
+#include
+
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

-int hlt_counter;
+static int hlt_counter;
+
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);

 /*
  * Return saved PC of a blocked thread.
@@ -69,6 +77,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

 void disable_hlt(void)
 {
@@ -90,14 +100,28 @@ EXPORT_SYMBOL(enable_hlt);
  */
 void default_idle(void)
 {
-	if (!hlt_counter && current_cpu_data.hlt_works_ok) {
+	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we
+		 * test NEED_RESCHED:
+		 */
+		smp_mb();
+
 		local_irq_disable();
 		if (!need_resched())
-			safe_halt();
+			safe_halt();	/* enables interrupts racelessly */
 		else
 			local_irq_enable();
+		current_thread_info()->status |= TS_POLLING;
+	} else {
+		/* loop is done by the caller */
+		cpu_relax();
 	}
 }
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif

 /*
  * On SMP it's slightly faster (but much more power-consuming!)
@@ -106,30 +130,34 @@ void default_idle(void)
  */
 static void poll_idle (void)
 {
-	int oldval;
+	cpu_relax();
+}

-	local_irq_enable();
+#ifdef CONFIG_HOTPLUG_CPU
+#include
+/* We don't actually take CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+	/* This must be done before dead CPU ack */
+	cpu_exit_clear();
+	wbinvd();
+	mb();
+	/* Ack it */
+	__get_cpu_var(cpu_state) = CPU_DEAD;

 	/*
-	 * Deal with another CPU just having chosen a thread to
-	 * run here:
+	 * With physical CPU hotplug, we should halt the cpu
 	 */
-	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
-	if (!oldval) {
-		set_thread_flag(TIF_POLLING_NRFLAG);
-		asm volatile(
-			"2:"
-			"testl %0, %1;"
-			"rep; nop;"
-			"je 2b;"
-			: : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
-
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-	} else {
-		set_need_resched();
-	}
+	local_irq_disable();
+	while (1)
+		halt();
 }
+#else
+static inline void play_dead(void)
+{
+	BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */

 /*
  * The idle thread. There's no useful work to be
@@ -137,30 +165,67 @@ static void poll_idle (void)
  * low exit latency (ie sit in a loop waiting for
  * somebody to say that they'd like to reschedule)
  */
-void cpu_idle (void)
+void cpu_idle(void)
 {
+	int cpu = smp_processor_id();
+
+	current_thread_info()->status |= TS_POLLING;
+
 	/* endless idle loop with no priority at all */
 	while (1) {
 		while (!need_resched()) {
 			void (*idle)(void);
-			/*
-			 * Mark this as an RCU critical section so that
-			 * synchronize_kernel() in the unload path waits
-			 * for our completion.
-			 */
-			rcu_read_lock();
+
+			if (__get_cpu_var(cpu_idle_state))
+				__get_cpu_var(cpu_idle_state) = 0;
+
+			rmb();
 			idle = pm_idle;
 			if (!idle)
 				idle = default_idle;

-			irq_stat[smp_processor_id()].idle_timestamp = jiffies;
+			if (cpu_is_offline(cpu))
+				play_dead();
+
+			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
 			idle();
-			rcu_read_unlock();
 		}
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
+	}
+}
+
+void cpu_idle_wait(void)
+{
+	unsigned int cpu, this_cpu = get_cpu();
+	cpumask_t map, tmp = current->cpus_allowed;
+
+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+	put_cpu();
+
+	cpus_clear(map);
+	for_each_online_cpu(cpu) {
+		per_cpu(cpu_idle_state, cpu) = 1;
+		cpu_set(cpu, map);
 	}
+
+	__get_cpu_var(cpu_idle_state) = 0;
+
+	wmb();
+	do {
+		ssleep(1);
+		for_each_online_cpu(cpu) {
+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+				cpu_clear(cpu, map);
+		}
+		cpus_and(map, map, cpu_online_map);
+	} while (!cpus_empty(map));
+
+	set_cpus_allowed(current, tmp);
 }
+EXPORT_SYMBOL_GPL(cpu_idle_wait);

 /*
  * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
@@ -168,24 +233,28 @@ void cpu_idle (void)
  * We execute MONITOR against need_resched and enter optimized wait state
  * through MWAIT. Whenever someone changes need_resched, we would be woken
  * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
  */
-static void mwait_idle(void)
+void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
-	local_irq_enable();
-
 	if (!need_resched()) {
-		set_thread_flag(TIF_POLLING_NRFLAG);
-		do {
-			__monitor((void *)&current_thread_info()->flags, 0, 0);
-			if (need_resched())
-				break;
-			__mwait(0, 0);
-		} while (!need_resched());
-		clear_thread_flag(TIF_POLLING_NRFLAG);
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		smp_mb();
+		if (!need_resched())
+			__mwait(eax, ecx);
 	}
 }

-void __init select_idle_routine(const struct cpuinfo_x86 *c)
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+	local_irq_enable();
+	mwait_idle_with_hints(0, 0);
+}
+
+void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_MWAIT)) {
 		printk("monitor/mwait feature present.\n");
@@ -214,6 +283,7 @@ static int __init idle_setup (char *str)
 		pm_idle = default_idle;
 	}

+	boot_option_idle_override = 1;
 	return 1;
 }

@@ -224,32 +294,31 @@ void show_regs(struct pt_regs * regs)
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

 	printk("\n");
-	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
-	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
+	printk("Pid: %d[#%u], comm: %20s\n",
+		current->pid, current->xid, current->comm);
+	printk("EIP: %04x:[<%08lx>] CPU: %d\n",
+		0xffff & regs->xcs,regs->eip, smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->eip);

-	if (regs->xcs & 3)
+	if (user_mode_vm(regs))
 		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-	printk(" EFLAGS: %08lx %s (%s)\n",regs->eflags, print_tainted(),UTS_RELEASE);
+	printk(" EFLAGS: %08lx %s (%s %.*s)\n",
+		regs->eflags, print_tainted(), init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
 	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 		regs->eax,regs->ebx,regs->ecx,regs->edx);
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
 		regs->esi, regs->edi, regs->ebp);
-	printk(" DS: %04x ES: %04x\n",
-		0xffff & regs->xds,0xffff & regs->xes);
-
-	__asm__("movl %%cr0, %0": "=r" (cr0));
-	__asm__("movl %%cr2, %0": "=r" (cr2));
-	__asm__("movl %%cr3, %0": "=r" (cr3));
-	/* This could fault if %cr4 does not exist */
-	__asm__("1: movl %%cr4, %0 \n"
-		"2: \n"
-		".section __ex_table,\"a\" \n"
-		".long 1b,2b \n"
-		".previous \n"
-		: "=r" (cr4): "0" (0));
+	printk(" DS: %04x ES: %04x GS: %04x\n",
+		0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
+
+	cr0 = read_cr0();
+	cr2 = read_cr2();
+	cr3 = read_cr3();
+	cr4 = read_cr4_safe();
 	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
-	show_trace(NULL, &regs->esp);
+	show_trace(NULL, regs, &regs->esp);
 }

 /*
@@ -258,15 +327,6 @@ void show_regs(struct pt_regs * regs)
  * the "args".
  */
 extern void kernel_thread_helper(void);
-__asm__(".section .text\n"
-	".align 4\n"
-	"kernel_thread_helper:\n\t"
-	"movl %edx,%eax\n\t"
-	"pushl %edx\n\t"
-	"call *%ebx\n\t"
-	"pushl %eax\n\t"
-	"call do_exit\n"
-	".previous");

 /*
  * Create a kernel thread
@@ -282,30 +342,33 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	regs.xds = __USER_DS;
 	regs.xes = __USER_DS;
+	regs.xgs = __KERNEL_PDA;
 	regs.orig_eax = -1;
 	regs.eip = (unsigned long) kernel_thread_helper;
-	regs.xcs = __KERNEL_CS;
+	regs.xcs = __KERNEL_CS | get_kernel_rpl();
 	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

 	/* Ok, create the new process.. */
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD,
+		0, &regs, 0, NULL, NULL);
 }
+EXPORT_SYMBOL(kernel_thread);

 /*
  * Free current thread data structures etc..
  */
 void exit_thread(void)
 {
-	struct task_struct *tsk = current;
-	struct thread_struct *t = &tsk->thread;
-
 	/* The process may have allocated an io port bitmap... nuke it. */
-	if (unlikely(NULL != t->io_bitmap_ptr)) {
+	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
+		struct task_struct *tsk = current;
+		struct thread_struct *t = &tsk->thread;
 		int cpu = get_cpu();
 		struct tss_struct *tss = &per_cpu(init_tss, cpu);

 		kfree(t->io_bitmap_ptr);
 		t->io_bitmap_ptr = NULL;
+		clear_thread_flag(TIF_IO_BITMAP);
 		/*
 		 * Careful, clear this in the TSS too:
 		 */
@@ -324,27 +387,18 @@ void flush_thread(void)

 	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+	clear_tsk_thread_flag(tsk, TIF_DEBUG);
 	/*
 	 * Forget coprocessor state..
 	 */
 	clear_fpu(tsk);
-	tsk->used_math = 0;
+	clear_used_math();
 }

 void release_thread(struct task_struct *dead_task)
 {
-	if (dead_task->mm) {
-		// temporary debugging check
-		if (dead_task->mm->context.size) {
-			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-				dead_task->comm,
-				dead_task->mm->context.ldt,
-				dead_task->mm->context.size);
-			BUG();
-		}
-	}
-
-	release_x86_irqs(dead_task);
+	BUG_ON(dead_task->mm);
+	release_vm86_irqs(dead_task);
 }

 /*
@@ -364,11 +418,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	struct task_struct *tsk;
 	int err;

-	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+	childregs = task_pt_regs(p);
 	*childregs = *regs;
 	childregs->eax = 0;
 	childregs->esp = esp;
-	p->set_child_tid = p->clear_child_tid = NULL;

 	p->thread.esp = (unsigned long) childregs;
 	p->thread.esp0 = (unsigned long) (childregs+1);
@@ -376,17 +429,16 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	p->thread.eip = (unsigned long) ret_from_fork;

 	savesegment(fs,p->thread.fs);
-	savesegment(gs,p->thread.gs);

 	tsk = current;
-	if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
-		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
+		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
+						IO_BITMAP_BYTES, GFP_KERNEL);
 		if (!p->thread.io_bitmap_ptr) {
 			p->thread.io_bitmap_max = 0;
 			return -ENOMEM;
 		}
-		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
-			IO_BITMAP_BYTES);
+		set_tsk_thread_flag(p, TIF_IO_BITMAP);
 	}

 	/*
@@ -453,7 +505,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 	dump->regs.ds = regs->xds;
 	dump->regs.es = regs->xes;
 	savesegment(fs,dump->regs.fs);
-	savesegment(gs,dump->regs.gs);
+	dump->regs.gs = regs->xgs;
 	dump->regs.orig_eax = regs->orig_eax;
 	dump->regs.eip = regs->eip;
 	dump->regs.cs = regs->xcs;
@@ -463,16 +515,14 @@ void dump_thread(struct pt_regs * regs, struct user * dump)

 	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
 }
+EXPORT_SYMBOL(dump_thread);

 /*
  * Capture the user space registers if the task is not running (in user space)
  */
 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
-	struct pt_regs ptregs;
-
-	ptregs = *(struct pt_regs *)
-		((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
+	struct pt_regs ptregs = *task_pt_regs(tsk);
 	ptregs.xcs &= 0xffff;
 	ptregs.xds &= 0xffff;
 	ptregs.xes &= 0xffff;
@@ -483,10 +533,24 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 	return 1;
 }

-static inline void
-handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
+static noinline void __switch_to_xtra(struct task_struct *next_p,
+				    struct tss_struct *tss)
 {
-	if (!next->io_bitmap_ptr) {
+	struct thread_struct *next;
+
+	next = &next_p->thread;
+
+	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+		set_debugreg(next->debugreg[0], 0);
+		set_debugreg(next->debugreg[1], 1);
+		set_debugreg(next->debugreg[2], 2);
+		set_debugreg(next->debugreg[3], 3);
+		/* no 4 and 5 */
+		set_debugreg(next->debugreg[6], 6);
+		set_debugreg(next->debugreg[7], 7);
+	}
+
+	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
 		/*
 		 * Disable the bitmap via an invalid offset. We still cache
 		 * the previous bitmap owner and the IO bitmap contents:
@@ -494,6 +558,7 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
 		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
 		return;
 	}
+
 	if (likely(next == tss->io_bitmap_owner)) {
 		/*
 		 * Previous owner of the bitmap (hence the bitmap content)
@@ -514,13 +579,33 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
 	 */
 	tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 }
+
 /*
- * This special macro can be used to load a debugging register
+ * This function selects if the context switch from prev to next
+ * has to tweak the TSC disable bit in the cr4.
  */
-#define loaddebug(thread,register) \
-	__asm__("movl %0,%%db" #register \
-		: /* no output */ \
-		:"r" (thread->debugreg[register]))
+static inline void disable_tsc(struct task_struct *prev_p,
+			       struct task_struct *next_p)
+{
+	struct thread_info *prev, *next;
+
+	/*
+	 * gcc should eliminate the ->thread_info dereference if
+	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
+	 */
+	prev = task_thread_info(prev_p);
+	next = task_thread_info(next_p);
+
+	if (has_secure_computing(prev) || has_secure_computing(next)) {
+		/* slow path here */
+		if (has_secure_computing(prev) &&
+		    !has_secure_computing(next)) {
+			write_cr4(read_cr4() & ~X86_CR4_TSD);
+		} else if (!has_secure_computing(prev) &&
+			   has_secure_computing(next))
+			write_cr4(read_cr4() | X86_CR4_TSD);
+	}
+}

 /*
  * switch_to(x,yn) should switch tasks from x to y.
@@ -559,47 +644,61 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

 	__unlazy_fpu(prev_p);
+	if (next_p->mm)
+		load_user_cs_desc(cpu, next_p->mm);
+
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next_p->fpu_counter > 5)
+		prefetch(&next->i387.fxsave);

 	/*
-	 * Reload esp0, LDT and the page table pointer:
+	 * Reload esp0.
 	 */
 	load_esp0(tss, next);

 	/*
-	 * Load the per-thread Thread-Local Storage descriptor.
+	 * Save away %fs. No need to save %gs, as it was saved on the
+	 * stack on entry. No need to save %es and %ds, as those are
+	 * always kernel segments while inside the kernel. Doing this
+	 * before setting the new TLS descriptors avoids the situation
+	 * where we temporarily have non-reloadable segments in %fs
+	 * and %gs. This could be an issue if the NMI handler ever
+	 * used %fs or %gs (it does not today), or if the kernel is
+	 * running inside of a hypervisor layer.
 	 */
-	load_TLS(next, cpu);
+	savesegment(fs, prev->fs);

 	/*
-	 * Save away %fs and %gs. No need to save %es and %ds, as
-	 * those are always kernel segments while inside the kernel.
+	 * Load the per-thread Thread-Local Storage descriptor.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	load_TLS(next, cpu);

 	/*
-	 * Restore %fs and %gs if needed.
+	 * Restore %fs if needed.
+	 *
+	 * Glibc normally makes %fs be zero.
 	 */
-	if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) {
+	if (unlikely(prev->fs | next->fs))
 		loadsegment(fs, next->fs);
-		loadsegment(gs, next->gs);
-	}
+
+	write_pda(pcurrent, next_p);

 	/*
-	 * Now maybe reload the debug registers
+	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
-	if (unlikely(next->debugreg[7])) {
-		loaddebug(next, 0);
-		loaddebug(next, 1);
-		loaddebug(next, 2);
-		loaddebug(next, 3);
-		/* no 4 and 5 */
-		loaddebug(next, 6);
-		loaddebug(next, 7);
-	}
+	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
+		__switch_to_xtra(next_p, tss);

-	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
-		handle_io_bitmap(next, tss);
+	disable_tsc(prev_p, next_p);
+
+	/* If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	if (next_p->fpu_counter > 5)
+		math_state_restore();

 	return prev_p;
 }
@@ -656,7 +755,6 @@ asmlinkage int sys_execve(struct pt_regs regs)
 			(char __user * __user *) regs.edx, &regs);
 	if (error == 0) {
-		current->ptrace &= ~PT_DTRACE;
 		/* Make sure we don't return using sysenter.. */
 		set_thread_flag(TIF_IRET);
 	}
@@ -675,7 +773,7 @@ unsigned long get_wchan(struct task_struct *p)
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
-	stack_page = (unsigned long)p->thread_info;
+	stack_page = (unsigned long)task_stack_page(p);
 	esp = p->thread.esp;
 	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
 		return 0;
@@ -787,6 +885,8 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;

+	memset(&info, 0, sizeof(info));
+
 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

 	info.entry_number = idx;
@@ -804,3 +904,66 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)

 	return 0;
 }
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		sp -= get_random_int() % 8192;
+	return sp & ~0xf;
+}
+
+void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
+{
+	if (limit > mm->context.exec_limit) {
+		mm->context.exec_limit = limit;
+		set_user_cs(&mm->context.user_cs, limit);
+		if (mm == current->mm) {
+			preempt_disable();
+			load_user_cs_desc(smp_processor_id(), mm);
+			preempt_enable();
+		}
+	}
+}
+
+void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
+{
+	struct vm_area_struct *vma;
+	unsigned long limit = PAGE_SIZE;
+
+	if (old_end == mm->context.exec_limit) {
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
+				limit = vma->vm_end;
+
+		mm->context.exec_limit = limit;
+		set_user_cs(&mm->context.user_cs, limit);
+		if (mm == current->mm) {
+			preempt_disable();
+			load_user_cs_desc(smp_processor_id(), mm);
+			preempt_enable();
+		}
+	}
+}
+
+void arch_flush_exec_range(struct mm_struct *mm)
+{
+	mm->context.exec_limit = 0;
+	set_user_cs(&mm->context.user_cs, 0);
+}
+
+/*
+ * Generate random brk address between 128MB and 196MB. (if the layout
+ * allows it.)
+ */
+void randomize_brk(unsigned long old_brk)
+{
+	unsigned long new_brk, range_start, range_end;
+
+	range_start = 0x08000000;
+	if (current->mm->brk >= range_start)
+		range_start = current->mm->brk;
+	range_end = range_start + 0x02000000;
+	new_brk = randomize_range(range_start, range_end, 0);
+	if (new_brk)
+		current->mm->brk = new_brk;
+}
+