X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fi386%2Fkernel%2Fprocess-xen.c;h=4ba058f7e1b373f1201f472348a99eb29f272f1c;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=a9508f64d690ada8c88a633cde19c1f4e8eda5ed;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/arch/i386/kernel/process-xen.c b/arch/i386/kernel/process-xen.c
index a9508f64d..4ba058f7e 100644
--- a/arch/i386/kernel/process-xen.c
+++ b/arch/i386/kernel/process-xen.c
@@ -37,6 +37,7 @@
 #include 
 #include 
 #include 
+#include <linux/personality.h>
 #include 
 #include 
@@ -59,6 +60,7 @@
 #include 
 #include 
+#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -99,16 +101,19 @@ EXPORT_SYMBOL(enable_hlt);
 /* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
 void xen_idle(void)
 {
-	local_irq_disable();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we
+	 * test NEED_RESCHED:
+	 */
+	smp_mb();
 
-	if (need_resched())
+	local_irq_disable();
+	if (!need_resched())
+		safe_halt();	/* enables interrupts racelessly */
+	else
 		local_irq_enable();
-	else {
-		current_thread_info()->status &= ~TS_POLLING;
-		smp_mb__after_clear_bit();
-		safe_halt();
-		current_thread_info()->status |= TS_POLLING;
-	}
+	current_thread_info()->status |= TS_POLLING;
 }
 #ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(default_idle);
@@ -144,7 +149,6 @@ void cpu_idle(void)
 
 	current_thread_info()->status |= TS_POLLING;
 
-	/* endless idle loop with no priority at all */
 	while (1) {
 		while (!need_resched()) {
@@ -169,7 +173,7 @@ void cpu_idle(void)
 void cpu_idle_wait(void)
 {
 	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map;
+	cpumask_t map, tmp = current->cpus_allowed;
 
 	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
 	put_cpu();
@@ -191,11 +195,15 @@ void cpu_idle_wait(void)
 		}
 		cpus_and(map, map, cpu_online_map);
 	} while (!cpus_empty(map));
+
+	set_cpus_allowed(current, tmp);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
 /* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
 /* Always use xen_idle() instead. */
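/*
 * Sketch (illustrative annotation; not part of the patch) of the
 * halt/wakeup protocol the reworked xen_idle() above follows, using only
 * helpers that appear in this file.  While the idle loop polls,
 * TS_POLLING tells remote wakeups that setting TIF_NEED_RESCHED is
 * enough and no reschedule IPI is needed.  Before the CPU may actually
 * block in halt, it must clear TS_POLLING, issue a full barrier, and
 * re-test need_resched() with interrupts disabled; a wakeup is then
 * either caught by the re-test or delivered as a real interrupt, which
 * safe_halt() (an atomic "sti; hlt" pair) cannot lose.  The old code
 * cleared TS_POLLING only after testing need_resched(), leaving a window
 * in which a remote CPU could skip the IPI and the wakeup would be
 * missed until the next interrupt.
 */
static void idle_halt_sketch(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	smp_mb();			/* the clear must be visible before the test */

	local_irq_disable();
	if (!need_resched())
		safe_halt();		/* sti;hlt: a wakeup IRQ ends the halt */
	else
		local_irq_enable();	/* work already pending; do not halt */
	current_thread_info()->status |= TS_POLLING;
}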
+void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) {}
+
 void __devinit select_idle_routine(const struct cpuinfo_x86 *c) {}
 
 void show_regs(struct pt_regs * regs)
@@ -203,22 +211,24 @@ void show_regs(struct pt_regs * regs)
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
 
 	printk("\n");
-	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
-	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
+	printk("Pid: %d[#%u], comm: %20s\n",
+		current->pid, current->xid, current->comm);
+	printk("EIP: %04x:[<%08lx>] CPU: %d\n",
+		0xffff & regs->xcs,regs->eip, smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->eip);
 	if (user_mode_vm(regs))
 		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
 	printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
-	       regs->eflags, print_tainted(), system_utsname.release,
-	       (int)strcspn(system_utsname.version, " "),
-	       system_utsname.version);
+	       regs->eflags, print_tainted(), init_utsname()->release,
+	       (int)strcspn(init_utsname()->version, " "),
+	       init_utsname()->version);
 	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 		regs->eax,regs->ebx,regs->ecx,regs->edx);
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
 		regs->esi, regs->edi, regs->ebp);
-	printk(" DS: %04x ES: %04x\n",
-		0xffff & regs->xds,0xffff & regs->xes);
+	printk(" DS: %04x ES: %04x GS: %04x\n",
+		0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
 
 	cr0 = read_cr0();
 	cr2 = read_cr2();
@@ -234,15 +244,6 @@ void show_regs(struct pt_regs * regs)
  * the "args".
  */
 extern void kernel_thread_helper(void);
-__asm__(".section .text\n"
-	".align 4\n"
-	"kernel_thread_helper:\n\t"
-	"movl %edx,%eax\n\t"
-	"pushl %edx\n\t"
-	"call *%ebx\n\t"
-	"pushl %eax\n\t"
-	"call do_exit\n"
-	".previous");
 
 /*
  * Create a kernel thread
@@ -258,13 +259,15 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
 	regs.xds = __USER_DS;
 	regs.xes = __USER_DS;
+	regs.xgs = __KERNEL_PDA;
 	regs.orig_eax = -1;
 	regs.eip = (unsigned long) kernel_thread_helper;
-	regs.xcs = GET_KERNEL_CS();
+	regs.xcs = __KERNEL_CS | get_kernel_rpl();
 	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
 
 	/* Ok, create the new process.. */
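	/*
	 * Annotation (illustrative; not part of the patch): the forged
	 * pt_regs above make the child come out of ret_from_fork into
	 * kernel_thread_helper(), which pushes %edx (arg), calls the
	 * function in %ebx, and hands its return value to do_exit().  The
	 * inline-asm definition of the helper was deleted above because
	 * it now lives in the assembly entry code, and %gs is seeded with
	 * __KERNEL_PDA because this kernel keeps the per-CPU PDA in %gs.
	 * A typical caller would look like this, where worker, do_work
	 * and ctx are hypothetical names:
	 *
	 *	static int worker(void *data)
	 *	{
	 *		return do_work(data);
	 *	}
	 *	...
	 *	pid = kernel_thread(worker, &ctx, CLONE_FS | CLONE_FILES);
	 */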
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD,
+		0, &regs, 0, NULL, NULL);
 }
 EXPORT_SYMBOL(kernel_thread);
 
@@ -334,17 +337,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	p->thread.eip = (unsigned long) ret_from_fork;
 
 	savesegment(fs,p->thread.fs);
-	savesegment(gs,p->thread.gs);
 
 	tsk = current;
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
-		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
+						IO_BITMAP_BYTES, GFP_KERNEL);
 		if (!p->thread.io_bitmap_ptr) {
 			p->thread.io_bitmap_max = 0;
 			return -ENOMEM;
 		}
-		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
-			IO_BITMAP_BYTES);
 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
 	}
 
@@ -414,7 +415,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 	dump->regs.ds = regs->xds;
 	dump->regs.es = regs->xes;
 	savesegment(fs,dump->regs.fs);
-	savesegment(gs,dump->regs.gs);
+	dump->regs.gs = regs->xgs;
 	dump->regs.orig_eax = regs->orig_eax;
 	dump->regs.eip = regs->eip;
 	dump->regs.cs = regs->xcs;
@@ -442,6 +443,53 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 	return 1;
 }
 
+static noinline void __switch_to_xtra(struct task_struct *next_p)
+{
+	struct thread_struct *next;
+
+	next = &next_p->thread;
+
+	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+		set_debugreg(next->debugreg[0], 0);
+		set_debugreg(next->debugreg[1], 1);
+		set_debugreg(next->debugreg[2], 2);
+		set_debugreg(next->debugreg[3], 3);
+		/* no 4 and 5 */
+		set_debugreg(next->debugreg[6], 6);
+		set_debugreg(next->debugreg[7], 7);
+	}
+#ifndef CONFIG_XEN
+	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+		/*
+		 * Disable the bitmap via an invalid offset. We still cache
+		 * the previous bitmap owner and the IO bitmap contents:
+		 */
+		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+		return;
+	}
+
+	if (likely(next == tss->io_bitmap_owner)) {
+		/*
+		 * Previous owner of the bitmap (hence the bitmap content)
+		 * matches the next task, so we don't have to do anything
+		 * but set a valid offset in the TSS:
+		 */
+		tss->io_bitmap_base = IO_BITMAP_OFFSET;
+		return;
+	}
+	/*
+	 * Lazy TSS I/O bitmap copy. We set an invalid offset here and
+	 * let the task get a GPF in case an I/O instruction is
+	 * performed. The GPF handler verifies that the faulting task
+	 * has a valid I/O bitmap and, if so, does the real copy and
+	 * restarts the instruction. This saves us redundant copies when
+	 * the task being switched in performs no I/O during its
+	 * timeslice.
+	 */
+	tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+#endif	/* !CONFIG_XEN */
+}
+
 /*
  * This function selects if the context switch from prev to next
  * has to tweak the TSC disable bit in the cr4.
@@ -504,7 +552,6 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 #ifndef CONFIG_X86_NO_TSS
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 #endif
-	struct physdev_set_iopl iopl_op;
 	struct physdev_set_iobitmap iobmp_op;
 	multicall_entry_t _mcl[8], *mcl = _mcl;
 
@@ -554,14 +601,6 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	C(0); C(1); C(2);
 #undef C
 
-	if (unlikely(prev->iopl != next->iopl)) {
-		iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
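	/*
	 * Annotation on the deleted block: thread.iopl caches the raw
	 * EFLAGS image, in which the IOPL field occupies bits 12-13, so
	 * after sys_iopl(3) the stored value is 3 << 12 = 0x3000 and
	 * (0x3000 >> 12) & 3 recovers level 3.  The 0 -> 1 special case
	 * is apparently there because a paravirtualized kernel runs in
	 * ring 1 rather than ring 0, so the virtual IOPL must be at
	 * least 1 for the kernel itself to retain I/O access.  This hunk
	 * drops the per-switch PHYSDEVOP_set_iopl multicall; presumably
	 * IOPL propagation is handled outside the context-switch fast
	 * path after this update.
	 */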
-		mcl->op      = __HYPERVISOR_physdev_op;
-		mcl->args[0] = PHYSDEVOP_set_iopl;
-		mcl->args[1] = (unsigned long)&iopl_op;
-		mcl++;
-	}
-
 	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
 		iobmp_op.bitmap   = (char *)next->io_bitmap_ptr;
 		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
@@ -574,32 +613,35 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
 
 	/*
-	 * Restore %fs and %gs if needed.
+	 * Restore %fs if needed.
 	 *
-	 * Glibc normally makes %fs be zero, and %gs is one of
-	 * the TLS segments.
+	 * Glibc normally makes %fs be zero.
 	 */
 	if (unlikely(next->fs))
 		loadsegment(fs, next->fs);
-	if (next->gs)
-		loadsegment(gs, next->gs);
+	write_pda(pcurrent, next_p);
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next_p->fpu_counter > 5)
+		prefetch(&next->i387.fxsave);
 
 	/*
-	 * Now maybe reload the debug registers
+	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
-	if (unlikely(next->debugreg[7])) {
-		set_debugreg(next->debugreg[0], 0);
-		set_debugreg(next->debugreg[1], 1);
-		set_debugreg(next->debugreg[2], 2);
-		set_debugreg(next->debugreg[3], 3);
-		/* no 4 and 5 */
-		set_debugreg(next->debugreg[6], 6);
-		set_debugreg(next->debugreg[7], 7);
-	}
+	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
+		__switch_to_xtra(next_p);
 
 	disable_tsc(prev_p, next_p);
 
+	/*
+	 * If the task has used the FPU during the last 5 timeslices, just
+	 * do a full restore of the math state immediately to avoid the
+	 * trap; the chances of needing the FPU soon are obviously high now.
+	 */
+	if (next_p->fpu_counter > 5)
+		math_state_restore();
+
 	return prev_p;
 }
 
@@ -655,9 +697,6 @@ asmlinkage int sys_execve(struct pt_regs regs)
 			(char __user * __user *) regs.edx,
 			&regs);
 	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 		/* Make sure we don't return using sysenter.. */
 		set_thread_flag(TIF_IRET);
 	}
@@ -809,7 +848,7 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
 
 unsigned long arch_align_stack(unsigned long sp)
 {
-	if (randomize_va_space)
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
 		sp -= get_random_int() % 8192;
 	return sp & ~0xf;
 }
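/*
 * Runnable userspace sketch (not part of the patch): the final hunk makes
 * arch_align_stack() honour the ADDR_NO_RANDOMIZE personality bit in
 * addition to the global randomize_va_space knob.  This is the flag a
 * debugger or benchmark harness sets to get a deterministic stack; it
 * takes effect for images exec'ed after it is set.  Assumes glibc's
 * <sys/personality.h> exposes ADDR_NO_RANDOMIZE (otherwise include
 * <linux/personality.h>); the NORAND_STAGE2 variable is just a marker
 * invented for this demo.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/personality.h>

int main(int argc, char **argv)
{
	int probe;

	if (!getenv("NORAND_STAGE2")) {
		/* Query the current persona, OR in ADDR_NO_RANDOMIZE, then
		 * re-exec so the fresh image's stack is set up without the
		 * get_random_int() % 8192 offset. */
		if (personality(personality(0xffffffff) | ADDR_NO_RANDOMIZE) < 0)
			perror("personality");
		setenv("NORAND_STAGE2", "1", 1);
		execv("/proc/self/exe", argv);
		perror("execv");	/* only reached on failure */
		return 1;
	}

	/* With randomization suppressed, repeated runs print the same
	 * address; without the flag it varies within an 8 KB window. */
	printf("stack probe at %p\n", (void *)&probe);
	return 0;
}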