X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fi386%2Fkernel%2Fvm86.c;h=00e0118e717c3568a960021be276e9c4f7e37835;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=4484e47a77c57506960d4ec3cca23abeed48b669;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 4484e47a7..00e0118e7 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -4,7 +4,7 @@
  *  Copyright (C) 1994  Linus Torvalds
  *
  *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
- *                stack - Manfred Spraul <manfreds@colorfullife.com>
+ *                stack - Manfred Spraul <manfred@colorfullife.com>
  *
  *   22 mar 2002 - Manfred detected the stackfaults, but didn't handle
  *                 them correctly. Now the emulation will be in a
@@ -30,6 +30,7 @@
  *
  */

+#include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
@@ -42,6 +43,7 @@
 #include <linux/smp_lock.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
+#include <linux/audit.h>

 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -121,7 +123,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 		do_exit(SIGSEGV);
 	}

-	tss = init_tss + get_cpu();
+	tss = &per_cpu(init_tss, get_cpu());
 	current->thread.esp0 = current->thread.saved_esp0;
 	current->thread.sysenter_cs = __KERNEL_CS;
 	load_esp0(tss, &current->thread);
@@ -134,41 +136,32 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 	return ret;
 }

-static void mark_screen_rdonly(struct task_struct * tsk)
+static void mark_screen_rdonly(struct mm_struct *mm)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte, *mapped;
+	pte_t *pte;
+	spinlock_t *ptl;
 	int i;

-	preempt_disable();
-	spin_lock(&tsk->mm->page_table_lock);
-	pgd = pgd_offset(tsk->mm, 0xA0000);
-	if (pgd_none(*pgd))
+	pgd = pgd_offset(mm, 0xA0000);
+	if (pgd_none_or_clear_bad(pgd))
 		goto out;
-	if (pgd_bad(*pgd)) {
-		pgd_ERROR(*pgd);
-		pgd_clear(pgd);
+	pud = pud_offset(pgd, 0xA0000);
+	if (pud_none_or_clear_bad(pud))
 		goto out;
-	}
-	pmd = pmd_offset(pgd, 0xA0000);
-	if (pmd_none(*pmd))
-		goto out;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
+	pmd = pmd_offset(pud, 0xA0000);
+	if (pmd_none_or_clear_bad(pmd))
 		goto out;
-	}
-	pte = mapped = pte_offset_map(pmd, 0xA0000);
+	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
 	for (i = 0; i < 32; i++) {
 		if (pte_present(*pte))
 			set_pte(pte, pte_wrprotect(*pte));
 		pte++;
 	}
-	pte_unmap(mapped);
+	pte_unmap_unlock(pte, ptl);
 out:
-	spin_unlock(&tsk->mm->page_table_lock);
-	preempt_enable();
 	flush_tlb();
 }

@@ -228,7 +221,7 @@ asmlinkage int sys_vm86(struct pt_regs regs)
 		goto out;
 	case VM86_PLUS_INSTALL_CHECK:
 		/* NOTE: on old vm86 stuff this will return the error
-		   from verify_area(), because the subfunction is
+		   from access_ok(), because the subfunction is
 		   interpreted as (invalid) address to vm86_struct.
 		   So the installation check works.
 		 */
@@ -260,6 +253,7 @@ out:
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
 {
 	struct tss_struct *tss;
+	long eax;
 /*
  * make sure the vm86() system call doesn't try to do anything silly
  */
@@ -300,10 +294,10 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
  */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	savesegment(fs, tsk->thread.saved_fs);
+	savesegment(gs, tsk->thread.saved_gs);

-	tss = init_tss + get_cpu();
+	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
 	if (cpu_has_sep)
 		tsk->thread.sysenter_cs = 0;
@@ -312,14 +306,20 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)

 	tsk->thread.screen_bitmap = info->screen_bitmap;
 	if (info->flags & VM86_SCREEN_BITMAP)
-		mark_screen_rdonly(tsk);
+		mark_screen_rdonly(tsk->mm);
+	__asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
+	__asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));
+
+	/*call audit_syscall_exit since we do not exit via the normal paths */
+	if (unlikely(current->audit_context))
+		audit_syscall_exit(AUDITSC_RESULT(eax), eax);
+
 	__asm__ __volatile__(
-		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
 		"movl %0,%%esp\n\t"
 		"movl %1,%%ebp\n\t"
 		"jmp resume_userspace"
 		: /* no outputs */
-		:"r" (&info->regs), "r" (tsk->thread_info) : "ax");
+		:"r" (&info->regs), "r" (task_thread_info(tsk)));
 	/* we never return here */
 }

@@ -548,7 +548,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 	unsigned char opcode;
 	unsigned char __user *csp;
 	unsigned char __user *ssp;
-	unsigned short ip, sp;
+	unsigned short ip, sp, orig_flags;
 	int data32, pref_done;

 #define CHECK_IF_IN_TRAP \
@@ -557,8 +557,12 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 #define VM86_FAULT_RETURN do { \
 	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
 		return_to_32bit(regs, VM86_PICRETURN); \
+	if (orig_flags & TF_MASK) \
+		handle_vm86_trap(regs, 0, 1); \
 	return; } while (0)

+	orig_flags = *(unsigned short *)&regs->eflags;
+
 	csp = (unsigned char __user *) (regs->cs << 4);
 	ssp = (unsigned char __user *) (regs->ss << 4);
 	sp = SP(regs);
@@ -704,7 +708,7 @@ static struct vm86_irqs {
 	int sig;
 } vm86_irqs[16];

-static spinlock_t irqbits_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(irqbits_lock);
 static int irqbits;

 #define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
@@ -723,7 +727,14 @@ static irqreturn_t irq_handler(int intno, void *dev_id, struct pt_regs * regs)
 	irqbits |= irq_bit;
 	if (vm86_irqs[intno].sig)
 		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
-	/* else user will poll for IRQs */
+	/*
+	 * IRQ will be re-enabled when user asks for the irq (whether
+	 * polling or as a result of the signal)
+	 */
+	disable_irq_nosync(intno);
+	spin_unlock_irqrestore(&irqbits_lock, flags);
+	return IRQ_HANDLED;
+
 out:
 	spin_unlock_irqrestore(&irqbits_lock, flags);
 	return IRQ_NONE;
@@ -741,7 +752,7 @@ static inline void free_vm86_irq(int irqnumber)
 	spin_unlock_irqrestore(&irqbits_lock, flags);
 }

-void release_x86_irqs(struct task_struct *task)
+void release_vm86_irqs(struct task_struct *task)
 {
 	int i;
 	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
@@ -753,17 +764,20 @@ static inline int get_and_reset_irq(int irqnumber)
 {
 	int bit;
 	unsigned long flags;
+	int ret = 0;

 	if (invalid_vm86_irq(irqnumber)) return 0;
 	if (vm86_irqs[irqnumber].tsk != current) return 0;
 	spin_lock_irqsave(&irqbits_lock, flags);
 	bit = irqbits & (1 << irqnumber);
 	irqbits &= ~bit;
+	if (bit) {
+		enable_irq(irqnumber);
+		ret = 1;
+	}
+
 	spin_unlock_irqrestore(&irqbits_lock, flags);
-	if (!bit)
-		return 0;
-	enable_irq(irqnumber);
-	return 1;
+	return ret;
 }
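
For context: the last three hunks change vm86 IRQ forwarding from "latch the bit and leave the line enabled" to a disable/re-enable handshake. irq_handler() masks the line with disable_irq_nosync() right after latching it and signalling the task, and get_and_reset_irq() unmasks it again only once the task consumes the pending bit. The following is a minimal sketch of the userspace side of that protocol, not part of the patch; it assumes an i386 Linux of this vintage with <asm/vm86.h>, and the raw syscall(2) invocation, the choice of IRQ 5, and the use of SIGUSR1 are illustrative (VM86_REQUEST_IRQ also requires the CAP_VM86_IRQS privilege check in do_vm86_irq_handling() to pass).

	#include <signal.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <asm/vm86.h>

	static volatile sig_atomic_t got_irq;

	static void on_sigusr1(int signo)
	{
		(void)signo;
		got_irq = 1;	/* the forwarded IRQ arrived as a signal */
	}

	int main(void)
	{
		int irq = 5;	/* illustrative IRQ line */

		signal(SIGUSR1, on_sigusr1);

		/* ask the kernel to deliver IRQ 5 as SIGUSR1 (sig in bits 15..8) */
		if (syscall(SYS_vm86, VM86_REQUEST_IRQ,
			    (long)((SIGUSR1 << 8) | irq)) < 0) {
			perror("VM86_REQUEST_IRQ");
			return 1;
		}

		while (!got_irq)
			pause();	/* after this patch, irq_handler() has masked the line */

		/*
		 * Consuming the latched bit is what re-enables the line
		 * (enable_irq() in get_and_reset_irq()).
		 */
		if (syscall(SYS_vm86, VM86_GET_AND_RESET_IRQ, (long)irq) == 1)
			printf("IRQ %d consumed; line unmasked again\n", irq);

		syscall(SYS_vm86, VM86_FREE_IRQ, (long)irq);
		return 0;
	}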