* Copyright (C) 1994 Linus Torvalds
*
* 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
- * stack - Manfred Spraul <manfreds@colorfullife.com>
+ * stack - Manfred Spraul <manfred@colorfullife.com>
*
* 22 mar 2002 - Manfred detected the stackfaults, but didn't handle
* them correctly. Now the emulation will be in a
* consistent state after stackfaults - Kasper Dupont
* <kasperd@daimi.au.dk>
*
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
+#include <linux/audit.h>
#include <asm/uaccess.h>
#include <asm/io.h>
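/*
 * Editor's note (assumption, not part of the patch): <linux/audit.h>
 * supplies the audit_syscall_exit()/AUDITSC_RESULT() call added below,
 * and <linux/capability.h> covers capable(), which this file uses and
 * which is no longer pulled in indirectly.
 */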
do_exit(SIGSEGV);
}
- tss = init_tss + get_cpu();
+ tss = &per_cpu(init_tss, get_cpu());
current->thread.esp0 = current->thread.saved_esp0;
current->thread.sysenter_cs = __KERNEL_CS;
- load_virtual_esp0(tss, current);
+ load_esp0(tss, &current->thread);
current->thread.saved_esp0 = 0;
put_cpu();
return ret;
}
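/*
 * Editor's sketch (assumption, not part of the patch): init_tss used
 * to be a flat NR_CPUS-sized array, so "init_tss + get_cpu()" picked
 * this CPU's TSS by pointer arithmetic. With per-CPU data the same
 * lookup goes through the per_cpu() accessor. Roughly:
 *
 *	struct tss_struct init_tss[NR_CPUS];		// old layout
 *	tss = init_tss + get_cpu();
 *
 *	DEFINE_PER_CPU(struct tss_struct, init_tss);	// new layout
 *	tss = &per_cpu(init_tss, get_cpu());
 *
 * Either way get_cpu() disables preemption until the matching
 * put_cpu(), so the task cannot migrate while esp0 is being written.
 */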
-static void mark_screen_rdonly(struct task_struct * tsk)
+static void mark_screen_rdonly(struct mm_struct *mm)
{
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
- pte_t *pte, *mapped;
+ pte_t *pte;
+ spinlock_t *ptl;
int i;
- preempt_disable();
- spin_lock(&tsk->mm->page_table_lock);
- pgd = pgd_offset(tsk->mm, 0xA0000);
- if (pgd_none(*pgd))
+ pgd = pgd_offset(mm, 0xA0000);
+ if (pgd_none_or_clear_bad(pgd))
goto out;
- if (pgd_bad(*pgd)) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
+ pud = pud_offset(pgd, 0xA0000);
+ if (pud_none_or_clear_bad(pud))
goto out;
- }
- pmd = pmd_offset(pgd, 0xA0000);
- if (pmd_none(*pmd))
- goto out;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
+ pmd = pmd_offset(pud, 0xA0000);
+ if (pmd_none_or_clear_bad(pmd))
goto out;
- }
- pte = mapped = pte_offset_map(pmd, 0xA0000);
+ pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
for (i = 0; i < 32; i++) {
if (pte_present(*pte))
set_pte(pte, pte_wrprotect(*pte));
pte++;
}
- pte_unmap(mapped);
+ pte_unmap_unlock(pte, ptl);
out:
- spin_unlock(&tsk->mm->page_table_lock);
- preempt_enable();
flush_tlb();
}
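/*
 * Editor's note (assumption, not part of the patch): 0xA0000 is the
 * start of the legacy VGA window, and 32 PTEs x 4 KB pages cover
 * 0xA0000-0xBFFFF. The walk now follows the generic four-level
 * layout; on i386's two/three-level tables pud_offset() folds back
 * onto the pgd entry, so the extra step costs nothing:
 *
 *	pgd = pgd_offset(mm, addr);
 *	pud = pud_offset(pgd, addr);	// no-op fold on i386
 *	pmd = pmd_offset(pud, addr);
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 * pte_offset_map_lock() also subsumes the hand-rolled
 * preempt_disable()/page_table_lock pair: it maps the PTE page and
 * takes the (possibly split) PTE lock, and pte_unmap_unlock() undoes
 * both. The *_none_or_clear_bad() helpers collapse the open-coded
 * none/bad/ERROR/clear sequences the patch removes.
 */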
goto out;
case VM86_PLUS_INSTALL_CHECK:
/* NOTE: on old vm86 stuff this will return the error
- from verify_area(), because the subfunction is
+ from access_ok(), because the subfunction is
interpreted as (invalid) address to vm86_struct.
So the installation check works.
*/
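/*
 * Editor's note (assumption): verify_area() was the deprecated
 * predecessor of access_ok(); the comment is updated to name the API
 * that actually performs the check now. The behaviour is unchanged:
 * VM86_PLUS_INSTALL_CHECK is not a valid vm86_struct pointer, so the
 * old-style call fails the user copy and returns an error, which is
 * exactly what the installation check relies on.
 */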
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
struct tss_struct *tss;
+ long eax;
/*
* make sure the vm86() system call doesn't try to do anything silly
*/
*/
info->regs32->eax = 0;
tsk->thread.saved_esp0 = tsk->thread.esp0;
- asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
- asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+ savesegment(fs, tsk->thread.saved_fs);
+ savesegment(gs, tsk->thread.saved_gs);
- tss = init_tss + get_cpu();
+ tss = &per_cpu(init_tss, get_cpu());
tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
- load_virtual_esp0(tss, tsk);
+ load_esp0(tss, &tsk->thread);
put_cpu();
tsk->thread.screen_bitmap = info->screen_bitmap;
if (info->flags & VM86_SCREEN_BITMAP)
- mark_screen_rdonly(tsk);
+ mark_screen_rdonly(tsk->mm);
+ __asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
+ __asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));
+
+ /*call audit_syscall_exit since we do not exit via the normal paths */
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(current, AUDITSC_RESULT(eax), eax);
+
__asm__ __volatile__(
- "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
"movl %0,%%esp\n\t"
"movl %1,%%ebp\n\t"
"jmp resume_userspace"
: /* no outputs */
- :"r" (&info->regs), "r" (tsk->thread_info) : "ax");
+ :"r" (&info->regs), "r" (task_thread_info(tsk)));
/* we never return here */
}
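/*
 * Editor's sketch (assumption, not part of the patch): savesegment()
 * is the arch helper for storing a segment register, roughly
 *
 *	#define savesegment(seg, value) \
 *		asm volatile("mov %%" #seg ",%0" : "=m" (value))
 *
 * i.e. what the removed open-coded asm did by hand. The local eax
 * captured just above holds the return value recorded for auditing
 * (zero here, matching regs32->eax). The final asm never returns: it
 * points %esp at the register image inside kernel_vm86_struct, loads
 * the thread_info pointer into %ebp, and jumps to resume_userspace so
 * the IRET path drops straight into v8086 mode. That is why
 * audit_syscall_exit() must be called explicitly above - the normal
 * syscall-exit bookkeeping is bypassed.
 */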
unsigned char opcode;
unsigned char __user *csp;
unsigned char __user *ssp;
- unsigned short ip, sp;
+ unsigned short ip, sp, orig_flags;
int data32, pref_done;
#define CHECK_IF_IN_TRAP \
#define VM86_FAULT_RETURN do { \
if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
return_to_32bit(regs, VM86_PICRETURN); \
+ if (orig_flags & TF_MASK) \
+ handle_vm86_trap(regs, 0, 1); \
return; } while (0)
+ orig_flags = *(unsigned short *)&regs->eflags;
+
csp = (unsigned char __user *) (regs->cs << 4);
ssp = (unsigned char __user *) (regs->ss << 4);
sp = SP(regs);
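/*
 * Editor's note (assumption, not part of the patch): orig_flags
 * snapshots the low 16 bits of eflags before the emulation rewrites
 * them. If the vm86 program entered with TF set, VM86_FAULT_RETURN
 * now re-raises the single-step trap via handle_vm86_trap(regs, 0, 1)
 * (error code 0, trap 1 = #DB), so a debugger stepping through
 * emulated instructions no longer loses its trap.
 */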
int sig;
} vm86_irqs[16];
-static spinlock_t irqbits_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;
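/*
 * Editor's note (assumption): DEFINE_SPINLOCK() is the preferred
 * static initializer; the SPIN_LOCK_UNLOCKED form it replaces was
 * being phased out at the time.
 */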
#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
irqbits |= irq_bit;
if (vm86_irqs[intno].sig)
send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
- /* else user will poll for IRQs */
+ /*
+ * IRQ will be re-enabled when user asks for the irq (whether
+ * polling or as a result of the signal)
+ */
+ disable_irq_nosync(intno);
+ spin_unlock_irqrestore(&irqbits_lock, flags);
+ return IRQ_HANDLED;
+
out:
spin_unlock_irqrestore(&irqbits_lock, flags);
return IRQ_NONE;
spin_unlock_irqrestore(&irqbits_lock, flags);
}
-void release_x86_irqs(struct task_struct *task)
+void release_vm86_irqs(struct task_struct *task)
{
int i;
for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
{
int bit;
unsigned long flags;
+ int ret = 0;
if (invalid_vm86_irq(irqnumber)) return 0;
if (vm86_irqs[irqnumber].tsk != current) return 0;
spin_lock_irqsave(&irqbits_lock, flags);
bit = irqbits & (1 << irqnumber);
irqbits &= ~bit;
+ if (bit) {
+ enable_irq(irqnumber);
+ ret = 1;
+ }
+
spin_unlock_irqrestore(&irqbits_lock, flags);
- if (!bit)
- return 0;
- enable_irq(irqnumber);
- return 1;
+ return ret;
}
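/*
 * Editor's sketch (assumption, not part of the patch): the handler
 * and the claim path now pair up as
 *
 *	vm86 irq handler:		get_and_reset_irq():
 *		irqbits |= irq_bit;		bit = irqbits & (1 << irq);
 *		send_sig(...);			irqbits &= ~bit;
 *		disable_irq_nosync(irq);	if (bit) enable_irq(irq);
 *
 * disable_irq_nosync() keeps a (possibly level-triggered) line from
 * storming until the task actually consumes the IRQ, and doing the
 * test-and-clear plus enable_irq() inside one irqbits_lock section
 * keeps the latch and the line state consistent. Returning
 * IRQ_HANDLED/IRQ_NONE matches the irqreturn_t handler convention.
 */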