/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */

#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus

/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))
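
/*
 * Worked example (illustrative values, not from the original source):
 * with regs->pt.eax == 0x12345678 on little-endian x86, the aliases
 * above give
 *
 *	AL(regs) == 0x78	AH(regs) == 0x56
 *
 * i.e. exactly the %al/%ah sub-registers 16-bit code expects, while
 * IP() and SP() alias the low words of %eip/%esp that the 16-bit
 * stack emulation below manipulates.
 */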

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
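
/*
 * Worked example (illustrative values): set_flags() merges exactly the
 * bits selected by 'mask' from 'new' into X, e.g.
 *
 *	X = 0;
 *	set_flags(X, 0xFFFF, SAFE_MASK);	 now X == 0x0DD5
 *
 * SAFE_MASK (0xDD5) covers CF, PF, AF, ZF, SF, TF, DF and OF - flags a
 * vm86 task may freely change. RETURN_MASK (0xDFF) additionally lets
 * IF (bit 9) and the fixed low reserved bits show through when the
 * flags are read back via get_vflags(). IOPL, NT, VM etc. stay under
 * kernel control.
 */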

/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/* kernel_vm86_regs is missing xfs, so copy everything up to
	   (but not including) xgs, and then rest after xgs. */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.xgs));
	ret += copy_to_user(&user->__null_gs, &regs->pt.xgs,
			    sizeof(struct kernel_vm86_regs) -
				offsetof(struct kernel_vm86_regs, pt.xgs));

	return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned long extra)
{
	int ret = 0;

	/* same split as above: everything up to xgs, then the rest
	   (plus 'extra' trailing bytes requested by the caller) */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.xgs));
	ret += copy_from_user(&regs->pt.xgs, &user->__null_gs,
			      sizeof(struct kernel_vm86_regs) -
				  offsetof(struct kernel_vm86_regs, pt.xgs) +
				  extra);

	return ret;
}

struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct *tss;
#endif
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

#ifndef CONFIG_X86_NO_TSS
	tss = &per_cpu(init_tss, get_cpu());
#endif
	current->thread.esp0 = current->thread.saved_esp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_esp0(tss, &current->thread);
	current->thread.saved_esp0 = 0;
#ifndef CONFIG_X86_NO_TSS
	put_cpu();
#endif

	ret = KVM86->regs32;

	loadsegment(fs, current->thread.saved_fs);
	ret->xgs = current->thread.saved_gs;

	return ret;
}
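
/*
 * Write-protect the pages backing the real-mode video range. The walk
 * below covers 32 PTEs starting at linear address 0xA0000, i.e.
 * 32 * 4 KB = 128 KB = 0xA0000..0xBFFFF, so any store into the VGA
 * window faults and screen state can be tracked via screen_bitmap.
 */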
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}

static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

asmlinkage int sys_vm86old(struct pt_regs regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = &regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}

asmlinkage int sys_vm86(struct pt_regs regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (regs.ebx) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/* NOTE: on old vm86 stuff this will return the error
		   from access_ok(), because the subfunction is
		   interpreted as an (invalid) address to vm86_struct.
		   So the installation check works.
		 */
		ret = 0;
		goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_esp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs.ecx;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = &regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}

static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct *tss;
#endif
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.pt.xds = 0;
	info->regs.pt.xes = 0;
	info->regs.pt.xgs = 0;

/* we are clearing fs later just before "jmp resume_userspace",
 * because it is not saved/restored.
 */

/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.eflags;
	info->regs.pt.eflags &= SAFE_MASK;
	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.pt.eflags |= VM_MASK;
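
	/*
	 * Worked example (illustrative values): if the vm86 caller passed
	 * eflags == 0x3246 (IOPL=3, IF, ZF, PF) while the 32-bit context
	 * has eflags == 0x0246, the three statements above compute
	 *
	 *	0x3246 & SAFE_MASK	== 0x0044	(ZF, PF kept)
	 *	0x0246 & ~SAFE_MASK	== 0x0202	(IF inherited)
	 *	ORed, | VM_MASK		== 0x00020246
	 *
	 * so the caller's attempt to grant itself IOPL 3 is silently
	 * stripped, while the arithmetic flags survive and VM is forced on.
	 */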

	switch (info->cpu_type) {
	case CPU_286:
		tsk->thread.v86mask = 0;
		break;
	case CPU_386:
		tsk->thread.v86mask = NT_MASK | IOPL_MASK;
		break;
	case CPU_486:
		tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
		break;
	default:
		tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
		break;
	}

/*
 * Save old state, set default return value (%eax) to 0
 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	savesegment(fs, tsk->thread.saved_fs);
	tsk->thread.saved_gs = info->regs32->xgs;

#ifndef CONFIG_X86_NO_TSS
	tss = &per_cpu(init_tss, get_cpu());
#endif
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_esp0(tss, &tsk->thread);
#ifndef CONFIG_X86_NO_TSS
	put_cpu();
#endif

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(0), 0);

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"mov  %2, %%fs\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
	regs->pt.eflags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
	regs->pt.eflags &= ~AC_MASK;
}

/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->thread.v86mask);
	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
	unsigned long flags = regs->pt.eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	flags |= IOPL_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
}
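
/*
 * Illustrative trace (values mine) of the CLI PUSHF STI POPF sequence
 * that the [KD] comment above complains about, as emulated by these
 * helpers:
 *
 *	cli	-> clear_IF():	VEFLAGS &= ~VIF_MASK
 *	pushf	-> get_vflags():	pushed image has IF == 0
 *	sti	-> set_IF():	VEFLAGS |= VIF_MASK
 *	popf	-> set_vflags_short(): popped IF == 0, so clear_IF()
 *		   runs and VIF is dropped again
 *
 * With the clear_IF() calls in place the task ends up with virtual
 * interrupts disabled, matching what a real CPU would do.
 */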

static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
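
/*
 * Worked example (illustrative values): pushw(ssp, sp, 0x1234, err)
 * stores the high byte first, pre-decrementing the 16-bit offset:
 *
 *	sp--; ssp[sp] = 0x12;	(val_byte(__val, 1))
 *	sp--; ssp[sp] = 0x34;	(val_byte(__val, 0))
 *
 * which leaves the little-endian image 34 12 in memory, just like a
 * real-mode pushw. Going byte by byte through a 16-bit offset also
 * makes sp wrap at 64K the way a real 16-bit stack pointer would,
 * instead of underflowing into a neighbouring segment.
 */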

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.xcs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.xcs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}
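
/*
 * Worked example (illustrative values): for "int 0x10" the real-mode
 * IVT entry sits at linear address 0x10 << 2 == 0x40 and holds a
 * segment:offset pair. If get_user() reads segoffs == 0xC0001234, then
 *
 *	new CS == segoffs >> 16      == 0xC000
 *	new IP == segoffs & 0xffff   == 0x1234
 *
 * after FLAGS, CS and IP (3 words, hence SP(regs) -= 6) have been
 * pushed on the vm86 stack - the same frame a real CPU builds for a
 * real-mode software interrupt.
 */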

int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ( (trapno==3) || (trapno==1) )
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & TF_MASK) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.eflags;

	/* real-mode linear addresses: segment << 4 */
	csp = (unsigned char __user *) (regs->pt.xcs << 4);
	ssp = (unsigned char __user *) (regs->pt.xss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
			case 0x66:      /* 32-bit data */     data32=1; break;
			case 0x67:      /* 32-bit address */  break;
			case 0x2e:      /* CS */              break;
			case 0x3e:      /* DS */              break;
			case 0x26:      /* ES */              break;
			case 0x36:      /* SS */              break;
			case 0x65:      /* GS */              break;
			case 0x64:      /* FS */              break;
			case 0xf2:      /* repnz */           break;
			case 0xf3:      /* rep */             break;
			default: pref_done = 1;
		}
	} while (!pref_done);
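
	/*
	 * Example (illustrative): the byte stream 66 9c in 16-bit code is
	 * an operand-size-prefixed pushf, so the loop above consumes 0x66,
	 * sets data32, and leaves opcode == 0x9c for the switch below to
	 * emulate as a 32-bit pushfd. Segment and rep prefixes are eaten
	 * but ignored, since they don't change how the flag instructions
	 * are emulated.
	 */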

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ( (1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.xcs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED) )
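
/*
 * Illustrative checks (values mine) against the mask above, as used by
 * VM86_REQUEST_IRQ below via (1 << sig) & ALLOWED_SIGS:
 *
 *	sig == SIGUSR1	-> allowed (bit set above)
 *	sig == 0	-> allowed: bit 0, deliver no signal, poll only
 *	sig == SIGKILL	-> rejected with -EPERM
 */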

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}

static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
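			/*
			 * The single irqnumber argument packs both values:
			 * bits 15..8 carry the signal to deliver and bits
			 * 7..0 the IRQ line, so e.g. (SIGUSR1 << 8) | 3
			 * requests IRQ 3 with SIGUSR1 notification
			 * (illustrative encoding example).
			 */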
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}