/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *               stack - Manfred Spraul <manfred@colorfullife.com>
 *
 * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *               them correctly. Now the emulation will be in a
 *               consistent state after stackfaults - Kasper Dupont
 *               <kasperd@daimi.au.dk>
 *
 * 22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *               <kasperd@daimi.au.dk>
 *
 * ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *               caused by Kasper Dupont's changes - Stas Sergeev
 *
 *  4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *               Kasper Dupont <kasperd@daimi.au.dk>
 *
 *  9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *               Kasper Dupont <kasperd@daimi.au.dk>
 *
 *  9 apr 2002 - Changed stack access macros to jump to a label
 *               instead of returning to userspace. This simplifies
 *               do_int, and is needed by handle_vm86_fault. Kasper
 *               Dupont <kasperd@daimi.au.dk>
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
/*
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus
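/*
 * Both shortcuts assume that 'regs' points at the kernel_vm86_regs that is
 * the first member of the kernel_vm86_struct built on the kernel stack by
 * do_sys_vm86(), so the pointer can simply be cast back to the containing
 * structure to reach the vm86plus state.
 */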
/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->eip))
#define SP(regs)	(*(unsigned short *)&((regs)->esp))
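/*
 * These aliases rely on the little-endian layout of the x86 registers in
 * memory: AL/AH pick out the low and second byte of eax, while IP/SP alias
 * the low 16 bits of eip/esp, so real-mode register semantics fall out of
 * plain pointer casts.
 */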
/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))
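/*
 * set_flags() merges bit fields: the bits selected by 'mask' are taken from
 * 'new', everything else keeps its value in X. For example, with mask 0x0F,
 * X = 0xF0 and new = 0x3C the result is 0xFC.
 */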
#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
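/*
 * SAFE_MASK covers the arithmetic and control flags the vm86 program may
 * change directly: CF, PF, AF, ZF, SF, TF, DF and OF (bits 0, 2, 4, 6, 7,
 * 8, 10 and 11). RETURN_MASK additionally includes the reserved low bits
 * but still leaves out IF (bit 9) and the system flags above it, which are
 * handled through the virtual flags in current->thread.v86flags instead.
 */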
#define VM86_REGS_PART2	orig_eax
#define VM86_REGS_SIZE1 \
        ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
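/*
 * VM86_REGS_SIZE1 is a hand-rolled offsetof(struct kernel_vm86_regs,
 * orig_eax); VM86_REGS_SIZE2 is the size of the rest of the structure.
 * The register image is copied to and from userspace in these two chunks,
 * presumably because the user-visible struct vm86_regs and the in-kernel
 * struct kernel_vm86_regs need not have identical layouts ahead of
 * orig_eax, so each half has to land at its own offset.
 */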
struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct *tss;
	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp = copy_to_user(&current->thread.vm86_info->regs, regs, VM86_REGS_SIZE1);
	tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
		&regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
		printk("vm86: could not access userspace vm86_info\n");
#ifndef CONFIG_X86_NO_TSS
	tss = &per_cpu(init_tss, get_cpu());
	current->thread.esp0 = current->thread.saved_esp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_esp0(tss, &current->thread);
	current->thread.saved_esp0 = 0;
#ifndef CONFIG_X86_NO_TSS
	loadsegment(fs, current->thread.saved_fs);
	loadsegment(gs, current->thread.saved_gs);
static void mark_screen_rdonly(struct mm_struct *mm)
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
	pte_unmap_unlock(pte, ptl);
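	/*
	 * The loop above write-protects the 32 PTEs mapping 0xA0000-0xBFFFF,
	 * i.e. the 128 KiB legacy VGA window, which is what the
	 * VM86_SCREEN_BITMAP option relies on: a write to one of those pages
	 * faults, so screen updates can be detected and reflected in the
	 * task's screen_bitmap.
	 */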
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

asmlinkage int sys_vm86old(struct pt_regs regs)
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	if (tsk->thread.saved_esp0)
	tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0; /* we never return here */
asmlinkage int sys_vm86(struct pt_regs regs)
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	struct vm86plus_struct __user *v86;

	case VM86_REQUEST_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
	case VM86_PLUS_INSTALL_CHECK:
		/* NOTE: on old vm86 stuff this will return the error
		   from access_ok(), because the subfunction is
		   interpreted as an (invalid) address to a vm86_struct.
		   So the installation check works.
		 */
	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	if (tsk->thread.saved_esp0)
	v86 = (struct vm86plus_struct __user *)regs.ecx;
	tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0; /* we never return here */
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.__null_ds = 0;
	info->regs.__null_es = 0;
/* we are clearing fs,gs later just before "jmp resume_userspace",
 * because starting with Linux 2.1.x they are no longer saved/restored
 */
/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.eflags;
	info->regs.eflags &= SAFE_MASK;
	info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.eflags |= VM_MASK;
	switch (info->cpu_type) {
		tsk->thread.v86mask = 0;
		tsk->thread.v86mask = NT_MASK | IOPL_MASK;
		tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
		tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
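	/*
	 * The assignments above correspond to the 286, 386, 486 and default
	 * (Pentium or later) cases of the cpu_type switch: the mask selects
	 * which EFLAGS bits the vm86 program is allowed to see and modify,
	 * matching the flags that exist on that CPU generation (NT/IOPL from
	 * the 386 on, AC from the 486 on, ID from the Pentium on).
	 */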
/*
 * Save old state, set default return value (%eax) to 0
 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	savesegment(fs, tsk->thread.saved_fs);
	savesegment(gs, tsk->thread.saved_gs);

#ifndef CONFIG_X86_NO_TSS
	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	tsk->thread.sysenter_cs = 0;
	load_esp0(tss, &tsk->thread);
#ifndef CONFIG_X86_NO_TSS

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);
	__asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
	__asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));

	/* call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(eax), eax);
	__asm__ __volatile__(
		"jmp resume_userspace"
		:"r" (&info->regs), "r" (task_thread_info(tsk)));
	/* we never return here */

static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
static inline void set_IF(struct kernel_vm86_regs * regs)
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);

static inline void clear_IF(struct kernel_vm86_regs * regs)
	VEFLAGS &= ~VIF_MASK;

static inline void clear_TF(struct kernel_vm86_regs * regs)
	regs->eflags &= ~TF_MASK;

static inline void clear_AC(struct kernel_vm86_regs * regs)
	regs->eflags &= ~AC_MASK;
/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 */
static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
	set_flags(VEFLAGS, eflags, current->thread.v86mask);
	set_flags(regs->eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->eflags, flags, SAFE_MASK);
static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
	unsigned long flags = regs->eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
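/*
 * get_vflags() builds the EFLAGS value the vm86 program is supposed to see:
 * the real arithmetic flags, IF taken from the virtual interrupt flag kept
 * in VEFLAGS, and whatever system flags the emulated CPU type exposes.
 */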
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"m" (*bitmap),"r" (nr));
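/*
 * "btl" tests bit 'nr' of the bitmap and copies it into the carry flag;
 * "sbbl %0,%0" then subtracts the output register from itself with borrow,
 * turning the carry into 0 or -1. A nonzero result therefore means that
 * interrupt 'nr' is revectored and must be handled by the 32-bit monitor
 * instead of being reflected into the vm86 interrupt vector table.
 */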
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	if (put_user(__val, base + ptr) < 0) \

#define pushw(base, ptr, val, err_label) \
	if (put_user(val_byte(__val, 1), base + ptr) < 0) \
	if (put_user(val_byte(__val, 0), base + ptr) < 0) \

#define pushl(base, ptr, val, err_label) \
	if (put_user(val_byte(__val, 3), base + ptr) < 0) \
	if (put_user(val_byte(__val, 2), base + ptr) < 0) \
	if (put_user(val_byte(__val, 1), base + ptr) < 0) \
	if (put_user(val_byte(__val, 0), base + ptr) < 0) \

#define popb(base, ptr, err_label) \
	if (get_user(__res, base + ptr) < 0) \

#define popw(base, ptr, err_label) \
	if (get_user(val_byte(__res, 0), base + ptr) < 0) \
	if (get_user(val_byte(__res, 1), base + ptr) < 0) \

#define popl(base, ptr, err_label) \
	if (get_user(val_byte(__res, 0), base + ptr) < 0) \
	if (get_user(val_byte(__res, 1), base + ptr) < 0) \
	if (get_user(val_byte(__res, 2), base + ptr) < 0) \
	if (get_user(val_byte(__res, 3), base + ptr) < 0) \
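/*
 * The push/pop helpers above emulate real-mode stack accesses one byte at a
 * time through put_user()/get_user(), so a 16-bit stack pointer can wrap
 * within its 64 KiB segment and no alignment is assumed; on a fault each
 * macro jumps to the caller-supplied err_label, which is what keeps the
 * emulation in a consistent state after a stack fault (see the changelog
 * entries at the top of the file).
 */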
/* There are so many possible reasons for this function to return
 * VM86_INTx that adding another one doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user * ssp, unsigned short sp)
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->cs == BIOSSEG)
	if (is_revectored(i, &KVM86->int_revectored))
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
	if ((segoffs >> 16) == BIOSSEG)
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->cs = segoffs >> 16;
	IP(regs) = segoffs & 0xffff;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1))
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
		return 1; /* we let this be handled by the calling routine */
	if (current->ptrace & PT_PTRACED) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->blocked, SIGTRAP);
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \

#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & TF_MASK) \
		handle_vm86_trap(regs, 0, 1); \
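/*
 * CHECK_IF_IN_TRAP keeps TF set in the freshly popped flags value when the
 * vm86plus debugger has a single-step pending, so popf/iret cannot clear a
 * trace the debugger relies on. VM86_FAULT_RETURN ends each successfully
 * emulated instruction: it returns to the 32-bit monitor when PIC emulation
 * wants control back (and virtual interrupts are enabled), raises the
 * pending single-step trap if TF was set on entry, and otherwise resumes
 * the vm86 program.
 */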
	orig_flags = *(unsigned short *)&regs->eflags;

	csp = (unsigned char __user *) (regs->cs << 4);
	ssp = (unsigned char __user *) (regs->ss << 4);

		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66: /* 32-bit data */	data32=1; break;
		case 0x67: /* 32-bit address */	break;
		case 0x2e: /* CS */		break;
		case 0x3e: /* DS */		break;
		case 0x26: /* ES */		break;
		case 0x36: /* SS */		break;
		case 0x65: /* GS */		break;
		case 0x64: /* FS */		break;
		case 0xf2: /* repnz */		break;
		case 0xf3: /* rep */		break;
		default: pref_done = 1;
	} while (!pref_done);
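	/*
	 * The loop above consumes instruction prefixes: an operand-size
	 * prefix (0x66) switches the emulation to 32-bit data, while segment
	 * overrides and rep prefixes are simply skipped. The switch below
	 * then emulates the few instructions that trap in vm86 mode with
	 * IOPL below 3: pushf, popf, int xx, iret, cli and sti.
	 */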
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);

		unsigned long newflags;
			newflags = popl(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			set_vflags_long(newflags, regs);
			set_vflags_short(newflags, regs);

		int intno = popb(csp, ip, simulate_sigsegv);
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		do_int(regs, intno, ssp, sp);

		unsigned long newflags;
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			set_vflags_long(newflags, regs);
			set_vflags_short(newflags, regs);
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
		return_to_32bit(regs, VM86_UNKNOWN);

	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
static DEFINE_SPINLOCK(irqbits_lock);

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \

static irqreturn_t irq_handler(int intno, void *dev_id, struct pt_regs * regs)
	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);

	spin_unlock_irqrestore(&irqbits_lock, flags);
static inline void free_vm86_irq(int irqnumber)
	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);

void release_vm86_irqs(struct task_struct *task)
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)

static inline int get_and_reset_irq(int irqnumber)
	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	enable_irq(irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
static int do_vm86_irq_handling(int subfunction, int irqnumber)
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	case VM86_GET_IRQ_BITS: {
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);