*/
#include <linux/capability.h>
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
-#ifndef CONFIG_X86_NO_TSS
struct tss_struct *tss;
-#endif
struct pt_regs *ret;
unsigned long tmp;
do_exit(SIGSEGV);
}
-#ifndef CONFIG_X86_NO_TSS
tss = &per_cpu(init_tss, get_cpu());
-#endif
current->thread.esp0 = current->thread.saved_esp0;
current->thread.sysenter_cs = __KERNEL_CS;
load_esp0(tss, &current->thread);
current->thread.saved_esp0 = 0;
-#ifndef CONFIG_X86_NO_TSS
put_cpu();
-#endif
loadsegment(fs, current->thread.saved_fs);
loadsegment(gs, current->thread.saved_gs);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
-#ifndef CONFIG_X86_NO_TSS
struct tss_struct *tss;
-#endif
long eax;
/*
* make sure the vm86() system call doesn't try to do anything silly
savesegment(fs, tsk->thread.saved_fs);
savesegment(gs, tsk->thread.saved_gs);
-#ifndef CONFIG_X86_NO_TSS
tss = &per_cpu(init_tss, get_cpu());
-#endif
tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
load_esp0(tss, &tsk->thread);
-#ifndef CONFIG_X86_NO_TSS
put_cpu();
-#endif
tsk->thread.screen_bitmap = info->screen_bitmap;
if (info->flags & VM86_SCREEN_BITMAP)
/*call audit_syscall_exit since we do not exit via the normal paths */
if (unlikely(current->audit_context))
- audit_syscall_exit(AUDITSC_RESULT(eax), eax);
+ audit_syscall_exit(current, AUDITSC_RESULT(eax), eax);
__asm__ __volatile__(
"movl %0,%%esp\n\t"