/*
 * include/asm-x86_64/i387.h
 *
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef __ASM_X86_64_I387_H
#define __ASM_X86_64_I387_H

#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>

extern void fpu_init(void);
extern unsigned int mxcsr_feature_mask;
extern void mxcsr_feature_mask_init(void);
extern void init_fpu(struct task_struct *child);
extern int save_i387(struct _fpstate __user *buf);

/*
 * Check whether the FPU image saved in the thread structure still has
 * to be copied out for signal delivery; clears used_math as a side
 * effect.
 */
static inline int need_signal_i387(struct task_struct *me)
{
        if (!me->used_math)
                return 0;
        me->used_math = 0;
        if (me->thread_info->status & TS_USEDFPU)
                return 0;
        return 1;
}

/*
 * FPU lazy state save handling...
 */

#define unlazy_fpu(tsk) do { \
        if ((tsk)->thread_info->status & TS_USEDFPU) \
                save_init_fpu(tsk); \
} while (0)

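/*
 * Illustrative use (the caller is not defined in this header): the
 * context switch path typically flushes the outgoing task's live FPU
 * contents with
 *
 *      unlazy_fpu(prev_p);
 *
 * so that its register state is parked in prev_p->thread.i387 before
 * the incoming task runs.
 */
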
/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
        asm volatile("1: fwait\n"
                     "2:\n"
                     "   .section __ex_table,\"a\"\n"
                     "   .align 8\n"
                     "   .quad 1b,2b\n"
                     "   .previous\n");
}

#define clear_fpu(tsk) do { \
        if ((tsk)->thread_info->status & TS_USEDFPU) {          \
                tolerant_fwait();                               \
                (tsk)->thread_info->status &= ~TS_USEDFPU;      \
                stts();                                         \
        }                                                       \
} while (0)

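/*
 * clear_fpu() throws away whatever is live in the FPU registers for
 * @tsk: pending exceptions are drained harmlessly by tolerant_fwait(),
 * TS_USEDFPU is cleared without saving the register contents, and
 * stts() re-arms the device-not-available trap so the next FPU use
 * goes through the lazy restore path again.
 */
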
/*
 * ptrace request handlers...
 */
extern int get_fpregs(struct user_i387_struct __user *buf,
                      struct task_struct *tsk);
extern int set_fpregs(struct task_struct *tsk,
                      struct user_i387_struct __user *buf);

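/*
 * These are used by the ptrace code to service PTRACE_GETFPREGS and
 * PTRACE_SETFPREGS, i.e. a debugger reading or writing the traced
 * task's FPU image.
 */
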
/*
 * i387 state interaction
 */
#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr)
#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd)
#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd)
#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd)
#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val))
#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))

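/*
 * Rough sketch of how these accessors get used elsewhere (for example
 * in the #MF trap handler, which classifies a math fault from the
 * status bits that are unmasked in the control word):
 *
 *      unsigned short cwd = get_fpu_cwd(task);
 *      unsigned short swd = get_fpu_swd(task);
 *      switch (swd & ~cwd & 0x3f) {
 *      case 0x001: ... invalid operation ...
 *      case 0x004: ... divide by zero ...
 *      }
 */
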
/*
 * Restore the FXSAVE image at @fx into the FPU registers.  A fault on
 * the fxrstor (e.g. a corrupted image) is caught through the exception
 * table: the fixup at label 3 forces err to -1 and the state is then
 * reinitialised with init_fpu().
 */
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
        int err;
        asm volatile("1:  rex64 ; fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     ".section __ex_table,\"a\"\n"
                     "   .align 8\n"
                     "   .quad  1b,3b\n"
                     ".previous"
                     : [err] "=r" (err)
                     : [fx] "r" (fx), "0" (0));
        if (unlikely(err))
                init_fpu(current);
        return err;
}

/*
 * Save the current FPU state directly into the user-space buffer @fx.
 * A fault while writing is caught through the exception table; on
 * error the buffer is cleared and -1 is returned.
 */
static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
{
        int err;
        asm volatile("1:  rex64 ; fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     ".section __ex_table,\"a\"\n"
                     "   .align 8\n"
                     "   .quad  1b,3b\n"
                     ".previous"
                     : [err] "=r" (err)
                     : [fx] "r" (fx), "0" (0));
        if (unlikely(err))
                __clear_user(fx, sizeof(struct i387_fxsave_struct));
        return err;
}

static inline void kernel_fpu_begin(void)
{
        struct thread_info *me = current_thread_info();
        preempt_disable();
        if (me->status & TS_USEDFPU) {
                asm volatile("rex64 ; fxsave %0 ; fnclex"
                              : "=m" (me->task->thread.i387.fxsave));
                me->status &= ~TS_USEDFPU;
                return;
        }
        clts();
}

static inline void kernel_fpu_end(void)
{
        stts();
        preempt_enable();
}

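/*
 * Sketch of the intended calling pattern (the caller and its SSE work
 * are illustrative, not part of this header): kernel code that wants
 * to touch the FPU/SSE registers must bracket the work so user state
 * is saved first and the TS bit is restored afterwards.
 *
 *      kernel_fpu_begin();
 *      ...use FPU/SSE registers, e.g. an SSE-optimised memory copy...
 *      kernel_fpu_end();
 *
 * No sleeping is allowed in between because preemption is disabled.
 */
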
static inline void save_init_fpu(struct task_struct *tsk)
{
        asm volatile("rex64 ; fxsave %0 ; fnclex"
                      : "=m" (tsk->thread.i387.fxsave));
        tsk->thread_info->status &= ~TS_USEDFPU;
        stts();
}

/*
 * This restores directly out of user space. Exceptions are handled.
 */
static inline int restore_i387(struct _fpstate __user *buf)
{
        return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
}
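
/*
 * restore_i387() is the signal-return counterpart of save_i387(): the
 * sigreturn path typically hands it the _fpstate area of the signal
 * frame so the task's FPU registers are reloaded from user memory.
 */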

#endif /* __ASM_X86_64_I387_H */