/*
 *  linux/arch/i386/kernel/i387.c
 *
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
11 #include <linux/config.h>
12 #include <linux/sched.h>
13 #include <asm/processor.h>
15 #include <asm/math_emu.h>
16 #include <asm/sigcontext.h>
18 #include <asm/ptrace.h>
19 #include <asm/uaccess.h>
/*
 * HAVE_HWFP: whether hardware floating point is available.  With math
 * emulation configured, defer to the boot-time CPU probe; otherwise
 * hardware FP is assumed present.  The original #ifdef was truncated —
 * restore the #else/#endif so the macro is always defined.
 */
#ifdef CONFIG_MATH_EMULATION
#define HAVE_HWFP (boot_cpu_data.hard_math)
#else
#define HAVE_HWFP 1
#endif
27 unsigned long mxcsr_feature_mask = 0xffffffff;
29 void mxcsr_feature_mask_init(void)
31 unsigned long mask = 0;
34 memset(¤t->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
35 asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
36 mask = current->thread.i387.fxsave.mxcsr_mask;
37 if (mask == 0) mask = 0x0000ffbf;
39 mxcsr_feature_mask &= mask;
/*
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default
 * value at reset if we support XMM instructions and then
 * remember the current task has used the FPU.
 */
49 void init_fpu(struct task_struct *tsk)
52 memset(&tsk->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
53 tsk->thread.i387.fxsave.cwd = 0x37f;
55 tsk->thread.i387.fxsave.mxcsr = 0x1f80;
57 memset(&tsk->thread.i387.fsave, 0, sizeof(struct i387_fsave_struct));
58 tsk->thread.i387.fsave.cwd = 0xffff037fu;
59 tsk->thread.i387.fsave.swd = 0xffff0000u;
60 tsk->thread.i387.fsave.twd = 0xffffffffu;
61 tsk->thread.i387.fsave.fos = 0xffff0000u;
63 /* only the device not available exception or ptrace can call init_fpu */
64 set_stopped_child_used_math(tsk);
/*
 * FPU lazy state save handling.
 */
71 void kernel_fpu_begin(void)
73 struct thread_info *thread = current_thread_info();
76 if (thread->status & TS_USEDFPU) {
77 __save_init_fpu(thread->task);
83 void restore_fpu( struct task_struct *tsk )
86 asm volatile( "fxrstor %0"
87 : : "m" (tsk->thread.i387.fxsave) );
89 asm volatile( "frstor %0"
90 : : "m" (tsk->thread.i387.fsave) );
/*
 * FPU tag word conversions.
 */
/*
 * Convert an i387 tag word (2 bits per register: 00 valid, 01 zero,
 * 10 special, 11 empty) to the FXSR 1-bit-per-register form (1 = not
 * empty, 0 = empty).  Works by inverting so only "empty" pairs become
 * 00, then OR-folding each pair to one bit and compacting the eight
 * result bits into the low byte.  Restores the `tmp = ~twd;` seed and
 * the `return tmp;` dropped from the corrupted copy.
 */
static inline unsigned short twd_i387_to_fxsr( unsigned short twd )
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
	return tmp;
}
112 static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave )
114 struct _fpxreg *st = NULL;
115 unsigned long tos = (fxsave->swd >> 11) & 7;
116 unsigned long twd = (unsigned long) fxsave->twd;
118 unsigned long ret = 0xffff0000u;
121 #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16);
123 for ( i = 0 ; i < 8 ; i++ ) {
125 st = FPREG_ADDR( fxsave, (i - tos) & 7 );
127 switch ( st->exponent & 0x7fff ) {
129 tag = 2; /* Special */
132 if ( !st->significand[0] &&
133 !st->significand[1] &&
134 !st->significand[2] &&
135 !st->significand[3] ) {
138 tag = 2; /* Special */
142 if ( st->significand[3] & 0x8000 ) {
145 tag = 2; /* Special */
152 ret |= (tag << (2 * i));
/*
 * FPU state interaction.
 */
162 unsigned short get_fpu_cwd( struct task_struct *tsk )
164 if ( cpu_has_fxsr ) {
165 return tsk->thread.i387.fxsave.cwd;
167 return (unsigned short)tsk->thread.i387.fsave.cwd;
171 unsigned short get_fpu_swd( struct task_struct *tsk )
173 if ( cpu_has_fxsr ) {
174 return tsk->thread.i387.fxsave.swd;
176 return (unsigned short)tsk->thread.i387.fsave.swd;
180 unsigned short get_fpu_twd( struct task_struct *tsk )
182 if ( cpu_has_fxsr ) {
183 return tsk->thread.i387.fxsave.twd;
185 return (unsigned short)tsk->thread.i387.fsave.twd;
189 unsigned short get_fpu_mxcsr( struct task_struct *tsk )
192 return tsk->thread.i387.fxsave.mxcsr;
198 void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
200 if ( cpu_has_fxsr ) {
201 tsk->thread.i387.fxsave.cwd = cwd;
203 tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000u);
207 void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
209 if ( cpu_has_fxsr ) {
210 tsk->thread.i387.fxsave.swd = swd;
212 tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000u);
216 void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
218 if ( cpu_has_fxsr ) {
219 tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
221 tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000u);
/*
 * FXSR floating point environment conversions.
 */
229 static int convert_fxsr_to_user( struct _fpstate __user *buf,
230 struct i387_fxsave_struct *fxsave )
232 unsigned long env[7];
233 struct _fpreg __user *to;
234 struct _fpxreg *from;
237 env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
238 env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
239 env[2] = twd_fxsr_to_i387(fxsave);
240 env[3] = fxsave->fip;
241 env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
242 env[5] = fxsave->foo;
243 env[6] = fxsave->fos;
245 if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) )
249 from = (struct _fpxreg *) &fxsave->st_space[0];
250 for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
251 unsigned long __user *t = (unsigned long __user *)to;
252 unsigned long *f = (unsigned long *)from;
254 if (__put_user(*f, t) ||
255 __put_user(*(f + 1), t + 1) ||
256 __put_user(from->exponent, &to->exponent))
262 static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
263 struct _fpstate __user *buf )
265 unsigned long env[7];
267 struct _fpreg __user *from;
270 if ( __copy_from_user( env, buf, 7 * sizeof(long) ) )
273 fxsave->cwd = (unsigned short)(env[0] & 0xffff);
274 fxsave->swd = (unsigned short)(env[1] & 0xffff);
275 fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
276 fxsave->fip = env[3];
277 fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
278 fxsave->fcs = (env[4] & 0xffff);
279 fxsave->foo = env[5];
280 fxsave->fos = env[6];
282 to = (struct _fpxreg *) &fxsave->st_space[0];
284 for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
285 unsigned long *t = (unsigned long *)to;
286 unsigned long __user *f = (unsigned long __user *)from;
288 if (__get_user(*t, f) ||
289 __get_user(*(t + 1), f + 1) ||
290 __get_user(to->exponent, &from->exponent))
/*
 * Signal frame handlers.
 */
300 static inline int save_i387_fsave( struct _fpstate __user *buf )
302 struct task_struct *tsk = current;
305 tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
306 if ( __copy_to_user( buf, &tsk->thread.i387.fsave,
307 sizeof(struct i387_fsave_struct) ) )
312 static int save_i387_fxsave( struct _fpstate __user *buf )
314 struct task_struct *tsk = current;
319 if ( convert_fxsr_to_user( buf, &tsk->thread.i387.fxsave ) )
322 err |= __put_user( tsk->thread.i387.fxsave.swd, &buf->status );
323 err |= __put_user( X86_FXSR_MAGIC, &buf->magic );
327 if ( __copy_to_user( &buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
328 sizeof(struct i387_fxsave_struct) ) )
333 int save_i387( struct _fpstate __user *buf )
338 /* This will cause a "finit" to be triggered by the next
339 * attempted FPU operation by the 'current' process.
344 if ( cpu_has_fxsr ) {
345 return save_i387_fxsave( buf );
347 return save_i387_fsave( buf );
350 return save_i387_soft( ¤t->thread.i387.soft, buf );
354 static inline int restore_i387_fsave( struct _fpstate __user *buf )
356 struct task_struct *tsk = current;
358 return __copy_from_user( &tsk->thread.i387.fsave, buf,
359 sizeof(struct i387_fsave_struct) );
362 static int restore_i387_fxsave( struct _fpstate __user *buf )
365 struct task_struct *tsk = current;
367 err = __copy_from_user( &tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
368 sizeof(struct i387_fxsave_struct) );
369 /* mxcsr reserved bits must be masked to zero for security reasons */
370 tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
371 return err ? 1 : convert_fxsr_from_user( &tsk->thread.i387.fxsave, buf );
374 int restore_i387( struct _fpstate __user *buf )
379 if ( cpu_has_fxsr ) {
380 err = restore_i387_fxsave( buf );
382 err = restore_i387_fsave( buf );
385 err = restore_i387_soft( ¤t->thread.i387.soft, buf );
/*
 * ptrace request handlers.
 */
395 static inline int get_fpregs_fsave( struct user_i387_struct __user *buf,
396 struct task_struct *tsk )
398 return __copy_to_user( buf, &tsk->thread.i387.fsave,
399 sizeof(struct user_i387_struct) );
402 static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf,
403 struct task_struct *tsk )
405 return convert_fxsr_to_user( (struct _fpstate __user *)buf,
406 &tsk->thread.i387.fxsave );
409 int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk )
412 if ( cpu_has_fxsr ) {
413 return get_fpregs_fxsave( buf, tsk );
415 return get_fpregs_fsave( buf, tsk );
418 return save_i387_soft( &tsk->thread.i387.soft,
419 (struct _fpstate __user *)buf );
423 static inline int set_fpregs_fsave( struct task_struct *tsk,
424 struct user_i387_struct __user *buf )
426 return __copy_from_user( &tsk->thread.i387.fsave, buf,
427 sizeof(struct user_i387_struct) );
430 static inline int set_fpregs_fxsave( struct task_struct *tsk,
431 struct user_i387_struct __user *buf )
433 return convert_fxsr_from_user( &tsk->thread.i387.fxsave,
434 (struct _fpstate __user *)buf );
437 int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf )
440 if ( cpu_has_fxsr ) {
441 return set_fpregs_fxsave( tsk, buf );
443 return set_fpregs_fsave( tsk, buf );
446 return restore_i387_soft( &tsk->thread.i387.soft,
447 (struct _fpstate __user *)buf );
451 int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk )
453 if ( cpu_has_fxsr ) {
454 if (__copy_to_user( buf, &tsk->thread.i387.fxsave,
455 sizeof(struct user_fxsr_struct) ))
463 int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf )
467 if ( cpu_has_fxsr ) {
468 if (__copy_from_user( &tsk->thread.i387.fxsave, buf,
469 sizeof(struct user_fxsr_struct) ))
471 /* mxcsr reserved bits must be masked to zero for security reasons */
472 tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
/*
 * FPU state for core dumps.
 */
483 static inline void copy_fpu_fsave( struct task_struct *tsk,
484 struct user_i387_struct *fpu )
486 memcpy( fpu, &tsk->thread.i387.fsave,
487 sizeof(struct user_i387_struct) );
490 static inline void copy_fpu_fxsave( struct task_struct *tsk,
491 struct user_i387_struct *fpu )
494 unsigned short *from;
497 memcpy( fpu, &tsk->thread.i387.fxsave, 7 * sizeof(long) );
499 to = (unsigned short *)&fpu->st_space[0];
500 from = (unsigned short *)&tsk->thread.i387.fxsave.st_space[0];
501 for ( i = 0 ; i < 8 ; i++, to += 5, from += 8 ) {
502 memcpy( to, from, 5 * sizeof(unsigned short) );
506 int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
509 struct task_struct *tsk = current;
511 fpvalid = !!used_math();
514 if ( cpu_has_fxsr ) {
515 copy_fpu_fxsave( tsk, fpu );
517 copy_fpu_fsave( tsk, fpu );
524 int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
526 int fpvalid = !!tsk_used_math(tsk);
532 copy_fpu_fxsave(tsk, fpu);
534 copy_fpu_fsave(tsk, fpu);
539 int dump_task_extended_fpu(struct task_struct *tsk, struct user_fxsr_struct *fpu)
541 int fpvalid = tsk_used_math(tsk) && cpu_has_fxsr;
546 memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(*fpu));