VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index b82a200..2ea5053 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -35,6 +35,7 @@
 #include <linux/init_task.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/mqueue.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -49,6 +50,7 @@ extern unsigned long _get_SP(void);
 
 struct task_struct *last_task_used_math = NULL;
 struct task_struct *last_task_used_altivec = NULL;
+struct task_struct *last_task_used_spe = NULL;
 
 static struct fs_struct init_fs = INIT_FS;
 static struct files_struct init_files = INIT_FILES;
@@ -177,6 +179,34 @@ enable_kernel_altivec(void)
 EXPORT_SYMBOL(enable_kernel_altivec);
 #endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
+int
+dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+{
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+       /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 u32 words */
+       memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+       return 1;
+}
+
+void
+enable_kernel_spe(void)
+{
+       WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+               giveup_spe(current);
+       else
+               giveup_spe(NULL);       /* just enable SPE for kernel - force */
+#else
+       giveup_spe(last_task_used_spe);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_spe);
+#endif /* CONFIG_SPE */
+
 void
 enable_kernel_fp(void)
 {
@@ -244,6 +274,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
                giveup_altivec(prev);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       /*
+        * If the previous thread used spe in the last quantum
+        * (thus changing spe regs) then save them.
+        *
+        * On SMP we always save/restore spe regs just to avoid the
+        * complexity of changing processors.
+        */
+       if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
+               giveup_spe(prev);
+#endif /* CONFIG_SPE */
 #endif /* CONFIG_SMP */
 
        /* Avoid the trap.  On smp this never happens since
@@ -251,6 +292,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
+#ifdef CONFIG_SPE
+       /* Avoid the trap.  On smp this never happens since
+        * we don't set last_task_used_spe
+        */
+       if (new->thread.regs && last_task_used_spe == new)
+               new->thread.regs->msr |= MSR_SPE;
+#endif /* CONFIG_SPE */
        new_thread = &new->thread;
        old_thread = &current->thread;
        last = _switch(old_thread, new_thread);
@@ -354,6 +402,10 @@ void prepare_to_copy(struct task_struct *tsk)
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+#endif /* CONFIG_SPE */
        preempt_enable();
 }
 
@@ -427,9 +479,9 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
        regs->gpr[1] = sp;
        regs->msr = MSR_USER;
        if (last_task_used_math == current)
-               last_task_used_math = 0;
+               last_task_used_math = NULL;
        if (last_task_used_altivec == current)
-               last_task_used_altivec = 0;
+               last_task_used_altivec = NULL;
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr = 0;
 #ifdef CONFIG_ALTIVEC
@@ -438,18 +490,45 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       memset(current->thread.evr, 0, sizeof(current->thread.evr));
+       current->thread.acc = 0;
+       current->thread.spefscr = 0;
+       current->thread.used_spe = 0;
+#endif /* CONFIG_SPE */
 }
 
+#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
+               | PR_FP_EXC_RES | PR_FP_EXC_INV)
+
 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
 {
        struct pt_regs *regs = tsk->thread.regs;
 
-       if (val > PR_FP_EXC_PRECISE)
+       /* This is a bit hairy.  If we are an SPE-enabled processor
+        * (have embedded fp) we store the IEEE exception enable flags in
+        * fpexc_mode.  fpexc_mode is also used for setting the FP exception
+        * mode (async, precise, disabled) for 'Classic' FP. */
+       if (val & PR_FP_EXC_SW_ENABLE) {
+#ifdef CONFIG_SPE
+               tsk->thread.fpexc_mode = val &
+                       (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#else
                return -EINVAL;
-       tsk->thread.fpexc_mode = __pack_fe01(val);
-       if (regs != NULL && (regs->msr & MSR_FP) != 0)
-               regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
-                       | tsk->thread.fpexc_mode;
+#endif
+       } else {
+               /* On a CONFIG_SPE kernel this does not hurt us.  The bits
+                * that __pack_fe01 uses do not overlap with the bits used
+                * for PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1]
+                * bits on CONFIG_SPE implementations are reserved, so
+                * writing to them does not change anything. */
+               if (val > PR_FP_EXC_PRECISE)
+                       return -EINVAL;
+               tsk->thread.fpexc_mode = __pack_fe01(val);
+               if (regs != NULL && (regs->msr & MSR_FP) != 0)
+                       regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
+                               | tsk->thread.fpexc_mode;
+       }
        return 0;
 }
 
@@ -457,8 +536,15 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 {
        unsigned int val;
 
-       val = __unpack_fe01(tsk->thread.fpexc_mode);
-       return put_user(val, (unsigned int *) adr);
+       if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+#ifdef CONFIG_SPE
+               val = tsk->thread.fpexc_mode;
+#else
+               return -EINVAL;
+#endif
+       else
+               val = __unpack_fe01(tsk->thread.fpexc_mode);
+       return put_user(val, (unsigned int __user *) adr);
 }
 
 int sys_clone(unsigned long clone_flags, unsigned long usp,
@@ -506,6 +592,10 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+#endif /* CONFIG_SPE */
        preempt_enable();
        error = do_execve(filename, (char __user *__user *) a1,
                          (char __user *__user *) a2, regs);
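
The reworked set_fpexc_mode()/get_fpexc_mode() above are reached from userspace through prctl(PR_SET_FPEXC) and prctl(PR_GET_FPEXC). The following is a rough illustration only, not part of the patch: a minimal test program exercising the new PR_FP_EXC_SW_ENABLE path, assuming <linux/prctl.h> provides the PR_FP_EXC_* constants. On a kernel built without CONFIG_SPE the first call is expected to fail with EINVAL, matching the #else branch added above.

/* Hypothetical test program, not from the patch: request software-handled
 * IEEE FP exception enables (the SPE path), then read the mode back. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	unsigned int mode;

	/* PR_FP_EXC_SW_ENABLE plus the individual enable bits is stored in
	 * thread.fpexc_mode on CONFIG_SPE kernels; elsewhere it is -EINVAL. */
	if (prctl(PR_SET_FPEXC,
		  PR_FP_EXC_SW_ENABLE | PR_FP_EXC_DIV | PR_FP_EXC_OVF) != 0) {
		perror("PR_SET_FPEXC");
		return 1;
	}

	/* get_fpexc_mode() writes the current mode through the user pointer. */
	if (prctl(PR_GET_FPEXC, (unsigned long) &mode) != 0) {
		perror("PR_GET_FPEXC");
		return 1;
	}

	printf("fpexc_mode = %#x\n", mode);
	return 0;
}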