diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index b82a200..82de66e 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -35,6 +35,8 @@
 #include <linux/init_task.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/mqueue.h>
+#include <linux/hardirq.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/prom.h>
-#include <asm/hardirq.h>
 
 extern unsigned long _get_SP(void);
 
 struct task_struct *last_task_used_math = NULL;
 struct task_struct *last_task_used_altivec = NULL;
+struct task_struct *last_task_used_spe = NULL;
 
 static struct fs_struct init_fs = INIT_FS;
 static struct files_struct init_files = INIT_FILES;
@@ -177,6 +179,34 @@ enable_kernel_altivec(void)
 EXPORT_SYMBOL(enable_kernel_altivec);
 #endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
+int
+dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+{
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+       /* Copy u32 evr[32] + u64 acc + u32 spefscr: 32 + 2 + 1 = 35 u32s */
+       memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+       return 1;
+}
+
+void
+enable_kernel_spe(void)
+{
+       WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+               giveup_spe(current);
+       else
+               giveup_spe(NULL);       /* just enable SPE for kernel - force */
+#else
+       giveup_spe(last_task_used_spe);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_spe);
+#endif /* CONFIG_SPE */
+
 void
 enable_kernel_fp(void)
 {
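
enable_kernel_spe() above follows the same contract as enable_kernel_fp()
and enable_kernel_altivec(): the caller must already hold off preemption
(hence the WARN_ON(preemptible())) while it uses SPE instructions from
kernel context.  A minimal sketch of the expected calling sequence --
spe_copy_page() is a purely illustrative name, not part of this patch:

	preempt_disable();
	enable_kernel_spe();
	spe_copy_page(dst, src);	/* hypothetical SPE-accelerated routine */
	preempt_enable();
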
@@ -244,6 +274,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
                giveup_altivec(prev);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       /*
+        * If the previous thread used spe in the last quantum
+        * (thus changing spe regs) then save them.
+        *
+        * On SMP we always save/restore spe regs just to avoid the
+        * complexity of changing processors.
+        */
+       if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
+               giveup_spe(prev);
+#endif /* CONFIG_SPE */
 #endif /* CONFIG_SMP */
 
        /* Avoid the trap.  On SMP this never happens since
@@ -251,6 +292,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
+#ifdef CONFIG_SPE
+       /* Avoid the trap.  On SMP this never happens since
+        * we don't set last_task_used_spe
+        */
+       if (new->thread.regs && last_task_used_spe == new)
+               new->thread.regs->msr |= MSR_SPE;
+#endif /* CONFIG_SPE */
        new_thread = &new->thread;
        old_thread = &current->thread;
        last = _switch(old_thread, new_thread);
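
For context: the counterpart to the giveup_spe() calls in the two hunks
above is the lazy-load path taken on an SPE-unavailable exception
(load_up_spe, implemented in low-level assembly and not part of this
file).  A hedged C rendering of its logic under the same lazy-switching
scheme; restore_spe_state() is an illustrative helper name:

	/* Sketch only; the real handler is assembly. */
	static void load_up_spe_sketch(struct pt_regs *regs)
	{
	#ifndef CONFIG_SMP
		/* UP: reclaim the register file from the previous owner. */
		if (last_task_used_spe && last_task_used_spe != current)
			giveup_spe(last_task_used_spe);
		last_task_used_spe = current;	/* never set on SMP */
	#endif
		restore_spe_state(&current->thread);	/* evr[], acc, spefscr */
		regs->msr |= MSR_SPE;	/* no trap on the next user access */
		current->thread.used_spe = 1;
	}
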
@@ -273,21 +321,10 @@ void show_regs(struct pt_regs * regs)
        trap = TRAP(regs);
        if (trap == 0x300 || trap == 0x600)
                printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
-       printk("TASK = %p[%d] '%s' THREAD: %p",
+       printk("TASK = %p[%d] '%s' THREAD: %p\n",
               current, current->pid, current->comm, current->thread_info);
        printk("Last syscall: %ld ", current->thread.last_syscall);
 
-#if defined(CONFIG_4xx) && defined(DCRN_PLB0_BEAR)
-       printk("\nPLB0: bear= 0x%8.8x acr=   0x%8.8x besr=  0x%8.8x\n",
-           mfdcr(DCRN_PLB0_BEAR), mfdcr(DCRN_PLB0_ACR),
-           mfdcr(DCRN_PLB0_BESR));
-#endif
-#if defined(CONFIG_4xx) && defined(DCRN_POB0_BEAR)
-       printk("PLB0 to OPB: bear= 0x%8.8x besr0= 0x%8.8x besr1= 0x%8.8x\n",
-           mfdcr(DCRN_POB0_BEAR), mfdcr(DCRN_POB0_BESR0),
-           mfdcr(DCRN_POB0_BESR1));
-#endif
-
 #ifdef CONFIG_SMP
        printk(" CPU: %d", smp_processor_id());
 #endif /* CONFIG_SMP */
@@ -322,6 +359,10 @@ void exit_thread(void)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
+#ifdef CONFIG_SPE
+       if (last_task_used_spe == current)
+               last_task_used_spe = NULL;
+#endif
 }
 
 void flush_thread(void)
@@ -330,6 +371,10 @@ void flush_thread(void)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
+#ifdef CONFIG_SPE
+       if (last_task_used_spe == current)
+               last_task_used_spe = NULL;
+#endif
 }
 
 void
@@ -354,6 +399,10 @@ void prepare_to_copy(struct task_struct *tsk)
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+#endif /* CONFIG_SPE */
        preempt_enable();
 }
 
@@ -370,8 +419,6 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
        unsigned long childframe;
 
-       p->set_child_tid = p->clear_child_tid = NULL;
-
        CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
@@ -427,9 +474,13 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
        regs->gpr[1] = sp;
        regs->msr = MSR_USER;
        if (last_task_used_math == current)
-               last_task_used_math = 0;
+               last_task_used_math = NULL;
        if (last_task_used_altivec == current)
-               last_task_used_altivec = 0;
+               last_task_used_altivec = NULL;
+#ifdef CONFIG_SPE
+       if (last_task_used_spe == current)
+               last_task_used_spe = NULL;
+#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr = 0;
 #ifdef CONFIG_ALTIVEC
@@ -438,18 +489,45 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       memset(current->thread.evr, 0, sizeof(current->thread.evr));
+       current->thread.acc = 0;
+       current->thread.spefscr = 0;
+       current->thread.used_spe = 0;
+#endif /* CONFIG_SPE */
 }
 
+#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
+               | PR_FP_EXC_RES | PR_FP_EXC_INV)
+
 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
 {
        struct pt_regs *regs = tsk->thread.regs;
 
-       if (val > PR_FP_EXC_PRECISE)
+       /* This is a bit hairy.  If we are an SPE-enabled processor
+        * (i.e. have embedded FP) we store the IEEE exception enable flags
+        * in fpexc_mode.  fpexc_mode is also used for setting the FP
+        * exception mode (async, precise, disabled) for 'Classic' FP. */
+       if (val & PR_FP_EXC_SW_ENABLE) {
+#ifdef CONFIG_SPE
+               tsk->thread.fpexc_mode = val &
+                       (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#else
                return -EINVAL;
-       tsk->thread.fpexc_mode = __pack_fe01(val);
-       if (regs != NULL && (regs->msr & MSR_FP) != 0)
-               regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
-                       | tsk->thread.fpexc_mode;
+#endif
+       } else {
+               /* On a CONFIG_SPE build this does not hurt us.  The bits
+                * that __pack_fe01 uses do not overlap with the bits used
+                * for PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1]
+                * bits on CONFIG_SPE implementations are reserved, so
+                * writing to them does not change anything */
+               if (val > PR_FP_EXC_PRECISE)
+                       return -EINVAL;
+               tsk->thread.fpexc_mode = __pack_fe01(val);
+               if (regs != NULL && (regs->msr & MSR_FP) != 0)
+                       regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
+                               | tsk->thread.fpexc_mode;
+       }
        return 0;
 }
 
@@ -457,8 +535,15 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 {
        unsigned int val;
 
-       val = __unpack_fe01(tsk->thread.fpexc_mode);
-       return put_user(val, (unsigned int *) adr);
+       if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+#ifdef CONFIG_SPE
+               val = tsk->thread.fpexc_mode;
+#else
+               return -EINVAL;
+#endif
+       else
+               val = __unpack_fe01(tsk->thread.fpexc_mode);
+       return put_user(val, (unsigned int __user *) adr);
 }
 
 int sys_clone(unsigned long clone_flags, unsigned long usp,
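
Taken together, the set_fpexc_mode()/get_fpexc_mode() hunks above define
the user-visible interface: on a CONFIG_SPE kernel a process can request
software-emulated IEEE exceptions via prctl(), and read the mode back.
A small userspace sketch (the PR_FP_EXC_SW_ENABLE request fails with
-EINVAL on non-SPE kernels):

	#include <stdio.h>
	#include <sys/prctl.h>		/* PR_SET_FPEXC, PR_FP_EXC_* */

	int main(void)
	{
		unsigned int mode;

		/* Enable software IEEE exceptions for divide and invalid ops. */
		if (prctl(PR_SET_FPEXC,
			  PR_FP_EXC_SW_ENABLE | PR_FP_EXC_DIV | PR_FP_EXC_INV) < 0)
			perror("PR_SET_FPEXC");

		/* The kernel writes the current mode through the pointer. */
		if (prctl(PR_GET_FPEXC, &mode) == 0)
			printf("fpexc mode: %#x\n", mode);
		return 0;
	}
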
@@ -469,8 +554,7 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
-       return do_fork(clone_flags & ~CLONE_IDLETASK, usp, regs, 0,
-                       parent_tidp, child_tidp);
+       return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
 }
 
 int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
@@ -506,11 +590,18 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+#endif /* CONFIG_SPE */
        preempt_enable();
        error = do_execve(filename, (char __user *__user *) a1,
                          (char __user *__user *) a2, regs);
-       if (error == 0)
+       if (error == 0) {
+               task_lock(current);
                current->ptrace &= ~PT_DTRACE;
+               task_unlock(current);
+       }
        putname(filename);
 out:
        return error;
@@ -572,7 +663,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
                ++count;
                sp = *(unsigned long *)sp;
        }
-#if !CONFIG_KALLSYMS
+#ifndef CONFIG_KALLSYMS
        if (count > 0)
                printk("\n");
 #endif