diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 7e47f38..d9ab6a7 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -35,6 +35,8 @@
 #include <linux/init_task.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/mqueue.h>
+#include <linux/hardirq.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/prom.h>
-#include <asm/hardirq.h>
 
 extern unsigned long _get_SP(void);
 
 struct task_struct *last_task_used_math = NULL;
 struct task_struct *last_task_used_altivec = NULL;
+struct task_struct *last_task_used_spe = NULL;
 
 static struct fs_struct init_fs = INIT_FS;
 static struct files_struct init_files = INIT_FILES;
@@ -163,7 +165,7 @@ dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
 void
 enable_kernel_altivec(void)
 {
-       WARN_ON(current_thread_info()->preempt_count == 0 && !irqs_disabled());
+       WARN_ON(preemptible());
 
 #ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
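preemptible() expresses exactly the condition the removed line open-coded. On CONFIG_PREEMPT kernels the generic header defines it along these lines (shown only for context; on non-preemptible kernels it evaluates to 0, so the WARN_ON never triggers there):

	/* roughly: true when nothing prevents us from being scheduled away */
	#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
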
@@ -177,10 +179,38 @@ enable_kernel_altivec(void)
 EXPORT_SYMBOL(enable_kernel_altivec);
 #endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
+int
+dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+{
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+       /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+       memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+       return 1;
+}
+
+void
+enable_kernel_spe(void)
+{
+       WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+               giveup_spe(current);
+       else
+               giveup_spe(NULL);       /* just enable SPE for kernel - force */
+#else
+       giveup_spe(last_task_used_spe);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_spe);
+#endif /* CONFIG_SPE */
+
 void
 enable_kernel_fp(void)
 {
-       WARN_ON(current_thread_info()->preempt_count == 0 && !irqs_disabled());
+       WARN_ON(preemptible());
 
 #ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
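Like enable_kernel_fp() and enable_kernel_altivec(), enable_kernel_spe() is meant to be called with preemption already disabled (hence the WARN_ON(preemptible())); it flushes any live user SPE state and turns the unit on for kernel use. A minimal sketch of the expected calling pattern; the function name is made up for illustration:

	/* hypothetical in-kernel SPE user -- sketch of the calling convention only */
	static void do_something_with_spe(void)
	{
		preempt_disable();	/* keeps WARN_ON(preemptible()) quiet */
		enable_kernel_spe();	/* flush user SPE state, force MSR_SPE on */
		/* ... SPE instructions go here ... */
		preempt_enable();	/* user state is restored lazily on the next trap */
	}
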
@@ -244,6 +274,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
                giveup_altivec(prev);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       /*
+        * If the previous thread used spe in the last quantum
+        * (thus changing spe regs) then save them.
+        *
+        * On SMP we always save/restore spe regs just to avoid the
+        * complexity of changing processors.
+        */
+       if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
+               giveup_spe(prev);
+#endif /* CONFIG_SPE */
 #endif /* CONFIG_SMP */
 
        /* Avoid the trap.  On smp this never happens since
@@ -251,6 +292,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
+#ifdef CONFIG_SPE
+       /* Avoid the trap.  On smp this never happens since
+        * we don't set last_task_used_spe
+        */
+       if (new->thread.regs && last_task_used_spe == new)
+               new->thread.regs->msr |= MSR_SPE;
+#endif /* CONFIG_SPE */
        new_thread = &new->thread;
        old_thread = &current->thread;
        last = _switch(old_thread, new_thread);
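The lazy switching above, and the state clearing and dumping elsewhere in the patch, rely on per-task SPE state living in thread_struct (current->thread.evr / acc / spefscr / used_spe). Roughly, the fields this patch expects <asm/processor.h> to provide look like the following; the exact types and comments are an assumption:

	#ifdef CONFIG_SPE
		unsigned long	evr[32];	/* upper 32 bits of the 64-bit GPRs */
		u64		acc;		/* SPE accumulator */
		unsigned long	spefscr;	/* SPE & eFP status/control */
		int		used_spe;	/* set once the task has used SPE */
	#endif /* CONFIG_SPE */
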
@@ -354,6 +402,10 @@ void prepare_to_copy(struct task_struct *tsk)
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+#endif /* CONFIG_SPE */
        preempt_enable();
 }
 
@@ -427,9 +479,9 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
        regs->gpr[1] = sp;
        regs->msr = MSR_USER;
        if (last_task_used_math == current)
-               last_task_used_math = 0;
+               last_task_used_math = NULL;
        if (last_task_used_altivec == current)
-               last_task_used_altivec = 0;
+               last_task_used_altivec = NULL;
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr = 0;
 #ifdef CONFIG_ALTIVEC
@@ -438,18 +490,45 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       memset(current->thread.evr, 0, sizeof(current->thread.evr));
+       current->thread.acc = 0;
+       current->thread.spefscr = 0;
+       current->thread.used_spe = 0;
+#endif /* CONFIG_SPE */
 }
 
+#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
+               | PR_FP_EXC_RES | PR_FP_EXC_INV)
+
 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
 {
        struct pt_regs *regs = tsk->thread.regs;
 
-       if (val > PR_FP_EXC_PRECISE)
+       /* This is a bit hairy.  If we are an SPE-enabled processor
+        * (have embedded fp) we store the IEEE exception enable flags in
+        * fpexc_mode.  fpexc_mode is also used for setting FP exception
+        * mode (async, precise, disabled) for 'Classic' FP. */
+       if (val & PR_FP_EXC_SW_ENABLE) {
+#ifdef CONFIG_SPE
+               tsk->thread.fpexc_mode = val &
+                       (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#else
                return -EINVAL;
-       tsk->thread.fpexc_mode = __pack_fe01(val);
-       if (regs != NULL && (regs->msr & MSR_FP) != 0)
-               regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
-                       | tsk->thread.fpexc_mode;
+#endif
+       } else {
+               /* On a CONFIG_SPE build this does not hurt us.  The bits
+                * that __pack_fe01 uses do not overlap with the bits used
+                * for PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1]
+                * bits on CONFIG_SPE implementations are reserved, so
+                * writing to them does not change anything. */
+               if (val > PR_FP_EXC_PRECISE)
+                       return -EINVAL;
+               tsk->thread.fpexc_mode = __pack_fe01(val);
+               if (regs != NULL && (regs->msr & MSR_FP) != 0)
+                       regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
+                               | tsk->thread.fpexc_mode;
+       }
        return 0;
 }
 
@@ -457,8 +536,15 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 {
        unsigned int val;
 
-       val = __unpack_fe01(tsk->thread.fpexc_mode);
-       return put_user(val, (unsigned int *) adr);
+       if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+#ifdef CONFIG_SPE
+               val = tsk->thread.fpexc_mode;
+#else
+               return -EINVAL;
+#endif
+       else
+               val = __unpack_fe01(tsk->thread.fpexc_mode);
+       return put_user(val, (unsigned int __user *) adr);
 }
 
 int sys_clone(unsigned long clone_flags, unsigned long usp,
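Both paths are driven from user space through prctl(): PR_SET_FPEXC lands in set_fpexc_mode() and PR_GET_FPEXC in get_fpexc_mode(). A minimal user-space sketch (constants come from <linux/prctl.h> via <sys/prctl.h>; the helper names are made up and error handling is trimmed):

	#include <sys/prctl.h>

	/* SPE task: keep the individual IEEE enable bits in fpexc_mode (sketch) */
	static int set_sw_fp_exceptions(void)
	{
		return prctl(PR_SET_FPEXC,
			     PR_FP_EXC_SW_ENABLE | PR_FP_EXC_DIV | PR_FP_EXC_INV);
	}

	/* Read the mode back: either the SW_ENABLE mask or the classic mode */
	static unsigned int get_fp_exc_mode(void)
	{
		unsigned int mode = 0;

		prctl(PR_GET_FPEXC, &mode);
		return mode;	/* e.g. PR_FP_EXC_PRECISE on a classic-FP task */
	}
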
@@ -469,8 +555,7 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
-       return do_fork(clone_flags & ~CLONE_IDLETASK, usp, regs, 0,
-                       parent_tidp, child_tidp);
+       return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
 }
 
 int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
@@ -506,6 +591,10 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+#endif /* CONFIG_SPE */
        preempt_enable();
        error = do_execve(filename, (char __user *__user *) a1,
                          (char __user *__user *) a2, regs);
@@ -572,7 +661,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
                ++count;
                sp = *(unsigned long *)sp;
        }
-#if !CONFIG_KALLSYMS
+#ifndef CONFIG_KALLSYMS
        if (count > 0)
                printk("\n");
 #endif
@@ -668,12 +757,6 @@ void __init ll_puts(const char *s)
 }
 #endif
 
-/*
- * These bracket the sleeping functions..
- */
-#define first_sched    ((unsigned long) scheduling_functions_start_here)
-#define last_sched     ((unsigned long) scheduling_functions_end_here)
-
 unsigned long get_wchan(struct task_struct *p)
 {
        unsigned long ip, sp;
@@ -688,7 +771,7 @@ unsigned long get_wchan(struct task_struct *p)
                        return 0;
                if (count > 0) {
                        ip = *(unsigned long *)(sp + 4);
-                       if (ip < first_sched || ip >= last_sched)
+                       if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
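
in_sched_functions() replaces the old first_sched/last_sched bracketing; conceptually it just tests whether the return address lies inside the __sched text section that the linker script collects the scheduling/sleeping primitives into. A simplified sketch (the real helper lives in kernel/sched.c and may cover additional ranges):

	/* simplified sketch of the generic helper used in get_wchan() above */
	extern char __sched_text_start[], __sched_text_end[];

	static inline int in_sched_functions_sketch(unsigned long addr)
	{
		return addr >= (unsigned long)__sched_text_start &&
		       addr <  (unsigned long)__sched_text_end;
	}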