#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
+#include <linux/mqueue.h>
+#include <linux/hardirq.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
-#include <asm/hardirq.h>
extern unsigned long _get_SP(void);
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
+struct task_struct *last_task_used_spe = NULL;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
void
enable_kernel_altivec(void)
{
- WARN_ON(current_thread_info()->preempt_count == 0 && !irqs_disabled());
+ WARN_ON(preemptible());
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
EXPORT_SYMBOL(enable_kernel_altivec);
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
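+/*
+ * Flush the current task's live SPE state and copy the saved
+ * evr[], acc and spefscr values into @evrregs.
+ */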
+int
+dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+{
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+ /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 u32 words */
+ memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+ return 1;
+}
+
+void
+enable_kernel_spe(void)
+{
+ WARN_ON(preemptible());
+
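+ /*
+ * As with enable_kernel_fp/altivec: on SMP we can only flush the
+ * current task's SPE state (last_task_used_spe is not tracked), and
+ * giveup_spe(NULL) just forces MSR_SPE on for the kernel; on UP we
+ * flush whichever task last owned the SPE unit.
+ */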
+#ifdef CONFIG_SMP
+ if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+ giveup_spe(current);
+ else
+ giveup_spe(NULL); /* just enable SPE for kernel - force */
+#else
+ giveup_spe(last_task_used_spe);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_spe);
+#endif /* CONFIG_SPE */
+
void
enable_kernel_fp(void)
{
- WARN_ON(current_thread_info()->preempt_count == 0 && !irqs_disabled());
+ WARN_ON(preemptible());
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+ /*
+ * If the previous thread used spe in the last quantum
+ * (thus changing spe regs) then save them.
+ *
+ * On SMP we always save/restore spe regs just to avoid the
+ * complexity of changing processors.
+ */
+ if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
+ giveup_spe(prev);
+#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */
/* Avoid the trap. On smp this never happens since
*/
if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
+#ifdef CONFIG_SPE
+ /* Avoid the trap. On smp this never happens since
+ * we don't set last_task_used_spe
+ */
+ if (new->thread.regs && last_task_used_spe == new)
+ new->thread.regs->msr |= MSR_SPE;
+#endif /* CONFIG_SPE */
new_thread = &new->thread;
old_thread = &current->thread;
last = _switch(old_thread, new_thread);
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
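+ /* Push any live SPE state back into the thread_struct */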
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+#endif /* CONFIG_SPE */
preempt_enable();
}
regs->gpr[1] = sp;
regs->msr = MSR_USER;
if (last_task_used_math == current)
- last_task_used_math = 0;
+ last_task_used_math = NULL;
if (last_task_used_altivec == current)
- last_task_used_altivec = 0;
+ last_task_used_altivec = NULL;
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
current->thread.vrsave = 0;
current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
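+ /* Start the new thread with clean SPE state */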
+ memset(current->thread.evr, 0, sizeof(current->thread.evr));
+ current->thread.acc = 0;
+ current->thread.spefscr = 0;
+ current->thread.used_spe = 0;
+#endif /* CONFIG_SPE */
}
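+/* All of the individual user-visible IEEE FP exception enable bits */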
+#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
+ | PR_FP_EXC_RES | PR_FP_EXC_INV)
+
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
struct pt_regs *regs = tsk->thread.regs;
- if (val > PR_FP_EXC_PRECISE)
+ /* This is a bit hairy. If we are an SPE-enabled processor
+ * (have embedded FP) we store the IEEE exception enable flags in
+ * fpexc_mode. fpexc_mode is also used for setting the FP exception
+ * mode (async, precise, disabled) for 'Classic' FP. */
+ if (val & PR_FP_EXC_SW_ENABLE) {
+#ifdef CONFIG_SPE
+ tsk->thread.fpexc_mode = val &
+ (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#else
return -EINVAL;
- tsk->thread.fpexc_mode = __pack_fe01(val);
- if (regs != NULL && (regs->msr & MSR_FP) != 0)
- regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
- | tsk->thread.fpexc_mode;
+#endif
+ } else {
+ /* On a CONFIG_SPE build this does not hurt us. The bits that
+ * __pack_fe01 uses do not overlap with the bits used for
+ * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
+ * on CONFIG_SPE implementations are reserved, so writing to
+ * them does not change anything. */
+ if (val > PR_FP_EXC_PRECISE)
+ return -EINVAL;
+ tsk->thread.fpexc_mode = __pack_fe01(val);
+ if (regs != NULL && (regs->msr & MSR_FP) != 0)
+ regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
+ | tsk->thread.fpexc_mode;
+ }
return 0;
}
{
unsigned int val;
- val = __unpack_fe01(tsk->thread.fpexc_mode);
- return put_user(val, (unsigned int *) adr);
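+ /* Software-handled (SPE) exception bits are reported as-is;
+ * otherwise unpack the classic MSR FE0/FE1 encoding. */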
+ if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+#ifdef CONFIG_SPE
+ val = tsk->thread.fpexc_mode;
+#else
+ return -EINVAL;
+#endif
+ else
+ val = __unpack_fe01(tsk->thread.fpexc_mode);
+ return put_user(val, (unsigned int __user *) adr);
}
int sys_clone(unsigned long clone_flags, unsigned long usp,
CHECK_FULL_REGS(regs);
if (usp == 0)
usp = regs->gpr[1]; /* stack pointer for child */
- return do_fork(clone_flags & ~CLONE_IDLETASK, usp, regs, 0,
- parent_tidp, child_tidp);
+ return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
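+ /* Save any live SPE state before do_execve() replaces this image */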
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+#endif /* CONFIG_SPE */
preempt_enable();
error = do_execve(filename, (char __user * __user *) a1,
(char __user * __user *) a2, regs);
++count;
sp = *(unsigned long *)sp;
}
-#if !CONFIG_KALLSYMS
+#ifndef CONFIG_KALLSYMS
if (count > 0)
printk("\n");
#endif
}
#endif
-/*
- * These bracket the sleeping functions..
- */
-#define first_sched ((unsigned long) scheduling_functions_start_here)
-#define last_sched ((unsigned long) scheduling_functions_end_here)
-
unsigned long get_wchan(struct task_struct *p)
{
unsigned long ip, sp;
return 0;
if (count > 0) {
ip = *(unsigned long *)(sp + 4);
- if (ip < first_sched || ip >= last_sched)
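+ /* Report the first return address outside the scheduler itself */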
+ if (!in_sched_functions(ip))
return ip;
}
} while (count++ < 16);