/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/init.h>
#include <linux/completion.h>

#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
/*
 * We use this if we don't have any better idle routine..
 * (This to kill: kernel/platform.c.
 */
/*
 * NOTE(review): the body was missing from the damaged source; the upstream
 * MIPS version of this function is empty (no wait instruction issued here),
 * so it is restored as a no-op — confirm against the original tree.
 */
void default_idle(void)
{
}
49 * The idle thread. There's no useful work to be done, so just try to conserve
50 * power and have a low exit latency (ie sit in a loop waiting for somebody to
51 * say that they'd like to reschedule)
53 ATTRIB_NORET void cpu_idle(void)
55 /* endless idle loop with no priority at all */
57 while (!need_resched())
64 asmlinkage void ret_from_fork(void);
66 void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
70 /* New thread loses kernel privileges. */
71 status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|KU_MASK);
74 status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
77 regs->cp0_status = status;
78 current->used_math = 0;
82 current_thread_info()->addr_limit = USER_DS;
/* Per-thread exit hook. NOTE(review): body missing in the damaged source;
 * the upstream MIPS version is empty — restored as a no-op, confirm. */
void exit_thread(void)
{
}
/* Reset thread state on exec. NOTE(review): body missing in the damaged
 * source; the upstream MIPS version is empty — restored as a no-op, confirm. */
void flush_thread(void)
{
}
93 int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
94 unsigned long unused, struct task_struct *p, struct pt_regs *regs)
96 struct thread_info *ti = p->thread_info;
97 struct pt_regs *childregs;
100 childksp = (unsigned long)ti + THREAD_SIZE - 32;
102 if (is_fpu_owner()) {
106 /* set up new TSS. */
107 childregs = (struct pt_regs *) childksp - 1;
109 childregs->regs[7] = 0; /* Clear error flag */
111 #ifdef CONFIG_BINFMT_IRIX
112 if (current->personality != PER_LINUX) {
113 /* Under IRIX things are a little different. */
114 childregs->regs[2] = 0;
115 childregs->regs[3] = 1;
116 regs->regs[2] = p->pid;
121 childregs->regs[2] = 0; /* Child gets zero as return value */
122 regs->regs[2] = p->pid;
125 if (childregs->cp0_status & ST0_CU0) {
126 childregs->regs[28] = (unsigned long) ti;
127 childregs->regs[29] = childksp;
128 ti->addr_limit = KERNEL_DS;
130 childregs->regs[29] = usp;
131 ti->addr_limit = USER_DS;
133 p->thread.reg29 = (unsigned long) childregs;
134 p->thread.reg31 = (unsigned long) ret_from_fork;
137 * New tasks lose permission to use the fpu. This accelerates context
138 * switching for most programs since they don't use the fpu.
140 p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
141 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
142 clear_tsk_thread_flag(p, TIF_USEDFPU);
143 p->set_child_tid = p->clear_child_tid = NULL;
148 /* Fill in the fpu structure for a core dump.. */
149 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
151 memcpy(r, ¤t->thread.fpu, sizeof(current->thread.fpu));
156 * Create a kernel thread
158 long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
162 __asm__ __volatile__(
167 " beq $6, $sp, 1f \n"
168 #ifdef CONFIG_MIPS32 /* On o32 the caller has to create the stackframe */
176 #ifdef CONFIG_MIPS32 /* On o32 the caller has to deallocate the stackframe */
181 : "i" (__NR_clone), "i" (__NR_exit), "r" (arg), "r" (fn),
182 "r" (flags | CLONE_VM | CLONE_UNTRACED)
184 * The called subroutine might have destroyed any of the
185 * at, result, argument or temporary registers ...
187 : "$2", "$3", "$4", "$5", "$6", "$7", "$8",
188 "$9","$10","$11","$12","$13","$14","$15","$24","$25","$31");
/*
 * Stack-frame layout discovered by scanning a function's prologue:
 * word offsets (relative to the saved $sp frame) at which the return
 * address ($ra/$31) and frame pointer ($s8/$30) are stored, or -1 when
 * not found.  NOTE(review): member order restored from upstream — confirm.
 */
struct mips_frame_info {
	int frame_offset;	/* word offset of saved $s8, -1 if unknown */
	int pc_offset;		/* word offset of saved $ra, -1 if unknown */
};

/* Prologue info for each scheduler/sleep function get_wchan() walks. */
static struct mips_frame_info schedule_frame;
static struct mips_frame_info schedule_timeout_frame;
static struct mips_frame_info sleep_on_frame;
static struct mips_frame_info sleep_on_timeout_frame;
static struct mips_frame_info wait_for_completion_frame;

/* Nonzero once frame_info_init() analyzed every frame successfully. */
static int mips_frame_info_initialized;
203 static int __init get_frame_info(struct mips_frame_info *info, void *func)
206 union mips_instruction *ip = (union mips_instruction *)func;
207 info->pc_offset = -1;
208 info->frame_offset = -1;
209 for (i = 0; i < 128; i++, ip++) {
210 /* if jal, jalr, jr, stop. */
211 if (ip->j_format.opcode == jal_op ||
212 (ip->r_format.opcode == spec_op &&
213 (ip->r_format.func == jalr_op ||
214 ip->r_format.func == jr_op)))
219 ip->i_format.opcode == sw_op &&
222 ip->i_format.opcode == sd_op &&
224 ip->i_format.rs == 29)
226 /* sw / sd $ra, offset($sp) */
227 if (ip->i_format.rt == 31) {
228 if (info->pc_offset != -1)
231 ip->i_format.simmediate / sizeof(long);
233 /* sw / sd $s8, offset($sp) */
234 if (ip->i_format.rt == 30) {
235 if (info->frame_offset != -1)
238 ip->i_format.simmediate / sizeof(long);
242 if (info->pc_offset == -1 || info->frame_offset == -1) {
243 printk("Can't analyze prologue code at %p\n", func);
244 info->pc_offset = -1;
245 info->frame_offset = -1;
252 static int __init frame_info_init(void)
254 mips_frame_info_initialized =
255 !get_frame_info(&schedule_frame, schedule) &&
256 !get_frame_info(&schedule_timeout_frame, schedule_timeout) &&
257 !get_frame_info(&sleep_on_frame, sleep_on) &&
258 !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) &&
259 !get_frame_info(&wait_for_completion_frame, wait_for_completion);
264 arch_initcall(frame_info_init);
267 * Return saved PC of a blocked thread.
269 unsigned long thread_saved_pc(struct task_struct *tsk)
271 extern void ret_from_fork(void);
272 struct thread_struct *t = &tsk->thread;
274 /* New born processes are a special case */
275 if (t->reg31 == (unsigned long) ret_from_fork)
278 if (schedule_frame.pc_offset < 0)
280 return ((unsigned long *)t->reg29)[schedule_frame.pc_offset];
/*
 * These bracket the sleeping functions..
 */
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)
289 /* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */
290 unsigned long get_wchan(struct task_struct *p)
292 unsigned long frame, pc;
294 if (!p || p == current || p->state == TASK_RUNNING)
297 if (!mips_frame_info_initialized)
299 pc = thread_saved_pc(p);
300 if (pc < first_sched || pc >= last_sched)
303 if (pc >= (unsigned long) sleep_on_timeout)
304 goto schedule_timeout_caller;
305 if (pc >= (unsigned long) sleep_on)
306 goto schedule_caller;
307 if (pc >= (unsigned long) interruptible_sleep_on_timeout)
308 goto schedule_timeout_caller;
309 if (pc >= (unsigned long)interruptible_sleep_on)
310 goto schedule_caller;
311 if (pc >= (unsigned long)wait_for_completion)
312 goto schedule_caller;
313 goto schedule_timeout_caller;
316 frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
317 if (pc >= (unsigned long) sleep_on)
318 pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset];
320 pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset];
323 schedule_timeout_caller:
325 * The schedule_timeout frame
327 frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
330 * frame now points to sleep_on_timeout's frame
332 pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset];
334 if (pc >= first_sched && pc < last_sched) {
335 /* schedule_timeout called by [interruptible_]sleep_on_timeout */
336 frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset];
337 pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset];
343 if (current->thread.mflags & MF_32BIT_REGS) /* Kludge for 32-bit ps */
350 EXPORT_SYMBOL(get_wchan);