/*
 *  arch/ppc/kernel/process.c
 *
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/hardirq.h>

extern unsigned long _get_SP(void);

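/*
 * Lazy FPU/AltiVec ownership (UP only): these point at the task whose
 * floating-point or vector registers are currently live in the CPU, so
 * the state only has to be saved when some other task needs the unit.
 */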
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

/*
 * This is 8kB-aligned so we can get to the thread_info struct
 * at the base of it from the stack pointer with one integer
 * instruction.
 */
union thread_union init_thread_union
        __attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };

#undef SHOW_TASK_SWITCHES
#undef CHECK_STACK

#if defined(CHECK_STACK)
unsigned long
kernel_stack_top(struct task_struct *tsk)
{
        return ((unsigned long)tsk) + sizeof(union thread_union);
}

unsigned long
task_top(struct task_struct *tsk)
{
        return ((unsigned long)tsk) + sizeof(struct thread_info);
}

/* check to make sure the kernel stack is healthy */
int check_stack(struct task_struct *tsk)
{
        unsigned long stack_top = kernel_stack_top(tsk);
        unsigned long tsk_top = task_top(tsk);
        int ret = 0;

#if 0
        /* check thread magic */
        if (tsk->thread.magic != THREAD_MAGIC) {
                ret |= 1;
                printk("thread.magic bad: %08x\n", tsk->thread.magic);
        }
#endif

        if (!tsk) {
                printk("check_stack(): bad tsk %p\n", tsk);
                return 0;
        }

        /* check if stored ksp is bad */
        if ((tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top)) {
                printk("stack out of bounds: %s/%d\n"
                       " tsk_top %08lx ksp %08lx stack_top %08lx\n",
                       tsk->comm, tsk->pid,
                       tsk_top, tsk->thread.ksp, stack_top);
                ret |= 2;
        }

        /* check if stack ptr RIGHT NOW is bad */
        if ((tsk == current) && ((_get_SP() > stack_top) || (_get_SP() < tsk_top))) {
                printk("current stack ptr out of bounds: %s/%d\n"
                       " tsk_top %08lx sp %08lx stack_top %08lx\n",
                       current->comm, current->pid,
                       tsk_top, _get_SP(), stack_top);
                ret |= 4;
        }

#if 0
        /* check amount of free stack */
        {
                unsigned long *i;

                for (i = (unsigned long *)task_top(tsk);
                     i < (unsigned long *)kernel_stack_top(tsk); i++) {
                        if (!i)
                                printk("check_stack(): i = %p\n", i);
                        if (*i != 0) {
                                /* only notify if it's less than 900 bytes */
                                if (((unsigned long)i - task_top(tsk)) < 900)
                                        printk("%ld bytes free on stack\n",
                                               (unsigned long)i - task_top(tsk));
                                break;
                        }
                }
        }
#endif

        if (ret)
                panic("bad kernel stack");
        return ret;
}
#endif /* defined(CHECK_STACK) */

#ifdef CONFIG_ALTIVEC
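/*
 * Save the current task's live AltiVec registers back into its
 * thread_struct and copy them out (used for coredumps and ptrace).
 */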
int
dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
        memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
        return 1;
}

void
enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec(NULL);   /* just enable AltiVec for kernel - force */
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
#endif /* CONFIG_ALTIVEC */

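/*
 * Let the kernel use the FPU: flush any live user floating-point state
 * (current's on SMP, the lazy owner's on UP) so the kernel can use the
 * FP registers without clobbering a user context.
 */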
void
enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

int
dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
        preempt_disable();
        if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
                giveup_fpu(tsk);
        preempt_enable();
        memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
        return 1;
}

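/*
 * Machine-dependent half of a context switch, called with interrupts
 * disabled: flush any lazy FP/AltiVec state that must not follow the
 * old task, then call _switch() to swap kernel stacks and non-volatile
 * registers.  _switch() returns the task we switched away from.
 */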
struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long s;
        struct task_struct *last;

        local_irq_save(s);
#ifdef CHECK_STACK
        check_stack(prev);
        check_stack(new);
#endif

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */

        /* Avoid the trap.  On SMP this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
        new_thread = &new->thread;
        old_thread = &current->thread;
        last = _switch(old_thread, new_thread);
        local_irq_restore(s);
        return last;
}

void show_regs(struct pt_regs *regs)
{
        int i, trap;

        printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx    %s\n",
               regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
               print_tainted());
        printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
               regs->msr, regs->msr & MSR_EE ? 1 : 0, regs->msr & MSR_PR ? 1 : 0,
               regs->msr & MSR_FP ? 1 : 0, regs->msr & MSR_ME ? 1 : 0,
               regs->msr & MSR_IR ? 1 : 0,
               regs->msr & MSR_DR ? 1 : 0);
        trap = TRAP(regs);
        if (trap == 0x300 || trap == 0x600)
                printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
        printk("TASK = %p[%d] '%s' THREAD: %p",
               current, current->pid, current->comm, current->thread_info);
        printk("Last syscall: %ld ", current->thread.last_syscall);

#if defined(CONFIG_4xx) && defined(DCRN_PLB0_BEAR)
        printk("\nPLB0: bear= 0x%8.8x acr=   0x%8.8x besr=  0x%8.8x\n",
            mfdcr(DCRN_PLB0_BEAR), mfdcr(DCRN_PLB0_ACR),
            mfdcr(DCRN_PLB0_BESR));
#endif
#if defined(CONFIG_4xx) && defined(DCRN_POB0_BEAR)
        printk("PLB0 to OPB: bear= 0x%8.8x besr0= 0x%8.8x besr1= 0x%8.8x\n",
            mfdcr(DCRN_POB0_BEAR), mfdcr(DCRN_POB0_BESR0),
            mfdcr(DCRN_POB0_BESR1));
#endif

#ifdef CONFIG_SMP
        printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0;  i < 32;  i++) {
                long r;
                if ((i % 8) == 0)
                        printk("\n" KERN_INFO "GPR%02d: ", i);
                if (__get_user(r, &regs->gpr[i]))
                        break;
                printk("%08lX ", r);
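                /* a partial (syscall) frame only holds r0-r12; don't
                   print stale non-volatile registers */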
                if (i == 12 && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Look up the NIP late so we have the best chance of getting
         * the above info out without failing.
         */
        printk("NIP [%08lx] ", regs->nip);
        print_symbol("%s\n", regs->nip);
        printk("LR [%08lx] ", regs->link);
        print_symbol("%s\n", regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
}

void exit_thread(void)
{
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
}

void flush_thread(void)
{
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        struct pt_regs *regs = tsk->thread.regs;

        if (regs == NULL)
                return;
        preempt_disable();
        if (regs->msr & MSR_FP)
                giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
        preempt_enable();
}

/*
 * Copy a thread: lay out the child's kernel stack with a user
 * exception frame (copied from the parent) and a switch frame that
 * makes _switch() start the child in ret_from_fork().
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
            unsigned long unused,
            struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
        unsigned long childframe;

        p->set_child_tid = p->clear_child_tid = NULL;

        CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        *childregs = *regs;
        if ((childregs->msr & MSR_PR) == 0) {
                /* for kernel thread, set `current' and stackptr in new task */
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
                childregs->gpr[2] = (unsigned long) p;
                p->thread.regs = NULL;  /* no user register state */
        } else {
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
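                /* on ppc32 the TLS pointer lives in r2; clone() passes
                   the new thread's TLS value as its r6 argument */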
                if (clone_flags & CLONE_SETTLS)
                        childregs->gpr[2] = childregs->gpr[6];
        }
        childregs->gpr[3] = 0;  /* Result from fork() */
        sp -= STACK_FRAME_OVERHEAD;
        childframe = sp;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some housekeeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        kregs->nip = (unsigned long)ret_from_fork;

        p->thread.last_syscall = -1;

        return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
        set_fs(USER_DS);
        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->mq = 0;
        regs->nip = nip;
        regs->gpr[1] = sp;
        regs->msr = MSR_USER;
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
}

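/*
 * prctl(PR_SET_FPEXC) support: __pack_fe01() turns a PR_FP_EXC_* mode
 * into the MSR_FE0/MSR_FE1 bit pair, which selects ignored, imprecise
 * or precise floating-point exception mode.
 */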
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}

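/*
 * The fork/clone/vfork wrappers take their arguments in r3-r8, plus a
 * pointer to the exception frame set up by the syscall entry code;
 * CHECK_FULL_REGS() complains if that frame holds only the partial
 * register set saved on the fast syscall path rather than all GPRs.
 */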
int sys_clone(unsigned long clone_flags, unsigned long usp,
              int __user *parent_tidp, void __user *child_threadptr,
              int __user *child_tidp, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
        return do_fork(clone_flags & ~CLONE_IDLETASK, usp, regs, 0,
                        parent_tidp, child_tidp);
}

int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
             struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
                        regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
               unsigned long a3, unsigned long a4, unsigned long a5,
               struct pt_regs *regs)
{
        int error;
        char *filename;

        filename = getname((char __user *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
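        /* flush any live FP/AltiVec state back to the thread_struct
           before exec replaces this thread's context, so the lazy-save
           tracking is not left pointing at it */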
        preempt_disable();
        if (regs->msr & MSR_FP)
                giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
        preempt_enable();
        error = do_execve(filename, (char __user * __user *) a1,
                          (char __user * __user *) a2, regs);
        if (error == 0)
                current->ptrace &= ~PT_DTRACE;
        putname(filename);
out:
        return error;
}

void dump_stack(void)
{
        show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, stack_top, prev_sp, ret;
        int count = 0;
        unsigned long next_exc = 0;
        struct pt_regs *regs;
        extern char ret_from_except, ret_from_except_full, ret_from_syscall;

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        prev_sp = (unsigned long) (tsk->thread_info + 1);
        stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
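        /*
         * Walk the stack frames: on ppc32 the word at *sp is the back
         * chain to the previous frame and the word at sp + 4 is the
         * saved LR of the function that owns that frame.
         */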
        while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
                if (count == 0) {
                        printk("Call trace:");
#ifdef CONFIG_KALLSYMS
                        printk("\n");
#endif
                } else {
                        if (next_exc) {
                                ret = next_exc;
                                next_exc = 0;
                        } else
                                ret = *(unsigned long *)(sp + 4);
                        printk(" [%08lx] ", ret);
#ifdef CONFIG_KALLSYMS
                        print_symbol("%s", ret);
                        printk("\n");
#endif
                        if (ret == (unsigned long) &ret_from_except
                            || ret == (unsigned long) &ret_from_except_full
                            || ret == (unsigned long) &ret_from_syscall) {
                                /* sp + 16 points to an exception frame */
                                regs = (struct pt_regs *) (sp + 16);
                                if (sp + 16 + sizeof(*regs) <= stack_top)
                                        next_exc = regs->nip;
                        }
                }
                ++count;
                sp = *(unsigned long *)sp;
        }
#ifndef CONFIG_KALLSYMS
        if (count > 0)
                printk("\n");
#endif
}

#if 0
/*
 * Low level print for debugging - Cort
 */
int __init ll_printk(const char *fmt, ...)
{
        va_list args;
        char buf[256];
        int i;

        va_start(args, fmt);
        i = vsprintf(buf, fmt, args);
        ll_puts(buf);
        va_end(args);
        return i;
}

int lines = 24, cols = 80;
int orig_x = 0, orig_y = 0;

void puthex(unsigned long val)
{
        unsigned char buf[10];
        int i;

        for (i = 7; i >= 0; i--) {
                buf[i] = "0123456789ABCDEF"[val & 0x0F];
                val >>= 4;
        }
        buf[8] = '\0';
        prom_print(buf);
}

void __init ll_puts(const char *s)
{
        int x, y;
        char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000;
        char c;
        extern int mem_init_done;

        if (mem_init_done) {    /* assume this means we can printk */
                printk("%s", s);
                return;
        }

#if 0
        if (have_of) {
                prom_print(s);
                return;
        }
#endif

        /*
         * can't ll_puts on chrp without openfirmware yet.
         * vidmem just needs to be setup for it.
         * -- Cort
         */
        if (_machine != _MACH_prep)
                return;
        x = orig_x;
        y = orig_y;

        while ((c = *s++) != '\0') {
                if (c == '\n') {
                        x = 0;
                        if (++y >= lines) {
                                /*scroll();*/
                                /*y--;*/
                                y = 0;
                        }
                } else {
                        vidmem[(x + cols * y) * 2] = c;
                        if (++x >= cols) {
                                x = 0;
                                if (++y >= lines) {
                                        /*scroll();*/
                                        /*y--;*/
                                        y = 0;
                                }
                        }
                }
        }

        orig_x = x;
        orig_y = y;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        unsigned long stack_page = (unsigned long) p->thread_info;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        sp = p->thread.ksp;
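        /*
         * Follow the stack back chain, skipping the first frame (the
         * switch frame saved by _switch); 8188 is THREAD_SIZE - 4, so
         * the LR read at sp + 4 stays inside the 8kB kernel stack.
         */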
        do {
                sp = *(unsigned long *)sp;
                if (sp < stack_page || sp >= stack_page + 8188)
                        return 0;
                if (count > 0) {
                        ip = *(unsigned long *)(sp + 4);
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}