/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#define __KERNEL_SYSCALLS__	/* see <asm/unistd.h> */
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
#include <linux/interrupt.h>

#include <asm/delay.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

void (*ia64_mark_idle)(int);

void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
	unsigned long ip, sp, bsp;
	char buf[128];	/* don't make it so big that it overflows the stack! */

	printk("\nCall Trace:\n");
	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;

		unw_get_sp(info, &sp);
		unw_get_bsp(info, &bsp);
		snprintf(buf, sizeof(buf),
			 " [<%016lx>] %%s\n"
			 "                                sp=%016lx bsp=%016lx\n",
			 ip, sp, bsp);
		print_symbol(buf, ip);
	} while (unw_unwind(info) >= 0);
}

void
show_stack (struct task_struct *task, unsigned long *sp)
{
	if (!task)
		unw_init_running(ia64_do_show_stack, NULL);
	else {
		struct unw_frame_info info;

		unw_init_from_blocked_task(&info, task);
		ia64_do_show_stack(&info, NULL);
	}
}

void
dump_stack (void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

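/*
 * Usage note (added for clarity, not part of the original file): dump_stack()
 * is exported so that any kernel code, including modules, can print a call
 * trace of the current context while debugging, e.g.
 *
 *	if (unexpected_condition)
 *		dump_stack();
 *
 * where "unexpected_condition" stands for a hypothetical check, not something
 * defined in this file.
 */
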
void
show_regs (struct pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
	printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
	print_symbol("ip is at %s\n", ip);
	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
	printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
	       regs->f6.u.bits[1], regs->f6.u.bits[0],
	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
	       regs->f8.u.bits[1], regs->f8.u.bits[0],
	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
	       regs->f10.u.bits[1], regs->f10.u.bits[0],
	       regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
	printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);

	if (user_mode(regs)) {
		/* print the stacked registers */
		unsigned long val, *bsp, ndirty;
		int i, sof, is_nat = 0;

		sof = regs->cr_ifs & 0x7f;	/* size of frame */
		ndirty = (regs->loadrs >> 19);	/* loadrs holds ar.rsc.loadrs (dirty bytes << 16); >> 19 gives 8-byte slots */
		bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
		for (i = 0; i < sof; ++i) {
			get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
			printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
			       ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
		}
	} else
		show_stack(NULL, NULL);
}

void
do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
{
	if (fsys_mode(current, &scr->pt)) {
		/* defer signal-handling etc. until we return to privilege-level 0. */
		if (!ia64_psr(&scr->pt)->lp)
			ia64_psr(&scr->pt)->lp = 1;
		return;
	}

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_needs_checking)
		pfm_handle_work();
#endif

	/* deal with pending signal delivery */
	if (test_thread_flag(TIF_SIGPENDING))
		ia64_do_signal(oldset, scr, in_syscall);
}

static int pal_halt = 1;
static int __init nohalt_setup(char * str)
{
	pal_halt = 0;
	return 1;
}
__setup("nohalt", nohalt_setup);

/*
 * We use this if we don't have any better idle routine.
 */
static void
default_idle (void)
{
	unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);

	while (!need_resched())
		if (pal_halt && !pmu_active)
			safe_halt();
}

#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	extern void ia64_cpu_local_tick (void);

	__get_cpu_var(cpu_state) = CPU_DEAD;

	/* We shouldn't have to disable interrupts while dead, but
	 * some interrupts just don't seem to go away, and this makes
	 * it "work" for testing purposes. */
	local_irq_disable();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();

	/*
	 * Enable timer interrupts from now on
	 * Not required if we put processor in SAL_BOOT_RENDEZ mode.
	 */
	local_flush_tlb_all();
	cpu_set(smp_processor_id(), cpu_online_map);
	ia64_cpu_local_tick ();
	local_irq_enable();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

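/*
 * Note (added for clarity, not part of the original file): play_dead() is
 * reached from the idle loop in cpu_idle() below once cpu_is_offline()
 * reports this CPU as offline; the "dead" CPU then spins until it is moved
 * back to CPU_UP_PREPARE and rejoins cpu_online_map.
 */
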
void __attribute__((noreturn))
cpu_idle (void *unused)
{
	void (*mark_idle)(int) = ia64_mark_idle;

	/* endless idle loop with no priority at all */
	while (!need_resched()) {
		/*
		 * Mark this as an RCU critical section so that
		 * synchronize_kernel() in the unload path waits
		 * for our completion.
		 */

	if (cpu_is_offline(smp_processor_id()))
		play_dead();

void
ia64_save_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_save_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_save_regs(task);

	info = __get_cpu_var(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 0);
#endif

#ifdef CONFIG_IA32_SUPPORT
	if (IS_IA32_PROCESS(ia64_task_regs(task)))
		ia32_save_state(task);
#endif
}

void
ia64_load_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_load_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_load_regs(task);

	info = __get_cpu_var(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 1);
#endif

#ifdef CONFIG_IA32_SUPPORT
	if (IS_IA32_PROCESS(ia64_task_regs(task)))
		ia32_load_state(task);
#endif
}

/*
 * Copy the state of an ia-64 thread.
 *
 * We get here through the following call chain:
 *
 *	from user-level:		from kernel:
 *
 *	<clone syscall>			<some kernel call frames>
 *	sys_clone			   :
 *	do_fork				do_fork
 *	copy_thread			copy_thread
 *
 * This means that the stack layout is as follows:
 *
 *	+---------------------+ (highest addr)
 *	|   struct pt_regs    |
 *	+---------------------+
 *	| struct switch_stack |
 *	+---------------------+
 *	|                     |
 *	|    memory stack     |
 *	|                     | <-- sp (lowest addr)
 *	+---------------------+
 *
 * Observe that we copy the unat values that are in pt_regs and switch_stack.  Spilling an
 * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
 * with N=(X & 0x1ff)/8.  Thus, copying the unat value preserves the NaT bits ONLY if the
 * pt_regs structure in the parent is congruent to that of the child, modulo 512.  Since
 * the stack is page aligned and the page size is at least 4KB, this is always the case,
 * so there is nothing to worry about.
 */
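/*
 * Worked example (hypothetical address, added for illustration): if a register
 * is spilled to X = 0xe000000012345048, then N = (X & 0x1ff)/8 = 0x48/8 = 9,
 * so bit 9 of ar.unat records that register's NaT bit.  Because the parent's
 * and the child's pt_regs sit at the same offset below a page-aligned stack
 * top, (X_parent & 0x1ff) == (X_child & 0x1ff), and the copied unat bits land
 * in the matching positions.
 */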
int
copy_thread (int nr, unsigned long clone_flags,
	     unsigned long user_stack_base, unsigned long user_stack_size,
	     struct task_struct *p, struct pt_regs *regs)
{
	extern char ia64_ret_from_clone, ia32_ret_from_clone;
	struct switch_stack *child_stack, *stack;
	unsigned long rbs, child_rbs, rbs_size;
	struct pt_regs *child_ptregs;
	int retval = 0;

#ifdef CONFIG_SMP
	/*
	 * For SMP idle threads, fork_by_hand() calls do_fork with
	 * NULL regs.
	 */
	if (!regs)
		return 0;
#endif

	stack = ((struct switch_stack *) regs) - 1;

	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
	child_stack = (struct switch_stack *) child_ptregs - 1;

	/* copy parent's switch_stack & pt_regs to child: */
	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));

	rbs = (unsigned long) current + IA64_RBS_OFFSET;
	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
	rbs_size = stack->ar_bspstore - rbs;

	/* copy the parent's register backing store to the child: */
	memcpy((void *) child_rbs, (void *) rbs, rbs_size);

	if (likely(user_mode(child_ptregs))) {
		if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
		if (user_stack_base) {
			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
			child_ptregs->ar_bspstore = user_stack_base;
			child_ptregs->ar_rnat = 0;
			child_ptregs->loadrs = 0;
		}
	} else {
		/*
		 * Note: we simply preserve the relative position of
		 * the stack pointer here.  There is no need to
		 * allocate a scratch area here, since that will have
		 * been taken care of by the caller of sys_clone()
		 * already.
		 */
		child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
		child_ptregs->r13 = (unsigned long) p;	/* set `current' pointer */
	}
	child_stack->ar_bspstore = child_rbs + rbs_size;
	if (IS_IA32_PROCESS(regs))
		child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
	else
		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

	/* copy parts of thread_struct: */
	p->thread.ksp = (unsigned long) child_stack - 16;

	/* stop some PSR bits from being inherited.
	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
	 * therefore we must specify them explicitly here and not include them in
	 * IA64_PSR_BITS_TO_CLEAR.
	 */
	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

	/*
	 * NOTE: The calling convention considers all floating point
	 * registers in the high partition (fph) to be scratch.  Since
	 * the only way to get to this point is through a system call,
	 * we know that the values in fph are all dead.  Hence, there
	 * is no need to inherit the fph state from the parent to the
	 * child and all we have to do is to make sure that
	 * IA64_THREAD_FPH_VALID is cleared in the child.
	 *
	 * XXX We could push this optimization a bit further by
	 * clearing IA64_THREAD_FPH_VALID on ANY system call.
	 * However, it's not clear this is worth doing.  Also, it
	 * would be a slight deviation from the normal Linux system
	 * call behavior where scratch registers are preserved across
	 * system calls (unless used by the system call itself).
	 */
#	define THREAD_FLAGS_TO_CLEAR	(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
					 | IA64_THREAD_PM_VALID)
#	define THREAD_FLAGS_TO_SET	0
	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
			   | THREAD_FLAGS_TO_SET);
	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
#ifdef CONFIG_IA32_SUPPORT
	/*
	 * If we're cloning an IA32 task then save the IA32 extra
	 * state from the current task to the new task
	 */
	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
		ia32_save_state(p);
		if (clone_flags & CLONE_SETTLS)
			retval = ia32_clone_tls(p, child_ptregs);

		/* Copy partially mapped page list */
		if (!retval)
			retval = ia32_copy_partial_page_list(p, clone_flags);
	}
#endif

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_context)
		pfm_inherit(p, child_ptregs);
#endif
	return retval;
}

void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
	elf_greg_t *dst = arg;
	struct pt_regs *pt;
	char nat;
	int i;

	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	unw_get_sp(info, &sp);
	pt = (struct pt_regs *) (sp + 16);

	urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);

	if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
		return;

	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
		  &ar_rnat);

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* r0 is zero */
	for (i = 1, mask = (1UL << i); i < 32; ++i) {
		unw_get_gr(info, i, &dst[i], &nat);
		if (nat)
			nat_bits |= mask;
		mask <<= 1;
	}
	dst[32] = nat_bits;
	unw_get_pr(info, &dst[33]);

	for (i = 0; i < 8; ++i)
		unw_get_br(info, i, &dst[34 + i]);

	unw_get_rp(info, &ip);
	dst[42] = ip + ia64_psr(pt)->ri;
	dst[43] = cfm;
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;

	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
	/*
	 * For bsp and bspstore, unw_get_ar() would return the kernel
	 * addresses, but we need the user-level addresses instead:
	 */
	dst[46] = urbs_end;	/* note: by convention PT_AR_BSP points to the end of the urbs! */
	dst[47] = pt->ar_bspstore;
	dst[48] = ar_rnat;
	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
	unw_get_ar(info, UNW_AR_LC, &dst[53]);
	unw_get_ar(info, UNW_AR_EC, &dst[54]);
	unw_get_ar(info, UNW_AR_CSD, &dst[55]);
	unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}

void
do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	elf_fpreg_t *dst = arg;
	int i;

	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */

	if (unw_unwind_to_user(info) < 0)
		return;

	/* f0 is 0.0, f1 is 1.0 */

	for (i = 2; i < 32; ++i)
		unw_get_fr(info, i, dst + i);

	ia64_flush_fph(task);
	if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		memcpy(dst + 32, task->thread.fph, 96*16);
}

void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
	do_copy_task_regs(current, info, arg);
}

void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
	do_dump_task_fpu(current, info, arg);
}

int
dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
{
	struct unw_frame_info tcore_info;

	if (current == task) {
		unw_init_running(do_copy_regs, regs);
	} else {
		memset(&tcore_info, 0, sizeof(tcore_info));
		unw_init_from_blocked_task(&tcore_info, task);
		do_copy_task_regs(task, &tcore_info, regs);
	}
	return 1;
}

void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
	unw_init_running(do_copy_regs, dst);
}

int
dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
{
	struct unw_frame_info tcore_info;

	if (current == task) {
		unw_init_running(do_dump_fpu, dst);
	} else {
		memset(&tcore_info, 0, sizeof(tcore_info));
		unw_init_from_blocked_task(&tcore_info, task);
		do_dump_task_fpu(task, &tcore_info, dst);
	}
	return 1;
}

int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
	unw_init_running(do_dump_fpu, dst);
	return 1;	/* f0-f31 are always valid so we always return 1 */
}

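/*
 * Note (added for clarity, not part of the original file): a nonzero return
 * from dump_fpu() tells the ELF core-dump code that the floating-point state
 * is worth including in the core file; on ia64, f0-f31 are always valid, so
 * the function unconditionally returns 1.
 */
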
long
sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
	    struct pt_regs *regs)
{
	char *fname;
	int error;

	fname = getname(filename);
	error = PTR_ERR(fname);
	if (IS_ERR(fname))
		goto out;
	error = do_execve(fname, argv, envp, regs);
	putname(fname);
out:
	return error;
}

pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	extern void start_kernel_thread (void);
	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
	struct {
		struct switch_stack sw;
		struct pt_regs pt;
	} regs;

	memset(&regs, 0, sizeof(regs));
	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
	regs.pt.r1 = helper_fptr[1];		/* set GP */
	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */
	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;

	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

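/*
 * Usage sketch (illustrative only; "my_worker" and "do_work" are made-up
 * names, not part of this file).  A caller would typically start a kernel
 * thread with something like:
 *
 *	static int my_worker(void *data)
 *	{
 *		do_work();
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 *
 * The new thread shares the kernel address space (CLONE_VM is forced above),
 * starts at start_kernel_thread, and eventually runs my_worker(NULL) through
 * kernel_thread_helper() below.
 */
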
/* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
#ifdef CONFIG_IA32_SUPPORT
	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
		/* A kernel thread is always a 64-bit process. */
		current->thread.map_base  = DEFAULT_MAP_BASE;
		current->thread.task_size = DEFAULT_TASK_SIZE;
		ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
		ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
	}
#endif
	return (*fn)(arg);
}

/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
	ia64_drop_fpu(current);
	if (IS_IA32_PROCESS(ia64_task_regs(current)))
		ia32_drop_partial_page_list(current);
}

/*
 * Clean up state associated with current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{
	ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
	/* if needed, stop monitoring and flush state to perfmon context */
	if (current->thread.pfm_context)
		pfm_exit_thread(current);

	/* free debug register resources */
	if (current->thread.flags & IA64_THREAD_DBG_VALID)
		pfm_release_debug_registers(current);
#endif
	if (IS_IA32_PROCESS(ia64_task_regs(current)))
		ia32_drop_partial_page_list(current);
}

unsigned long
get_wchan (struct task_struct *p)
{
	struct unw_frame_info info;
	unsigned long ip;
	int count = 0;

	/*
	 * Note: p may not be a blocked task (it could be current or
	 * another process running on some other CPU).  Rather than
	 * trying to determine if p is really blocked, we just assume
	 * it's blocked and rely on the unwind routines to fail
	 * gracefully if the process wasn't really blocked after all.
	 */
	unw_init_from_blocked_task(&info, p);
	do {
		if (unw_unwind(&info) < 0)
			return 0;
		unw_get_ip(&info, &ip);
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < 16);
	return 0;
}

void
cpu_halt (void)
{
	pal_power_mgmt_info_u_t power_info[8];
	unsigned long min_power;
	int i, min_power_state;

	if (ia64_pal_halt_info(power_info) != 0)
		return;

	min_power_state = 0;
	min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
	for (i = 1; i < 8; ++i)
		if (power_info[i].pal_power_mgmt_info_s.im
		    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
			min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
			min_power_state = i;
		}

	while (1)
		ia64_pal_halt(min_power_state);
}

void
machine_restart (char *restart_cmd)
{
	(*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}
EXPORT_SYMBOL(machine_restart);

EXPORT_SYMBOL(machine_halt);

void
machine_power_off (void)
{
	if (pm_power_off)
		pm_power_off();
	machine_halt();
}
EXPORT_SYMBOL(machine_power_off);