#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
#include <linux/config.h>
+#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
+#include <linux/interrupt.h>
+#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
#include <asm/ia32.h>
+#include <asm/irq.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
+#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/user.h>
{
unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
+ print_modules();
printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
safe_halt();
}
+#ifdef CONFIG_HOTPLUG_CPU
+/* We don't actually take CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+	extern void ia64_cpu_local_tick (void);
+	/* Ack it: publish CPU_DEAD so the hot-unplug initiator's wait
+	 * for this CPU to go down can complete. */
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+
+	/* We shouldn't have to disable interrupts while dead, but
+	 * some interrupts just don't seem to go away, and this makes
+	 * it "work" for testing purposes. */
+	max_xtp();
+	local_irq_disable();
+	/* Death loop: busy-wait until the re-plug path flips cpu_state
+	 * to CPU_UP_PREPARE, signalling this CPU should come back. */
+	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+		cpu_relax();
+
+	/*
+	 * Enable timer interrupts from now on
+	 * Not required if we put processor in SAL_BOOT_RENDEZ mode.
+	 */
+	/* TLB may hold stale translations from before "death" —
+	 * flush before rejoining the system. */
+	local_flush_tlb_all();
+	/* Mark ourselves online before re-enabling interrupts; wmb()
+	 * presumably orders the online-map update ahead of the tick
+	 * restart visible to other CPUs — NOTE(review): confirm against
+	 * the __cpu_up side. */
+	cpu_set(smp_processor_id(), cpu_online_map);
+	wmb();
+	ia64_cpu_local_tick ();
+	local_irq_enable();
+}
+#else
+/* Hotplug disabled: reaching play_dead() is a logic error. */
+static inline void play_dead(void)
+{
+	BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
void __attribute__((noreturn))
cpu_idle (void *unused)
{
if (!need_resched())
min_xtp();
#endif
-
while (!need_resched()) {
if (mark_idle)
(*mark_idle)(1);
#endif
schedule();
check_pgt_cache();
+ if (cpu_is_offline(smp_processor_id()))
+ play_dead();
}
}
ia32_save_state(p);
if (clone_flags & CLONE_SETTLS)
retval = ia32_clone_tls(p, child_ptregs);
+
+ /* Copy partially mapped page list */
+ if (!retval)
+ retval = ia32_copy_partial_page_list(p, clone_flags);
}
#endif
return error;
}
-void
-ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter)
-{
- set_personality(PER_LINUX);
- if (elf_ex->e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK)
- current->thread.flags |= IA64_THREAD_XSTACK;
- else
- current->thread.flags &= ~IA64_THREAD_XSTACK;
-}
-
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
/* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);
+ if (IS_IA32_PROCESS(ia64_task_regs(current)))
+ ia32_drop_partial_page_list(current);
}
/*
if (current->thread.flags & IA64_THREAD_DBG_VALID)
pfm_release_debug_registers(current);
#endif
+ if (IS_IA32_PROCESS(ia64_task_regs(current)))
+ ia32_drop_partial_page_list(current);
}
unsigned long
struct unw_frame_info info;
unsigned long ip;
int count = 0;
- /*
- * These bracket the sleeping functions..
- */
-# define first_sched ((unsigned long) scheduling_functions_start_here)
-# define last_sched ((unsigned long) scheduling_functions_end_here)
/*
* Note: p may not be a blocked task (it could be current or
if (unw_unwind(&info) < 0)
return 0;
unw_get_ip(&info, &ip);
- if (ip < first_sched || ip >= last_sched)
+ if (!in_sched_functions(ip))
return ip;
} while (count++ < 16);
return 0;
-# undef first_sched
-# undef last_sched
}
void