#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
#include <linux/config.h>
+#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
+#include <linux/interrupt.h>
+#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
#include <asm/ia32.h>
+#include <asm/irq.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
+#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/user.h>
show_stack (struct task_struct *task, unsigned long *sp)
{
+ /* Sparse cleanup: the second argument of the callbacks below is a
+  * pointer, so pass NULL rather than the bare integer 0. */
if (!task)
+ /* No task given: unwind the currently-running context. */
- unw_init_running(ia64_do_show_stack, 0);
+ unw_init_running(ia64_do_show_stack, NULL);
else {
struct unw_frame_info info;
+ /* Blocked task: start the unwind from its saved kernel state. */
unw_init_from_blocked_task(&info, task);
- ia64_do_show_stack(&info, 0);
+ ia64_do_show_stack(&info, NULL);
}
}
{
unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
+ /* NOTE(review): enclosing signature elided in this hunk — presumably
+  * show_regs(struct pt_regs *regs); confirm against the full file. */
+ /* Dump the module list first so addresses in the register/backtrace
+  * output can be resolved against module symbols. */
+ print_modules();
printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
ndirty = (regs->loadrs >> 19);
bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
for (i = 0; i < sof; ++i) {
+ /* Cast added for sparse: the RSE backing store slot being read
+  * lives in the user address space. */
- get_user(val, ia64_rse_skip_regs(bsp, i));
+ get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
}
safe_halt();
}
+#ifdef CONFIG_HOTPLUG_CPU
+/* We don't actually take CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+ extern void ia64_cpu_local_tick (void);
+ /* Ack it: publish CPU_DEAD so the CPU driving the offline can
+  * observe that we have parked ourselves. */
+ __get_cpu_var(cpu_state) = CPU_DEAD;
+
+ /* We shouldn't have to disable interrupts while dead, but
+ * some interrupts just don't seem to go away, and this makes
+ * it "work" for testing purposes. */
+ max_xtp();
+ local_irq_disable();
+ /* Death loop: spin until __cpu_up() rearms us by setting
+  * cpu_state back to CPU_UP_PREPARE. */
+ while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+ cpu_relax();
+
+ /*
+ * Enable timer interrupts from now on
+ * Not required if we put processor in SAL_BOOT_RENDEZ mode.
+ */
+ /* Coming back to life: flush stale local TLB state, mark ourselves
+  * online before the tick is restarted, then re-enable interrupts. */
+ local_flush_tlb_all();
+ cpu_set(smp_processor_id(), cpu_online_map);
+ wmb();
+ ia64_cpu_local_tick ();
+ local_irq_enable();
+}
+#else
+/* Without CPU hotplug support an offline CPU can never reach here. */
+static inline void play_dead(void)
+{
+ BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+ /* NOTE(review): this hunk elides some context — the `#endif` after the
+  * second `if (mark_idle)` below has no visible opener here; confirm the
+  * surrounding `(*mark_idle)(0)` / CONFIG_SMP lines in the full file. */
void __attribute__((noreturn))
cpu_idle (void *unused)
{
/* endless idle loop with no priority at all */
while (1) {
+ /* pm_idle is now sampled inside the inner loop, under RCU (below),
+  * instead of once per outer iteration. */
- void (*idle)(void) = pm_idle;
- if (!idle)
- idle = default_idle;
-
#ifdef CONFIG_SMP
if (!need_resched())
min_xtp();
#endif
-
while (!need_resched()) {
+ void (*idle)(void);
+
if (mark_idle)
(*mark_idle)(1);
+ /*
+ * Mark this as an RCU critical section so that
+ * synchronize_kernel() in the unload path waits
+ * for our completion.
+ */
+ rcu_read_lock();
+ idle = pm_idle;
+ if (!idle)
+ idle = default_idle;
(*idle)();
+ rcu_read_unlock();
}
if (mark_idle)
#endif
schedule();
check_pgt_cache();
+ /* After a reschedule opportunity, park this CPU if hotplug has
+  * taken it offline (see play_dead()). */
+ if (cpu_is_offline(smp_processor_id()))
+ play_dead();
}
}
+ /* NOTE(review): fragment of copy_thread()'s IA-32 child-setup path;
+  * the enclosing function is elided from this hunk. */
ia32_save_state(p);
if (clone_flags & CLONE_SETTLS)
retval = ia32_clone_tls(p, child_ptregs);
+
+ /* Copy partially mapped page list */
+ /* Only attempted if ia32_clone_tls() (or the earlier setup) did not
+  * already fail — a non-zero retval short-circuits the copy. */
+ if (!retval)
+ retval = ia32_copy_partial_page_list(p, clone_flags);
}
#endif
}
+/*
+ * execve() syscall entry.  Sparse cleanup: the incoming pointers are
+ * annotated __user, and the kernel-space name returned by getname() is
+ * kept in a separate `fname` so the __user `filename` pointer is never
+ * reused to hold a kernel pointer.
+ */
asmlinkage long
-sys_execve (char *filename, char **argv, char **envp, struct pt_regs *regs)
+sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
+ struct pt_regs *regs)
{
+ char *fname;
int error;
+ /* getname() copies the path into kernel memory (or returns ERR_PTR). */
- filename = getname(filename);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
+ fname = getname(filename);
+ error = PTR_ERR(fname);
+ if (IS_ERR(fname))
goto out;
- error = do_execve(filename, argv, envp, regs);
- putname(filename);
+ error = do_execve(fname, argv, envp, regs);
+ putname(fname);
out:
return error;
}
+/* ia64_set_personality() removed — NOTE(review): presumably the
+ * EF_IA_64_LINUX_EXECUTABLE_STACK / IA64_THREAD_XSTACK handling moved
+ * elsewhere (e.g. into the ELF loader's SET_PERSONALITY path); confirm
+ * against the rest of this patch series. */
-void
-ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter)
-{
-	set_personality(PER_LINUX);
-	if (elf_ex->e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK)
-		current->thread.flags |= IA64_THREAD_XSTACK;
-	else
-		current->thread.flags &= ~IA64_THREAD_XSTACK;
-}
-
+/* NOTE(review): the hunks below are glued fragments — the kernel_thread()
+ * body and parts of flush_thread()/exit_thread() are elided (the bare
+ * `/\*` a few lines down opens a comment whose close is not visible here).
+ * Both added hunks tear down the IA-32 partial-page-list bookkeeping when
+ * the task is an IA-32 process. */
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
/* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);
+ if (IS_IA32_PROCESS(ia64_task_regs(current)))
+ ia32_drop_partial_page_list(current);
}
/*
if (current->thread.flags & IA64_THREAD_DBG_VALID)
pfm_release_debug_registers(current);
#endif
+ if (IS_IA32_PROCESS(ia64_task_regs(current)))
+ ia32_drop_partial_page_list(current);
}
+/* NOTE(review): fragment of get_wchan() — the signature/body context is
+ * partially elided in this hunk. */
unsigned long
struct unw_frame_info info;
unsigned long ip;
int count = 0;
- /*
- * These bracket the sleeping functions..
- */
-# define first_sched ((unsigned long) scheduling_functions_start_here)
-# define last_sched ((unsigned long) scheduling_functions_end_here)
/*
* Note: p may not be a blocked task (it could be current or
if (unw_unwind(&info) < 0)
return 0;
unw_get_ip(&info, &ip);
+ /* in_sched_functions() replaces the open-coded
+  * first_sched/last_sched address-range check: report the first
+  * return address that is outside the scheduler itself. */
- if (ip < first_sched || ip >= last_sched)
+ if (!in_sched_functions(ip))
return ip;
} while (count++ < 16);
return 0;
-# undef first_sched
-# undef last_sched
}
void
void
machine_restart (char *restart_cmd)
{
+ /* EFI ResetSystem(ResetWarm, status, data_size, data): the final
+  * argument is a pointer, so pass NULL instead of 0 (sparse cleanup). */
- (*efi.reset_system)(EFI_RESET_WARM, 0, 0, 0);
+ (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}
EXPORT_SYMBOL(machine_restart);