Merge to Fedora kernel-2.6.18-1.2224_FC5 patched with stable patch-2.6.18.1-vs2.0...
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 9129338..ea914cc 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -3,9 +3,12 @@
  *
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
+ * 04/11/17 Ashok Raj  <ashok.raj@intel.com> Added CPU Hotplug Support
+ *
+ * 2005-10-07 Keith Owens <kaos@sgi.com>
+ *           Add notify_die() hooks.
  */
 #define __KERNEL_SYSCALLS__    /* see <asm/unistd.h> */
-#include <linux/config.h>
 
 #include <linux/cpu.h>
 #include <linux/pm.h>
@@ -32,6 +35,7 @@
 #include <asm/elf.h>
 #include <asm/ia32.h>
 #include <asm/irq.h>
+#include <asm/kdebug.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sal.h>
@@ -49,7 +53,7 @@
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
-static cpumask_t cpu_idle_map;
+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
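
The shared cpumask_t cpu_idle_map forced every pass through the idle loop to do an atomic read-modify-write on one cache line contended by all CPUs; the per-CPU cpu_idle_state flag turns that into a plain store to a line the idle CPU already owns. A rough userspace analogue of the layout change, for illustration only (NCPUS and the 64-byte cache line are assumptions, not kernel values):

    /* Userspace analogue of the cpumask -> per-CPU change; NCPUS and
     * the 64-byte line size are illustrative assumptions. */
    #include <stdatomic.h>

    #define NCPUS 4

    /* before: one shared word, every idle pass does a contended RMW */
    static atomic_ulong cpu_idle_map;

    /* after: one flag per CPU, each on its own (assumed 64-byte) line */
    static struct { _Alignas(64) atomic_uint state; } cpu_idle_state[NCPUS];

    static void idle_pass_old(int cpu)
    {
            atomic_fetch_and(&cpu_idle_map, ~(1UL << cpu));
    }

    static void idle_pass_new(int cpu)
    {
            atomic_store(&cpu_idle_state[cpu].state, 0);  /* local store */
    }
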
@@ -172,27 +176,35 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
                ia64_do_signal(oldset, scr, in_syscall);
 }
 
-static int pal_halt = 1;
+static int pal_halt        = 1;
+static int can_do_pal_halt = 1;
+
 static int __init nohalt_setup(char * str)
 {
-       pal_halt = 0;
+       pal_halt = can_do_pal_halt = 0;
        return 1;
 }
 __setup("nohalt", nohalt_setup);
 
+void
+update_pal_halt_status(int status)
+{
+       can_do_pal_halt = pal_halt && status;
+}
+
 /*
  * We use this if we don't have any better idle routine..
  */
 void
 default_idle (void)
 {
-       unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
-
-       while (!need_resched())
-               if (pal_halt && !pmu_active)
+       local_irq_enable();
+       while (!need_resched()) {
+               if (can_do_pal_halt)
                        safe_halt();
                else
                        cpu_relax();
+       }
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
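
can_do_pal_halt folds the "nohalt" boot option and a runtime veto into one flag tested in the hot path, replacing the old per-call PSR probe for an active PMU, and default_idle() now enables interrupts itself before polling, so it is safe to enter with them disabled. update_pal_halt_status() exists so code that needs performance-monitoring state to survive idle (perfmon, in this tree) can force the cpu_relax() fallback. A sketch of how such a caller would use it; the wrapper names below are invented, only update_pal_halt_status() comes from the patch:

    /* Illustrative caller of update_pal_halt_status(); these wrapper
     * functions are made up for the example. */
    void pmu_state_loaded(void)
    {
            /* PAL halt could interfere with live PMU state: make
             * default_idle() fall back to cpu_relax() polling */
            update_pal_halt_status(0);
    }

    void pmu_state_unloaded(void)
    {
            /* halting allowed again, but only if "nohalt" was not
             * given (can_do_pal_halt = pal_halt && status) */
            update_pal_halt_status(1);
    }
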
@@ -200,27 +212,20 @@ default_idle (void)
 static inline void play_dead(void)
 {
        extern void ia64_cpu_local_tick (void);
+       unsigned int this_cpu = smp_processor_id();
+
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;
 
-       /* We shouldn't have to disable interrupts while dead, but
-        * some interrupts just don't seem to go away, and this makes
-        * it "work" for testing purposes. */
        max_xtp();
        local_irq_disable();
-       /* Death loop */
-       while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
-               cpu_relax();
-
+       idle_task_exit();
+       ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
        /*
-        * Enable timer interrupts from now on
-        * Not required if we put processor in SAL_BOOT_RENDEZ mode.
+        * The above is a point of no-return, the processor is
+        * expected to be in SAL loop now.
         */
-       local_flush_tlb_all();
-       cpu_set(smp_processor_id(), cpu_online_map);
-       wmb();
-       ia64_cpu_local_tick ();
-       local_irq_enable();
+       BUG();
 }
 #else
 static inline void play_dead(void)
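
play_dead() no longer parks the dead CPU in a kernel spin loop waiting for CPU_UP_PREPARE; it releases its idle task with idle_task_exit() and branches back into the SAL boot rendezvous slot, so a later cpu_up() restarts it exactly like a freshly booted AP. ia64_jump_to_sal() must never return, and the BUG() after it only asserts that. The same pattern in portable C; hand_off_to_firmware() is a stand-in for ia64_jump_to_sal(), not the real SAL interface:

    /* Generic noreturn-handoff pattern, illustrative names only. */
    #include <stdlib.h>

    static void hand_off_to_firmware(void) __attribute__((noreturn));

    static void hand_off_to_firmware(void)
    {
            _Exit(0);               /* control never comes back */
    }

    static void play_dead_sketch(void)
    {
            hand_off_to_firmware();
            abort();                /* the kernel says BUG(): unreachable */
    }
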
@@ -229,20 +234,31 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-
 void cpu_idle_wait(void)
 {
-        int cpu;
-        cpumask_t map;
+       unsigned int cpu, this_cpu = get_cpu();
+       cpumask_t map;
 
-        for_each_online_cpu(cpu)
-                cpu_set(cpu, cpu_idle_map);
+       set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+       put_cpu();
 
-        wmb();
-        do {
-                ssleep(1);
-                cpus_and(map, cpu_idle_map, cpu_online_map);
-        } while (!cpus_empty(map));
+       cpus_clear(map);
+       for_each_online_cpu(cpu) {
+               per_cpu(cpu_idle_state, cpu) = 1;
+               cpu_set(cpu, map);
+       }
+
+       __get_cpu_var(cpu_idle_state) = 0;
+
+       wmb();
+       do {
+               ssleep(1);
+               for_each_online_cpu(cpu) {
+                       if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+                               cpu_clear(cpu, map);
+               }
+               cpus_and(map, map, cpu_online_map);
+       } while (!cpus_empty(map));
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
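
The rewritten cpu_idle_wait() is the synchronization that makes swapping pm_idle safe: the caller pins itself to one CPU, raises cpu_idle_state on every online CPU, clears its own flag (it occupies its CPU, which therefore cannot be idling), then sleeps until each remaining CPU has cleared its flag at the top of its idle loop; the cpus_and() with cpu_online_map keeps a CPU that goes offline mid-wait from blocking the loop forever. A compilable pthread analogue of the handshake, with NCPUS and the sleep intervals as arbitrary illustrative choices:

    /* pthread analogue of the cpu_idle_wait() handshake; not kernel
     * code.  NCPUS and the sleep intervals are arbitrary. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    #define NCPUS 4

    static atomic_uint cpu_idle_state[NCPUS];
    static atomic_bool stop;

    static void *idle_loop(void *arg)
    {
            int cpu = (int)(long)arg;

            while (!atomic_load(&stop)) {
                    /* top of the idle loop: acknowledge a pending wait */
                    if (atomic_load(&cpu_idle_state[cpu]))
                            atomic_store(&cpu_idle_state[cpu], 0);
                    usleep(1000);           /* one trip through "pm_idle" */
            }
            return NULL;
    }

    /* returns once every "CPU" has passed its loop top again */
    static void cpu_idle_wait_sketch(void)
    {
            bool pending;
            int cpu;

            for (cpu = 0; cpu < NCPUS; cpu++)
                    atomic_store(&cpu_idle_state[cpu], 1);
            do {
                    usleep(100 * 1000);     /* kernel uses ssleep(1) */
                    pending = false;
                    for (cpu = 0; cpu < NCPUS; cpu++)
                            if (atomic_load(&cpu_idle_state[cpu]))
                                    pending = true;
            } while (pending);
    }

    int main(void)
    {
            pthread_t t[NCPUS];
            int cpu;

            for (cpu = 0; cpu < NCPUS; cpu++)
                    pthread_create(&t[cpu], NULL, idle_loop,
                                   (void *)(long)cpu);
            cpu_idle_wait_sketch();
            atomic_store(&stop, true);
            for (cpu = 0; cpu < NCPUS; cpu++)
                    pthread_join(t[cpu], NULL);
            return 0;
    }
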
 
@@ -250,38 +266,42 @@ void __attribute__((noreturn))
 cpu_idle (void)
 {
        void (*mark_idle)(int) = ia64_mark_idle;
-       int cpu = smp_processor_id();
+       int cpu = smp_processor_id();
 
        /* endless idle loop with no priority at all */
        while (1) {
+               if (can_do_pal_halt)
+                       current_thread_info()->status &= ~TS_POLLING;
+               else
+                       current_thread_info()->status |= TS_POLLING;
+
+               if (!need_resched()) {
+                       void (*idle)(void);
 #ifdef CONFIG_SMP
-               if (!need_resched())
                        min_xtp();
 #endif
-               while (!need_resched()) {
-                       void (*idle)(void);
+                       if (__get_cpu_var(cpu_idle_state))
+                               __get_cpu_var(cpu_idle_state) = 0;
 
+                       rmb();
                        if (mark_idle)
                                (*mark_idle)(1);
 
-                       if (cpu_isset(cpu, cpu_idle_map))
-                               cpu_clear(cpu, cpu_idle_map);
-                       rmb();
                        idle = pm_idle;
                        if (!idle)
                                idle = default_idle;
                        (*idle)();
-               }
-
-               if (mark_idle)
-                       (*mark_idle)(0);
-
+                       if (mark_idle)
+                               (*mark_idle)(0);
 #ifdef CONFIG_SMP
-               normal_xtp();
+                       normal_xtp();
 #endif
+               }
+               preempt_enable_no_resched();
                schedule();
+               preempt_disable();
                check_pgt_cache();
-               if (cpu_is_offline(smp_processor_id()))
+               if (cpu_is_offline(cpu))
                        play_dead();
        }
 }
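
The TS_POLLING update is the subtle part of the new loop: while the CPU spins in cpu_relax() it is watching need_resched(), so a remote waker that sets the flag can skip the reschedule IPI, but when the CPU will really enter PAL halt the bit is cleared so the waker knows an interrupt is required. A userspace analogue of the waker-side contract, where a condition variable stands in for the IPI (illustrative only):

    /* Waker-side analogue of the TS_POLLING contract; the condvar
     * stands in for the reschedule IPI. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool need_resched_flag;
    static atomic_bool ts_polling;          /* is the target watching? */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

    static void resched_target(void)
    {
            atomic_store(&need_resched_flag, true);
            if (!atomic_load(&ts_polling)) {
                    /* target halted: it needs the "IPI" to notice */
                    pthread_mutex_lock(&lock);
                    pthread_cond_signal(&wake);
                    pthread_mutex_unlock(&lock);
            }
            /* else: the target is spinning on need_resched_flag and
             * will see the store without being interrupted */
    }
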
@@ -306,7 +326,7 @@ ia64_save_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(task)))
+       if (IS_IA32_PROCESS(task_pt_regs(task)))
                ia32_save_state(task);
 #endif
 }
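
This rename recurs through the rest of the file: ia64_task_regs() became task_pt_regs() when that accessor name was standardized across architectures, with identical semantics, namely returning the saved user-mode register frame at the top of the task's kernel-stack area. The shape of such an accessor, using made-up layout constants rather than ia64's real definition:

    /* Shape of a task_pt_regs()-style accessor; the stack size and
     * struct layout are made up, not ia64's actual values. */
    struct pt_regs_sketch { unsigned long gr[32]; };

    struct task_sketch {
            char stack[16384];      /* task's kernel-stack area */
    };

    /* the register frame is laid down at the top of the stack area */
    static inline struct pt_regs_sketch *
    task_pt_regs_sketch(struct task_sketch *t)
    {
            return (struct pt_regs_sketch *)
                    (t->stack + sizeof(t->stack)) - 1;
    }
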
@@ -331,7 +351,7 @@ ia64_load_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(task)))
+       if (IS_IA32_PROCESS(task_pt_regs(task)))
                ia32_load_state(task);
 #endif
 }
@@ -466,7 +486,7 @@ copy_thread (int nr, unsigned long clone_flags,
         * If we're cloning an IA32 task then save the IA32 extra
         * state from the current task to the new task
         */
-       if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                ia32_save_state(p);
                if (clone_flags & CLONE_SETTLS)
                        retval = ia32_clone_tls(p, child_ptregs);
@@ -679,7 +699,7 @@ int
 kernel_thread_helper (int (*fn)(void *), void *arg)
 {
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                /* A kernel thread is always a 64-bit process. */
                current->thread.map_base  = DEFAULT_MAP_BASE;
                current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -699,8 +719,13 @@ flush_thread (void)
        /* drop floating-point and debug-register state if it exists: */
        current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
        ia64_drop_fpu(current);
-       if (IS_IA32_PROCESS(ia64_task_regs(current)))
+#ifdef CONFIG_IA32_SUPPORT
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                ia32_drop_partial_page_list(current);
+               current->thread.task_size = IA32_PAGE_OFFSET;
+               set_fs(USER_DS);
+       }
+#endif
 }
 
 /*
@@ -710,6 +735,7 @@ flush_thread (void)
 void
 exit_thread (void)
 {
+
        ia64_drop_fpu(current);
 #ifdef CONFIG_PERFMON
        /* if needed, stop monitoring and flush state to perfmon context */
@@ -720,7 +746,7 @@ exit_thread (void)
        if (current->thread.flags & IA64_THREAD_DBG_VALID)
                pfm_release_debug_registers(current);
 #endif
-       if (IS_IA32_PROCESS(ia64_task_regs(current)))
+       if (IS_IA32_PROCESS(task_pt_regs(current)))
                ia32_drop_partial_page_list(current);
 }
 
@@ -776,19 +802,17 @@ cpu_halt (void)
 void
 machine_restart (char *restart_cmd)
 {
+       (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
        (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
 }
 
-EXPORT_SYMBOL(machine_restart);
-
 void
 machine_halt (void)
 {
+       (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
        cpu_halt();
 }
 
-EXPORT_SYMBOL(machine_halt);
-
 void
 machine_power_off (void)
 {
@@ -797,4 +821,3 @@ machine_power_off (void)
        machine_halt();
 }
 
-EXPORT_SYMBOL(machine_power_off);
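
Finally, machine_restart() and machine_halt() now announce themselves on the die notifier chain (hence the new asm/kdebug.h include), giving crash-dump and debugger code a hook at shutdown; the EXPORT_SYMBOLs are dropped because these functions are reached through the generic reboot path rather than called by modules. A sketch of a consumer, assuming the usual ia64 die-notifier calling convention; shutdown_notify and shutdown_nb are invented names, while register_die_notifier(), struct notifier_block, and struct die_args are the existing interfaces:

    /* Sketch of a die-notifier consumer; only the names local to this
     * example are invented, the notifier API itself is real. */
    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <asm/kdebug.h>

    static int shutdown_notify(struct notifier_block *nb,
                               unsigned long val, void *data)
    {
            struct die_args *args = data;

            if (val == DIE_MACHINE_RESTART)
                    printk(KERN_INFO "restart requested: %s\n", args->str);
            return NOTIFY_DONE;
    }

    static struct notifier_block shutdown_nb = {
            .notifier_call = shutdown_notify,
    };

    /* in init code: register_die_notifier(&shutdown_nb); */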