fedora core 6 1.2949 + vserver 2.2.0
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1db117a..b21ddec 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -1,20 +1,31 @@
 /*
  * SMP boot-related support
  *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2001, 2004-2005 Intel Corp
+ *     Rohit Seth <rohit.seth@intel.com>
+ *     Suresh Siddha <suresh.b.siddha@intel.com>
+ *     Gordon Jin <gordon.jin@intel.com>
+ *     Ashok Raj  <ashok.raj@intel.com>
  *
  * 01/05/16 Rohit Seth <rohit.seth@intel.com>  Moved SMP booting functions from smp.c to here.
  * 01/04/27 David Mosberger <davidm@hpl.hp.com>        Added ITC synching code.
  * 02/07/31 David Mosberger <davidm@hpl.hp.com>        Switch over to hotplug-CPU boot-sequence.
  *                                             smp_boot_cpus()/smp_commence() is replaced by
  *                                             smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
+ * 04/06/21 Ashok Raj          <ashok.raj@intel.com> Added CPU Hotplug Support
+ * 04/12/26 Jin Gordon <gordon.jin@intel.com>
+ * 04/12/26 Rohit Seth <rohit.seth@intel.com>
+ *                                             Add multi-threading and multi-core detection
+ * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
+ *                                             Setup cpu_sibling_map and cpu_core_map
  */
-#include <linux/config.h>
 
 #include <linux/module.h>
 #include <linux/acpi.h>
 #include <linux/bootmem.h>
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
 #include <linux/mm.h>
+#include <linux/notifier.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
 #include <linux/efi.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
 
 #include <asm/atomic.h>
-#include <asm/bitops.h>
 #include <asm/cache.h>
 #include <asm/current.h>
 #include <asm/delay.h>
@@ -44,6 +57,7 @@
 #include <asm/ptrace.h>
 #include <asm/sal.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
 #include <asm/unistd.h>
 
 #define SMP_DEBUG 0
 
 #if SMP_DEBUG
 #define Dprintk(x...)  printk(x)
 #else
 #define Dprintk(x...)
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PERMIT_BSP_REMOVE
+#define bsp_remove_ok  1
+#else
+#define bsp_remove_ok  0
+#endif
+
+/*
+ * Store all idle threads; they can be reused instead of creating
+ * a new thread.  This also avoids the complexity of a thread-destroy
+ * path for idle threads.
+ */
+struct task_struct *idle_thread_array[NR_CPUS];
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+/*
+ * start_ap in head.S uses this to store the info for the CPU
+ * currently being booted.
+ */
+struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
+
+#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
+
+#define get_idle_for_cpu(x)            (idle_thread_array[(x)])
+#define set_idle_for_cpu(x,p)  (idle_thread_array[(x)] = (p))
+
+#else
+
+#define get_idle_for_cpu(x)            (NULL)
+#define set_idle_for_cpu(x,p)
+#define set_brendez_area(x)
+#endif
+
 
 /*
  * ITC synchronization related stuff:
  */
-#define MASTER 0
+#define MASTER (0)
 #define SLAVE  (SMP_CACHE_BYTES/8)
 
 #define NUM_ROUNDS     64      /* magic value */
 #define NUM_ITERS      5       /* likewise */
 
-static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(itc_sync_lock);
 static volatile unsigned long go[SLAVE + 1];
 
 #define DEBUG_ITC_SYNC 0
 
-extern void __init calibrate_delay (void);
+extern void __devinit calibrate_delay (void);
 extern void start_ap (void);
 extern unsigned long ia64_iobase;
 
-task_t *task_for_booting_cpu;
+struct task_struct *task_for_booting_cpu;
+
+/*
+ * State for each CPU
+ */
+DEFINE_PER_CPU(int, cpu_state);
 
-/* Bitmask of currently online CPUs */
+/* Bitmasks of currently online and possible CPUs */
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
-cpumask_t phys_cpu_present_map;
-EXPORT_SYMBOL(phys_cpu_present_map);
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_possible_map);
+
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+int smp_num_siblings = 1;
+int smp_num_cpucores = 1;
 
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
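
A note on the per-CPU cpu_state added above: it records each CPU's
hotplug lifecycle, reusing the CPU_* constants that come in via
<linux/notifier.h> in kernels of this era (likely one reason that
include is added in this patch).  A sketch of the transitions wired up
in later hunks, with hypothetical SKETCH_* names standing in for the
real constants:

    /* Sketch only; not part of the patch. */
    enum cpu_state_sketch {
            SKETCH_UP_PREPARE,      /* __cpu_up() sets CPU_UP_PREPARE before the wakeup IPI  */
            SKETCH_ONLINE,          /* smp_callin() sets CPU_ONLINE on the freshly booted AP */
            SKETCH_DEAD,            /* play_dead() sets CPU_DEAD; __cpu_die() polls for it   */
    };
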
@@ -95,15 +156,58 @@ char __initdata no_int_routing;
 
 unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
 
+#ifdef CONFIG_FORCE_CPEI_RETARGET
+#define CPEI_OVERRIDE_DEFAULT  (1)
+#else
+#define CPEI_OVERRIDE_DEFAULT  (0)
+#endif
+
+unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
+
+static int __init
+cmdl_force_cpei(char *str)
+{
+       int value=0;
+
+       get_option (&str, &value);
+       force_cpei_retarget = value;
+
+       return 1;
+}
+
+__setup("force_cpei=", cmdl_force_cpei);
+
 static int __init
 nointroute (char *str)
 {
        no_int_routing = 1;
+       printk ("no_int_routing on\n");
        return 1;
 }
 
 __setup("nointroute", nointroute);
 
+static void fix_b0_for_bsp(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       int cpuid;
+       static int fix_bsp_b0 = 1;
+
+       cpuid = smp_processor_id();
+
+       /*
+        * Cache the b0 value on the first AP that comes up
+        */
+       if (!(fix_bsp_b0 && cpuid))
+               return;
+
+       sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
+       printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
+
+       fix_bsp_b0 = 0;
+#endif
+}
+
 void
 sync_master (void *arg)
 {
@@ -114,7 +218,8 @@ sync_master (void *arg)
        local_irq_save(flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
-                       while (!go[MASTER]);
+                       while (!go[MASTER])
+                               cpu_relax();
                        go[MASTER] = 0;
                        go[SLAVE] = ia64_get_itc();
                }
@@ -137,7 +242,8 @@ get_delta (long *rt, long *master)
        for (i = 0; i < NUM_ITERS; ++i) {
                t0 = ia64_get_itc();
                go[MASTER] = 1;
-               while (!(tm = go[SLAVE]));
+               while (!(tm = go[SLAVE]))
+                       cpu_relax();
                go[SLAVE] = 0;
                t1 = ia64_get_itc();
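
The t0/tm/t1 triple above implements a classic round-trip offset
estimate: the slave brackets the master's sample tm between two local
ITC reads and assumes tm lines up with the middle of that window, so
the error is bounded by half the round trip.  A worked example with
made-up cycle counts (standalone C, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            long t0 = 1000, t1 = 1040;      /* slave ITC before/after the exchange */
            long tm = 900;                  /* master ITC sampled in between       */
            long tcenter = t0/2 + t1/2;     /* overflow-safe midpoint              */

            printf("slave ahead of master by ~%ld cycles (+/- %ld)\n",
                   tcenter - tm, (t1 - t0) / 2);
            return 0;
    }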
 
@@ -192,7 +298,6 @@ ia64_sync_itc (unsigned int master)
 {
        long i, delta, adj, adjust_latency = 0, done = 0;
        unsigned long flags, rt, master_time_stamp, bound;
-       extern void ia64_cpu_local_tick (void);
 #if DEBUG_ITC_SYNC
        struct {
                long rt;        /* roundtrip time */
@@ -202,6 +307,14 @@ ia64_sync_itc (unsigned int master)
        } t[NUM_ROUNDS];
 #endif
 
+       /*
+        * Make sure local timer ticks are disabled while we sync.  If
+        * they were enabled, we'd have to worry about nasty issues
+        * like setting the ITC ahead of (or a long time before) the
+        * next scheduled tick.
+        */
+       BUG_ON((ia64_get_itv() & (1 << 16)) == 0);
+
        go[MASTER] = 1;
 
        if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
@@ -209,7 +322,8 @@ ia64_sync_itc (unsigned int master)
                return;
        }
 
-       while (go[MASTER]);     /* wait for master to be ready */
+       while (go[MASTER])
+               cpu_relax();    /* wait for master to be ready */
 
        spin_lock_irqsave(&itc_sync_lock, flags);
        {
@@ -247,31 +361,22 @@ ia64_sync_itc (unsigned int master)
 
        printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
               "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
-
-       /*
-        * Check whether we sync'd the itc ahead of the next timer interrupt.  If so, just
-        * reset it.
-        */
-       if (time_after(ia64_get_itc(), local_cpu_data->itm_next)) {
-               Dprintk("CPU %d: oops, jumped a timer tick; resetting timer.\n",
-                       smp_processor_id());
-               ia64_cpu_local_tick();
-       }
 }
 
 /*
  * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
  */
-static inline void __init
+static inline void __devinit
 smp_setup_percpu_timer (void)
 {
 }
 
-static void __init
+static void __devinit
 smp_callin (void)
 {
-       int cpuid, phys_id;
+       int cpuid, phys_id, itc_master;
        extern void ia64_init_itm(void);
+       extern volatile int time_keeper_id;
 
 #ifdef CONFIG_PERFMON
        extern void pfm_init_percpu(void);
@@ -279,37 +384,30 @@ smp_callin (void)
 
        cpuid = smp_processor_id();
        phys_id = hard_smp_processor_id();
+       itc_master = time_keeper_id;
 
-       if (cpu_test_and_set(cpuid, cpu_online_map)) {
+       if (cpu_online(cpuid)) {
                printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
                       phys_id, cpuid);
                BUG();
        }
 
-       smp_setup_percpu_timer();
+       fix_b0_for_bsp();
 
-       /*
-        * Get our bogomips.
-        */
-       ia64_init_itm();
+       lock_ipi_calllock();
+       cpu_set(cpuid, cpu_online_map);
+       unlock_ipi_calllock();
+       per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 
-       /*
-        * Set I/O port base per CPU
-        */
-       ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
+       smp_setup_percpu_timer();
 
-       ia64_mca_cmc_vector_setup();    /* Setup vector on AP & enable */
+       ia64_mca_cmc_vector_setup();    /* Setup vector on AP */
 
 #ifdef CONFIG_PERFMON
        pfm_init_percpu();
 #endif
 
        local_irq_enable();
-       calibrate_delay();
-       local_cpu_data->loops_per_jiffy = loops_per_jiffy;
-#ifdef CONFIG_IA32_SUPPORT
-       ia32_gdt_init();
-#endif
 
        if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
                /*
@@ -318,10 +416,21 @@ smp_callin (void)
                 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
                 * local_bh_enable(), which bugs out if irqs are not enabled...
                 */
-               Dprintk("Going to syncup ITC with BP.\n");
-               ia64_sync_itc(0);
+               Dprintk("Going to syncup ITC with ITC Master.\n");
+               ia64_sync_itc(itc_master);
        }
 
+       /*
+        * Get our bogomips.
+        */
+       ia64_init_itm();
+       calibrate_delay();
+       local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+
+#ifdef CONFIG_IA32_SUPPORT
+       ia32_gdt_init();
+#endif
+
        /*
         * Allow the master to continue.
         */
@@ -333,55 +442,80 @@ smp_callin (void)
 /*
  * Activate a secondary processor.  head.S calls this.
  */
-int __init
+int __devinit
 start_secondary (void *unused)
 {
-       extern int cpu_idle (void);
-
+       /* Early console may use I/O ports */
+       ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
        Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
        efi_map_pal_code();
        cpu_init();
+       preempt_disable();
        smp_callin();
 
-       return cpu_idle();
+       cpu_idle();
+       return 0;
 }
 
-static struct task_struct * __init
-fork_by_hand (void)
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 {
-       /*
-        * Don't care about the IP and regs settings since we'll never reschedule the
-        * forked task.
-        */
-       return copy_process(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL, NULL);
+       return NULL;
 }
 
-static int __init
+struct create_idle {
+       struct work_struct work;
+       struct task_struct *idle;
+       struct completion done;
+       int cpu;
+};
+
+void
+do_fork_idle(struct work_struct *work)
+{
+       struct create_idle *c_idle =
+               container_of(work, struct create_idle, work);
+
+       c_idle->idle = fork_idle(c_idle->cpu);
+       complete(&c_idle->done);
+}
+
+static int __devinit
 do_boot_cpu (int sapicid, int cpu)
 {
-       struct task_struct *idle;
        int timeout;
+       struct create_idle c_idle = {
+               .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
+               .cpu    = cpu,
+               .done   = COMPLETION_INITIALIZER(c_idle.done),
+       };
+
+       c_idle.idle = get_idle_for_cpu(cpu);
+       if (c_idle.idle) {
+               init_idle(c_idle.idle, cpu);
+               goto do_rest;
+       }
 
        /*
         * We can't use kernel_thread since we must avoid to reschedule the child.
         */
-       idle = fork_by_hand();
-       if (IS_ERR(idle))
-               panic("failed fork for CPU %d", cpu);
-       wake_up_forked_process(idle);
+       if (!keventd_up() || current_is_keventd())
+               c_idle.work.func(&c_idle.work);
+       else {
+               schedule_work(&c_idle.work);
+               wait_for_completion(&c_idle.done);
+       }
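
The block above hands the fork to keventd and blocks on a completion,
unless keventd is not running yet (or we are keventd), in which case
it calls the work function directly.  A userspace sketch of that
request/completion shape, with pthreads standing in for the workqueue
and struct completion, and fake_fork_idle() standing in for
fork_idle() (all names here are illustrative, not kernel APIs):

    #include <pthread.h>
    #include <stdio.h>

    struct create_idle_req {
            int cpu;                        /* request parameter              */
            long idle;                      /* result filled in by the worker */
            pthread_mutex_t lock;
            pthread_cond_t cv;
            int done;                       /* the "completion"               */
    };

    static long fake_fork_idle(int cpu)
    {
            return 1000 + cpu;              /* pretend task pointer */
    }

    static void *worker(void *arg)          /* plays the role of keventd */
    {
            struct create_idle_req *req = arg;

            req->idle = fake_fork_idle(req->cpu);
            pthread_mutex_lock(&req->lock);
            req->done = 1;                  /* complete(&c_idle.done) */
            pthread_cond_signal(&req->cv);
            pthread_mutex_unlock(&req->lock);
            return NULL;
    }

    int main(void)
    {
            struct create_idle_req req = {
                    .cpu  = 2,
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .cv   = PTHREAD_COND_INITIALIZER,
            };
            pthread_t t;

            pthread_create(&t, NULL, worker, &req);         /* schedule_work()       */
            pthread_mutex_lock(&req.lock);
            while (!req.done)                               /* wait_for_completion() */
                    pthread_cond_wait(&req.cv, &req.lock);
            pthread_mutex_unlock(&req.lock);
            pthread_join(t, NULL);

            printf("idle thread for cpu %d: %ld\n", req.cpu, req.idle);
            return 0;
    }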
 
-       /*
-        * We remove it from the pidhash and the runqueue
-        * once we got the process:
-        */
-       init_idle(idle, cpu);
+       if (IS_ERR(c_idle.idle))
+               panic("failed fork for CPU %d", cpu);
 
-       unhash_process(idle);
+       set_idle_for_cpu(cpu, c_idle.idle);
 
-       task_for_booting_cpu = idle;
+do_rest:
+       task_for_booting_cpu = c_idle.idle;
 
        Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
 
+       set_brendez_area(cpu);
        platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
 
        /*
@@ -409,25 +543,11 @@ decay (char *str)
 {
        int ticks;
        get_option (&str, &ticks);
-       cache_decay_ticks = ticks;
        return 1;
 }
 
 __setup("decay=", decay);
 
-/*
- * # of ticks an idle task is considered cache-hot.  Highly application-dependent.  There
- * are apps out there which are known to suffer significantly with values >= 4.
- */
-unsigned long cache_decay_ticks = 10;  /* equal to MIN_TIMESLICE */
-
-static void
-smp_tune_scheduling (void)
-{
-       printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
-              (cache_decay_ticks + 1) * 1000 / HZ);
-}
-
 /*
  * Initialize the logical CPU number to SAPICID mapping
  */
@@ -437,64 +557,25 @@ smp_build_cpu_map (void)
        int sapicid, cpu, i;
        int boot_cpu_id = hard_smp_processor_id();
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
                ia64_cpu_to_sapicid[cpu] = -1;
+       }
 
        ia64_cpu_to_sapicid[0] = boot_cpu_id;
-       cpus_clear(phys_cpu_present_map);
-       cpu_set(0, phys_cpu_present_map);
-
+       cpus_clear(cpu_present_map);
+       cpu_set(0, cpu_present_map);
+       cpu_set(0, cpu_possible_map);
        for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
                sapicid = smp_boot_data.cpu_phys_id[i];
                if (sapicid == boot_cpu_id)
                        continue;
-               cpu_set(cpu, phys_cpu_present_map);
+               cpu_set(cpu, cpu_present_map);
+               cpu_set(cpu, cpu_possible_map);
                ia64_cpu_to_sapicid[cpu] = sapicid;
                cpu++;
        }
 }
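
smp_build_cpu_map() now maintains cpu_present_map and cpu_possible_map
instead of the old phys_cpu_present_map; together with cpu_online_map
they nest (online within present within possible).  A standalone check
with illustrative mask values for a hypothetical 4-way box, bit i
standing for logical CPU i:

    #include <stdio.h>

    int main(void)
    {
            unsigned possible = 0xf;        /* fixed at boot                      */
            unsigned present  = 0xf;        /* CPUs that exist, may be brought up */
            unsigned online   = 0x3;        /* CPUs currently accepting work      */

            printf("nesting holds: %s\n",
                   ((online & ~present) == 0 && (present & ~possible) == 0)
                   ? "yes" : "no");
            return 0;
    }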
 
-#ifdef CONFIG_NUMA
-
-/* on which node is each logical CPU (one cacheline even for 64 CPUs) */
-u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_to_node_map);
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
-
-/*
- * Build cpu to node mapping and initialize the per node cpu masks.
- */
-void __init
-build_cpu_to_node_map (void)
-{
-       int cpu, i, node;
-
-       for(node=0; node<MAX_NUMNODES; node++)
-               cpus_clear(node_to_cpu_mask[node]);
-       for(cpu = 0; cpu < NR_CPUS; ++cpu) {
-               /*
-                * All Itanium NUMA platforms I know use ACPI, so maybe we
-                * can drop this ifdef completely.                    [EF]
-                */
-#ifdef CONFIG_ACPI_NUMA
-               node = -1;
-               for (i = 0; i < NR_CPUS; ++i)
-                       if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
-                               node = node_cpuid[i].nid;
-                               break;
-                       }
-#else
-#              error Fixme: Dunno how to build CPU-to-node map.
-#endif
-               cpu_to_node_map[cpu] = node;
-               if (node >= 0)
-                       cpu_set(cpu, node_to_cpu_mask[node]);
-       }
-}
-
-#endif /* CONFIG_NUMA */
-
 /*
  * Cycle through the APs sending Wakeup IPIs to boot each.
  */
@@ -521,7 +602,6 @@ smp_prepare_cpus (unsigned int max_cpus)
        printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
 
        current_thread_info()->cpu = 0;
-       smp_tune_scheduling();
 
        /*
         * If SMP should be disabled, then really disable it!
@@ -529,9 +609,11 @@ smp_prepare_cpus (unsigned int max_cpus)
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                cpus_clear(cpu_online_map);
-               cpus_clear(phys_cpu_present_map);
+               cpus_clear(cpu_present_map);
+               cpus_clear(cpu_possible_map);
                cpu_set(0, cpu_online_map);
-               cpu_set(0, phys_cpu_present_map);
+               cpu_set(0, cpu_present_map);
+               cpu_set(0, cpu_possible_map);
                return;
        }
 }
@@ -540,7 +622,138 @@ void __devinit smp_prepare_boot_cpu(void)
 {
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callin_map);
+       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void
+clear_cpu_sibling_map(int cpu)
+{
+       int i;
+
+       for_each_cpu_mask(i, cpu_sibling_map[cpu])
+               cpu_clear(cpu, cpu_sibling_map[i]);
+       for_each_cpu_mask(i, cpu_core_map[cpu])
+               cpu_clear(cpu, cpu_core_map[i]);
+
+       cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+}
+
+static void
+remove_siblinginfo(int cpu)
+{
+       int last = 0;
+
+       if (cpu_data(cpu)->threads_per_core == 1 &&
+           cpu_data(cpu)->cores_per_socket == 1) {
+               cpu_clear(cpu, cpu_core_map[cpu]);
+               cpu_clear(cpu, cpu_sibling_map[cpu]);
+               return;
+       }
+
+       last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+
+       /* remove it from all sibling maps */
+       clear_cpu_sibling_map(cpu);
+}
+
+extern void fixup_irqs(void);
+
+int migrate_platform_irqs(unsigned int cpu)
+{
+       int new_cpei_cpu;
+       irq_desc_t *desc = NULL;
+       cpumask_t       mask;
+       int             retval = 0;
+
+       /*
+        * Don't permit the CPEI target CPU to be removed.
+        */
+       if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
+               printk ("CPU (%d) is CPEI Target\n", cpu);
+               if (can_cpei_retarget()) {
+                       /*
+                        * Now re-target the CPEI to a different processor
+                        */
+                       new_cpei_cpu = any_online_cpu(cpu_online_map);
+                       mask = cpumask_of_cpu(new_cpei_cpu);
+                       set_cpei_target_cpu(new_cpei_cpu);
+                       desc = irq_desc + ia64_cpe_irq;
+                       /*
+                        * Switch over immediately for now.  We should fake the
+                        * interrupt the way other interrupts do, but CPEI
+                        * behaviour with polling needs study before making
+                        * that change.
+                        */
+                       if (desc) {
+                               desc->chip->disable(ia64_cpe_irq);
+                               desc->chip->set_affinity(ia64_cpe_irq, mask);
+                               desc->chip->enable(ia64_cpe_irq);
+                               printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
+                       }
+               }
+               if (!desc) {
+                       printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
+                       retval = -EBUSY;
+               }
+       }
+       return retval;
+}
+
+/* must be called with cpucontrol mutex held */
+int __cpu_disable(void)
+{
+       int cpu = smp_processor_id();
+
+       /*
+        * Don't permit removal of the boot processor for now.
+        */
+       if (cpu == 0 && !bsp_remove_ok) {
+               printk ("Your platform does not support removal of BSP\n");
+               return (-EBUSY);
+       }
+
+       cpu_clear(cpu, cpu_online_map);
+
+       if (migrate_platform_irqs(cpu)) {
+               cpu_set(cpu, cpu_online_map);
+               return (-EBUSY);
+       }
+
+       remove_siblinginfo(cpu);
+       cpu_clear(cpu, cpu_online_map);
+       fixup_irqs();
+       local_flush_tlb_all();
+       cpu_clear(cpu, cpu_callin_map);
+       return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       unsigned int i;
+
+       for (i = 0; i < 100; i++) {
+               /* They ack this in play_dead by setting CPU_DEAD */
+               if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+                       printk ("CPU %d is now offline\n", cpu);
+                       return;
+               }
+               msleep(100);
+       }
+       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+       return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       /* We said "no" in __cpu_disable */
+       BUG();
 }
+#endif /* CONFIG_HOTPLUG_CPU */
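
The __cpu_die()/play_dead() handshake above is a bounded poll: the
dying CPU publishes CPU_DEAD as its last visible act, and the survivor
checks for it 100 times with msleep(100), i.e. a budget of about 10
seconds.  A standalone sketch of that shape (pthreads and an atomic
flag standing in for the two CPUs and the per-CPU state; not kernel
code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int state;                /* 0 = alive, 1 = "CPU_DEAD" */

    static void *play_dead(void *arg)
    {
            (void)arg;
            atomic_store(&state, 1);        /* last visible act before halting */
            return NULL;
    }

    int main(void)
    {
            pthread_t dying;

            pthread_create(&dying, NULL, play_dead, NULL);
            for (int i = 0; i < 100; i++) {
                    if (atomic_load(&state) == 1) {
                            printf("CPU is now offline\n");
                            pthread_join(dying, NULL);
                            return 0;
                    }
                    usleep(100 * 1000);     /* msleep(100) */
            }
            printf("CPU didn't die...\n");
            pthread_join(dying, NULL);
            return 1;
    }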
 
 void
 smp_cpus_done (unsigned int dummy)
@@ -552,14 +765,31 @@ smp_cpus_done (unsigned int dummy)
         * Allow the user to impress friends.
         */
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
-               if (cpu_online(cpu))
-                       bogosum += cpu_data(cpu)->loops_per_jiffy;
+       for_each_online_cpu(cpu) {
+               bogosum += cpu_data(cpu)->loops_per_jiffy;
+       }
 
        printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
               (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
 }
 
+static inline void __devinit
+set_cpu_sibling_map(int cpu)
+{
+       int i;
+
+       for_each_online_cpu(i) {
+               if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
+                       cpu_set(i, cpu_core_map[cpu]);
+                       cpu_set(cpu, cpu_core_map[i]);
+                       if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
+                               cpu_set(i, cpu_sibling_map[cpu]);
+                               cpu_set(cpu, cpu_sibling_map[i]);
+                       }
+               }
+       }
+}
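
set_cpu_sibling_map() above does a pairwise comparison: CPUs sharing a
socket_id land in each other's core map, and CPUs also sharing a
core_id land in each other's sibling map.  A userspace sketch of the
same computation for a hypothetical one-socket, two-core, two-thread
box (the topology arrays are made up):

    #include <stdio.h>

    #define NCPU 4

    static const int socket_id[NCPU] = { 0, 0, 0, 0 };
    static const int core_id[NCPU]   = { 0, 0, 1, 1 };

    int main(void)
    {
            unsigned core_map[NCPU] = { 0 }, sibling_map[NCPU] = { 0 };

            for (int cpu = 0; cpu < NCPU; cpu++)
                    for (int i = 0; i < NCPU; i++)
                            if (socket_id[cpu] == socket_id[i]) {
                                    core_map[cpu] |= 1u << i;
                                    if (core_id[cpu] == core_id[i])
                                            sibling_map[cpu] |= 1u << i;
                            }

            for (int cpu = 0; cpu < NCPU; cpu++)
                    printf("cpu%d: core_map=%#x sibling_map=%#x\n",
                           cpu, core_map[cpu], sibling_map[cpu]);
            return 0;
    }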
+
 int __devinit
 __cpu_up (unsigned int cpu)
 {
@@ -570,11 +800,28 @@ __cpu_up (unsigned int cpu)
        if (sapicid == -1)
                return -EINVAL;
 
+       /*
+        * Already-booted cpu?  Not valid anymore, since we don't
+        * tight-spin in the idle loop anymore.
+        */
+       if (cpu_isset(cpu, cpu_callin_map))
+               return -EINVAL;
+
+       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Processor goes to start_secondary(), sets online flag */
        ret = do_boot_cpu(sapicid, cpu);
        if (ret < 0)
                return ret;
 
+       if (cpu_data(cpu)->threads_per_core == 1 &&
+           cpu_data(cpu)->cores_per_socket == 1) {
+               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpu_set(cpu, cpu_core_map[cpu]);
+               return 0;
+       }
+
+       set_cpu_sibling_map(cpu);
+
        return 0;
 }
 
@@ -601,3 +848,60 @@ init_smp_config(void)
                printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
                       ia64_sal_strerror(sal_ret));
 }
+
+/*
+ * identify_siblings(cpu) is called from identify_cpu().  It populates the
+ * per-CPU data with information about the logical execution units.
+ */
+void __devinit
+identify_siblings(struct cpuinfo_ia64 *c)
+{
+       s64 status;
+       u16 pltid;
+       pal_logical_to_physical_t info;
+
+       if (smp_num_cpucores == 1 && smp_num_siblings == 1)
+               return;
+
+       if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS) {
+               printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+                      status);
+               return;
+       }
+       if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
+               printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
+               return;
+       }
+
+       c->socket_id =  (pltid << 8) | info.overview_ppid;
+       c->cores_per_socket = info.overview_cpp;
+       c->threads_per_core = info.overview_tpc;
+       c->num_log = info.overview_num_log;
+
+       c->core_id = info.log1_cid;
+       c->thread_id = info.log1_tid;
+}
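
identify_siblings() packs the socket id from two firmware sources: the
SAL platform id in the high bits and PAL's physical package id in the
low byte.  A worked example with made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned short pltid = 2;       /* from ia64_sal_physical_id_info() */
            unsigned char ppid   = 5;       /* info.overview_ppid from PAL      */

            printf("socket_id = %#x\n", (pltid << 8) | ppid);   /* 0x205 */
            return 0;
    }
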
+
+/*
+ * Returns nonzero if multi-threading is enabled on at least one
+ * physical package.  Due to CPU hotplug and maxcpus=, not all threads
+ * are necessarily enabled even when the processor supports
+ * multi-threading.
+ */
+int is_multithreading_enabled(void)
+{
+       int i, j;
+
+       for_each_present_cpu(i) {
+               for_each_present_cpu(j) {
+                       if (j == i)
+                               continue;
+                       if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
+                               if (cpu_data(j)->core_id == cpu_data(i)->core_id)
+                                       return 1;
+                       }
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(is_multithreading_enabled);