*/
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include <asm/pgalloc.h>
+#if defined(__i386__)
+#include <asm/pda.h>
+#endif
#include <xen/evtchn.h>
#include <xen/interface/vcpu.h>
#include <xen/cpu_hotplug.h>
#include <xen/xenbus.h>
-extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
-extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
+extern irqreturn_t smp_reschedule_interrupt(int, void *);
+extern irqreturn_t smp_call_function_interrupt(int, void *);
extern void local_setup_timer(unsigned int cpu);
extern void local_teardown_timer(unsigned int cpu);
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
-int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
-EXPORT_SYMBOL(phys_proc_id);
-int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
-EXPORT_SYMBOL(cpu_core_id);
+EXPORT_SYMBOL(smp_num_siblings);
#if defined(__i386__)
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
#elif defined(__x86_64__)
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
#endif
+EXPORT_SYMBOL(cpu_llc_id);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
void *xquad_portio;
cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_sibling_map);
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
#if defined(__i386__)
u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);
-#elif !defined(CONFIG_X86_IO_APIC)
-unsigned int maxcpus = NR_CPUS;
#endif
void __init prefill_possible_map(void)
static inline void
set_cpu_sibling_map(int cpu)
{
+	/* Xen gives no dynamic NUMA/HT topology info, so report each
+	 * CPU as its own package with a single core and no siblings
+	 * (topology fields now live in cpu_data[], not global arrays). */
- phys_proc_id[cpu] = cpu;
- cpu_core_id[cpu] = 0;
+ cpu_data[cpu].phys_proc_id = cpu;
+ cpu_data[cpu].cpu_core_id = 0;
cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
cpu_core_map[cpu] = cpumask_of_cpu(cpu);
}
#endif
+#ifdef __i386__
+/* Load the selector for this CPU's PDA descriptor (__KERNEL_PDA in
+ * the GDT) into %gs, so subsequent PDA operations can be addressed
+ * %gs-relative. i386 only; x86_64 manages its PDA differently. */
+static inline void set_kernel_gs(void)
+{
+ /* Set %gs for this CPU's PDA. The "memory" clobber acts as a
+ compiler barrier with respect to any PDA operations, so the
+ compiler doesn't move any to before this load. */
+ asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
+}
+#endif
+
void cpu_bringup(void)
{
+#ifdef __i386__
+ set_kernel_gs();
+ secondary_cpu_init();
+#else
cpu_init();
+#endif
touch_softlockup_watchdog();
preempt_disable();
local_irq_enable();
xen_smp_intr_init(0);
- for_each_cpu_mask (cpu, cpu_possible_map) {
+ /* Restrict the possible_map according to max_cpus. */
+ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
+ for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
+ continue;
+ cpu_clear(cpu, cpu_possible_map);
+ }
+
+ for_each_possible_cpu (cpu) {
+#ifdef __i386__
+ struct i386_pda *pda;
+ struct desc_struct *gdt;
+#endif
+
if (cpu == 0)
continue;
#endif
gdt_descr->address = get_zeroed_page(GFP_KERNEL);
if (unlikely(!gdt_descr->address)) {
- printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
+ cpu);
continue;
}
gdt_descr->size = GDT_SIZE;
memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
+#ifdef __i386__
+ gdt = (struct desc_struct *)gdt_descr->address;
+ pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
+
+ if (unlikely(!pda)) {
+ printk(KERN_CRIT "CPU%d failed to allocate PDA\n",
+ cpu);
+ continue;
+ }
+ cpu_pda(cpu) = pda;
+ cpu_pda(cpu)->cpu_number = cpu;
+ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
+ (u32 *)&gdt[GDT_ENTRY_PDA].b,
+ (unsigned long)pda, sizeof(*pda) - 1,
+ 0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
+#endif
make_page_readonly(
(void *)gdt_descr->address,
XENFEAT_writable_descriptor_tables);
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
-#ifdef __x86_64__
cpu_pda(cpu)->pcurrent = idle;
+#ifdef __x86_64__
cpu_pda(cpu)->cpunumber = cpu;
clear_ti_thread_flag(idle->thread_info, TIF_FORK);
#endif
irq_ctx_init(cpu);
#ifdef CONFIG_HOTPLUG_CPU
- if (xen_start_info->flags & SIF_INITDOMAIN)
+ if (is_initial_xendomain())
cpu_set(cpu, cpu_present_map);
#else
cpu_set(cpu, cpu_present_map);
init_xenbus_allowed_cpumask();
- /* Currently, Xen gives no dynamic NUMA/HT info. */
- for (cpu = 1; cpu < NR_CPUS; cpu++) {
- cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
- cpu_core_map[cpu] = cpumask_of_cpu(cpu);
- }
-
-#ifdef CONFIG_X86_IO_APIC
/*
* Here we can be sure that there is an IO-APIC in the system. Let's
* go and set it up:
*/
+#ifdef CONFIG_X86_IO_APIC
if (!skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
#endif
}
-void __devinit smp_prepare_boot_cpu(void)
+/* Intentionally empty: no boot-CPU fixups are done in this port.
+ * NOTE(review): annotation change __devinit -> __init assumes this
+ * hook is only reached at boot, never on CPU hotplug -- confirm. */
+void __init smp_prepare_boot_cpu(void)
{
}
static void
remove_siblinginfo(int cpu)
{
- phys_proc_id[cpu] = BAD_APICID;
- cpu_core_id[cpu] = BAD_APICID;
+ cpu_data[cpu].phys_proc_id = BAD_APICID;
+ cpu_data[cpu].cpu_core_id = BAD_APICID;
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
xen_smp_intr_exit(cpu);
-#ifdef __i386__
if (num_online_cpus() == 1)
alternatives_smp_switch(0);
-#endif
}
#else /* !CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_HOTPLUG_CPU */
-int __devinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
{
int rc;
if (rc)
return rc;
-#ifdef __i386__
if (num_online_cpus() == 1)
alternatives_smp_switch(1);
-#endif
/* This must be done before setting cpu_online_map */
set_cpu_sibling_map(cpu);
{
}
-#ifndef CONFIG_X86_LOCAL_APIC
+#ifdef CONFIG_X86_MPPARSE
+/*
+ * If the BIOS enumerates physical processors before logical,
+ * maxcpus=N at enumeration-time can be used to disable HT.
+ *
+ * early_param handler for "maxcpus": parses the value into the
+ * global 'maxcpus' limit. Returns 0 on success, -EINVAL when the
+ * parameter was given without a value.
+ */
+static int __init parse_maxcpus(char *arg)
+{
+ extern unsigned int maxcpus;
+
+ /* early_param handlers get arg == NULL when the option is passed
+ * bare ("maxcpus" with no '=value'); simple_strtoul() would
+ * dereference NULL, oopsing during early boot. */
+ if (!arg)
+ return -EINVAL;
+ maxcpus = simple_strtoul(arg, NULL, 0);
+ return 0;
+}
+early_param("maxcpus", parse_maxcpus);
+#endif
+
+#if defined(CONFIG_XEN_UNPRIVILEGED_GUEST) && defined(CONFIG_X86_32)
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;