vserver 1.9.5.x5
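
This diff updates the s390 SMP bringup/teardown code. Static spinlock
initializers become DEFINE_SPINLOCK(); the CONFIG_HOTPLUG_CPU and
non-hotplug variants of smp_check_cpus() are merged into one routine
that senses CPU addresses into cpu_present_map; idle task creation
switches to fork_idle(); __cpu_up() builds a proper initial stack frame
for the new cpu; the busy-wait loops in __cpu_up() and __cpu_die() call
cpu_relax(); a cpu going down runs pfault_fini() and idle_task_exit();
smp_prepare_cpus() allocates a per-cpu panic stack under
CONFIG_CHECK_STACK; and smp_cpus_done() copies cpu_possible_map into
cpu_present_map.
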
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index eb8a516..24550d2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,7 +42,6 @@
 #include <asm/tlbflush.h>
 
 /* prototypes */
-extern int cpu_idle(void * unused);
 
 extern volatile int __cpu_logical_map[];
 
@@ -77,7 +76,7 @@ static void smp_ext_bitcall_others(ec_bit_sig);
  * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
  */
-static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(call_lock);
 
 struct call_data_struct {
        void (*func) (void *info);
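
The SPIN_LOCK_UNLOCKED conversions in this patch (call_lock here,
smp_reserve_lock further down) follow the kernel-wide move to
DEFINE_SPINLOCK(). A minimal sketch of the two forms, with an
illustrative lock name that is not from this file:

	/* old style: bare initializer macro */
	static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;

	/* new style: declares and statically initializes the lock
	 * in one macro */
	static DEFINE_SPINLOCK(demo_lock);
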
@@ -487,55 +486,45 @@ void smp_ctl_clear_bit(int cr, int bit) {
  * Lets check how many CPUs we have.
  */
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 void
 __init smp_check_cpus(unsigned int max_cpus)
 {
-       int cpu;
+       int cpu, num_cpus;
+       __u16 boot_cpu_addr;
 
        /*
         * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
         */
-       for (cpu = 1; cpu < max_cpus; cpu++)
-               cpu_set(cpu, cpu_possible_map);
-}
 
-#else /* CONFIG_HOTPLUG_CPU */
+       boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+       __cpu_logical_map[0] = boot_cpu_addr;
+       current_thread_info()->cpu = 0;
+       num_cpus = 1;
+       for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
+               if ((__u16) cpu == boot_cpu_addr)
+                       continue;
+               __cpu_logical_map[num_cpus] = (__u16) cpu;
+               if (signal_processor(num_cpus, sigp_sense) ==
+                   sigp_not_operational)
+                       continue;
+               cpu_set(num_cpus, cpu_present_map);
+               num_cpus++;
+       }
 
-void
-__init smp_check_cpus(unsigned int max_cpus)
-{
-        int curr_cpu, num_cpus;
-       __u16 boot_cpu_addr;
+       for (cpu = 1; cpu < max_cpus; cpu++)
+               cpu_set(cpu, cpu_possible_map);
 
-       boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
-        current_thread_info()->cpu = 0;
-        num_cpus = 1;
-        for (curr_cpu = 0;
-             curr_cpu <= 65535 && num_cpus < max_cpus; curr_cpu++) {
-                if ((__u16) curr_cpu == boot_cpu_addr)
-                        continue;
-                __cpu_logical_map[num_cpus] = (__u16) curr_cpu;
-                if (signal_processor(num_cpus, sigp_sense) ==
-                    sigp_not_operational)
-                        continue;
-               cpu_set(num_cpus, cpu_possible_map);
-                num_cpus++;
-        }
-        printk("Detected %d CPU's\n",(int) num_cpus);
-        printk("Boot cpu address %2X\n", boot_cpu_addr);
+       printk("Detected %d CPU's\n",(int) num_cpus);
+       printk("Boot cpu address %2X\n", boot_cpu_addr);
 }
 
-#endif /* CONFIG_HOTPLUG_CPU */
-
 /*
  *      Activate a secondary processor.
  */
 extern void init_cpu_timer(void);
 extern void init_cpu_vtimer(void);
 extern int pfault_init(void);
-extern int pfault_token(void);
+extern void pfault_fini(void);
 
 int __devinit start_secondary(void *cpuvoid)
 {
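
The rewritten smp_check_cpus() above merges the old CONFIG_HOTPLUG_CPU
and non-hotplug variants: it walks all 65536 possible CPU addresses,
skips the boot cpu, and records every address that answers a SIGP sense
order in cpu_present_map, while cpu_possible_map is simply filled up to
max_cpus. A minimal sketch of the probe step, using the same
signal_processor()/__cpu_logical_map machinery as the hunk (the helper
name cpu_probe is hypothetical):

	/* Map physical address `addr` to logical slot `logical` and ask
	 * whether a CPU answers there; sigp_not_operational means no. */
	static int cpu_probe(int logical, __u16 addr)
	{
		__cpu_logical_map[logical] = addr;
		return signal_processor(logical, sigp_sense)
			!= sigp_not_operational;
	}
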
@@ -557,32 +546,27 @@ int __devinit start_secondary(void *cpuvoid)
         /* Print info about this processor */
         print_cpu_info(&S390_lowcore.cpu_data);
         /* cpu_idle will call schedule for us */
-        return cpu_idle(NULL);
+        cpu_idle();
+        return 0;
 }
 
 static void __init smp_create_idle(unsigned int cpu)
 {
-       struct pt_regs regs;
        struct task_struct *p;
 
        /*
         *  don't care about the psw and regs settings since we'll never
         *  reschedule the forked task.
         */
-       memset(&regs, 0, sizeof(struct pt_regs));
-       p = copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+       p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-
-       wake_up_forked_process(p);
-       init_idle(p, cpu);
-       unhash_process(p);
        current_set[cpu] = p;
 }
 
 /* Reserving and releasing of CPUs */
 
-static spinlock_t smp_reserve_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(smp_reserve_lock);
 static int smp_cpu_reserved[NR_CPUS];
 
 int
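
smp_create_idle() above drops the open-coded copy_process() /
wake_up_forked_process() / init_idle() / unhash_process() sequence in
favour of fork_idle(), which creates a per-cpu idle task that is never
placed on a runqueue, so the zeroed pt_regs template is no longer
needed. The resulting pattern, shown standalone (same shape as the
hunk):

	struct task_struct *p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;	/* consumed later by __cpu_up() */
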
@@ -643,6 +627,7 @@ __cpu_up(unsigned int cpu)
 {
        struct task_struct *idle;
         struct _lowcore    *cpu_lowcore;
+       struct stack_frame *sf;
         sigp_ccode          ccode;
        int                 curr_cpu;
 
@@ -666,9 +651,14 @@ __cpu_up(unsigned int cpu)
 
        idle = current_set[cpu];
         cpu_lowcore = lowcore_ptr[cpu];
-       cpu_lowcore->save_area[15] = idle->thread.ksp;
        cpu_lowcore->kernel_stack = (unsigned long)
                idle->thread_info + (THREAD_SIZE);
+       sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
+                                    - sizeof(struct pt_regs)
+                                    - sizeof(struct stack_frame));
+       memset(sf, 0, sizeof(struct stack_frame));
+       sf->gprs[9] = (unsigned long) sf;
+       cpu_lowcore->save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
        __asm__ __volatile__("stam  0,15,0(%0)"
                             : : "a" (&cpu_lowcore->access_regs_save_area)
@@ -679,7 +669,8 @@ __cpu_up(unsigned int cpu)
        eieio();
        signal_processor(cpu,sigp_restart);
 
-       while (!cpu_online(cpu));
+       while (!cpu_online(cpu))
+               cpu_relax();
        return 0;
 }
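
The new code in __cpu_up() hands the secondary cpu a properly formed
initial stack frame instead of a raw kernel stack pointer. A sketch of
the layout being built (s390 frames save r6-r15 in gprs[], so gprs[9]
is the r15 slot; treat the diagram as illustrative):

	/*  idle->thread_info + THREAD_SIZE --> +--------------------+
	 *                                      |   struct pt_regs   |
	 *                                      +--------------------+
	 *  sf = kernel_stack                   | struct stack_frame |
	 *       - sizeof(struct pt_regs)       |  (zeroed, so the   |
	 *       - sizeof(struct stack_frame) ->|  backchain is NULL)|
	 *                                      +--------------------+
	 *  save_area[15] seeds the new cpu's r15 with sf on restart.
	 */
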
 
@@ -695,6 +686,11 @@ __cpu_disable(void)
                return -EBUSY;
        }
 
+#ifdef CONFIG_PFAULT
+       /* Disable pfault pseudo page faults on this cpu. */
+       pfault_fini();
+#endif
+
        /* disable all external interrupts */
 
        cr_parms.start_ctl = 0;
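
The pfault_fini() call sits before the external-interrupt shutdown, so
the pseudo-page-fault token is deregistered while externals are still
enabled. Presumably pfault_init(), declared extern above
start_secondary(), is its counterpart on the bring-up side:

	/* cpu up:   pfault_init()  - register the pfault token
	 * cpu down: pfault_fini()  - deregister it, then mask externals */
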
@@ -729,13 +725,15 @@ void
 __cpu_die(unsigned int cpu)
 {
        /* Wait until target cpu is down */
-       while (!cpu_stopped(cpu));
+       while (!cpu_stopped(cpu))
+               cpu_relax();
        printk("Processor %d spun down\n", cpu);
 }
 
 void
 cpu_die(void)
 {
+       idle_task_exit();
        signal_processor(smp_processor_id(), sigp_stop);
        BUG();
        for(;;);
@@ -747,7 +745,7 @@ cpu_die(void)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       unsigned long async_stack;
+       unsigned long stack;
        unsigned int cpu;
         int i;
 
@@ -767,12 +765,18 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                lowcore_ptr[i] = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL|GFP_DMA, 
                                        sizeof(void*) == 8 ? 1 : 0);
-               async_stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
-               if (lowcore_ptr[i] == NULL || async_stack == 0ULL)
+               stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
+               if (lowcore_ptr[i] == NULL || stack == 0ULL)
                        panic("smp_boot_cpus failed to allocate memory\n");
 
                *(lowcore_ptr[i]) = S390_lowcore;
-               lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
+               lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
+#ifdef CONFIG_CHECK_STACK
+               stack = __get_free_pages(GFP_KERNEL,0);
+               if (stack == 0ULL)
+                       panic("smp_boot_cpus failed to allocate memory\n");
+               lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+#endif
        }
        set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
 
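
The CONFIG_CHECK_STACK hunk gives each secondary cpu a dedicated
one-page panic stack next to its async stack. s390 stacks grow
downward, so both lowcore fields record the top of their allocation:

	/* per-cpu stack pointers set up in smp_prepare_cpus():
	 *   async_stack = base + ASYNC_SIZE  (ASYNC_ORDER pages)
	 *   panic_stack = base + PAGE_SIZE   (1 page, stack-overflow
	 *                                     handling)              */
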
@@ -793,6 +797,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 void smp_cpus_done(unsigned int max_cpus)
 {
+       cpu_present_map = cpu_possible_map;
 }
 
 /*