fedora core 6 1.2949 + vserver 2.2.0

diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 0f1daa5..12cc019 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -18,7 +18,6 @@
 */
 #undef ENTRY_SYS_CPUS  /* syscall support for iCOD-like functionality */
 
-#include <linux/autoconf.h>
 
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/kernel_stat.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
+#include <linux/bitops.h>
 
 #include <asm/system.h>
 #include <asm/atomic.h>
-#include <asm/bitops.h>
 #include <asm/current.h>
 #include <asm/delay.h>
-#include <asm/pgalloc.h>       /* for flush_tlb_all() proto/macro */
+#include <asm/tlbflush.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>           /* for CPU_IRQ_REGION and friends */
 
 #define kDEBUG 0
 
-spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(smp_lock);
 
 volatile struct task_struct *smp_init_current_idle_task;
 
-static volatile int cpu_now_booting = 0;       /* track which CPU is booting */
+static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */
 
-unsigned long cache_decay_ticks;       /* declared by include/linux/sched.h */
-
-static int parisc_max_cpus = 1;
+static int parisc_max_cpus __read_mostly = 1;
 
 /* online cpus are ones that we've managed to bring up completely
  * possible cpus are all valid cpu 
@@ -73,8 +70,8 @@ static int parisc_max_cpus = 1;
  * empty in the beginning.
  */
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;      /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map = CPU_MASK_ALL;     /* Bitmap of Present CPUs */
+cpumask_t cpu_online_map   __read_mostly = CPU_MASK_NONE;      /* Bitmap of online CPUs */
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;       /* Bitmap of Present CPUs */
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
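
Two idioms recur throughout this patch: DEFINE_SPINLOCK() replaces the deprecated SPIN_LOCK_UNLOCKED static initializer, and __read_mostly moves rarely-written variables into a dedicated section so they stop sharing cache lines with write-hot data. A minimal userspace sketch of the section trick; the macro body mirrors <linux/cache.h> of this era, the demo program itself is hypothetical:

    #include <stdio.h>

    /* roughly what <linux/cache.h> did at the time */
    #define __read_mostly __attribute__((__section__(".data.read_mostly")))

    static int parisc_max_cpus __read_mostly = 1;

    int main(void)
    {
            /* the variable behaves normally; only its placement changes */
            printf("parisc_max_cpus = %d\n", parisc_max_cpus);
            return 0;
    }

Placement can be checked with `objdump -t a.out | grep read_mostly`.
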
@@ -157,7 +154,7 @@ halt_processor(void)
 
 
 irqreturn_t
-ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+ipi_interrupt(int irq, void *dev_id)
 {
        int this_cpu = smp_processor_id();
        struct cpuinfo_parisc *p = &cpu_data[this_cpu];
@@ -183,12 +180,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                while (ops) {
                        unsigned long which = ffz(~ops);
 
+                       ops &= ~(1 << which);
+
                        switch (which) {
+                       case IPI_NOP:
+#if (kDEBUG>=100)
+                               printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
+#endif /* kDEBUG */
+                               break;
+                               
                        case IPI_RESCHEDULE:
 #if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
 #endif /* kDEBUG */
-                               ops &= ~(1 << IPI_RESCHEDULE);
                                /*
                                 * Reschedule callback.  Everything to be
                                 * done is done by the interrupt return path.
@@ -199,7 +203,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
 #endif /* kDEBUG */
-                               ops &= ~(1 << IPI_CALL_FUNC);
                                {
                                        volatile struct smp_call_struct *data;
                                        void (*func)(void *info);
@@ -233,7 +236,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
 #endif /* kDEBUG */
-                               ops &= ~(1 << IPI_CPU_START);
 #ifdef ENTRY_SYS_CPUS
                                p->state = STATE_RUNNING;
 #endif
@@ -243,7 +245,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
 #endif /* kDEBUG */
-                               ops &= ~(1 << IPI_CPU_STOP);
 #ifdef ENTRY_SYS_CPUS
 #else
                                halt_processor();
@@ -254,15 +255,16 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
 #endif /* kDEBUG */
-                               ops &= ~(1 << IPI_CPU_TEST);
                                break;
 
                        default:
                                printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
                                        this_cpu, which);
-                               ops &= ~(1 << which);
                                return IRQ_NONE;
                        } /* Switch */
+               /* let in any pending interrupts */
+               local_irq_enable();
+               local_irq_disable();
                } /* while (ops) */
        }
        return IRQ_HANDLED;
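
Two changes land in this hunk: the per-case `ops &= ~(1 << IPI_xxx);` statements are hoisted into a single clear at the top of the loop, so no case arm (including the new IPI_NOP and the default arm) can forget to clear its bit and wedge the loop, and the enable/disable pair at the bottom lets other pending interrupts in between messages. A standalone model of the peel-lowest-set-bit dispatch, with __builtin_ctzl() standing in for the kernel's ffz(~ops); the enum values are assumed to match this file:

    #include <stdio.h>

    /* mirror of enum ipi_message_type (values assumed) */
    enum ipi_message_type { IPI_NOP, IPI_RESCHEDULE, IPI_CALL_FUNC,
                            IPI_CPU_START, IPI_CPU_STOP, IPI_CPU_TEST };

    static void dispatch(unsigned long ops)
    {
            while (ops) {
                    /* kernel: ffz(~ops) == index of the lowest set bit */
                    unsigned long which = __builtin_ctzl(ops);

                    /* clearing the bit once here covers every case arm */
                    ops &= ~(1UL << which);

                    switch (which) {
                    case IPI_NOP:        puts("IPI_NOP");        break;
                    case IPI_RESCHEDULE: puts("IPI_RESCHEDULE"); break;
                    case IPI_CALL_FUNC:  puts("IPI_CALL_FUNC");  break;
                    default:
                            printf("unknown IPI %lu\n", which);
                            return;
                    }
            }
    }

    int main(void)
    {
            dispatch((1UL << IPI_RESCHEDULE) | (1UL << IPI_CALL_FUNC));
            return 0;
    }
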
@@ -277,7 +279,7 @@ ipi_send(int cpu, enum ipi_message_type op)
 
        spin_lock_irqsave(&(p->lock),flags);
        p->pending_ipi |= 1 << op;
-       __raw_writel(IRQ_OFFSET(IPI_IRQ), cpu_data[cpu].hpa);
+       gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
        spin_unlock_irqrestore(&(p->lock),flags);
 }
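
ipi_send() itself is a two-step pattern: publish the request bit in the target's pending_ipi word under that CPU's lock, then poke the target's hpa (its external-interrupt register) with the IPI vector, now computed directly as IPI_IRQ - CPU_IRQ_BASE since the old IRQ_OFFSET() macro is gone. A rough userspace analogue, with a flag standing in for the gsc_writel() kick (everything here is illustrative):

    #include <pthread.h>
    #include <stdio.h>

    /* fake per-CPU state; "kicked" stands in for the write to the HPA */
    struct fake_cpu {
            pthread_mutex_t lock;
            unsigned long   pending_ipi;
            int             kicked;
    };

    static void ipi_send_model(struct fake_cpu *p, int op)
    {
            pthread_mutex_lock(&p->lock);
            p->pending_ipi |= 1UL << op;  /* publish the request bit */
            p->kicked = 1;                /* ~ gsc_writel(vector, hpa) */
            pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
            struct fake_cpu cpu = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

            ipi_send_model(&cpu, 1);      /* 1 ~ IPI_RESCHEDULE */
            printf("pending=%#lx kicked=%d\n", cpu.pending_ipi, cpu.kicked);
            return 0;
    }
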
 
@@ -298,8 +300,8 @@ send_IPI_allbutself(enum ipi_message_type op)
 {
        int i;
        
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i) && i != smp_processor_id())
+       for_each_online_cpu(i) {
+               if (i != smp_processor_id())
                        send_IPI_single(i, op);
        }
 }
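
for_each_online_cpu() walks only the CPUs set in cpu_online_map, replacing the open-coded scan of all NR_CPUS slots with a cpu_online() test each time. A userspace model of the idiom; the simplified macro expansion and mask value are illustrative, not the kernel's real implementation:

    #include <stdio.h>

    #define NR_CPUS 8
    static unsigned long cpu_online_mask = 0x0bUL; /* CPUs 0, 1, 3 online */

    /* simplified expansion; the kernel walks cpu_online_map instead */
    #define for_each_online_cpu(i) \
            for ((i) = 0; (i) < NR_CPUS; (i)++) \
                    if (cpu_online_mask & (1UL << (i)))

    int main(void)
    {
            int i, self = 0;    /* pretend we are CPU 0 */

            for_each_online_cpu(i)
                    if (i != self)
                            printf("send IPI to CPU %d\n", i);
            return 0;
    }
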
@@ -314,6 +316,12 @@ smp_send_start(void)       { send_IPI_allbutself(IPI_CPU_START); }
 void 
 smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
 
+void
+smp_send_all_nop(void)
+{
+       send_IPI_allbutself(IPI_NOP);
+}
+
 
 /**
  * Run a function on all other CPUs.
@@ -332,13 +340,18 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
 {
        struct smp_call_struct data;
        unsigned long timeout;
-       static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+       static DEFINE_SPINLOCK(lock);
+       int retries = 0;
 
        if (num_online_cpus() < 2)
                return 0;
 
        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());
+
+       /* can also deadlock if IPIs are disabled */
+       WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
+
        
        data.func = func;
        data.info = info;
@@ -365,21 +378,22 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
        /*  Send a message to all other CPUs and wait for them to respond  */
        send_IPI_allbutself(IPI_CALL_FUNC);
 
+ retry:
        /*  Wait for response  */
        timeout = jiffies + HZ;
        while ( (atomic_read (&data.unstarted_count) > 0) &&
                time_before (jiffies, timeout) )
                barrier ();
 
+       if (atomic_read (&data.unstarted_count) > 0) {
+               printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
+                     smp_processor_id(), ++retries);
+               goto retry;
+       }
        /* We either got one or timed out. Release the lock */
 
        mb();
        smp_call_function_data = NULL;
-       if (atomic_read (&data.unstarted_count) > 0) {
-               printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d)\n",
-                     smp_processor_id());
-               return -ETIMEDOUT;
-       }
 
        while (wait && atomic_read (&data.unfinished_count) > 0)
                        barrier ();
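
The timeout handling changes meaning here: the old code gave up after one HZ-long wait and returned -ETIMEDOUT, even though `data` lives on this function's stack and a slow CPU could still dereference it after the return; the new code logs each timeout and keeps waiting until every CPU has picked the call up. The shape of that loop in a standalone sketch, with C11 atomics and time() standing in for atomic_read() and jiffies (hypothetical demo):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_int unstarted_count;  /* CPUs yet to start the call */

    static void wait_for_start(void)
    {
            int retries = 0;
            time_t timeout;
    retry:
            timeout = time(NULL) + 1;   /* ~ jiffies + HZ */
            while (atomic_load(&unstarted_count) > 0 &&
                   time(NULL) < timeout)
                    ;                   /* ~ barrier() spin */

            if (atomic_load(&unstarted_count) > 0) {
                    printf("SMP CALL FUNCTION TIMED OUT! try %d\n",
                           ++retries);
                    goto retry;         /* never bail with live stack data */
            }
    }

    int main(void)
    {
            atomic_store(&unstarted_count, 0); /* all "CPUs" already started */
            wait_for_start();
            puts("all started");
            return 0;
    }
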
@@ -394,25 +408,10 @@ EXPORT_SYMBOL(smp_call_function);
  * as we want to ensure all TLB's flushed before proceeding.
  */
 
-extern void flush_tlb_all_local(void);
-
 void
 smp_flush_tlb_all(void)
 {
-       on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
-}
-
-
-void 
-smp_do_timer(struct pt_regs *regs)
-{
-       int cpu = smp_processor_id();
-       struct cpuinfo_parisc *data = &cpu_data[cpu];
-
-        if (!--data->prof_counter) {
-               data->prof_counter = data->prof_multiplier;
-               update_process_times(user_mode(regs));
-       }
+       on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
 }
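
Making flush_tlb_all_local() take a void * (see the flush_tlb_all_local(NULL) call in smp_callin() below) lets it be passed to on_each_cpu() without the old function-pointer cast, which was undefined behaviour; smp_do_timer() disappears because each CPU now drives its own ticks from the interval timer started by start_cpu_itimer() later in this patch. A userspace model of the callback-typing fix; the harness names are hypothetical:

    #include <stdio.h>

    typedef void (*smp_call_func_t)(void *info);

    static void on_each_cpu_model(smp_call_func_t func, void *info)
    {
            func(info);  /* the real on_each_cpu() runs func on every CPU */
    }

    /* signature now matches the callback type: no cast needed */
    static void flush_tlb_all_local(void *unused)
    {
            (void)unused;
            puts("flush local TLB");
    }

    int main(void)
    {
            on_each_cpu_model(flush_tlb_all_local, NULL);
            return 0;
    }
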
 
 /*
@@ -421,8 +420,9 @@ smp_do_timer(struct pt_regs *regs)
 static void __init
 smp_cpu_init(int cpunum)
 {
-       extern int init_per_cpu(int);  /* arch/parisc/kernel/setup.c */
+       extern int init_per_cpu(int);  /* arch/parisc/kernel/processor.c */
        extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
+       extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
 
        /* Set modes and Enable floating point coprocessor */
        (void) init_per_cpu(cpunum);
@@ -448,6 +448,7 @@ smp_cpu_init(int cpunum)
        enter_lazy_tlb(&init_mm, current);
 
        init_IRQ();   /* make sure no IRQ's are enabled or pending */
+       start_cpu_itimer();
 }
 
 
@@ -457,13 +458,13 @@ smp_cpu_init(int cpunum)
  */
 void __init smp_callin(void)
 {
-       extern void cpu_idle(void);     /* arch/parisc/kernel/process.c */
        int slave_id = cpu_now_booting;
 #if 0
        void *istack;
 #endif
 
        smp_cpu_init(slave_id);
+       preempt_disable();
 
 #if 0  /* NOT WORKING YET - see entry.S */
        istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
@@ -475,7 +476,7 @@ void __init smp_callin(void)
 #endif
 
        flush_cache_all_local(); /* start with known state */
-       flush_tlb_all_local();
+       flush_tlb_all_local(NULL);
 
        local_irq_enable();  /* Interrupts have been off until now */
 
@@ -485,24 +486,6 @@ void __init smp_callin(void)
        panic("smp_callin() AAAAaaaaahhhh....\n");
 }
 
-/*
- * Create the idle task for a new Slave CPU.  DO NOT use kernel_thread()
- * because that could end up calling schedule(). If it did, the new idle
- * task could get scheduled before we had a chance to remove it from the
- * run-queue...
- */
-static struct task_struct *fork_by_hand(void)
-{
-       struct pt_regs regs;  
-
-       /*
-        * don't care about the regs settings since
-        * we'll never reschedule the forked task.
-        */
-       return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
-}
-
-
 /*
  * Bring one cpu online.
  */
@@ -521,14 +504,11 @@ int __init smp_boot_one_cpu(int cpuid)
         * Sheesh . . .
         */
 
-       idle = fork_by_hand();
+       idle = fork_idle(cpuid);
        if (IS_ERR(idle))
                panic("SMP: fork failed for CPU:%d", cpuid);
 
-       wake_up_forked_process(idle);
-       init_idle(idle, cpuid);
-       unhash_process(idle);
-       idle->thread_info->cpu = cpuid;
+       task_thread_info(idle)->cpu = cpuid;
 
        /* Let _start know what logical CPU we're booting
        ** (offset into init_tasks[],cpu_data[])
@@ -553,7 +533,7 @@ int __init smp_boot_one_cpu(int cpuid)
        ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 
        ** contents of memory are valid."
        */
-       __raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpuid].hpa);
+       gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
        mb();
 
        /* 
@@ -603,8 +583,6 @@ void __devinit smp_prepare_boot_cpu(void)
 
        cpu_set(bootstrap_processor, cpu_online_map);
        cpu_set(bootstrap_processor, cpu_present_map);
-
-       cache_decay_ticks = HZ/100;     /* FIXME very rough.  */
 }
 
 
@@ -630,7 +608,7 @@ void smp_cpus_done(unsigned int cpu_max)
 }
 
 
-int __devinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
 {
        if (cpu != 0 && cpu < parisc_max_cpus)
                smp_boot_one_cpu(cpu);
@@ -656,14 +634,13 @@ int sys_cpus(int argc, char **argv)
        if ( argc == 1 ){
        
 #ifdef DUMP_MORE_STATE
-               for(i=0; i<NR_CPUS; i++) {
+               for_each_online_cpu(i) {
                        int cpus_per_line = 4;
-                       if(cpu_online(i)) {
-                               if (j++ % cpus_per_line)
-                                       printk(" %3d",i);
-                               else
-                                       printk("\n %3d",i);
-                       }
+
+                       if (j++ % cpus_per_line)
+                               printk(" %3d",i);
+                       else
+                               printk("\n %3d",i);
                }
                printk("\n"); 
 #else
@@ -672,9 +649,7 @@ int sys_cpus(int argc, char **argv)
        } else if((argc==2) && !(strcmp(argv[1],"-l"))) {
                printk("\nCPUSTATE  TASK CPUNUM CPUID HARDCPU(HPA)\n");
 #ifdef DUMP_MORE_STATE
-               for(i=0;i<NR_CPUS;i++) {
-                       if (!cpu_online(i))
-                               continue;
+               for_each_online_cpu(i) {
                        if (cpu_data[i].cpuid != NO_PROC_ID) {
                                switch(cpu_data[i].state) {
                                        case STATE_RENDEZVOUS:
@@ -708,9 +683,7 @@ int sys_cpus(int argc, char **argv)
        } else if ((argc==2) && !(strcmp(argv[1],"-s"))) { 
 #ifdef DUMP_MORE_STATE
                printk("\nCPUSTATE   CPUID\n");
-               for (i=0;i<NR_CPUS;i++) {
-                       if (!cpu_online(i))
-                               continue;
+               for_each_online_cpu(i) {
                        if (cpu_data[i].cpuid != NO_PROC_ID) {
                                switch(cpu_data[i].state) {
                                        case STATE_RENDEZVOUS: