*/
#undef ENTRY_SYS_CPUS /* syscall support for iCOD-like functionality */
-#include <linux/autoconf.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/delay.h>
+#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/atomic.h>
-#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
-#include <asm/pgalloc.h> /* for flush_tlb_all() proto/macro */
+#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
#define kDEBUG 0
-spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;
-
-spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(smp_lock);
volatile struct task_struct *smp_init_current_idle_task;
-static volatile int cpu_now_booting = 0; /* track which CPU is booting */
-static int parisc_max_cpus = -1; /* Command line */
+static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */
+
+static int parisc_max_cpus __read_mostly = 1;
+
+/* online cpus are ones that we've managed to bring up completely
+ * possible cpus are all valid cpus
+ * present cpus are all detected cpus
+ *
+ * On startup we bring up the "present" cpus. Since we discover
+ * CPUs later, we add them as hotplug, so the present cpu mask
+ * initially holds only the boot cpu.
+ */
-unsigned long cache_decay_ticks; /* declared by include/linux/sched.h */
-cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map = CPU_MASK_NONE; /* Bitmap of Present CPUs */
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Possible CPUs */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
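(Aside: the intended nesting of the three masks -- online within present
within possible -- can be checked with nothing beyond the generic cpumask
API. A minimal sketch, not part of this patch; check_cpu_masks() is a
hypothetical helper:)

	/* every online cpu must be present, every present cpu possible */
	static void __init check_cpu_masks(void)
	{
		int cpu;

		for_each_online_cpu(cpu)
			BUG_ON(!cpu_isset(cpu, cpu_present_map));
		for_each_cpu_mask(cpu, cpu_present_map)
			BUG_ON(!cpu_isset(cpu, cpu_possible_map));
	}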
while (ops) {
unsigned long which = ffz(~ops);
+ ops &= ~(1 << which);
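+		/* ffz(~ops) picked the lowest set bit in ops (e.g.
+		 * ops == 0x5 decodes bit 0 now and bit 2 on the next
+		 * pass); clearing it above means each pending IPI is
+		 * handled exactly once */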
+
switch (which) {
+ case IPI_NOP:
+#if (kDEBUG>=100)
+ printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
+#endif /* kDEBUG */
+ break;
+
case IPI_RESCHEDULE:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
#endif /* kDEBUG */
- ops &= ~(1 << IPI_RESCHEDULE);
/*
* Reschedule callback. Everything to be
* done is done by the interrupt return path.
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
#endif /* kDEBUG */
- ops &= ~(1 << IPI_CALL_FUNC);
{
volatile struct smp_call_struct *data;
void (*func)(void *info);
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
#endif /* kDEBUG */
- ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
p->state = STATE_RUNNING;
#endif
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
#endif /* kDEBUG */
- ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
halt_processor();
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
#endif /* kDEBUG */
- ops &= ~(1 << IPI_CPU_TEST);
break;
default:
printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
this_cpu, which);
- ops &= ~(1 << which);
return IRQ_NONE;
} /* Switch */
} /* while (ops) */
spin_lock_irqsave(&(p->lock),flags);
p->pending_ipi |= 1 << op;
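+	/* writing N to a CPU's hpa sets bit N in that CPU's EIR and
+	 * so raises external interrupt N there; IPI_IRQ - CPU_IRQ_BASE
+	 * is the EIR bit the IPI handler is wired to */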
- __raw_writel(IRQ_OFFSET(IPI_IRQ), cpu_data[cpu].hpa);
+ gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
spin_unlock_irqrestore(&(p->lock),flags);
}
{
int i;
- for (i = 0; i < parisc_max_cpus; i++) {
- if (cpu_online(i) && i != smp_processor_id())
+ for_each_online_cpu(i) {
+ if (i != smp_processor_id())
send_IPI_single(i, op);
}
}
void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
+void
+smp_send_all_nop(void)
+{
+ send_IPI_allbutself(IPI_NOP);
+}
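+
+/* a NOP IPI does no work in the handler; the point is presumably just
+ * to bounce the target CPUs through their interrupt path, e.g. so they
+ * pick up an updated interrupt mask */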
+
/**
* Run a function on all other CPUs.
{
struct smp_call_struct data;
unsigned long timeout;
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(lock);
+ int retries = 0;
+
+ if (num_online_cpus() < 2)
+ return 0;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
+
+ /* can also deadlock if IPIs are disabled */
+ WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
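+	/* i.e. the IPI bit must be set in the local EIEM: if two CPUs
+	 * call smp_call_function() at once, each must still be able to
+	 * service the other's IPI_CALL_FUNC, or both spin forever */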
+
data.func = func;
data.info = info;
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_allbutself(IPI_CALL_FUNC);
+ retry:
/* Wait for response */
timeout = jiffies + HZ;
while ( (atomic_read (&data.unstarted_count) > 0) &&
time_before (jiffies, timeout) )
barrier ();
+ if (atomic_read (&data.unstarted_count) > 0) {
+ printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
+ smp_processor_id(), ++retries);
+ goto retry;
+ }
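+
+	/* retrying forever, rather than returning -ETIMEDOUT as before,
+	 * is arguably the safer choice: a straggling CPU could otherwise
+	 * dereference &data after this stack frame is gone */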
	/* All CPUs have picked up the call. Release the lock. */
mb();
smp_call_function_data = NULL;
- if (atomic_read (&data.unstarted_count) > 0) {
- printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d)\n",
- smp_processor_id());
- return -ETIMEDOUT;
- }
while (wait && atomic_read (&data.unfinished_count) > 0)
barrier ();
EXPORT_SYMBOL(smp_call_function);
-
-
-/*
- * Setup routine for controlling SMP activation
- *
- * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
- * activation entirely (the MPS table probe still happens, though).
- *
- * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
- * greater than 0, limits the maximum number of CPUs activated in
- * SMP mode to <NUM>.
- */
-
-static int __init nosmp(char *str)
-{
- parisc_max_cpus = 0;
- return 1;
-}
-
-__setup("nosmp", nosmp);
-
-static int __init maxcpus(char *str)
-{
- get_option(&str, &parisc_max_cpus);
- return 1;
-}
-
-__setup("maxcpus=", maxcpus);
-
/*
* Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
* as we want to ensure all TLB's flushed before proceeding.
*/
-extern void flush_tlb_all_local(void);
-
void
smp_flush_tlb_all(void)
{
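+	/* on_each_cpu() runs the function on every online CPU including
+	 * this one and, with wait == 1, only returns once all of them
+	 * have finished */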
- on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
+ on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
}
*/
void __init smp_callin(void)
{
- extern void cpu_idle(void); /* arch/parisc/kernel/process.c */
int slave_id = cpu_now_booting;
#if 0
void *istack;
#endif
smp_cpu_init(slave_id);
+ preempt_disable();
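+	/* the idle task must not be preempted before it reaches
+	 * cpu_idle() */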
#if 0 /* NOT WORKING YET - see entry.S */
istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
#endif
flush_cache_all_local(); /* start with known state */
- flush_tlb_all_local();
+ flush_tlb_all_local(NULL);
local_irq_enable(); /* Interrupts have been off until now */
panic("smp_callin() AAAAaaaaahhhh....\n");
}
-#if 0
-/*
- * Create the idle task for a new Slave CPU. DO NOT use kernel_thread()
- * because that could end up calling schedule(). If it did, the new idle
- * task could get scheduled before we had a chance to remove it from the
- * run-queue...
- */
-static struct task_struct *fork_by_hand(void)
-{
- struct pt_regs regs;
-
- /*
- * don't care about the regs settings since
- * we'll never reschedule the forked task.
- */
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
-}
-
-
/*
* Bring one cpu online.
*/
-int __init smp_boot_one_cpu(int cpuid, int cpunum)
+int __init smp_boot_one_cpu(int cpuid)
{
struct task_struct *idle;
long timeout;
* Sheesh . . .
*/
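+	/* fork_idle() clones the idle task and calls init_idle() for
+	 * us; the new task is never put on a runqueue, so it cannot run
+	 * before we are ready for it */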
- idle = fork_by_hand();
+ idle = fork_idle(cpuid);
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
- wake_up_forked_process(idle);
- init_idle(idle, cpunum);
- unhash_process(idle);
- idle->thread_info->cpu = cpunum;
+ task_thread_info(idle)->cpu = cpuid;
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
- cpu_now_booting = cpunum;
+ cpu_now_booting = cpuid;
/*
** boot strap code needs to know the task address since
smp_init_current_idle_task = idle ;
mb();
+ printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
+
/*
** This gets PDC to release the CPU from a very tight loop.
- ** See MEM_RENDEZ comments in head.S.
+ **
+ ** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
+ ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
+ ** is executed after receiving the rendezvous signal (an interrupt to
+ ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
+ ** contents of memory are valid."
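+	**
+	** Writing (TIMER_IRQ - CPU_IRQ_BASE), i.e. 0, to the slave's
+	** hpa raises EIR{0} -- precisely that rendezvous signal.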
*/
- __raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
+ gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
mb();
/*
* Once the "monarch CPU" sees the bit change, it can move on.
*/
for (timeout = 0; timeout < 10000; timeout++) {
- if(cpu_online(cpunum)) {
+ if(cpu_online(cpuid)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
smp_init_current_idle_task = NULL;
alive:
/* Remember the Slave data */
#if (kDEBUG>=100)
- printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
- cpuid, cpunum, timeout * 100);
+ printk(KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
+ cpuid, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
- cpu_data[cpunum].state = STATE_RUNNING;
+ cpu_data[cpuid].state = STATE_RUNNING;
#endif
return 0;
}
-#endif
-
void __devinit smp_prepare_boot_cpu(void)
{
#endif
/* Setup BSP mappings */
- printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
- init_task.thread_info->cpu = bootstrap_processor;
- current->thread_info->cpu = bootstrap_processor;
+ printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
cpu_set(bootstrap_processor, cpu_online_map);
- cpu_set(bootstrap_processor, cpu_possible_map);
-
- /* Mark Boostrap processor as present */
- current->active_mm = &init_mm;
-
- cache_decay_ticks = HZ/100; /* FIXME very rough. */
+ cpu_set(bootstrap_processor, cpu_present_map);
}
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ cpus_clear(cpu_present_map);
+ cpu_set(0, cpu_present_map);
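+
+	/* only the boot cpu is known this early; the rest are found by
+	 * the inventory later and come in through the hotplug path (see
+	 * the mask comment at the top of this file) */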
- if (max_cpus != -1)
- printk(KERN_INFO "SMP: Limited to %d CPUs\n", max_cpus);
-
- printk(KERN_INFO "SMP: Monarch CPU activated (%lu.%02lu BogoMIPS)\n",
- (cpu_data[0].loops_per_jiffy + 25) / 5000,
- ((cpu_data[0].loops_per_jiffy + 25) / 50) % 100);
-
- return;
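+	/* max_cpus comes from the generic "maxcpus="/"nosmp" command
+	 * line handling in init/main.c, which is what made the
+	 * arch-private __setup() hooks redundant */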
+ parisc_max_cpus = max_cpus;
+ if (!max_cpus)
+ printk(KERN_INFO "SMP mode deactivated.\n");
}
int __devinit __cpu_up(unsigned int cpu)
{
+ if (cpu != 0 && cpu < parisc_max_cpus)
+ smp_boot_one_cpu(cpu);
+
return cpu_online(cpu) ? 0 : -ENOSYS;
}
if ( argc == 1 ){
#ifdef DUMP_MORE_STATE
- for(i=0; i<NR_CPUS; i++) {
+ for_each_online_cpu(i) {
int cpus_per_line = 4;
- if(cpu_online(i)) {
- if (j++ % cpus_per_line)
- printk(" %3d",i);
- else
- printk("\n %3d",i);
- }
+
+ if (j++ % cpus_per_line)
+ printk(" %3d",i);
+ else
+ printk("\n %3d",i);
}
printk("\n");
#else
} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
- for(i=0;i<NR_CPUS;i++) {
- if (!cpu_online(i))
- continue;
+ for_each_online_cpu(i) {
if (cpu_data[i].cpuid != NO_PROC_ID) {
switch(cpu_data[i].state) {
case STATE_RENDEZVOUS:
} else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
#ifdef DUMP_MORE_STATE
printk("\nCPUSTATE CPUID\n");
- for (i=0;i<NR_CPUS;i++) {
- if (!cpu_online(i))
- continue;
+ for_each_online_cpu(i) {
if (cpu_data[i].cpuid != NO_PROC_ID) {
switch(cpu_data[i].state) {
case STATE_RENDEZVOUS: