#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
+#include <linux/profile.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
-#include <asm/hardirq.h>
#include <asm/cpudata.h>
#define IRQ_RESCHEDULE 13
extern void calibrate_delay(void);
extern volatile int smp_processors_ready;
-extern unsigned long cpu_present_map;
-extern int smp_num_cpus;
-extern int smp_threads_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
-extern int smp_activated;
-extern volatile int __cpu_number_map[NR_CPUS];
-extern volatile int __cpu_logical_map[NR_CPUS];
-extern volatile unsigned long ipi_count;
-extern volatile int smp_process_available;
-extern volatile int smp_commenced;
+
+extern cpumask_t smp_commenced_mask;
+
extern int __smp4m_processor_id(void);
/*#define SMP_DEBUG*/
local_flush_cache_all();
local_flush_tlb_all();
- set_irq_udt(boot_cpu_id);
-
/* Get our local ticker going. */
smp_setup_percpu_timer();
* the SMP initialization the master will be just allowed
* to call the scheduler code.
*/
- init_idle();
-
/* Allow master to continue. */
- swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+ swap(&cpu_callin_map[cpuid], 1);
+ /* XXX: What's up with all the flushes? */
local_flush_cache_all();
local_flush_tlb_all();
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- while(!smp_commenced)
- barrier();
-
- local_flush_cache_all();
- local_flush_tlb_all();
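+ /* Spin here until the master sets our bit in smp_commenced_mask. */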
+ while (!cpu_isset(cpuid, smp_commenced_mask))
+ mb();
local_irq_enable();
+
+ cpu_set(cpuid, cpu_online_map);
+ /* last one in gets all the interrupts (for testing) */
+ set_irq_udt(boot_cpu_id);
}
-extern int cpu_idle(void *unused);
extern void init_IRQ(void);
extern void cpu_panic(void);
-extern int start_secondary(void *unused);
/*
* Cycle through the processors asking the PROM to start each one.
void __init smp4m_boot_cpus(void)
{
- int cpucount = 0;
- int i, mid;
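+ /* Only the boot cpu's local ticker is set up here; each secondary is started individually by smp4m_boot_one_cpu(). */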
+ smp_setup_percpu_timer();
+ local_flush_cache_all();
+}
- printk("Entering SMP Mode...\n");
+int smp4m_boot_one_cpu(int i)
+{
+ extern unsigned long sun4m_cpu_startup;
+ unsigned long *entry = &sun4m_cpu_startup;
+ struct task_struct *p;
+ int timeout;
+ int cpu_node;
- local_irq_enable();
- cpu_present_map = 0;
+ cpu_find_by_mid(i, &cpu_node);
- for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
- cpu_present_map |= (1<<mid);
+ /* Cook up an idler for this guy. */
+ p = fork_idle(i);
+ current_set[i] = task_thread_info(p);
+ /* See trampoline.S for details... */
+ entry += ((i-1) * 3);
- for(i=0; i < NR_CPUS; i++) {
- __cpu_number_map[i] = -1;
- __cpu_logical_map[i] = -1;
+ /*
+ * Initialize the context table.
+ * Since the call to prom_startcpu() trashes the structure,
+ * we need to re-initialize it for each cpu.
+ */
+ smp_penguin_ctable.which_io = 0;
+ smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+ smp_penguin_ctable.reg_size = 0;
+
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ printk("Starting CPU %d at %p\n", i, entry);
+ local_flush_cache_all();
+ prom_startcpu(cpu_node,
+ &smp_penguin_ctable, 0, (char *)entry);
+
+ /* wheee... it's going... */
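+ /* Give the new cpu up to ~2 seconds (10000 * 200us) to set its callin flag. */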
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if (cpu_callin_map[i])
+ break;
+ udelay(200);
}
- __cpu_number_map[boot_cpu_id] = 0;
- __cpu_logical_map[0] = boot_cpu_id;
- current_thread_info()->cpu = boot_cpu_id;
+ if (!(cpu_callin_map[i])) {
+ printk("Processor %d is stuck.\n", i);
+ return -ENODEV;
+ }
- smp_store_cpu_info(boot_cpu_id);
- set_irq_udt(boot_cpu_id);
- smp_setup_percpu_timer();
local_flush_cache_all();
- if(cpu_find_by_instance(1, NULL, NULL))
- return; /* Not an MP box. */
- for(i = 0; i < NR_CPUS; i++) {
- if(i == boot_cpu_id)
- continue;
-
- if(cpu_present_map & (1 << i)) {
- extern unsigned long sun4m_cpu_startup;
- unsigned long *entry = &sun4m_cpu_startup;
- struct task_struct *p;
- int timeout;
-
- /* Cook up an idler for this guy. */
- kernel_thread(start_secondary, NULL, CLONE_IDLETASK);
-
- cpucount++;
-
- p = prev_task(&init_task);
-
- init_idle(p, i);
-
- current_set[i] = p->thread_info;
-
- unhash_process(p);
-
- /* See trampoline.S for details... */
- entry += ((i-1) * 3);
-
- /*
- * Initialize the contexts table
- * Since the call to prom_startcpu() trashes the structure,
- * we need to re-initialize it for each cpu
- */
- smp_penguin_ctable.which_io = 0;
- smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
- smp_penguin_ctable.reg_size = 0;
-
- /* whirrr, whirrr, whirrrrrrrrr... */
- printk("Starting CPU %d at %p\n", i, entry);
- local_flush_cache_all();
- prom_startcpu(cpu_data(i).prom_node,
- &smp_penguin_ctable, 0, (char *)entry);
-
- /* wheee... it's going... */
- for(timeout = 0; timeout < 10000; timeout++) {
- if(cpu_callin_map[i])
- break;
- udelay(200);
- }
- if(cpu_callin_map[i]) {
- /* Another "Red Snapper". */
- __cpu_number_map[i] = i;
- __cpu_logical_map[i] = i;
- } else {
- cpucount--;
- printk("Processor %d is stuck.\n", i);
- }
- }
- if(!(cpu_callin_map[i])) {
- cpu_present_map &= ~(1 << i);
- __cpu_number_map[i] = -1;
+ return 0;
+}
+
+void __init smp4m_smp_done(void)
+{
+ int i, first;
+ int *prev;
+
+ /* set up the cpu list used for irq rotation */
+ first = 0;
+ prev = &first;
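+ /* prev tracks the previous online cpu's ->next slot (initially &first); closing the loop below links the last online cpu back to the first. */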
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_online(i)) {
+ *prev = i;
+ prev = &cpu_data(i).next;
}
}
+ *prev = first;
local_flush_cache_all();
- if(cpucount == 0) {
- printk("Error: only one Processor found.\n");
- cpu_present_map = (1 << smp_processor_id());
- } else {
- unsigned long bogosum = 0;
- for(i = 0; i < NR_CPUS; i++) {
- if(cpu_present_map & (1 << i))
- bogosum += cpu_data(i).udelay_val;
- }
- printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
- cpucount + 1,
- bogosum/(500000/HZ),
- (bogosum/(5000/HZ))%100);
- smp_activated = 1;
- smp_num_cpus = cpucount + 1;
- }
/* Free unneeded trap tables */
- if (!(cpu_present_map & (1 << 1))) {
+ if (!cpu_isset(1, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu1));
- set_page_count(virt_to_page(trapbase_cpu1), 1);
+ init_page_count(virt_to_page(trapbase_cpu1));
free_page((unsigned long)trapbase_cpu1);
totalram_pages++;
num_physpages++;
}
- if (!(cpu_present_map & (1 << 2))) {
+ if (!cpu_isset(2, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu2));
- set_page_count(virt_to_page(trapbase_cpu2), 1);
+ init_page_count(virt_to_page(trapbase_cpu2));
free_page((unsigned long)trapbase_cpu2);
totalram_pages++;
num_physpages++;
}
- if (!(cpu_present_map & (1 << 3))) {
+ if (!cpu_isset(3, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu3));
- set_page_count(virt_to_page(trapbase_cpu3), 1);
+ init_page_count(virt_to_page(trapbase_cpu3));
free_page((unsigned long)trapbase_cpu3);
totalram_pages++;
num_physpages++;
*/
void smp4m_irq_rotate(int cpu)
{
+ int next = cpu_data(cpu).next;
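+ /* Hand undirected interrupts to the next online cpu in the ring built by smp4m_smp_done(). */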
+ if (next != cpu)
+ set_irq_udt(next);
}
/* Cross calls, in order to work efficiently and atomically do all
void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
{
static unsigned long smp_cpu_in_msg[NR_CPUS];
- unsigned long mask;
+ cpumask_t mask;
int me = smp_processor_id();
int irq, i;
smp_cpu_in_msg[me]++;
if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
- mask = cpu_present_map;
+ mask = cpu_online_map;
if(target == MSG_ALL_BUT_SELF)
- mask &= ~(1 << me);
+ cpu_clear(me, mask);
for(i = 0; i < 4; i++) {
- if(mask & (1 << i))
+ if (cpu_isset(i, mask))
set_cpu_int(i, irq);
}
} else {
unsigned long arg3;
unsigned long arg4;
unsigned long arg5;
- unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
- unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
+ unsigned long processors_in[SUN4M_NCPUS]; /* Set when ipi entered. */
+ unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;
-static spinlock_t cross_call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
- if(smp_processors_ready) {
- register int ncpus = smp_num_cpus;
+ register int ncpus = SUN4M_NCPUS;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
/* Init receive/complete mapping, plus fire the IPI's off. */
{
- register unsigned long mask;
+ cpumask_t mask = cpu_online_map;
register int i;
- mask = (cpu_present_map & ~(1 << smp_processor_id()));
+ cpu_clear(smp_processor_id(), mask);
for(i = 0; i < ncpus; i++) {
- if(mask & (1 << i)) {
+ if (cpu_isset(i, mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
set_cpu_int(i, IRQ_CROSS_CALL);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
- }
}
/* Running cross calls. */
ccall_info.processors_out[i] = 1;
}
-extern void sparc_do_profile(unsigned long pc, unsigned long o7);
-
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
int cpu = smp_processor_id();
clear_profile_irq(cpu);
- if(!user_mode(regs))
- sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
+ profile_tick(CPU_PROFILING, regs);
if(!--prof_counter(cpu)) {
int user = user_mode(regs);
void __init sun4m_init_smp(void)
{
- BTFIXUPSET_BLACKBOX(smp_processor_id, smp4m_blackbox_id);
+ BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(__smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}