vserver 1.9.5.x5
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 2bdc18f..743fe45 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
 #include <asm/desc.h>
 #include <asm/voyager.h>
 #include <asm/vic.h>
-#include <asm/pgalloc.h>
 #include <asm/mtrr.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
-#include <asm/desc.h>
 #include <asm/arch_hooks.h>
 
 #include <linux/irq.h>
 
-int reboot_smp = 0;
-
 /* TLB state -- visible externally, indexed physically */
-struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
 
 /* CPU IRQ affinity -- set to all ones initially */
 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1]  = ~0UL };
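The hunk above converts the TLB state from an NR_CPUS-sized array to the generic per-CPU infrastructure. A minimal sketch of the declare/access pattern in this kernel generation (identifiers taken from this file; the local-CPU accessor shown is the generic __get_cpu_var() helper):

	#include <linux/percpu.h>

	/* one instance per CPU, replacing struct tlb_state cpu_tlbstate[NR_CPUS] */
	DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned
		= { &init_mm, 0 };

	/* access the state of an arbitrary CPU ... */
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);

	/* ... or of the CPU we are currently running on */
	__get_cpu_var(cpu_tlbstate).state = TLBSTATE_OK;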
@@ -154,7 +150,7 @@ static inline void
 send_CPI_allbutself(__u8 cpi)
 {
        __u8 cpu = smp_processor_id();
-       __u32 mask = cpus_coerce(cpu_online_map) & ~(1 << cpu);
+       __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
        send_CPI(mask, cpi);
 }
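cpus_coerce() disappears from the cpumask API in this series; cpus_addr() instead returns the underlying unsigned long array, and word 0 of it carries the same physical CPU mask on a box with at most 32 CPUs, which is what this file already assumes (__u32 masks throughout). A short sketch of the idiom that recurs through the rest of the patch:

	/* old: __u32 mask = cpus_coerce(cpu_online_map) & ~(1 << cpu); */
	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << smp_processor_id());
	send_CPI(mask, cpi);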
 
@@ -255,7 +251,7 @@ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
 static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
 
 /* Lock for enable/disable of VIC interrupts */
-static spinlock_t vic_irq_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+static  __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
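SPIN_LOCK_UNLOCKED initializers give way to DEFINE_SPINLOCK(), which declares and statically initializes the lock in one step. A hedged usage sketch (the guarded fields are the per-CPU IRQ masks declared just above; the irqsave pairing is the usual pattern for data also touched from interrupt context):

	static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);

	unsigned long flags;

	spin_lock_irqsave(&vic_irq_lock, flags);
	/* ... update vic_irq_mask[cpu] / vic_irq_enable_mask[cpu] ... */
	spin_unlock_irqrestore(&vic_irq_lock, flags);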
 
 /* The boot processor is correctly set up in PC mode when it 
  * comes up, but the secondaries need their master/slave 8259
@@ -403,11 +399,11 @@ find_smp_config(void)
        /* set up everything for just this CPU, we can alter
         * this as we start the other CPUs later */
        /* now get the CPU disposition from the extended CMOS */
-       phys_cpu_present_map = cpus_promote(voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK));
-       cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
-       cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
-       cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
-       printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_coerce(phys_cpu_present_map));
+       cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
+       cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
+       cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
+       cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
+       printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
        /* enable the CPIs by writing the base vector to their register */
        outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
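The extended-CMOS read-out above now ORs each byte straight into word 0 of phys_cpu_present_map instead of going through cpus_promote()/cpus_coerce(); the result is the same little-endian 32-bit mask of physically present CPUs. An equivalent sketch, assuming voyager_extended_cmos_read() returns one byte per call as in the lines above:

	unsigned long present = 0;
	int i;

	for (i = 0; i < 4; i++)
		present |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + i) << (8 * i);

	cpus_addr(phys_cpu_present_map)[0] = present;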
@@ -458,13 +454,12 @@ setup_trampoline(void)
 }
 
 /* Routine initially called when a non-boot CPU is brought online */
-int __init
+static void __init
 start_secondary(void *unused)
 {
        __u8 cpuid = hard_smp_processor_id();
        /* external functions not defined in the headers */
        extern void calibrate_delay(void);
-       extern int cpu_idle(void);
 
        cpu_init();
 
@@ -521,16 +516,7 @@ start_secondary(void *unused)
 
        cpu_set(cpuid, cpu_online_map);
        wmb();
-       return cpu_idle();
-}
-
-static struct task_struct * __init
-fork_by_hand(void)
-{
-       struct pt_regs regs;
-       /* don't care about the eip and regs settings since we'll
-        * never reschedule the forked task. */
-       return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+       cpu_idle();
 }
 
 
@@ -588,16 +574,10 @@ do_boot_cpu(__u8 cpu)
        hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
 
        cpucount++;
-       idle = fork_by_hand();
+       idle = fork_idle(cpu);
        if(IS_ERR(idle))
                panic("failed fork for CPU%d", cpu);
-
-       wake_up_forked_process(idle);
-
-       init_idle(idle, cpu);
-
        idle->thread.eip = (unsigned long) start_secondary;
-       unhash_process(idle);
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.esp = (void *) idle->thread.esp;
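do_boot_cpu() no longer open-codes idle-task creation: fork_by_hand(), wake_up_forked_process(), init_idle() and unhash_process() are all folded into the generic fork_idle() helper, which creates and parks the idle thread for the given CPU. The resulting bring-up step, condensed from the hunk above:

	struct task_struct *idle;

	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU%d", cpu);

	/* point the idle task at the secondary entry and reuse its stack */
	idle->thread.eip = (unsigned long)start_secondary;
	stack_start.esp = (void *)idle->thread.esp;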
 
@@ -707,12 +687,12 @@ smp_boot_cpus(void)
                /* now that the cat has probed the Voyager System Bus, sanity
                 * check the cpu map */
                if( ((voyager_quad_processors | voyager_extended_vic_processors)
-                    & cpus_coerce(phys_cpu_present_map)) != cpus_coerce(phys_cpu_present_map)) {
+                    & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
                        /* should panic */
                        printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
                }
        } else if(voyager_level == 4)
-               voyager_extended_vic_processors = cpus_coerce(phys_cpu_present_map);
+               voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];
 
        /* this sets up the idle task to run on the current cpu */
        voyager_extended_cpus = 1;
@@ -801,8 +781,8 @@ initialize_secondary(void)
  * System interrupts occur because some problem was detected on the
  * various busses.  To find out what you have to probe all the
  * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
-asmlinkage void
-smp_vic_sys_interrupt(void)
+fastcall void
+smp_vic_sys_interrupt(struct pt_regs *regs)
 {
        ack_CPI(VIC_SYS_INT);
        printk("Voyager SYSTEM INTERRUPT\n");
@@ -811,11 +791,11 @@ smp_vic_sys_interrupt(void)
 /* Handle a voyager CMN_INT; These interrupts occur either because of
  * a system status change or because a single bit memory error
  * occurred.  FIXME: At the moment, ignore all this. */
-asmlinkage void
-smp_vic_cmn_interrupt(void)
+fastcall void
+smp_vic_cmn_interrupt(struct pt_regs *regs)
 {
        static __u8 in_cmn_int = 0;
-       static spinlock_t cmn_int_lock = SPIN_LOCK_UNLOCKED;
+       static DEFINE_SPINLOCK(cmn_int_lock);
 
        /* common ints are broadcast, so make sure we only do this once */
        _raw_spin_lock(&cmn_int_lock);
@@ -840,7 +820,7 @@ smp_vic_cmn_interrupt(void)
 /*
  * Reschedule call back. Nothing to do, all the work is done
  * automatically when we return from the interrupt.  */
-asmlinkage void
+static void
 smp_reschedule_interrupt(void)
 {
        /* do nothing */
@@ -848,7 +828,7 @@ smp_reschedule_interrupt(void)
 
 static struct mm_struct * flush_mm;
 static unsigned long flush_va;
-static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(tlbstate_lock);
 #define FLUSH_ALL      0xffffffff
 
 /*
@@ -861,9 +841,9 @@ static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
 static inline void
 leave_mm (unsigned long cpu)
 {
-       if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
+       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
-       cpu_clear(cpu,  cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+       cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
 }
 
@@ -871,7 +851,7 @@ leave_mm (unsigned long cpu)
 /*
  * Invalidate call-back
  */
-asmlinkage void 
+static void 
 smp_invalidate_interrupt(void)
 {
        __u8 cpu = smp_processor_id();
@@ -884,8 +864,8 @@ smp_invalidate_interrupt(void)
                smp_processor_id()));
        */
 
-       if (flush_mm == cpu_tlbstate[cpu].active_mm) {
-               if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+       if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+               if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
@@ -910,7 +890,7 @@ flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
 
        if (!cpumask)
                BUG();
-       if ((cpumask & cpus_coerce(cpu_online_map)) != cpumask)
+       if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
                BUG();
        if (cpumask & (1 << smp_processor_id()))
                BUG();
@@ -953,7 +933,7 @@ flush_tlb_current_task(void)
 
        preempt_disable();
 
-       cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        local_flush_tlb();
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
@@ -969,7 +949,7 @@ flush_tlb_mm (struct mm_struct * mm)
 
        preempt_disable();
 
-       cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 
        if (current->active_mm == mm) {
                if (current->mm)
@@ -990,7 +970,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
        preempt_disable();
 
-       cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        if (current->active_mm == mm) {
                if(current->mm)
                        __flush_tlb_one(va);
@@ -1005,7 +985,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 }
 
 /* enable the requested IRQs */
-asmlinkage void
+static void
 smp_enable_irq_interrupt(void)
 {
        __u8 irq;
@@ -1038,7 +1018,7 @@ smp_stop_cpu_function(void *dummy)
               __asm__("hlt");
 }
 
-static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(call_lock);
 
 struct call_data_struct {
        void (*func) (void *info);
@@ -1054,7 +1034,7 @@ static struct call_data_struct * call_data;
  * previously set up.  This is used to schedule a function for
  * execution on all CPU's - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
-asmlinkage void
+static void
 smp_call_function_interrupt(void)
 {
        void (*func) (void *info) = call_data->func;
@@ -1099,7 +1079,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
                   int wait)
 {
        struct call_data_struct data;
-       __u32 mask = cpus_coerce(cpu_online_map);
+       __u32 mask = cpus_addr(cpu_online_map)[0];
 
        mask &= ~(1<<smp_processor_id());
 
@@ -1149,50 +1129,50 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
  * no local APIC, so I can't do this
  *
  * This function is currently a placeholder and is unused in the code */
-asmlinkage void 
-smp_apic_timer_interrupt(struct pt_regs regs)
+fastcall void 
+smp_apic_timer_interrupt(struct pt_regs *regs)
 {
-       wrapper_smp_local_timer_interrupt(&regs);
+       wrapper_smp_local_timer_interrupt(regs);
 }
 
 /* All of the QUAD interrupt GATES */
-asmlinkage void
-smp_qic_timer_interrupt(struct pt_regs regs)
+fastcall void
+smp_qic_timer_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_TIMER_CPI);
-       wrapper_smp_local_timer_interrupt(&regs);
+       wrapper_smp_local_timer_interrupt(regs);
 }
 
-asmlinkage void
-smp_qic_invalidate_interrupt(void)
+fastcall void
+smp_qic_invalidate_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_INVALIDATE_CPI);
        smp_invalidate_interrupt();
 }
 
-asmlinkage void
-smp_qic_reschedule_interrupt(void)
+fastcall void
+smp_qic_reschedule_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_RESCHEDULE_CPI);
        smp_reschedule_interrupt();
 }
 
-asmlinkage void
-smp_qic_enable_irq_interrupt(void)
+fastcall void
+smp_qic_enable_irq_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
        smp_enable_irq_interrupt();
 }
 
-asmlinkage void
-smp_qic_call_function_interrupt(void)
+fastcall void
+smp_qic_call_function_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
        smp_call_function_interrupt();
 }
 
-asmlinkage void
-smp_vic_cpi_interrupt(struct pt_regs regs)
+fastcall void
+smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
        __u8 cpu = smp_processor_id();
 
@@ -1202,7 +1182,7 @@ smp_vic_cpi_interrupt(struct pt_regs regs)
                ack_VIC_CPI(VIC_CPI_LEVEL0);
 
        if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
-               wrapper_smp_local_timer_interrupt(&regs);
+               wrapper_smp_local_timer_interrupt(regs);
        if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
                smp_invalidate_interrupt();
        if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
@@ -1219,7 +1199,7 @@ do_flush_tlb_all(void* info)
        unsigned long cpu = smp_processor_id();
 
        __flush_tlb_all();
-       if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
+       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
 }
 
@@ -1303,8 +1283,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
        int cpu = smp_processor_id();
        long weight;
 
-       x86_do_profile(regs);
-
+       profile_tick(CPU_PROFILING, regs);
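x86_do_profile() is replaced by the generic profiling hook; the local timer path now just reports a tick to the profiler before doing its multiplier bookkeeping. A sketch of the call as used here:

	#include <linux/profile.h>

	profile_tick(CPU_PROFILING, regs);	/* replaces x86_do_profile(regs) */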
        if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
@@ -1790,9 +1769,9 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
        unsigned long irq_mask = 1 << irq;
        int cpu;
 
-       real_mask = cpus_coerce(mask) & voyager_extended_vic_processors;
+       real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
        
-       if(cpus_coerce(mask) == 0)
+       if(cpus_addr(mask)[0] == 0)
                /* can't have no cpu's to accept the interrupt -- extremely
                 * bad things will happen */
                return;