fedora core 6 1.2949 + vserver 2.2.0
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index d7054f4..74aeedf 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -9,7 +9,7 @@
  * This file provides all the same external entries as smp.c but uses
  * the voyager hal to provide the functionality
  */
-#include <linux/config.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
 #include <linux/delay.h>
 #include <asm/desc.h>
 #include <asm/voyager.h>
 #include <asm/vic.h>
-#include <asm/pgalloc.h>
 #include <asm/mtrr.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
-#include <asm/desc.h>
 #include <asm/arch_hooks.h>
-
-#include <linux/irq.h>
-
-int reboot_smp = 0;
+#include <asm/pda.h>
 
 /* TLB state -- visible externally, indexed physically */
-struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
 
 /* CPU IRQ affinity -- set to all ones initially */
 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1]  = ~0UL };
 
-/* Set when the idlers are all forked - Set in main.c but not actually
- * used by any other parts of the kernel */
-int smp_threads_ready = 0;
-
 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
  * indexed physically */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
 
 /* physical ID of the CPU used to boot the system */
 unsigned char boot_cpu_id;
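The DEFINE_PER_CPU conversion of cpu_tlbstate above follows the kernel's standard per-CPU variable pattern: DEFINE_PER_CPU() replaces a static NR_CPUS-sized array, and per_cpu(var, cpu) replaces the array indexing. A minimal sketch of that pattern, using a hypothetical counter rather than anything from this file:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_count);   /* hypothetical variable */

static unsigned long read_example(int cpu)
{
        /* equivalent to the old example_count[cpu] array access */
        return per_cpu(example_count, cpu);
}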
@@ -80,19 +72,12 @@ static volatile unsigned long smp_invalidate_needed;
 /* Bitmask of currently online CPUs - used by setup.c for
    /proc/cpuinfo, visible externally but still physical */
 cpumask_t cpu_online_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_online_map);
 
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 
-/* estimate of time used to flush the SMP-local cache - used in
- * processor affinity calculations */
-cycles_t cacheflush_time = 0;
-
-/* cache decay ticks for scheduler---a fairly useless quantity for the
-   voyager system with its odd affinity and huge L3 cache */
-unsigned long cache_decay_ticks = 20;
-
 
 /* The internal functions */
 static void send_CPI(__u32 cpuset, __u8 cpi);
@@ -101,8 +86,8 @@ static int ack_QIC_CPI(__u8 cpi);
 static void ack_special_QIC_CPI(__u8 cpi);
 static void ack_VIC_CPI(__u8 cpi);
 static void send_CPI_allbutself(__u8 cpi);
-static void enable_vic_irq(unsigned int irq);
-static void disable_vic_irq(unsigned int irq);
+static void mask_vic_irq(unsigned int irq);
+static void unmask_vic_irq(unsigned int irq);
 static unsigned int startup_vic_irq(unsigned int irq);
 static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
@@ -113,9 +98,9 @@ static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
 static void do_quad_bootstrap(void);
-static inline void wrapper_smp_local_timer_interrupt(struct pt_regs *);
 
 int hard_smp_processor_id(void);
+int safe_smp_processor_id(void);
 
 /* Inline functions */
 static inline void
@@ -141,6 +126,14 @@ send_QIC_CPI(__u32 cpuset, __u8 cpi)
        }
 }
 
+static inline void
+wrapper_smp_local_timer_interrupt(void)
+{
+       irq_enter();
+       smp_local_timer_interrupt();
+       irq_exit();
+}
+
 static inline void
 send_one_CPI(__u8 cpu, __u8 cpi)
 {
@@ -154,7 +147,7 @@ static inline void
 send_CPI_allbutself(__u8 cpi)
 {
        __u8 cpu = smp_processor_id();
-       __u32 mask = cpus_coerce(cpu_online_map) & ~(1 << cpu);
+       __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
        send_CPI(mask, cpi);
 }
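cpus_coerce() no longer exists in this kernel; cpus_addr(map) exposes the unsigned long bits[] array that backs a cpumask_t, and element [0] is the low word, which is all this file needs since its CPU masks are 32-bit (__u32) throughout. A small sketch of the idiom, with a hypothetical helper name:

#include <linux/cpumask.h>

static __u32 low_word_of(cpumask_t map)
{
        /* the whole mask fits in bits[0] as long as NR_CPUS <= 32 */
        return cpus_addr(map)[0];
}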
 
@@ -213,15 +206,12 @@ ack_CPI(__u8 cpi)
 /* The VIC IRQ descriptors -- these look almost identical to the
  * 8259 IRQs except that masks and things must be kept per processor
  */
-static struct hw_interrupt_type vic_irq_type = {
-       "VIC-level",
-       startup_vic_irq,        /* startup */
-       disable_vic_irq,        /* shutdown */
-       enable_vic_irq,         /* enable */
-       disable_vic_irq,        /* disable */
-       before_handle_vic_irq,  /* ack */
-       after_handle_vic_irq,   /* end */
-       set_vic_irq_affinity,   /* affinity */
+static struct irq_chip vic_chip = {
+       .name           = "VIC",
+       .startup        = startup_vic_irq,
+       .mask           = mask_vic_irq,
+       .unmask         = unmask_vic_irq,
+       .set_affinity   = set_vic_irq_affinity,
 };
 
 /* used to count up as CPUs are brought on line (starts at 0) */
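The table above is the genirq replacement for the old positional hw_interrupt_type initializer: the chip now carries named mask/unmask callbacks, and the flow handler is attached separately with set_irq_chip_and_handler(). A sketch of that registration pattern, with illustrative names that are not the Voyager ones:

#include <linux/irq.h>

static void example_mask_irq(unsigned int irq) { }      /* hypothetical stubs */
static void example_unmask_irq(unsigned int irq) { }

static struct irq_chip example_chip = {
        .name   = "EXAMPLE",
        .mask   = example_mask_irq,
        .unmask = example_unmask_irq,
};

static void __init example_init_irqs(void)
{
        int i;

        /* bind the chip and a generic flow handler to each IRQ line;
         * this patch uses handle_simple_irq for the VIC in the same way */
        for (i = 0; i < 16; i++)
                set_irq_chip_and_handler(i, &example_chip, handle_simple_irq);
}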
@@ -247,6 +237,9 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 /* This is for the new dynamic CPU boot code */
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_callout_map);
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -255,7 +248,7 @@ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
 static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
 
 /* Lock for enable/disable of VIC interrupts */
-static spinlock_t vic_irq_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+static  __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
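SPIN_LOCK_UNLOCKED initializers are being phased out in this kernel generation because, with lockdep enabled, every lock initialized that way ends up sharing one lock class; DEFINE_SPINLOCK() declares and initializes the lock in one step and gives it its own class. A minimal sketch with a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);           /* hypothetical lock */

static void touch_example(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_lock, flags);
}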
 
 /* The boot processor is correctly set up in PC mode when it 
  * comes up, but the secondaries need their master/slave 8259
@@ -403,11 +396,12 @@ find_smp_config(void)
        /* set up everything for just this CPU, we can alter
         * this as we start the other CPUs later */
        /* now get the CPU disposition from the extended CMOS */
-       phys_cpu_present_map = cpus_promote(voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK));
-       cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
-       cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
-       cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
-       printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_coerce(phys_cpu_present_map));
+       cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
+       cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
+       cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
+       cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
+       cpu_possible_map = phys_cpu_present_map;
+       printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
        /* enable the CPIs by writing the base vector to their register */
        outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
@@ -429,6 +423,7 @@ find_smp_config(void)
             VOYAGER_SUS_IN_CONTROL_PORT);
 
        current_thread_info()->cpu = boot_cpu_id;
+       write_pda(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -458,15 +453,14 @@ setup_trampoline(void)
 }
 
 /* Routine initially called when a non-boot CPU is brought online */
-int __init
+static void __init
 start_secondary(void *unused)
 {
        __u8 cpuid = hard_smp_processor_id();
        /* external functions not defined in the headers */
        extern void calibrate_delay(void);
-       extern int cpu_idle(void);
 
-       cpu_init();
+       secondary_cpu_init();
 
        /* OK, we're in the routine */
        ack_CPI(VIC_CPU_BOOT_CPI);
@@ -521,16 +515,7 @@ start_secondary(void *unused)
 
        cpu_set(cpuid, cpu_online_map);
        wmb();
-       return cpu_idle();
-}
-
-static struct task_struct * __init
-fork_by_hand(void)
-{
-       struct pt_regs regs;
-       /* don't care about the eip and regs settings since we'll
-        * never reschedule the forked task. */
-       return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+       cpu_idle();
 }
 
 
@@ -588,19 +573,22 @@ do_boot_cpu(__u8 cpu)
        hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
 
        cpucount++;
-       idle = fork_by_hand();
+       idle = fork_idle(cpu);
        if(IS_ERR(idle))
                panic("failed fork for CPU%d", cpu);
-
-       wake_up_forked_process(idle);
-
-       init_idle(idle, cpu);
-
        idle->thread.eip = (unsigned long) start_secondary;
-       unhash_process(idle);
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.esp = (void *) idle->thread.esp;
 
+       /* Pre-allocate and initialize the CPU's GDT and PDA so it
+          doesn't have to do any memory allocation during the
+          delicate CPU-bringup phase. */
+       if (!init_gdt(cpu, idle)) {
+               printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
+               cpucount--;
+               return;
+       }
+
        irq_ctx_init(cpu);
 
        /* Note: Don't modify initial ss override */
@@ -681,6 +669,7 @@ do_boot_cpu(__u8 cpu)
                print_cpu_info(&cpu_data[cpu]);
                wmb();
                cpu_set(cpu, cpu_callout_map);
+               cpu_set(cpu, cpu_present_map);
        }
        else {
                printk("CPU%d FAILED TO BOOT: ", cpu);
@@ -707,12 +696,12 @@ smp_boot_cpus(void)
                /* now that the cat has probed the Voyager System Bus, sanity
                 * check the cpu map */
                if( ((voyager_quad_processors | voyager_extended_vic_processors)
-                    & cpus_coerce(phys_cpu_present_map)) != cpus_coerce(phys_cpu_present_map)) {
+                    & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
                        /* should panic */
                        printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
                }
        } else if(voyager_level == 4)
-               voyager_extended_vic_processors = cpus_coerce(phys_cpu_present_map);
+               voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];
 
        /* this sets up the idle task to run on the current cpu */
        voyager_extended_cpus = 1;
@@ -783,6 +772,12 @@ initialize_secondary(void)
        set_current(hard_get_current());
 #endif
 
+       /*
+        * switch to the per CPU GDT we already set up
+        * in do_boot_cpu()
+        */
+       cpu_set_gdt(current_thread_info()->cpu);
+
        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
@@ -801,21 +796,21 @@ initialize_secondary(void)
  * System interrupts occur because some problem was detected on the
  * various busses.  To find out what you have to probe all the
  * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
-asmlinkage void
-smp_vic_sys_interrupt(void)
+fastcall void
+smp_vic_sys_interrupt(struct pt_regs *regs)
 {
        ack_CPI(VIC_SYS_INT);
-       printk("Voyager SYSTEM INTERRUPT\n");
+       printk("Voyager SYSTEM INTERRUPT\n");   
 }
 
 /* Handle a voyager CMN_INT; These interrupts occur either because of
  * a system status change or because a single bit memory error
  * occurred.  FIXME: At the moment, ignore all this. */
-asmlinkage void
-smp_vic_cmn_interrupt(void)
+fastcall void
+smp_vic_cmn_interrupt(struct pt_regs *regs)
 {
        static __u8 in_cmn_int = 0;
-       static spinlock_t cmn_int_lock = SPIN_LOCK_UNLOCKED;
+       static DEFINE_SPINLOCK(cmn_int_lock);
 
        /* common ints are broadcast, so make sure we only do this once */
        _raw_spin_lock(&cmn_int_lock);
@@ -840,7 +835,7 @@ smp_vic_cmn_interrupt(void)
 /*
  * Reschedule call back. Nothing to do, all the work is done
  * automatically when we return from the interrupt.  */
-asmlinkage void
+static void
 smp_reschedule_interrupt(void)
 {
        /* do nothing */
@@ -848,7 +843,7 @@ smp_reschedule_interrupt(void)
 
 static struct mm_struct * flush_mm;
 static unsigned long flush_va;
-static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(tlbstate_lock);
 #define FLUSH_ALL      0xffffffff
 
 /*
@@ -861,9 +856,9 @@ static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
 static inline void
 leave_mm (unsigned long cpu)
 {
-       if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
+       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
-       cpu_clear(cpu,  cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+       cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
 }
 
@@ -871,7 +866,7 @@ leave_mm (unsigned long cpu)
 /*
  * Invalidate call-back
  */
-asmlinkage void 
+static void 
 smp_invalidate_interrupt(void)
 {
        __u8 cpu = smp_processor_id();
@@ -884,8 +879,8 @@ smp_invalidate_interrupt(void)
                smp_processor_id()));
        */
 
-       if (flush_mm == cpu_tlbstate[cpu].active_mm) {
-               if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+       if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+               if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
@@ -910,7 +905,7 @@ flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
 
        if (!cpumask)
                BUG();
-       if ((cpumask & cpus_coerce(cpu_online_map)) != cpumask)
+       if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
                BUG();
        if (cpumask & (1 << smp_processor_id()))
                BUG();
@@ -953,7 +948,7 @@ flush_tlb_current_task(void)
 
        preempt_disable();
 
-       cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        local_flush_tlb();
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
@@ -969,7 +964,7 @@ flush_tlb_mm (struct mm_struct * mm)
 
        preempt_disable();
 
-       cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 
        if (current->active_mm == mm) {
                if (current->mm)
@@ -990,7 +985,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
        preempt_disable();
 
-       cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        if (current->active_mm == mm) {
                if(current->mm)
                        __flush_tlb_one(va);
@@ -1003,9 +998,10 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
        preempt_enable();
 }
+EXPORT_SYMBOL(flush_tlb_page);
 
 /* enable the requested IRQs */
-asmlinkage void
+static void
 smp_enable_irq_interrupt(void)
 {
        __u8 irq;
@@ -1035,10 +1031,10 @@ smp_stop_cpu_function(void *dummy)
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for(;;)
-              __asm__("hlt");
+               halt();
 }
 
-static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(call_lock);
 
 struct call_data_struct {
        void (*func) (void *info);
@@ -1054,7 +1050,7 @@ static struct call_data_struct * call_data;
  * previously set up.  This is used to schedule a function for
  * execution on all CPU's - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
-asmlinkage void
+static void
 smp_call_function_interrupt(void)
 {
        void (*func) (void *info) = call_data->func;
@@ -1099,13 +1095,16 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
                   int wait)
 {
        struct call_data_struct data;
-       __u32 mask = cpus_coerce(cpu_online_map);
+       __u32 mask = cpus_addr(cpu_online_map)[0];
 
        mask &= ~(1<<smp_processor_id());
 
        if (!mask)
                return 0;
 
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON(irqs_disabled());
+
        data.func = func;
        data.info = info;
        data.started = mask;
@@ -1131,6 +1130,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
 
        return 0;
 }
+EXPORT_SYMBOL(smp_call_function);
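The WARN_ON added above documents the calling convention: smp_call_function() sends a CPI to every other online CPU and then spins waiting for them to check in, so calling it with interrupts disabled can deadlock that handshake. A minimal sketch of a caller, with a hypothetical callback and counter:

#include <linux/smp.h>
#include <asm/atomic.h>

static void bump_counter(void *info)
{
        atomic_inc((atomic_t *)info);           /* runs on each other CPU */
}

static int poke_other_cpus(void)
{
        atomic_t hits = ATOMIC_INIT(0);

        /* retry = 0, wait = 1: return only after every CPU ran the callback */
        return smp_call_function(bump_counter, &hits, 0, 1);
}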
 
 /* Sorry about the name.  In an APIC based system, the APICs
  * themselves are programmed to send a timer interrupt.  This is used
@@ -1146,51 +1146,56 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
  * no local APIC, so I can't do this
  *
  * This function is currently a placeholder and is unused in the code */
-asmlinkage void 
-smp_apic_timer_interrupt(struct pt_regs regs)
+fastcall void 
+smp_apic_timer_interrupt(struct pt_regs *regs)
 {
-       wrapper_smp_local_timer_interrupt(&regs);
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       wrapper_smp_local_timer_interrupt();
+       set_irq_regs(old_regs);
 }
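Each fastcall conversion in this hunk has the same shape: the register frame is no longer threaded down the call chain but parked with set_irq_regs(), so deeper code (update_process_times() below, via get_irq_regs()) can still reach it. The skeleton, reduced to its essentials and using a hypothetical handler name:

#include <linux/irq_regs.h>
#include <linux/linkage.h>

fastcall void example_cpi_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /* the body calls helpers that use get_irq_regs() instead of
         * taking a pt_regs argument */

        set_irq_regs(old_regs);                 /* restore for nesting */
}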
 
 /* All of the QUAD interrupt GATES */
-asmlinkage void
-smp_qic_timer_interrupt(struct pt_regs regs)
+fastcall void
+smp_qic_timer_interrupt(struct pt_regs *regs)
 {
+       struct pt_regs *old_regs = set_irq_regs(regs);
        ack_QIC_CPI(QIC_TIMER_CPI);
-       wrapper_smp_local_timer_interrupt(&regs);
+       wrapper_smp_local_timer_interrupt();
+       set_irq_regs(old_regs);
 }
 
-asmlinkage void
-smp_qic_invalidate_interrupt(void)
+fastcall void
+smp_qic_invalidate_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_INVALIDATE_CPI);
        smp_invalidate_interrupt();
 }
 
-asmlinkage void
-smp_qic_reschedule_interrupt(void)
+fastcall void
+smp_qic_reschedule_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_RESCHEDULE_CPI);
        smp_reschedule_interrupt();
 }
 
-asmlinkage void
-smp_qic_enable_irq_interrupt(void)
+fastcall void
+smp_qic_enable_irq_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
        smp_enable_irq_interrupt();
 }
 
-asmlinkage void
-smp_qic_call_function_interrupt(void)
+fastcall void
+smp_qic_call_function_interrupt(struct pt_regs *regs)
 {
        ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
        smp_call_function_interrupt();
 }
 
-asmlinkage void
-smp_vic_cpi_interrupt(struct pt_regs regs)
+fastcall void
+smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
+       struct pt_regs *old_regs = set_irq_regs(regs);
        __u8 cpu = smp_processor_id();
 
        if(is_cpu_quad())
@@ -1199,7 +1204,7 @@ smp_vic_cpi_interrupt(struct pt_regs regs)
                ack_VIC_CPI(VIC_CPI_LEVEL0);
 
        if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
-               wrapper_smp_local_timer_interrupt(&regs);
+               wrapper_smp_local_timer_interrupt();
        if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
                smp_invalidate_interrupt();
        if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
@@ -1208,6 +1213,7 @@ smp_vic_cpi_interrupt(struct pt_regs regs)
                smp_enable_irq_interrupt();
        if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                smp_call_function_interrupt();
+       set_irq_regs(old_regs);
 }
 
 static void
@@ -1216,7 +1222,7 @@ do_flush_tlb_all(void* info)
        unsigned long cpu = smp_processor_id();
 
        __flush_tlb_all();
-       if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
+       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
 }
 
@@ -1262,6 +1268,12 @@ hard_smp_processor_id(void)
        return 0;
 }
 
+int
+safe_smp_processor_id(void)
+{
+       return hard_smp_processor_id();
+}
+
 /* broadcast a halt to all other CPUs */
 void
 smp_send_stop(void)
@@ -1272,18 +1284,10 @@ smp_send_stop(void)
 /* this function is triggered in time.c when a clock tick fires
  * we need to re-broadcast the tick to all CPUs */
 void
-smp_vic_timer_interrupt(struct pt_regs *regs)
+smp_vic_timer_interrupt(void)
 {
        send_CPI_allbutself(VIC_TIMER_CPI);
-       smp_local_timer_interrupt(regs);
-}
-
-static inline void
-wrapper_smp_local_timer_interrupt(struct pt_regs *regs)
-{
-       irq_enter();
-       smp_local_timer_interrupt(regs);
-       irq_exit();
+       smp_local_timer_interrupt();
 }
 
 /* local (per CPU) timer interrupt.  It does both profiling and
@@ -1295,13 +1299,12 @@ wrapper_smp_local_timer_interrupt(struct pt_regs *regs)
  * value into /proc/profile.
  */
 void
-smp_local_timer_interrupt(struct pt_regs * regs)
+smp_local_timer_interrupt(void)
 {
        int cpu = smp_processor_id();
        long weight;
 
-       x86_do_profile(regs);
-
+       profile_tick(CPU_PROFILING);
        if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
@@ -1319,7 +1322,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
                                                per_cpu(prof_counter, cpu);
                }
 
-               update_process_times(user_mode(regs));
+               update_process_times(user_mode_vm(get_irq_regs()));
        }
 
        if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
@@ -1406,6 +1409,17 @@ setup_profiling_timer(unsigned int multiplier)
        return 0;
 }
 
+/* This is a bit of a mess, but forced on us by the genirq changes:
+ * there's no genirq handler that really does what voyager wants,
+ * so hack it up with the simple IRQ handler */
+static void fastcall
+handle_vic_irq(unsigned int irq, struct irq_desc *desc)
+{
+       before_handle_vic_irq(irq);
+       handle_simple_irq(irq, desc);
+       after_handle_vic_irq(irq);
+}
+
 
 /*  The CPIs are handled in the per cpu 8259s, so they must be
  *  enabled to be received: FIX: enabling the CPIs in the early
@@ -1442,7 +1456,7 @@ smp_intr_init(void)
         * This is for later: first 16 correspond to PC IRQs; next 16
         * are Primary MC IRQs and final 16 are Secondary MC IRQs */
        for(i = 0; i < 48; i++)
-               irq_desc[i].handler = &vic_irq_type;
+               set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
 }
 
 /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
@@ -1540,7 +1554,7 @@ ack_VIC_CPI(__u8 cpi)
 static unsigned int
 startup_vic_irq(unsigned int irq)
 {
-       enable_vic_irq(irq);
+       unmask_vic_irq(irq);
 
        return 0;
 }
@@ -1567,7 +1581,7 @@ startup_vic_irq(unsigned int irq)
  *    adjust their masks accordingly.  */
 
 static void
-enable_vic_irq(unsigned int irq)
+unmask_vic_irq(unsigned int irq)
 {
        /* linux doesn't do processor-irq affinity, so enable on
         * all CPUs we know about */
@@ -1576,7 +1590,7 @@ enable_vic_irq(unsigned int irq)
        __u32 processorList = 0;
        unsigned long flags;
 
-       VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
+       VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
                irq, cpu, cpu_irq_affinity[cpu]));
        spin_lock_irqsave(&vic_irq_lock, flags);
        for_each_online_cpu(real_cpu) {
@@ -1600,7 +1614,7 @@ enable_vic_irq(unsigned int irq)
 }
 
 static void
-disable_vic_irq(unsigned int irq)
+mask_vic_irq(unsigned int irq)
 {
        /* lazy disable, do nothing */
 }
@@ -1724,7 +1738,7 @@ after_handle_vic_irq(unsigned int irq)
 
                        printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
                               cpu, irq);
-                       for_each_cpu(real_cpu, mask) {
+                       for_each_possible_cpu(real_cpu) {
 
                                outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
                                     VIC_PROCESSOR_ID);
@@ -1787,9 +1801,9 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
        unsigned long irq_mask = 1 << irq;
        int cpu;
 
-       real_mask = cpus_coerce(mask) & voyager_extended_vic_processors;
+       real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
        
-       if(cpus_coerce(mask) == 0)
+       if(cpus_addr(mask)[0] == 0)
                /* can't have no cpu's to accept the interrupt -- extremely
                 * bad things will happen */
                return;
@@ -1828,7 +1842,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
         * disabled again as it comes in (voyager lazy disable).  If
         * the affinity map is tightened to disable the interrupt on a
         * cpu, it will be pushed off when it comes in */
-       enable_vic_irq(irq);
+       unmask_vic_irq(irq);
 }
 
 static void
@@ -1935,6 +1949,8 @@ void __devinit smp_prepare_boot_cpu(void)
 {
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callout_map);
+       cpu_set(smp_processor_id(), cpu_possible_map);
+       cpu_set(smp_processor_id(), cpu_present_map);
 }
 
 int __devinit
@@ -1959,3 +1975,10 @@ smp_cpus_done(unsigned int max_cpus)
 {
        zap_low_mappings();
 }
+
+void __init
+smp_setup_processor_id(void)
+{
+       current_thread_info()->cpu = hard_smp_processor_id();
+       write_pda(cpu_number, hard_smp_processor_id());
+}
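The new smp_setup_processor_id() mirrors what find_smp_config() now does for the boot CPU: on this i386 tree the logical CPU number is cached in the per-CPU PDA, and write_pda()/read_pda() are the accessors. A sketch of the read side, assuming only the stock pda.h cpu_number field:

#include <asm/pda.h>

static int current_cpu_number(void)
{
        /* the PDA is addressed through a dedicated segment register,
         * so this is a single memory load */
        return read_pda(cpu_number);
}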