diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 2bdc18fff..70e560a1b 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -10,6 +10,7 @@
  * the voyager hal to provide the functionality
  */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
 #include <linux/delay.h>
@@ -24,30 +25,21 @@
 #include <asm/desc.h>
 #include <asm/voyager.h>
 #include <asm/vic.h>
-#include
 #include <asm/mtrr.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
-#include
 #include <asm/arch_hooks.h>
-#include
-
-int reboot_smp = 0;
-
 /* TLB state -- visible externally, indexed physically */
-struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
 
 /* CPU IRQ affinity -- set to all ones initially */
 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL };
 
-/* Set when the idlers are all forked - Set in main.c but not actually
- * used by any other parts of the kernel */
-int smp_threads_ready = 0;
-
 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
  * indexed physically */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
 
 /* physical ID of the CPU used to boot the system */
 unsigned char boot_cpu_id;
 
@@ -80,19 +72,12 @@ static volatile unsigned long smp_invalidate_needed;
 
 /* Bitmask of currently online CPUs - used by setup.c for
    /proc/cpuinfo, visible externally but still physical */
 cpumask_t cpu_online_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_online_map);
 
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 
-/* estimate of time used to flush the SMP-local cache - used in
- * processor affinity calculations */
-cycles_t cacheflush_time = 0;
-
-/* cache decay ticks for scheduler---a fairly useless quantity for the
-   voyager system with its odd affinity and huge L3 cache */
-unsigned long cache_decay_ticks = 20;
-
 /* The internal functions */
 static void send_CPI(__u32 cpuset, __u8 cpi);
@@ -113,7 +98,6 @@ static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
 static void do_quad_bootstrap(void);
-static inline void wrapper_smp_local_timer_interrupt(struct pt_regs *);
 
 int hard_smp_processor_id(void);
 
@@ -141,6 +125,14 @@ send_QIC_CPI(__u32 cpuset, __u8 cpi)
 	}
 }
 
+static inline void
+wrapper_smp_local_timer_interrupt(struct pt_regs *regs)
+{
+	irq_enter();
+	smp_local_timer_interrupt(regs);
+	irq_exit();
+}
+
 static inline void
 send_one_CPI(__u8 cpu, __u8 cpi)
 {
@@ -154,7 +146,7 @@ static inline void
 send_CPI_allbutself(__u8 cpi)
 {
 	__u8 cpu = smp_processor_id();
-	__u32 mask = cpus_coerce(cpu_online_map) & ~(1 << cpu);
+	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
 	send_CPI(mask, cpi);
 }
 
@@ -214,14 +206,14 @@ ack_CPI(__u8 cpi)
  * 8259 IRQs except that masks and things must be kept per processor
  */
 static struct hw_interrupt_type vic_irq_type = {
-	"VIC-level",
-	startup_vic_irq,	/* startup */
-	disable_vic_irq,	/* shutdown */
-	enable_vic_irq,		/* enable */
-	disable_vic_irq,	/* disable */
-	before_handle_vic_irq,	/* ack */
-	after_handle_vic_irq,	/* end */
-	set_vic_irq_affinity,	/* affinity */
+	.typename = "VIC-level",
+	.startup = startup_vic_irq,
+	.shutdown = disable_vic_irq,
+	.enable = enable_vic_irq,
+	.disable = disable_vic_irq,
+	.ack = before_handle_vic_irq,
+	.end = after_handle_vic_irq,
+	.set_affinity = set_vic_irq_affinity,
 };
 
 /* used to count up as CPUs are brought on line (starts at 0) */
@@ -247,6 +239,9 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 /* This is for the new dynamic CPU boot code */
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_callout_map);
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -255,7 +250,7 @@ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
 static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
 
 /* Lock for enable/disable of VIC interrupts */
-static spinlock_t vic_irq_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
 
 /* The boot processor is correctly set up in PC mode when it
  * comes up, but the secondaries need their master/slave 8259
@@ -403,11 +398,12 @@ find_smp_config(void)
 	/* set up everything for just this CPU, we can alter
 	 * this as we start the other CPUs later */
 	/* now get the CPU disposition from the extended CMOS */
-	phys_cpu_present_map = cpus_promote(voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK));
-	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
-	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
-	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
-	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_coerce(phys_cpu_present_map));
+	cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
+	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
+	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
+	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
+	cpu_possible_map = phys_cpu_present_map;
+	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
 	/* enable the CPIs by writing the base vector to their register */
 	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
@@ -458,13 +454,12 @@ setup_trampoline(void)
 }
 
 /* Routine initially called when a non-boot CPU is brought online */
-int __init
+static void __init
 start_secondary(void *unused)
 {
 	__u8 cpuid = hard_smp_processor_id();
 	/* external functions not defined in the headers */
 	extern void calibrate_delay(void);
-	extern int cpu_idle(void);
 
 	cpu_init();
@@ -521,16 +516,7 @@
 	cpu_set(cpuid, cpu_online_map);
 	wmb();
-	return cpu_idle();
-}
-
-static struct task_struct * __init
-fork_by_hand(void)
-{
-	struct pt_regs regs;
-	/* don't care about the eip and regs settings since we'll
-	 * never reschedule the forked task. */
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+	cpu_idle();
 }
 
@@ -588,16 +574,10 @@ do_boot_cpu(__u8 cpu)
 	hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
 
 	cpucount++;
-	idle = fork_by_hand();
+	idle = fork_idle(cpu);
 	if(IS_ERR(idle))
 		panic("failed fork for CPU%d", cpu);
-
-	wake_up_forked_process(idle);
-
-	init_idle(idle, cpu);
-
 	idle->thread.eip = (unsigned long) start_secondary;
-	unhash_process(idle);
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.esp = (void *) idle->thread.esp;
 
@@ -707,12 +687,12 @@ smp_boot_cpus(void)
 		/* now that the cat has probed the Voyager System Bus, sanity
 		 * check the cpu map */
 		if( ((voyager_quad_processors | voyager_extended_vic_processors)
-		     & cpus_coerce(phys_cpu_present_map)) != cpus_coerce(phys_cpu_present_map)) {
+		     & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
 			/* should panic */
 			printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
 		}
 	} else if(voyager_level == 4)
-		voyager_extended_vic_processors = cpus_coerce(phys_cpu_present_map);
+		voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];
 
 	/* this sets up the idle task to run on the current cpu */
 	voyager_extended_cpus = 1;
@@ -801,8 +781,8 @@ initialize_secondary(void)
  * System interrupts occur because some problem was detected on the
  * various busses.  To find out what you have to probe all the
  * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
-asmlinkage void
-smp_vic_sys_interrupt(void)
+fastcall void
+smp_vic_sys_interrupt(struct pt_regs *regs)
 {
 	ack_CPI(VIC_SYS_INT);
 	printk("Voyager SYSTEM INTERRUPT\n");
@@ -811,11 +791,11 @@
 /* Handle a voyager CMN_INT; These interrupts occur either because of
  * a system status change or because a single bit memory error
  * occurred. FIXME: At the moment, ignore all this. */
-asmlinkage void
-smp_vic_cmn_interrupt(void)
+fastcall void
+smp_vic_cmn_interrupt(struct pt_regs *regs)
 {
 	static __u8 in_cmn_int = 0;
-	static spinlock_t cmn_int_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(cmn_int_lock);
 
 	/* common ints are broadcast, so make sure we only do this once */
 	_raw_spin_lock(&cmn_int_lock);
@@ -840,7 +820,7 @@
 /*
  * Reschedule call back. Nothing to do, all the work is done
  * automatically when we return from the interrupt.
  */
-asmlinkage void
+static void
 smp_reschedule_interrupt(void)
 {
 	/* do nothing */
@@ -848,7 +828,7 @@
 
 static struct mm_struct * flush_mm;
 static unsigned long flush_va;
-static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(tlbstate_lock);
 #define FLUSH_ALL	0xffffffff
 
 /*
@@ -861,9 +841,9 @@ static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
 static inline void
 leave_mm (unsigned long cpu)
 {
-	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 
@@ -871,7 +851,7 @@ leave_mm (unsigned long cpu)
 /*
  * Invalidate call-back
  */
-asmlinkage void
+static void
 smp_invalidate_interrupt(void)
 {
 	__u8 cpu = smp_processor_id();
@@ -884,8 +864,8 @@ smp_invalidate_interrupt(void)
 			smp_processor_id()));
 	*/
 
-	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
-		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
 			if (flush_va == FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -910,7 +890,7 @@ flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
 
 	if (!cpumask)
 		BUG();
-	if ((cpumask & cpus_coerce(cpu_online_map)) != cpumask)
+	if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
 		BUG();
 	if (cpumask & (1 << smp_processor_id()))
 		BUG();
@@ -953,7 +933,7 @@ flush_tlb_current_task(void)
 
 	preempt_disable();
 
-	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
@@ -969,7 +949,7 @@ flush_tlb_mm (struct mm_struct * mm)
 
 	preempt_disable();
 
-	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -990,7 +970,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
 	preempt_disable();
 
-	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	if (current->active_mm == mm) {
 		if(current->mm)
 			__flush_tlb_one(va);
@@ -1003,9 +983,10 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
 	preempt_enable();
 }
+EXPORT_SYMBOL(flush_tlb_page);
 
 /* enable the requested IRQs */
-asmlinkage void
+static void
 smp_enable_irq_interrupt(void)
 {
 	__u8 irq;
@@ -1035,10 +1016,10 @@ smp_stop_cpu_function(void *dummy)
 	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_disable();
 	for(;;)
-		__asm__("hlt");
+		halt();
 }
 
-static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(call_lock);
 
 struct call_data_struct {
 	void (*func) (void *info);
@@ -1054,7 +1035,7 @@ static struct call_data_struct * call_data;
  * previously set up.  This is used to schedule a function for
 * execution on all CPU's - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
-asmlinkage void
+static void
 smp_call_function_interrupt(void)
 {
 	void (*func) (void *info) = call_data->func;
@@ -1099,7 +1080,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
 {
 	struct call_data_struct data;
-	__u32 mask = cpus_coerce(cpu_online_map);
+	__u32 mask = cpus_addr(cpu_online_map)[0];
 
 	mask &= ~(1<<smp_processor_id());