int __init init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
-		spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i));
+		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}
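
The one-line change above is the substantive fix: per_cpu() takes the per-CPU variable as its first argument and the CPU number as its second, so any member access must happen after the per-CPU lookup, not inside the macro. A rough analogue, treating the per-CPU area as a plain array (flush_state_copies[] is a stand-in invented here purely for illustration):

	struct flush_state_analogue {
		spinlock_t tlbstate_lock;
	} flush_state_copies[NR_CPUS];

	/* per_cpu(flush_state, i).tlbstate_lock behaves like: */
	spin_lock_init(&flush_state_copies[i].tlbstate_lock);

	/* whereas the old per_cpu(flush_state.tlbstate_lock, i) asked the
	 * macro to relocate the member expression itself, which only works,
	 * if at all, by accident of token pasting. */
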
	flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_current_task);
void flush_tlb_mm (struct mm_struct * mm)
{
	preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_page);
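
The three EXPORT_SYMBOL lines added above make these TLB-flush entry points linkable from loadable modules; without them a module calling any of these functions fails at load time with an unresolved symbol. A minimal sketch of such a consumer (the module itself is hypothetical, shown only to illustrate why the exports are needed):

	#include <linux/module.h>
	#include <linux/sched.h>
	#include <asm/tlbflush.h>

	static int __init tlbdemo_init(void)
	{
		/* links only because flush_tlb_mm is exported */
		flush_tlb_mm(current->mm);
		return 0;
	}
	static void __exit tlbdemo_exit(void) {}

	module_init(tlbdemo_init);
	module_exit(tlbdemo_exit);
	MODULE_LICENSE("GPL");
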
static void do_flush_tlb_all(void* info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
{
	spin_lock(&call_lock);
	__smp_call_function(func, info, nonatomic, wait);
	spin_unlock(&call_lock);
	return 0;
}
+EXPORT_SYMBOL(smp_call_function);
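
smp_call_function() is the locked wrapper around __smp_call_function(): call_lock ensures only one cross-CPU call is being set up at a time, which is why the panic path below has to handle the same lock so carefully. A typical use, in the style of this file's own flush_tlb_all() (sketch only; the exact flag values vary by kernel version and should be treated as an assumption):

	/* run do_flush_tlb_all() on every other CPU and wait for completion */
	smp_call_function(do_flush_tlb_all, NULL, 1, 1);
	do_flush_tlb_all(NULL);		/* then on this CPU too */
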
void smp_stop_cpu(void)
{
	/* Remove this CPU */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}

static void smp_really_stop_cpu(void *dummy)
{
	smp_stop_cpu();
	for (;;)
-		asm("hlt");
+		halt();
}
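
Using halt() instead of the open-coded asm("hlt") is a small correctness fix: the helper carries a "memory" clobber, so the compiler cannot cache memory values across the instruction, and it keeps the stop-CPU idiom in one place. In kernels of this vintage the helper was approximately the following (quoted from memory, so treat as an assumption):

	#define halt() __asm__ __volatile__("hlt": : :"memory")
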
void smp_send_stop(void)
{
	int nolock = 0;

	if (reboot_force)
		return;
	/* Don't deadlock on the call lock in panic */
	if (!spin_trylock(&call_lock)) {
-		/* ignore locking because we have paniced anyways */
+		/* ignore locking because we have panicked anyways */
		nolock = 1;
	}
	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
}
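
The trylock here is the standard "don't deadlock in panic" pattern: panic() can run on a CPU that already holds call_lock, or while another CPU died holding it, so an unconditional spin_lock() could hang the machine at the one moment it most needs to stop. The shape of the pattern in isolation (some_lock and do_work() are placeholders):

	int locked = spin_trylock(&some_lock);	/* never blocks */
	do_work();				/* proceed either way */
	if (locked)
		spin_unlock(&some_lock);	/* only undo what we did */
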
int safe_smp_processor_id(void)
{
-	int apicid, i;
+	unsigned apicid, i;

	if (disable_apic)
		return 0;

	apicid = hard_smp_processor_id();
-	if (x86_cpu_to_apicid[apicid] == apicid)
+	if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
		return apicid;

	for (i = 0; i < NR_CPUS; ++i) {
		if (x86_cpu_to_apicid[i] == apicid)
			return i;
	}
	return 0;
}
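
Both changes in safe_smp_processor_id() guard the same fast path: hard_smp_processor_id() returns the raw local-APIC ID, and on machines with sparse APIC IDs that value can be >= NR_CPUS, so the old code indexed x86_cpu_to_apicid[] out of bounds. Making apicid unsigned lets the single apicid < NR_CPUS comparison reject every invalid value, including anything that would have been negative. The shape of the fix (read_hw_id(), table and MAX are placeholders):

	unsigned idx = read_hw_id();		/* untrusted hardware value */
	if (idx < MAX && table[idx] == idx)	/* bounds check BEFORE the load */
		return idx;
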