volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time = 0; /* XXX */
+unsigned long cache_decay_ticks = 100;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
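The maps above switch from plain integers to cpumask_t, so every test and update goes through the cpumask accessors instead of shift-and-mask arithmetic. A minimal sketch of the accessor idiom, assuming the 2.6-era <linux/cpumask.h> API (the boot code that actually populates these maps is not part of this hunk, and the example_* names are illustrative):

#include <linux/cpumask.h>

/* Record a CPU in the present map, then mark it online:
 * cpu_set() replaces the old 'map |= 1 << cpu'. */
static void example_mark_cpu_online(int cpu)
{
        cpu_set(cpu, phys_cpu_present_map);
        cpu_set(cpu, cpu_online_map);
}

/* Membership test: cpu_isset() replaces 'map & (1 << cpu)'. */
static int example_cpu_is_online(int cpu)
{
        return cpu_isset(cpu, cpu_online_map);
}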
void smp_flush_cache_mm(struct mm_struct *mm)
{
        if(mm->context != NO_CONTEXT) {
-               if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+               cpumask_t cpu_mask = mm->cpu_vm_mask;
+               cpu_clear(smp_processor_id(), cpu_mask);
+               if (!cpus_empty(cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}
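Every converted flush helper repeats the same three-step idiom: copy mm->cpu_vm_mask into a local, clear the current CPU from the copy, and cross-call only if other CPUs remain. The copy matters: clearing the bit in mm->cpu_vm_mask itself would lose the record that this CPU has used the mm. A condensed sketch of the pattern (the helper name is illustrative, not part of the patch):

static void example_cross_call_others(struct mm_struct *mm,
                                      smpfunc_t func, unsigned long arg)
{
        cpumask_t cpu_mask = mm->cpu_vm_mask;           /* local copy */

        cpu_clear(smp_processor_id(), cpu_mask);        /* drop self */
        if (!cpus_empty(cpu_mask))                      /* anyone else? */
                xc1(func, arg);                         /* cross-call them */
        /* the caller still performs its own local_flush_*() afterwards */
}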
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if(mm->context != NO_CONTEXT) {
-               if(mm->cpu_vm_mask != (1 << smp_processor_id())) {
+               cpumask_t cpu_mask = mm->cpu_vm_mask;
+               cpu_clear(smp_processor_id(), cpu_mask);
+               if (!cpus_empty(cpu_mask)) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
                        if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
-                               mm->cpu_vm_mask = (1 << smp_processor_id());
+                               mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
                }
                local_flush_tlb_mm(mm);
        }
}
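The one extra twist in smp_flush_tlb_mm(): once the remote TLBs are flushed and the mm has exactly one user, the current task, ownership can collapse back to this CPU alone. cpumask_of_cpu() builds that singleton mask, the typed equivalent of the old '1 << smp_processor_id()'. A sketch of its semantics, assuming the 2.6-era accessors:

static void example_singleton_mask(void)
{
        cpumask_t mask = cpumask_of_cpu(2);     /* only bit 2 is set */

        BUG_ON(!cpu_isset(2, mask));            /* CPU 2 is a member */
        cpu_clear(2, mask);
        BUG_ON(!cpus_empty(mask));              /* and was the only one */
}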
void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
-               if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+               cpumask_t cpu_mask = mm->cpu_vm_mask;
+               cpu_clear(smp_processor_id(), cpu_mask);
+               if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
                local_flush_cache_range(vma, start, end);
        }
}
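For reference, the xc1/xc2/xc3 helpers used throughout differ only in how many arguments they forward to the cross-call; xc3 is needed here because a range flush carries vma, start, and end. A sketch modeled on sparc32's <asm/smp.h> wrappers (the exact smp_cross_call() signature is assumed, since it is not shown in this patch):

static inline void xc1(smpfunc_t func, unsigned long arg1)
{
        smp_cross_call(func, arg1, 0, 0, 0, 0);
}

static inline void xc3(smpfunc_t func, unsigned long arg1,
                       unsigned long arg2, unsigned long arg3)
{
        smp_cross_call(func, arg1, arg2, arg3, 0, 0);
}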
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
-               if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+               cpumask_t cpu_mask = mm->cpu_vm_mask;
+               cpu_clear(smp_processor_id(), cpu_mask);
+               if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
                local_flush_tlb_range(vma, start, end);
        }
}
void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if(mm->context != NO_CONTEXT) {
-               if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+               cpumask_t cpu_mask = mm->cpu_vm_mask;
+               cpu_clear(smp_processor_id(), cpu_mask);
+               if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
                local_flush_cache_page(vma, page);
        }
}
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if(mm->context != NO_CONTEXT) {
-               if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+               cpumask_t cpu_mask = mm->cpu_vm_mask;
+               cpu_clear(smp_processor_id(), cpu_mask);
+               if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
}
+void smp_reschedule_irq(void)
+{
+       set_need_resched();
+}
+
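The new reschedule IPI handler can stay this small because the real work happens on the way out of the interrupt: set_need_resched() merely raises TIF_NEED_RESCHED, and the return-from-interrupt path notices the flag and calls schedule(). Roughly what the generic helper of this era expands to (shown for illustration; the real definition lives in <linux/sched.h>):

static inline void example_set_need_resched(void)
{
        set_thread_flag(TIF_NEED_RESCHED);      /* flag the current task */
}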
void smp_flush_page_to_ram(unsigned long page)
{
        /* Current theory is that those who call this are the ones
         * who have just dirtied their cache with the page's contents
         * in kernel space, therefore we only run this on the local cpu.
         */
        local_flush_page_to_ram(page);
}
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
-       if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+       cpumask_t cpu_mask = mm->cpu_vm_mask;
+       cpu_clear(smp_processor_id(), cpu_mask);
+       if (!cpus_empty(cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}