* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
extern void build_tlb_refill_handler(void);
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
- "nop; nop; nop; nop; nop; nop;\n\t" \
- ".set reorder\n\t")
+/*
+ * Make sure all entries differ. If they're not different
+ * MIPS32 will take revenge ...
+ */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
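To see what UNIQUE_ENTRYHI() actually produces, here is a minimal standalone
sketch (not part of the patch), assuming 4 kB pages and the 32-bit CKSEG0 base
of 0x80000000: every index gets its own dummy VPN2 in unmapped space, spaced
two pages apart.  Duplicate EntryHi values would let two TLB entries match the
same address, which on MIPS32 ends in a machine check, hence the "revenge"
comment above.

	/* Illustration only; PAGE_SHIFT and CKSEG0 are assumed values. */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define CKSEG0		0x80000000UL
	#define UNIQUE_ENTRYHI(idx) \
		(CKSEG0 + ((unsigned long)(idx) << (PAGE_SHIFT + 1)))

	int main(void)
	{
		int idx;

		for (idx = 0; idx < 4; idx++)
			printf("entry %d -> EntryHi 0x%08lx\n",
			       idx, UNIQUE_ENTRYHI(idx));
		return 0;
	}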
+/* Atomicity and interruptibility */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/smtc.h>
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+ { \
+ unsigned int mvpflags; \
+ local_irq_save(flags);\
+ mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+ evpe(mvpflags); \
+ local_irq_restore(flags); \
+ }
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
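The SMTC variants of ENTER_CRITICAL() and EXIT_CRITICAL() are a little
unusual: the first macro opens a C block and the second closes it, so the
mvpflags temporary holding the dvpe() result stays in scope across the
critical section.  The two must therefore always appear as a matched pair in
the same function.  Below is a minimal userspace sketch of the pattern with
dvpe(), evpe() and the IRQ helpers stubbed out; none of it is part of the
patch.

	#include <stdio.h>

	/* Stubs standing in for the MIPS MT and IRQ primitives. */
	static unsigned int dvpe(void) { puts("other VPEs disabled"); return 1; }
	static void evpe(unsigned int mvp) { (void)mvp; puts("other VPEs enabled"); }
	#define local_irq_save(flags)		((flags) = 1, puts("irqs off"))
	#define local_irq_restore(flags)	((void)(flags), puts("irqs on"))

	/* Same shape as the macros above: ENTER opens a block, EXIT closes it. */
	#define ENTER_CRITICAL(flags) \
		{ \
		unsigned int mvpflags; \
		local_irq_save(flags); \
		mvpflags = dvpe()
	#define EXIT_CRITICAL(flags) \
		evpe(mvpflags); \
		local_irq_restore(flags); \
		}

	int main(void)
	{
		unsigned long flags;

		ENTER_CRITICAL(flags);
		puts("  ...TLB registers would be rewritten here...");
		EXIT_CRITICAL(flags);
		return 0;
	}

On non-SMTC kernels the pair simply collapses to local_irq_save() and
local_irq_restore(), as the #else branch above shows.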
void local_flush_tlb_all(void)
{
unsigned long old_ctx;
int entry;
- local_irq_save(flags);
+ ENTER_CRITICAL(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
write_c0_entrylo0(0);
/* Blast 'em all away. */
while (entry < current_cpu_data.tlbsize) {
- /*
- * Make sure all entries differ. If they're not different
- * MIPS32 will take revenge ...
- */
- write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
write_c0_index(entry);
mtc0_tlbw_hazard();
tlb_write_indexed();
}
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
}
+/*
+ * All entries common to a mm share an asid.  To effectively flush
+ * these entries, we just bump the asid.
+ */
void local_flush_tlb_mm(struct mm_struct *mm)
{
- int cpu = smp_processor_id();
+ int cpu;
- if (cpu_context(cpu, mm) != 0)
- drop_mmu_context(mm,cpu);
+ preempt_disable();
+
+ cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+ drop_mmu_context(mm, cpu);
+ }
+
+ preempt_enable();
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long flags;
int size;
- local_irq_save(flags);
+ ENTER_CRITICAL(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
if (size <= current_cpu_data.tlbsize/2) {
start += (PAGE_SIZE << 1);
mtc0_tlbw_hazard();
tlb_probe();
- BARRIER;
+ tlb_probe_hazard();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
if (idx < 0)
continue;
/* Make sure all entries differ. */
- write_c0_entryhi(CKSEG0 +
- (idx << (PAGE_SHIFT + 1)));
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
}
} else {
drop_mmu_context(mm, cpu);
}
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
}
}
unsigned long flags;
int size;
- local_irq_save(flags);
+ ENTER_CRITICAL(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
if (size <= current_cpu_data.tlbsize / 2) {
start += (PAGE_SIZE << 1);
mtc0_tlbw_hazard();
tlb_probe();
- BARRIER;
+ tlb_probe_hazard();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
if (idx < 0)
continue;
/* Make sure all entries differ. */
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
}
} else {
local_flush_tlb_all();
}
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
newpid = cpu_asid(cpu, vma->vm_mm);
page &= (PAGE_MASK << 1);
- local_irq_save(flags);
+ ENTER_CRITICAL(flags);
oldpid = read_c0_entryhi();
write_c0_entryhi(page | newpid);
mtc0_tlbw_hazard();
tlb_probe();
- BARRIER;
+ tlb_probe_hazard();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
if (idx < 0)
goto finish;
/* Make sure all entries differ. */
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
finish:
write_c0_entryhi(oldpid);
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
}
}
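One convention worth spelling out for readers of the flush paths above and
below: an R4k-style TLB entry maps an even/odd pair of virtual pages through
EntryLo0 and EntryLo1, which is why addresses are rounded down with
(PAGE_MASK << 1) and why both EntryLo registers are always cleared or loaded
together.  A small standalone sketch of the rounding, assuming 4 kB pages
(illustration only, not part of the patch):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long addr = 0x00403000UL;		/* an odd page */
		unsigned long pair = addr & (PAGE_MASK << 1);	/* even/odd pair base */

		printf("0x%08lx belongs to the pair 0x%08lx-0x%08lx\n",
		       addr, pair, pair + 2 * PAGE_SIZE - 1);
		return 0;
	}

The PAGE_SHIFT + 1 spacing in UNIQUE_ENTRYHI() follows from the same pairing.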
unsigned long flags;
int oldpid, idx;
- local_irq_save(flags);
- page &= (PAGE_MASK << 1);
+ ENTER_CRITICAL(flags);
oldpid = read_c0_entryhi();
+ page &= (PAGE_MASK << 1);
write_c0_entryhi(page);
mtc0_tlbw_hazard();
tlb_probe();
- BARRIER;
+ tlb_probe_hazard();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
if (idx >= 0) {
/* Make sure all entries differ. */
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
}
write_c0_entryhi(oldpid);
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
}
/*
{
unsigned long flags;
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
int idx, pid;
if (current->active_mm != vma->vm_mm)
return;
-	pid = read_c0_entryhi() & ASID_MASK;
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
+	pid = read_c0_entryhi() & ASID_MASK;
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
pgdp = pgd_offset(vma->vm_mm, address);
mtc0_tlbw_hazard();
tlb_probe();
- BARRIER;
- pmdp = pmd_offset(pgdp, address);
+ tlb_probe_hazard();
+ pudp = pud_offset(pgdp, address);
+ pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
ptep = pte_offset_map(pmdp, address);
- #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
- write_c0_entrylo0(ptep->pte_high);
- ptep++;
- write_c0_entrylo1(ptep->pte_high);
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+ write_c0_entrylo0(ptep->pte_high);
+ ptep++;
+ write_c0_entrylo1(ptep->pte_high);
#else
- write_c0_entrylo0(pte_val(*ptep++) >> 6);
- write_c0_entrylo1(pte_val(*ptep) >> 6);
+ write_c0_entrylo0(pte_val(*ptep++) >> 6);
+ write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
- write_c0_entryhi(address | pid);
mtc0_tlbw_hazard();
if (idx < 0)
tlb_write_random();
else
tlb_write_indexed();
tlbw_use_hazard();
- write_c0_entryhi(pid);
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
}
#if 0
pte_t *ptep;
int idx;
- local_irq_save(flags);
+ ENTER_CRITICAL(flags);
address &= (PAGE_MASK << 1);
asid = read_c0_entryhi() & ASID_MASK;
write_c0_entryhi(address | asid);
pgdp = pgd_offset(vma->vm_mm, address);
mtc0_tlbw_hazard();
tlb_probe();
- BARRIER;
+ tlb_probe_hazard();
pmdp = pmd_offset(pgdp, address);
idx = read_c0_index();
ptep = pte_offset_map(pmdp, address);
else
tlb_write_indexed();
tlbw_use_hazard();
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
}
#endif
unsigned long old_pagemask;
unsigned long old_ctx;
- local_irq_save(flags);
+ ENTER_CRITICAL(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
wired = read_c0_wired();
write_c0_wired(wired + 1);
write_c0_index(wired);
- BARRIER;
+ tlbw_use_hazard(); /* What is the hazard here? */
write_c0_pagemask(pagemask);
write_c0_entryhi(entryhi);
write_c0_entrylo0(entrylo0);
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
- BARRIER;
+ tlbw_use_hazard(); /* What is the hazard here? */
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
}
/*
unsigned long old_pagemask;
unsigned long old_ctx;
- local_irq_save(flags);
+ ENTER_CRITICAL(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
wired = read_c0_wired();
if (--temp_tlb_entry < wired) {
- printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
+ printk(KERN_WARNING
+ "No TLB space left for add_temporary_entry\n");
ret = -ENOSPC;
goto out;
}
write_c0_entryhi(old_ctx);
write_c0_pagemask(old_pagemask);
out:
- local_irq_restore(flags);
+ EXIT_CRITICAL(flags);
return ret;
}
* is not supported, we assume R4k style. Cpu probing already figured
* out the number of tlb entries.
*/
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * If the TLB is shared in an SMTC system, the total size has
+	 * already been calculated and written into cpu_data's tlbsize.
+	 */
+	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
+		return;
+#endif /* CONFIG_MIPS_MT_SMTC */
reg = read_c0_config1();
if (!((config >> 7) & 3))
c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
+static int __initdata ntlb = 0;
+static int __init set_ntlb(char *str)
+{
+ get_option(&str, &ntlb);
+ return 1;
+}
+
+__setup("ntlb=", set_ntlb);
+
void __init tlb_init(void)
{
unsigned int config = read_c0_config();
probe_tlb(config);
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
+ write_c0_framemask(0);
temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
+ /* Did I tell you that ARC SUCKS? */
+
+ if (ntlb) {
+ if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+ int wired = current_cpu_data.tlbsize - ntlb;
+ write_c0_wired(wired);
+			write_c0_index(wired - 1);
+			printk("Restricting TLB to %d entries\n", ntlb);
+ } else
+ printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+ }
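For reference, the effect of the new boot parameter: passing, say, ntlb=16 on
the kernel command line wires down all but the last 16 TLB entries, leaving
only those 16 available for random replacement (and printing the "Restricting
TLB" message).  ntlb=1 or anything larger than the probed TLB size is rejected
with the warning above, and the default of 0 leaves the TLB untouched.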
+
build_tlb_refill_handler();
}