/* Derived heavily from Linus's Alpha/AXP ASN code... */
-#include <asm/page.h>
-
-/*
- * For the 8k pagesize kernel, use only 10 hw context bits to optimize some shifts in
- * the fast tlbmiss handlers, instead of all 13 bits (specifically for vpte offset
- * calculation). For other pagesizes, this optimization in the tlbhandlers can not be
- * done; but still, all 13 bits can not be used because the tlb handlers use "andcc"
- * instruction which sign extends 13 bit arguments.
- */
-#if PAGE_SHIFT == 13
-#define CTX_VERSION_SHIFT 10
-#define TAG_CONTEXT_BITS 0x3ff
-#else
-#define CTX_VERSION_SHIFT 12
-#define TAG_CONTEXT_BITS 0xfff
-#endif
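/* An aside on the scheme the macros removed here implement: a context
 * value packs a generation "version" in the high bits and a hardware
 * context number in the low CTX_VERSION_SHIFT bits; a context stays
 * valid only while its version matches the global tlb_context_cache.
 * A minimal user-space sketch of that check, using the 12-bit layout
 * above (the DEMO_* names are hypothetical):
 */
#include <stdio.h>

#define DEMO_NR_BITS		12
#define DEMO_VERSION_MASK	((~0UL) << DEMO_NR_BITS)
#define DEMO_FIRST_VERSION	((1UL << DEMO_NR_BITS) + 1UL)
#define DEMO_CTX_VALID(ctx, cache) \
	(!(((ctx) ^ (cache)) & DEMO_VERSION_MASK))

int main(void)
{
	unsigned long cache = DEMO_FIRST_VERSION;
	unsigned long ctx = cache;	/* handed out in this generation */

	printf("valid: %d\n", DEMO_CTX_VALID(ctx, cache));	/* 1 */
	cache += 1UL << DEMO_NR_BITS;	/* generation rollover */
	printf("valid: %d\n", DEMO_CTX_VALID(ctx, cache));	/* 0 */
	return 0;
}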
-
#ifndef __ASSEMBLY__
#include <linux/spinlock.h>
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
-#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
-#define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL)
-#define CTX_VALID(__ctx) \
- (!(((__ctx) ^ tlb_context_cache) & CTX_VERSION_MASK))
-#define CTX_HWBITS(__ctx) ((__ctx) & ~CTX_VERSION_MASK)
-
extern void get_new_mmu_context(struct mm_struct *mm);
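/* For orientation: the allocator behind this declaration lives in
 * arch/sparc64/mm/init.c. It scans mmu_context_bmap for a free number
 * starting just past tlb_context_cache; on exhaustion it starts a new
 * version and flushes all TLBs. A simplified sketch of the common
 * path, reusing the pre-patch macro names from above for brevity (the
 * demo_ prefix and the elisions mark this as schematic, not the
 * kernel's exact code):
 */
void demo_get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;

	spin_lock(&ctx_alloc_lock);
	ctx = (tlb_context_cache + 1) & ~CTX_VERSION_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap,
				     1UL << CTX_VERSION_SHIFT, ctx);
	if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
		/* Number space exhausted: the real code retries from
		 * bit 1 and, failing that, bumps the version and
		 * flushes every TLB; elided here.
		 */
	} else {
		mmu_context_bmap[new_ctx >> 6] |= (1UL << (new_ctx & 63));
		new_ctx |= tlb_context_cache & CTX_VERSION_MASK;
		tlb_context_cache = new_ctx;
		mm->context.sparc64_ctx_val = new_ctx;
	}
	spin_unlock(&ctx_alloc_lock);
}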
/* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated.
* This just needs to set mm->context to an invalid context.
*/
-#define init_new_context(__tsk, __mm) (((__mm)->context = 0UL), 0)
+#define init_new_context(__tsk, __mm) \
+ (((__mm)->context.sparc64_ctx_val = 0UL), 0)
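/* The change above implies mm->context is no longer a bare unsigned
 * long but a struct carrying the context value; a sketch of what
 * asm/mmu.h would provide under that assumption (only sparc64_ctx_val
 * is implied by this patch, and CTX_VALID()/CTX_HWBITS() are then
 * redefined to look at that member):
 */
typedef struct {
	unsigned long sparc64_ctx_val;
} mm_context_t;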
/* Destroy a dead context. This occurs when mmput drops the
 * mm_users count to zero, the mmaps have been released, and
 * all the page tables have been flushed. Our job is to destroy
 * any remaining processor-specific state.
 */
#define destroy_context(__mm) \
do { spin_lock(&ctx_alloc_lock); \
if (CTX_VALID((__mm)->context)) { \
- unsigned long nr = CTX_HWBITS((__mm)->context); \
+ unsigned long nr = CTX_NRBITS((__mm)->context); \
mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
} \
	spin_unlock(&ctx_alloc_lock);	\
} while(0)
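/* The bitmap bookkeeping above packs one in-use bit per context
 * number into 64-bit words: nr>>6 selects the word, nr&63 the bit
 * within it. A standalone illustration of the free operation (the
 * demo_* names are hypothetical):
 */
#include <assert.h>

static unsigned long demo_bmap[(1UL << 12) / 64];	/* one bit per context */

static void demo_ctx_free(unsigned long nr)
{
	demo_bmap[nr >> 6] &= ~(1UL << (nr & 63));	/* clear in-use bit */
}

int main(void)
{
	demo_bmap[1] = 1UL << 5;	/* pretend context 69 is live */
	demo_ctx_free(69);		/* 69 >> 6 == 1, 69 & 63 == 5 */
	assert(demo_bmap[1] == 0);
	return 0;
}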

#define reload_tlbmiss_state(__tsk, __mm) \
do { \
	register unsigned long paddr asm("o5"); \
	register unsigned long pgd_cache asm("o4"); \
paddr = __pa((__mm)->pgd); \
pgd_cache = 0UL; \
- if ((__tsk)->thread_info->flags & _TIF_32BIT) \
- pgd_cache = \
- ((unsigned long)pgd_val((__mm)->pgd[0])) << 11UL; \
+ if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
+ pgd_cache = get_pgd_cache((__mm)->pgd); \
	__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
			     "mov %3, %%g4\n\t" \
			     "mov %0, %%g7\n\t" \
			     "stxa %1, [%%g4] %2\n\t" \
			     "membar #Sync\n\t" \
			     "wrpr %%g0, 0x096, %%pstate" \
			     : /* no outputs */ \
			     : "r" (paddr), "r" (pgd_cache), \
			       "i" (ASI_DMMU), "r" (TSB_REG)); \
} while(0)

#define load_secondary_context(__mm) \
	__asm__ __volatile__("stxa %0, [%1] %2\n\t" \
			     "flush %%g6" \
			     : /* No outputs */ \
			     : "r" (CTX_HWBITS((__mm)->context)), \
-			       "r" (0x10), "i" (ASI_DMMU))
+			       "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU))
extern void __flush_tlb_mm(unsigned long, unsigned long);
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid;
+ int cpu;
+ /* Note: page_table_lock is used here to serialize switch_mm
+ * and activate_mm, and their calls to get_new_mmu_context.
+ * This use of page_table_lock is unrelated to its other uses.
+ */
spin_lock(&mm->page_table_lock);
- if (CTX_VALID(mm->context))
- ctx_valid = 1;
- else
- ctx_valid = 0;
+ ctx_valid = CTX_VALID(mm->context);
+ if (!ctx_valid)
+ get_new_mmu_context(mm);
+ spin_unlock(&mm->page_table_lock);
if (!ctx_valid || (old_mm != mm)) {
- if (!ctx_valid)
- get_new_mmu_context(mm);
-
load_secondary_context(mm);
reload_tlbmiss_state(tsk, mm);
}
- {
- int cpu = smp_processor_id();
-
- /* Even if (mm == old_mm) we _must_ check
- * the cpu_vm_mask. If we do not we could
- * corrupt the TLB state because of how
- * smp_flush_tlb_{page,range,mm} on sparc64
- * and lazy tlb switches work. -DaveM
- */
- if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
- cpu_set(cpu, mm->cpu_vm_mask);
- __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
- }
+ /* Even if (mm == old_mm) we _must_ check
+ * the cpu_vm_mask. If we do not we could
+ * corrupt the TLB state because of how
+ * smp_flush_tlb_{page,range,mm} on sparc64
+ * and lazy tlb switches work. -DaveM
+ */
+ cpu = smp_processor_id();
+ if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
+ cpu_set(cpu, mm->cpu_vm_mask);
+ __flush_tlb_mm(CTX_HWBITS(mm->context),
+ SECONDARY_CONTEXT);
}
- spin_unlock(&mm->page_table_lock);
}
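/* For context on the cpu_vm_mask warning above: the SMP flush paths
 * cross-call only the CPUs present in the mask, so a CPU that skipped
 * the local flush without being subscribed could run on stale TLB
 * entries. A schematic sketch of the consumer side
 * (smp_cross_call_masked and xcall_flush_tlb_mm live in
 * arch/sparc64/kernel/smp.c, but the call shown here is illustrative,
 * not verbatim):
 */
static void demo_smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);

	/* Interrupt only the CPUs subscribed via cpu_vm_mask... */
	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0, mm->cpu_vm_mask);
	/* ...then flush the local TLB for this context. */
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
}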
-extern void __flush_tlb_mm(unsigned long, unsigned long);
-
#define deactivate_mm(tsk,mm) do { } while (0)
/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
int cpu;
+ /* Note: page_table_lock is used here to serialize switch_mm
+ * and activate_mm, and their calls to get_new_mmu_context.
+ * This use of page_table_lock is unrelated to its other uses.
+ */
spin_lock(&mm->page_table_lock);
if (!CTX_VALID(mm->context))
get_new_mmu_context(mm);