#define __PPC64_MMU_CONTEXT_H
#include <linux/config.h>
-#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
-#include <asm/ppcdebug.h>
#include <asm/cputable.h>
return __ffs(b[2]) + 128;
}
-#define NO_CONTEXT 0
-#define FIRST_USER_CONTEXT 0x10 /* First 16 reserved for kernel */
-#define LAST_USER_CONTEXT 0x8000 /* Same as PID_MAX for now... */
-#define NUM_USER_CONTEXT (LAST_USER_CONTEXT-FIRST_USER_CONTEXT)
-
-/* Choose whether we want to implement our context
- * number allocator as a LIFO or FIFO queue.
- */
-#if 1
-#define MMU_CONTEXT_LIFO
-#else
-#define MMU_CONTEXT_FIFO
-#endif
-
-struct mmu_context_queue_t {
- spinlock_t lock;
- long head;
- long tail;
- long size;
- mm_context_id_t elements[LAST_USER_CONTEXT];
-};
-
-extern struct mmu_context_queue_t mmu_context_queue;
-
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
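+/* ppc64 has nothing to do here when a task enters lazy TLB mode */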
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-/*
- * The context number queue has underflowed.
- * Meaning: we tried to push a context number that was freed
- * back onto the context queue and the queue was already full.
- */
-static inline void
-mmu_context_underflow(void)
-{
- printk(KERN_DEBUG "mmu_context_underflow\n");
- panic("mmu_context_underflow");
-}
-
-/*
- * Set up the context for a new address space.
- */
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- long head;
- unsigned long flags;
- /* This does the right thing across a fork (I hope) */
-
- spin_lock_irqsave(&mmu_context_queue.lock, flags);
-
- if (mmu_context_queue.size <= 0) {
- spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
- return -ENOMEM;
- }
-
- head = mmu_context_queue.head;
- mm->context.id = mmu_context_queue.elements[head];
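+/*
+ * Context 0 is reserved as NO_CONTEXT, so valid context ids run
+ * from 1 up to MAX_CONTEXT (2^20 - 1).
+ */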
+#define NO_CONTEXT 0
+#define MAX_CONTEXT (0x100000-1)
- head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
- mmu_context_queue.head = head;
- mmu_context_queue.size--;
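+/*
+ * The context id allocator now lives out of line.  All it has to do
+ * is hand out ids in [1, MAX_CONTEXT] and recycle them on
+ * destroy_context(); a bitmap with bit 0 (NO_CONTEXT) permanently
+ * reserved would be one simple way to implement it.
+ */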
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
- spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
-
- return 0;
-}
-
-/*
- * We're finished using the context for an address space.
- */
-static inline void
-destroy_context(struct mm_struct *mm)
-{
- long index;
- unsigned long flags;
-
- spin_lock_irqsave(&mmu_context_queue.lock, flags);
-
- if (mmu_context_queue.size >= NUM_USER_CONTEXT) {
- spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
- mmu_context_underflow();
- }
-
-#ifdef MMU_CONTEXT_LIFO
- index = mmu_context_queue.head;
- index = (index > 0) ? index-1 : LAST_USER_CONTEXT-1;
- mmu_context_queue.head = index;
-#else
- index = mmu_context_queue.tail;
- index = (index < LAST_USER_CONTEXT-1) ? index+1 : 0;
- mmu_context_queue.tail = index;
-#endif
-
- mmu_context_queue.size++;
- mmu_context_queue.elements[index] = mm->context.id;
-
- spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
-}
-
-extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
-extern void flush_slb(struct task_struct *tsk, struct mm_struct *mm);
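+/*
+ * Switch the user segment mappings: the SLB on cpus that have one
+ * (CPU_FTR_SLB), the segment table (STAB) otherwise.
+ */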
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
-#ifdef CONFIG_ALTIVEC
- asm volatile (
- BEGIN_FTR_SECTION
- "dssall;\n"
- END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- : : );
-#endif /* CONFIG_ALTIVEC */
-
if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
cpu_set(smp_processor_id(), next->cpu_vm_mask);
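+	/* No need to switch user segments if the mm is unchanged */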
if (prev == next)
return;
- if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
- flush_slb(tsk, next);
+#ifdef CONFIG_ALTIVEC
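+	/* dssall stops all active AltiVec/VMX data streams */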
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ asm volatile ("dssall");
+#endif /* CONFIG_ALTIVEC */
+
+ if (cpu_has_feature(CPU_FTR_SLB))
+ switch_slb(tsk, next);
else
- flush_stab(tsk, next);
+ switch_stab(tsk, next);
}
#define deactivate_mm(tsk,mm) do { } while (0)
local_irq_restore(flags);
}
-#define VSID_RANDOMIZER 42470972311UL
-#define VSID_MASK 0xfffffffffUL
-
-
-/* This is only valid for kernel (including vmalloc, imalloc and bolted) EA's
- */
-static inline unsigned long
-get_kernel_vsid( unsigned long ea )
-{
- unsigned long ordinal, vsid;
-
- ordinal = (((ea >> 28) & 0x1fff) * LAST_USER_CONTEXT) | (ea >> 60);
- vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
-
-#ifdef HTABSTRESS
- /* For debug, this path creates a very poor vsid distribuition.
- * A user program can access virtual addresses in the form
- * 0x0yyyyxxxx000 where yyyy = xxxx to cause multiple mappings
- * to hash to the same page table group.
- */
- ordinal = ((ea >> 28) & 0x1fff) | (ea >> 44);
- vsid = ordinal & VSID_MASK;
-#endif /* HTABSTRESS */
-
- return vsid;
-}
-
-/* This is only valid for user EA's (user EA's do not exceed 2^41 (EADDR_SIZE))
- */
-static inline unsigned long
-get_vsid( unsigned long context, unsigned long ea )
-{
- unsigned long ordinal, vsid;
-
- ordinal = (((ea >> 28) & 0x1fff) * LAST_USER_CONTEXT) | context;
- vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
-
-#ifdef HTABSTRESS
- /* See comment above. */
- ordinal = ((ea >> 28) & 0x1fff) | (context << 16);
- vsid = ordinal & VSID_MASK;
-#endif /* HTABSTRESS */
-
- return vsid;
-}
-
#endif /* __PPC64_MMU_CONTEXT_H */