*/
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H
+#ifdef __KERNEL__
#include <asm/cpu/mmu_context.h>
#include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/io.h>
/*
* Get MMU context if needed.
*/
-static __inline__ void
-get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm)
{
- extern void flush_tlb_all(void);
unsigned long mc = mmu_context_cache;
/* Check if we have old version of context. */
- if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+ if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
/* It's up to date, do nothing */
return;
/* It's old; we need to get a new context with a new version. */
mc = ++mmu_context_cache;
if (!(mc & MMU_CONTEXT_ASID_MASK)) {
/*
* We have exhausted the ASIDs of this version.
* Flush all TLB and start new cycle.
*/
flush_tlb_all();
+
/*
* Fix version; Note that we avoid version #0
* to distinguish NO_CONTEXT.
*/
if (!mc)
mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
}
- mm->context = mc;
+ mm->context.id = mc;
}
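+
+/*
+ * Note: mmu_context_cache packs an 8-bit ASID in its low bits
+ * (MMU_CONTEXT_ASID_MASK) with a generation counter above it
+ * (MMU_CONTEXT_VERSION_MASK), so the single XOR-and-mask test above
+ * is enough to tell whether mm->context.id belongs to the current
+ * generation.
+ */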
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
-static __inline__ int init_new_context(struct task_struct *tsk,
+static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
- mm->context = NO_CONTEXT;
-
+ mm->context.id = NO_CONTEXT;
return 0;
}
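+
+/*
+ * NO_CONTEXT means "no ASID assigned yet"; the first
+ * activate_context() on this mm falls into get_mmu_context() and
+ * allocates one lazily.
+ */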
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
-static __inline__ void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
{
/* Do nothing */
}
-static __inline__ void set_asid(unsigned long asid)
+static inline void set_asid(unsigned long asid)
{
unsigned long __dummy;

__asm__ __volatile__ ("mov.l %2, %0\n\t"
"and %3, %0\n\t"
"or %1, %0\n\t"
"mov.l %0, %2"
: "=&r" (__dummy)
: "r" (asid), "m" (__m(MMU_PTEH)),
"r" (0xffffff00));
}
-static __inline__ unsigned long get_asid(void)
+static inline unsigned long get_asid(void)
{
unsigned long asid;

__asm__ __volatile__ ("mov.l %1, %0"
: "=r" (asid)
: "m" (__m(MMU_PTEH)));
asid &= MMU_CONTEXT_ASID_MASK;
return asid;
}
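+
+/*
+ * Both helpers operate on the ASID field in the low 8 bits of
+ * MMU_PTEH; the 0xffffff00 mask in set_asid() preserves the VPN
+ * bits while the ASID is swapped.
+ */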
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
-static __inline__ void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm)
{
get_mmu_context(mm);
- set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+ set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
}
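+
+/*
+ * TLB entries on SH are tagged with the ASID, so loading a new ASID
+ * into MMU_PTEH is what actually switches the address space; no TLB
+ * flush is needed on a context switch while the version still matches.
+ */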
-/* MMU_TTB can be used for optimizing the fault handling.
- (Currently not used) */
-static __inline__ void switch_mm(struct mm_struct *prev,
- struct mm_struct *next,
- struct task_struct *tsk)
+/* MMU_TTB is used for optimizing the fault handling. */
+static inline void set_TTB(pgd_t *pgd)
{
- if (likely(prev != next)) {
- unsigned long __pgdir = (unsigned long)next->pgd;
+ ctrl_outl((unsigned long)pgd, MMU_TTB);
+}
- __asm__ __volatile__("mov.l %0, %1"
- : /* no output */
- : "r" (__pgdir), "m" (__m(MMU_TTB)));
+static inline pgd_t *get_TTB(void)
+{
+ return (pgd_t *)ctrl_inl(MMU_TTB);
+}
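+
+/*
+ * SH refills the TLB in software, which leaves the TTB register free
+ * for the kernel's own use; keeping the current pgd there lets the
+ * fault path locate the page tables without dereferencing current->mm.
+ */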
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (likely(prev != next)) {
+ set_TTB(next->pgd);
activate_context(next);
}
}
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL)
-static __inline__ void
+static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
* If this processor has an MMU, we need methods to turn it off/on ..
* paging_init() will also have to be updated for the processor in
* question.
*/
static inline void enable_mmu(void)
{
/* Enable MMU */
ctrl_outl(MMU_CONTROL_INIT, MMUCR);
-
- /* The manual suggests doing some nops after turning on the MMU */
- __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop\n\t");
+ ctrl_barrier();
if (mmu_context_cache == NO_CONTEXT)
mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
}

static inline void disable_mmu(void)
{
unsigned long cr;

cr = ctrl_inl(MMUCR);
cr &= ~MMU_CONTROL_INIT;
ctrl_outl(cr, MMUCR);
- __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop\n\t");
+
+ ctrl_barrier();
}
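+
+/*
+ * ctrl_barrier() stands in for the open-coded nop sequences removed
+ * above; the manual suggests such a delay after toggling the MMU so
+ * that subsequently fetched instructions see the new MMUCR setting.
+ */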
#else
/*
* MMU control handlers for processors lacking memory
* management hardware.
*/
#define enable_mmu() do { BUG(); } while (0)
#define disable_mmu() do { BUG(); } while (0)
#endif
+#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMU_CONTEXT_H */