vserver 2.0 rc7
[linux-2.6.git] arch/ppc64/mm/slb.c
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index 12493ca..244150a 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -33,8 +33,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void create_slbe(unsigned long ea, unsigned long vsid,
-                              unsigned long flags, unsigned long entry)
+static inline void create_slbe(unsigned long ea, unsigned long flags,
+                              unsigned long entry)
 {
        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, flags)),
@@ -51,7 +51,7 @@ static void slb_flush_and_rebolt(void)
 
        WARN_ON(!irqs_disabled());
 
-       if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+       if (cpu_has_feature(CPU_FTR_16M_PAGE))
                ksp_flags |= SLB_VSID_L;
 
        ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
@@ -139,15 +139,14 @@ void slb_initialize(void)
        unsigned long flags = SLB_VSID_KERNEL;
 
        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
-       if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+       if (cpu_has_feature(CPU_FTR_16M_PAGE))
                flags |= SLB_VSID_L;
 
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
-       create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), flags, 0);
-       create_slbe(VMALLOCBASE, get_kernel_vsid(KERNELBASE),
-                   SLB_VSID_KERNEL, 1);
+       create_slbe(KERNELBASE, flags, 0);
+       create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
        /* We don't bolt the stack for the time being - we're in boot,
         * so the stack is in the bolted segment.  By the time it goes
         * elsewhere, we'll call _switch() which will bolt in the new
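
Note on the first hunk: its context window ends mid-statement, so the full
post-patch create_slbe() is not visible above. For reference, a reconstruction
from mainline trees of this era follows; the second asm operand
(mk_esid_data(ea, entry)) and the "memory" clobber are assumptions taken from
mainline, not shown in the hunk itself:

    static inline void create_slbe(unsigned long ea, unsigned long flags,
                                   unsigned long entry)
    {
            /* The VSID is derived from ea inside mk_vsid_data(), so the
             * old vsid argument was never used; dropping it changes no
             * behaviour.  It also removes the misleading
             * get_kernel_vsid(KERNELBASE) at the VMALLOCBASE call site. */
            asm volatile("slbmte  %0,%1" :
                         : "r" (mk_vsid_data(ea, flags)),
                           "r" (mk_esid_data(ea, entry))
                         : "memory" );
    }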
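The cpu_has_feature() conversions are mechanical: the helper wraps the same
cur_cpu_spec->cpu_features bitmask test behind a named predicate. A minimal
sketch of the equivalence, assuming the include/asm-ppc64/cputable.h helper of
this era (the exact form in this tree may differ):

    /* Sketch only: the open-coded test the helper replaces. */
    static inline int cpu_has_feature(unsigned long feature)
    {
            return (cur_cpu_spec->cpu_features & feature) != 0;
    }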