X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fppc64%2Fkernel%2Fstab.c;h=3eea165b2433036f841089bd431690f57cbcdbb0;hb=9bf4aaab3e101692164d49b7ca357651eb691cb6;hp=b3ac3ac628d175bcbdd26e1c45db32ce3fe86b91;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/arch/ppc64/kernel/stab.c b/arch/ppc64/kernel/stab.c
index b3ac3ac62..3eea165b2 100644
--- a/arch/ppc64/kernel/stab.c
+++ b/arch/ppc64/kernel/stab.c
@@ -20,9 +20,10 @@
 #include 
 #include 
 
-static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
-static void make_slbe(unsigned long esid, unsigned long vsid, int large,
-                      int kernel_segment);
+static int make_ste(unsigned long stab, unsigned long esid,
+                    unsigned long vsid);
+
+void slb_initialize(void);
 
 /*
  * Build an entry for the base kernel segment and put it into
@@ -31,29 +32,13 @@ static void make_slbe(unsigned long esid, unsigned long vsid, int large,
  */
 void stab_initialize(unsigned long stab)
 {
-        unsigned long esid, vsid;
-        int seg0_largepages = 0;
-
-        esid = GET_ESID(KERNELBASE);
-        vsid = get_kernel_vsid(esid << SID_SHIFT);
-
-        if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
-                seg0_largepages = 1;
+        unsigned long vsid = get_kernel_vsid(KERNELBASE);
 
         if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
-                /* Invalidate the entire SLB & all the ERATS */
-#ifdef CONFIG_PPC_ISERIES
-                asm volatile("isync; slbia; isync":::"memory");
-#else
-                asm volatile("isync":::"memory");
-                asm volatile("slbmte %0,%0"::"r" (0) : "memory");
-                asm volatile("isync; slbia; isync":::"memory");
-                make_slbe(esid, vsid, seg0_largepages, 1);
-                asm volatile("isync":::"memory");
-#endif
+                slb_initialize();
         } else {
                 asm volatile("isync; slbia; isync":::"memory");
-                make_ste(stab, esid, vsid);
+                make_ste(stab, GET_ESID(KERNELBASE), vsid);
 
                 /* Order update */
                 asm volatile("sync":::"memory");
@@ -109,7 +94,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
          * Could not find empty entry, pick one with a round robin selection.
          * Search all entries in the two groups.
          */
-        castout_entry = get_paca()->xStab_data.next_round_robin;
+        castout_entry = get_paca()->stab_rr;
         for (i = 0; i < 16; i++) {
                 if (castout_entry < 8) {
                         global_entry = (esid & 0x1f) << 3;
@@ -128,7 +113,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
                 castout_entry = (castout_entry + 1) & 0xf;
         }
 
-        get_paca()->xStab_data.next_round_robin = (castout_entry + 1) & 0xf;
+        get_paca()->stab_rr = (castout_entry + 1) & 0xf;
 
         /* Modify the old entry to the new value. */
 
@@ -161,7 +146,7 @@ static inline void __ste_allocate(unsigned long esid, unsigned long vsid)
         unsigned long offset;
         int region_id = REGION_ID(esid << SID_SHIFT);
 
-        stab_entry = make_ste(get_paca()->xStab_data.virt, esid, vsid);
+        stab_entry = make_ste(get_paca()->stab_addr, esid, vsid);
 
         if (region_id != USER_REGION_ID)
                 return;
@@ -255,7 +240,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 /* Flush all user entries from the segment table of the current processor. */
 void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
-        STE *stab = (STE *) get_paca()->xStab_data.virt;
+        STE *stab = (STE *) get_paca()->stab_addr;
         STE *ste;
         unsigned long offset = __get_cpu_var(stab_cache_ptr);
 
@@ -294,217 +279,3 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 
         preload_stab(tsk, mm);
 }
-
-/*
- * SLB stuff
- */
-
-/*
- * Create a segment buffer entry for the given esid/vsid pair.
- *
- * NOTE: A context syncronising instruction is required before and after
- * this, in the common case we use exception entry and rfid.
- */
-static void make_slbe(unsigned long esid, unsigned long vsid, int large,
-                      int kernel_segment)
-{
-        unsigned long entry, castout_entry;
-        union {
-                unsigned long word0;
-                slb_dword0 data;
-        } esid_data;
-        union {
-                unsigned long word0;
-                slb_dword1 data;
-        } vsid_data;
-
-        /*
-         * We take the next entry, round robin. Previously we tried
-         * to find a free slot first but that took too long. Unfortunately
-         * we dont have any LRU information to help us choose a slot.
-         */
-
-        /*
-         * Never cast out the segment for our kernel stack. Since we
-         * dont invalidate the ERAT we could have a valid translation
-         * for the kernel stack during the first part of exception exit
-         * which gets invalidated due to a tlbie from another cpu at a
-         * non recoverable point (after setting srr0/1) - Anton
-         */
-        castout_entry = get_paca()->xStab_data.next_round_robin;
-        do {
-                entry = castout_entry;
-                castout_entry++;
-                if (castout_entry >= naca->slb_size)
-                        castout_entry = 1;
-                asm volatile("slbmfee %0,%1" : "=r" (esid_data) : "r" (entry));
-        } while (esid_data.data.v &&
-                 esid_data.data.esid == GET_ESID(__get_SP()));
-
-        get_paca()->xStab_data.next_round_robin = castout_entry;
-
-        /* slbie not needed as the previous mapping is still valid. */
-
-        /*
-         * Write the new SLB entry.
-         */
-        vsid_data.word0 = 0;
-        vsid_data.data.vsid = vsid;
-        vsid_data.data.kp = 1;
-        if (large)
-                vsid_data.data.l = 1;
-        if (kernel_segment)
-                vsid_data.data.c = 1;
-        else
-                vsid_data.data.ks = 1;
-
-        esid_data.word0 = 0;
-        esid_data.data.esid = esid;
-        esid_data.data.v = 1;
-        esid_data.data.index = entry;
-
-        /*
-         * No need for an isync before or after this slbmte. The exception
-         * we enter with and the rfid we exit with are context synchronizing.
-         */
-        asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data));
-}
-
-static inline void __slb_allocate(unsigned long esid, unsigned long vsid,
-                                  mm_context_t context)
-{
-        int large = 0;
-        int region_id = REGION_ID(esid << SID_SHIFT);
-        unsigned long offset;
-
-        if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) {
-                if (region_id == KERNEL_REGION_ID)
-                        large = 1;
-                else if (region_id == USER_REGION_ID)
-                        large = in_hugepage_area(context, esid << SID_SHIFT);
-        }
-
-        make_slbe(esid, vsid, large, region_id != USER_REGION_ID);
-
-        if (region_id != USER_REGION_ID)
-                return;
-
-        offset = __get_cpu_var(stab_cache_ptr);
-        if (offset < NR_STAB_CACHE_ENTRIES)
-                __get_cpu_var(stab_cache[offset++]) = esid;
-        else
-                offset = NR_STAB_CACHE_ENTRIES+1;
-        __get_cpu_var(stab_cache_ptr) = offset;
-}
-
-/*
- * Allocate a segment table entry for the given ea.
- */
-int slb_allocate(unsigned long ea)
-{
-        unsigned long vsid, esid;
-        mm_context_t context;
-
-        /* Check for invalid effective addresses. */
-        if (unlikely(!IS_VALID_EA(ea)))
-                return 1;
-
-        /* Kernel or user address? */
-        if (REGION_ID(ea) >= KERNEL_REGION_ID) {
-                context = KERNEL_CONTEXT(ea);
-                vsid = get_kernel_vsid(ea);
-        } else {
-                if (unlikely(!current->mm))
-                        return 1;
-
-                context = current->mm->context;
-                vsid = get_vsid(context.id, ea);
-        }
-
-        esid = GET_ESID(ea);
-        __slb_allocate(esid, vsid, context);
-
-        return 0;
-}
-
-/*
- * preload some userspace segments into the SLB.
- */
-static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
-{
-        unsigned long pc = KSTK_EIP(tsk);
-        unsigned long stack = KSTK_ESP(tsk);
-        unsigned long unmapped_base;
-        unsigned long pc_esid = GET_ESID(pc);
-        unsigned long stack_esid = GET_ESID(stack);
-        unsigned long unmapped_base_esid;
-        unsigned long vsid;
-
-        if (test_tsk_thread_flag(tsk, TIF_32BIT))
-                unmapped_base = TASK_UNMAPPED_BASE_USER32;
-        else
-                unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
-        unmapped_base_esid = GET_ESID(unmapped_base);
-
-        if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
-                return;
-        vsid = get_vsid(mm->context.id, pc);
-        __slb_allocate(pc_esid, vsid, mm->context);
-
-        if (pc_esid == stack_esid)
-                return;
-
-        if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
-                return;
-        vsid = get_vsid(mm->context.id, stack);
-        __slb_allocate(stack_esid, vsid, mm->context);
-
-        if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
-                return;
-
-        if (!IS_VALID_EA(unmapped_base) ||
-            (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
-                return;
-        vsid = get_vsid(mm->context.id, unmapped_base);
-        __slb_allocate(unmapped_base_esid, vsid, mm->context);
-}
-
-/* Flush all user entries from the segment table of the current processor. */
-void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
-{
-        unsigned long offset = __get_cpu_var(stab_cache_ptr);
-        union {
-                unsigned long word0;
-                slb_dword0 data;
-        } esid_data;
-
-
-        if (offset <= NR_STAB_CACHE_ENTRIES) {
-                int i;
-                asm volatile("isync" : : : "memory");
-                for (i = 0; i < offset; i++) {
-                        esid_data.word0 = 0;
-                        esid_data.data.esid = __get_cpu_var(stab_cache[i]);
-                        asm volatile("slbie %0" : : "r" (esid_data));
-                }
-                asm volatile("isync" : : : "memory");
-        } else {
-                asm volatile("isync; slbia; isync" : : : "memory");
-        }
-
-        /* Workaround POWER5 < DD2.1 issue */
-        if (offset == 1 || offset > NR_STAB_CACHE_ENTRIES) {
-                /*
-                 * flush segment in EEH region, we dont normally access
-                 * addresses in this region.
-                 */
-                esid_data.word0 = 0;
-                esid_data.data.esid = EEH_REGION_ID;
-                asm volatile("slbie %0" : : "r" (esid_data));
-        }
-
-        __get_cpu_var(stab_cache_ptr) = 0;
-
-        preload_slb(tsk, mm);
-}
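
The make_slbe() code removed by this patch (SLB handling moves out of stab.c, with slb_initialize() now declared above) picks its victim slot round robin, and the do/while loop refuses to cast out a valid entry whose ESID matches the current kernel stack, since that translation must stay usable until the rfid at exception exit. The plain C sketch below models only that selection policy; SLB_SIZE, struct slb_entry and pick_castout() are made-up user-space stand-ins for naca->slb_size, the slbmfee read-back and the in-kernel loop, not the real slbmfee/slbmte sequence.

#include <stdio.h>

#define SLB_SIZE 64                     /* stand-in for naca->slb_size */

struct slb_entry {
        int valid;                      /* stand-in for esid_data.data.v */
        unsigned long esid;             /* stand-in for esid_data.data.esid */
};

/*
 * Pick the next victim slot round robin, starting at *rr, but never pick
 * a valid entry whose ESID matches the kernel-stack ESID.  Wrapping back
 * to slot 1 rather than 0 mirrors the original loop, which never recycles
 * the entry installed for the base kernel segment.
 */
static unsigned long pick_castout(struct slb_entry *slb, unsigned long *rr,
                                  unsigned long stack_esid)
{
        unsigned long entry, castout = *rr;

        do {
                entry = castout;
                if (++castout >= SLB_SIZE)
                        castout = 1;
        } while (slb[entry].valid && slb[entry].esid == stack_esid);

        *rr = castout;                  /* remember where to start next time */
        return entry;
}

int main(void)
{
        struct slb_entry slb[SLB_SIZE] = { 0 };
        unsigned long rr = 1, stack_esid = 0xc00;

        slb[2].valid = 1;
        slb[2].esid = stack_esid;       /* slot 2 covers the kernel stack */

        /* Slot 1 is free and slot 2 is protected, so we expect 1 then 3. */
        printf("victim: %lu\n", pick_castout(slb, &rr, stack_esid));
        printf("victim: %lu\n", pick_castout(slb, &rr, stack_esid));
        return 0;
}

As the removed comment notes, the kernel keeps no LRU information for SLB slots, so a simple round-robin pointer stored per CPU (next_round_robin here, stab_rr after this patch) is the whole replacement policy.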