/*
 * arch/ppc64/mm/slb_low.S
 *
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
li r11,0
b slb_finish_load
1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
	 * will be patched by the kernel at boot
	 */
	/* Kernel virtual (vmalloc/ioremap) segment: load the SLB encoding
	 * bits into r11 and branch to the common entry-creation code.
	 * The "li r11,0" immediate is a placeholder patched at boot time
	 * (see the comment above); do not fold or reorder it.
	 */
_GLOBAL(slb_miss_kernel_load_virtual)
	li	r11,0			/* patched at boot with real SLLP bits */
	b	slb_finish_load
1:
#endif /* CONFIG_HUGETLB_PAGE */
- lhz r11,PACACONTEXTSLLP(r13)
+_GLOBAL(slb_miss_user_load_normal)
+ li r11,0
+
2:
ld r9,PACACONTEXTID(r13)
rldimi r10,r9,USER_ESID_BITS,0