* Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
+*/
#include <linux/config.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
-#include <asm/tsb.h>
.text
.align 32
-kvmap_itlb:
- /* g6: TAG TARGET */
- mov TLB_TAG_ACCESS, %g4
- ldxa [%g4] ASI_IMMU, %g4
-
- /* sun4v_itlb_miss branches here with the missing virtual
- * address already loaded into %g4
+/*
+ * On a second level vpte miss, check whether the original fault is to the OBP
+ * range (note that this is only possible for an instruction miss; data misses
+ * to the OBP range do not use vpte). If so, go back directly to the faulting
+ * address: we want to read %tpc, since otherwise we have no way of knowing the
+ * 8K-aligned faulting address if we are using a >8K kernel page size. This
+ * also ensures no vpte range addresses are dropped into the TLB while OBP is
+ * executing (see the inherit_locked_prom_mappings() rant).
+ */
+sparc64_vpte_nucleus:
+ /* Note that kvmap below has verified that the address is
+ * in the range MODULES_VADDR --> VMALLOC_END already. So
+ * here we need only check if it is an OBP address or not.
+ */
-kvmap_itlb_4v:
-
-kvmap_itlb_nonlinear:
- /* Catch kernel NULL pointer calls. */
- sethi %hi(PAGE_SIZE), %g5
- cmp %g4, %g5
- bleu,pn %xcc, kvmap_dtlb_longpath
- nop
-
- KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
-
-kvmap_itlb_tsb_miss:
sethi %hi(LOW_OBP_ADDRESS), %g5
cmp %g4, %g5
- blu,pn %xcc, kvmap_itlb_vmalloc_addr
+ blu,pn %xcc, kern_vpte
mov 0x1, %g5
sllx %g5, 32, %g5
cmp %g4, %g5
- blu,pn %xcc, kvmap_itlb_obp
+ blu,pn %xcc, vpte_insn_obp
nop
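+
+ /* To summarize the checks above: %g4 below LOW_OBP_ADDRESS branched
+ * to kern_vpte, %g4 in [LOW_OBP_ADDRESS, 4GB) goes to vpte_insn_obp,
+ * and anything at or above 4GB falls through into kern_vpte as well.
+ */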
-kvmap_itlb_vmalloc_addr:
- KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
+ /* These two instructions are patched by paging_init(). */
+kern_vpte:
+ sethi %hi(swapper_pgd_zero), %g5
+ lduw [%g5 + %lo(swapper_pgd_zero)], %g5
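+
+ /* pgd entries are 32-bit on sparc64, hence the lduw above;
+ * swapper_pgd_zero presumably caches pgd_val(swapper_pg_dir[0])
+ * so the kernel PGD can be fetched with a single load.
+ */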
- KTSB_LOCK_TAG(%g1, %g2, %g7)
+ /* With kernel PGD in %g5, branch back into dtlb_backend. */
+ ba,pt %xcc, sparc64_kpte_continue
+ andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
- /* Load and check PTE. */
- ldxa [%g5] ASI_PHYS_USE_EC, %g5
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- brgez,a,pn %g5, kvmap_itlb_longpath
- KTSB_STORE(%g1, %g7)
-
- KTSB_WRITE(%g1, %g5, %g6)
-
- /* fallthrough to TLB load */
-
-kvmap_itlb_load:
-
-661: stxa %g5, [%g0] ASI_ITLB_DATA_IN
- retry
- .section .sun4v_2insn_patch, "ax"
- .word 661b
- nop
- nop
- .previous
-
- /* For sun4v the ASI_ITLB_DATA_IN store and the retry
- * instruction get nop'd out and we get here to branch
- * to the sun4v tlb load code. The registers are setup
- * as follows:
- *
- * %g4: vaddr
- * %g5: PTE
- * %g6: TAG
- *
- * The sun4v TLB load wants the PTE in %g3 so we fix that
- * up here.
+vpte_noent:
+ /* Restore previous TAG_ACCESS; %g5 is zero, and we will
+ * skip over the trap instruction so that the top level
+ * TLB miss handler will think this %g5 value is just an
+ * invalid PTE, thus branching to full fault processing.
+ */
- ba,pt %xcc, sun4v_itlb_load
- mov %g5, %g3
-
-kvmap_itlb_longpath:
-
-661: rdpr %pstate, %g5
- wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
- .section .sun4v_2insn_patch, "ax"
- .word 661b
- SET_GL(1)
- nop
- .previous
-
- rdpr %tpc, %g5
- ba,pt %xcc, sparc64_realfault_common
- mov FAULT_CODE_ITLB, %g4
-
-kvmap_itlb_obp:
- OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
-
- KTSB_LOCK_TAG(%g1, %g2, %g7)
-
- KTSB_WRITE(%g1, %g5, %g6)
-
- ba,pt %xcc, kvmap_itlb_load
- nop
-
-kvmap_dtlb_obp:
- OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
-
- KTSB_LOCK_TAG(%g1, %g2, %g7)
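+ /* TLB_SFSR is 0x18, so %g1 + %g1 yields 0x30, the TAG_ACCESS
+ * offset within ASI_DMMU; this restores TAG_ACCESS while leaving
+ * TLB_SFSR itself in %g1. 'done' (unlike 'retry') resumes at
+ * %tnpc, skipping the trapping instruction as described above.
+ */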
+ mov TLB_SFSR, %g1
+ stxa %g4, [%g1 + %g1] ASI_DMMU
+ done
+
+vpte_insn_obp:
+ /* Behave as if we are at TL0. */
+ wrpr %g0, 1, %tl
+ rdpr %tpc, %g4 /* Find original faulting iaddr */
+ srlx %g4, 13, %g4 /* Throw out context bits */
+ sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
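+ /* We took this miss at TL=2 (a vpte miss nested inside the
+ * original instruction miss); dropping to TL=1 above makes %tpc
+ * and the final retry refer to the original faulting instruction.
+ */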
+
+ /* Restore previous TAG_ACCESS. */
+ mov TLB_SFSR, %g1
+ stxa %g4, [%g1 + %g1] ASI_IMMU
+
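+ /* prom_trans[] entries are three doublewords each: virtual base at
+ * +0x00, length at +0x08, and PTE data at +0x10; a zero base
+ * terminates the table. Note the annulled delay slot after brz,a
+ * below executes only when the branch is taken, restoring %g1 on
+ * the failure path.
+ */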
+ sethi %hi(prom_trans), %g5
+ or %g5, %lo(prom_trans), %g5
+
+1: ldx [%g5 + 0x00], %g6 ! base
+ brz,a,pn %g6, longpath ! no more entries, fail
+ mov TLB_SFSR, %g1 ! and restore %g1
+ ldx [%g5 + 0x08], %g1 ! len
+ add %g6, %g1, %g1 ! end
+ cmp %g6, %g4
+ bgu,pt %xcc, 2f
+ cmp %g4, %g1
+ bgeu,pt %xcc, 2f
+ ldx [%g5 + 0x10], %g1 ! PTE
+
+ /* TLB load, restore %g1, and return from trap. */
+ sub %g4, %g6, %g6
+ add %g1, %g6, %g5
+ mov TLB_SFSR, %g1
+ stxa %g5, [%g0] ASI_ITLB_DATA_IN
+ retry
- KTSB_WRITE(%g1, %g5, %g6)
+2: ba,pt %xcc, 1b
+ add %g5, (3 * 8), %g5 ! next entry
+
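+ /* kvmap_do_obp below is the data-miss twin of vpte_insn_obp: the
+ * same prom_trans scan, but it loads the DTLB and needs no %tl
+ * adjustment since it is entered from the first level data miss.
+ */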
+kvmap_do_obp:
+ sethi %hi(prom_trans), %g5
+ or %g5, %lo(prom_trans), %g5
+ srlx %g4, 13, %g4
+ sllx %g4, 13, %g4
+
+1: ldx [%g5 + 0x00], %g6 ! base
+ brz,a,pn %g6, longpath ! no more entries, fail
+ mov TLB_SFSR, %g1 ! and restore %g1
+ ldx [%g5 + 0x08], %g1 ! len
+ add %g6, %g1, %g1 ! end
+ cmp %g6, %g4
+ bgu,pt %xcc, 2f
+ cmp %g4, %g1
+ bgeu,pt %xcc, 2f
+ ldx [%g5 + 0x10], %g1 ! PTE
+
+ /* TLB load, restore %g1, and return from trap. */
+ sub %g4, %g6, %g6
+ add %g1, %g6, %g5
+ mov TLB_SFSR, %g1
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN
+ retry
- ba,pt %xcc, kvmap_dtlb_load
- nop
+2: ba,pt %xcc, 1b
+ add %g5, (3 * 8), %g5 ! next entry
+/*
+ * On a first level data miss, check whether this is to the OBP range (note
+ * that such accesses can be made by the PROM, as well as by the kernel using
+ * prom_getproperty on "address"), and if so, do not use vpte access ...
+ * rather, use information saved during inherit_prom_mappings() with the 8K
+ * page size.
+ */
.align 32
-kvmap_dtlb_tsb4m_load:
- KTSB_LOCK_TAG(%g1, %g2, %g7)
- KTSB_WRITE(%g1, %g5, %g6)
- ba,pt %xcc, kvmap_dtlb_load
+kvmap:
+ brgez,pn %g4, kvmap_nonlinear
nop
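+
+ /* Linear kernel addresses (PAGE_OFFSET space) have bit 63 set, so
+ * only a negative %g4 falls through to the xor-based linear PTE
+ * computation below.
+ */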
-kvmap_dtlb:
- /* %g6: TAG TARGET */
- mov TLB_TAG_ACCESS, %g4
- ldxa [%g4] ASI_DMMU, %g4
-
- /* sun4v_dtlb_miss branches here with the missing virtual
- * address already loaded into %g4
- */
-kvmap_dtlb_4v:
- brgez,pn %g4, kvmap_dtlb_nonlinear
- nop
-
- /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
- KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
-
- /* TSB entry address left in %g1, lookup linear PTE.
- * Must preserve %g1 and %g6 (TAG).
- */
-kvmap_dtlb_tsb4m_miss:
- sethi %hi(kpte_linear_bitmap), %g2
- or %g2, %lo(kpte_linear_bitmap), %g2
-
- /* Clear the PAGE_OFFSET top virtual bits, then shift
- * down to get a 256MB physical address index.
- */
- sllx %g4, 21, %g5
- mov 1, %g7
- srlx %g5, 21 + 28, %g5
-
- /* Don't try this at home kids... this depends upon srlx
- * only taking the low 6 bits of the shift count in %g5.
- */
- sllx %g7, %g5, %g7
-
- /* Divide by 64 to get the offset into the bitmask. */
- srlx %g5, 6, %g5
- sllx %g5, 3, %g5
-
- /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
- ldx [%g2 + %g5], %g2
- andcc %g2, %g7, %g0
- sethi %hi(kern_linear_pte_xor), %g5
- or %g5, %lo(kern_linear_pte_xor), %g5
- bne,a,pt %xcc, 1f
- add %g5, 8, %g5
-
-1: ldx [%g5], %g2
-
+#ifdef CONFIG_DEBUG_PAGEALLOC
.globl kvmap_linear_patch
kvmap_linear_patch:
- ba,pt %xcc, kvmap_dtlb_tsb4m_load
+#endif
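+ /* For the linear mapping the PTE is computed rather than looked up:
+ * %g2 is expected to hold a precomputed constant (set up by the
+ * dtlb miss handler in dtlb_base.S) such that xor'ing it with the
+ * vaddr yields the PTE.
+ */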
+ ba,pt %xcc, kvmap_load
xor %g2, %g4, %g5
-kvmap_dtlb_vmalloc_addr:
- KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
-
- KTSB_LOCK_TAG(%g1, %g2, %g7)
-
- /* Load and check PTE. */
- ldxa [%g5] ASI_PHYS_USE_EC, %g5
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- brgez,a,pn %g5, kvmap_dtlb_longpath
- KTSB_STORE(%g1, %g7)
-
- KTSB_WRITE(%g1, %g5, %g6)
-
- /* fallthrough to TLB load */
-
-kvmap_dtlb_load:
-
-661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
- retry
- .section .sun4v_2insn_patch, "ax"
- .word 661b
- nop
- nop
- .previous
-
- /* For sun4v the ASI_DTLB_DATA_IN store and the retry
- * instruction get nop'd out and we get here to branch
- * to the sun4v tlb load code. The registers are setup
- * as follows:
- *
- * %g4: vaddr
- * %g5: PTE
- * %g6: TAG
- *
- * The sun4v TLB load wants the PTE in %g3 so we fix that
- * up here.
- */
- ba,pt %xcc, sun4v_dtlb_load
- mov %g5, %g3
-
-kvmap_dtlb_nonlinear:
- /* Catch kernel NULL pointer derefs. */
- sethi %hi(PAGE_SIZE), %g5
- cmp %g4, %g5
- bleu,pn %xcc, kvmap_dtlb_longpath
+#ifdef CONFIG_DEBUG_PAGEALLOC
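+ /* With DEBUG_PAGEALLOC, pages can be unmapped from the linear
+ * mapping, so the computed PTE cannot be trusted; the branch above
+ * is presumably nop'd at boot so that misses fall through to this
+ * hand-rolled walk. pgd/pmd entries are 32-bit physical addresses
+ * shifted right by 11 (hence lduw/lduwa plus sllx by 11); the
+ * final PTE is a full 64-bit ldxa. Loads past the pgd use
+ * ASI_PHYS_USE_EC (physical, cacheable) to avoid recursive misses.
+ */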
+ sethi %hi(swapper_pg_dir), %g5
+ or %g5, %lo(swapper_pg_dir), %g5
+ sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ andn %g6, 0x3, %g6
+ lduw [%g5 + %g6], %g5
+ brz,pn %g5, longpath
+ sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ sllx %g5, 11, %g5
+ andn %g6, 0x3, %g6
+ lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+ brz,pn %g5, longpath
+ sllx %g4, 64 - PMD_SHIFT, %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ sllx %g5, 11, %g5
+ andn %g6, 0x7, %g6
+ ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+ brz,pn %g5, longpath
nop
+ ba,a,pt %xcc, kvmap_load
+#endif
- KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
-
-kvmap_dtlb_tsbmiss:
+kvmap_nonlinear:
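+ /* Anything below MODULES_VADDR or at/above VMALLOC_END cannot be
+ * handled here and takes the long path to real fault processing.
+ */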
sethi %hi(MODULES_VADDR), %g5
cmp %g4, %g5
- blu,pn %xcc, kvmap_dtlb_longpath
+ blu,pn %xcc, longpath
mov (VMALLOC_END >> 24), %g5
sllx %g5, 24, %g5
cmp %g4, %g5
- bgeu,pn %xcc, kvmap_dtlb_longpath
+ bgeu,pn %xcc, longpath
nop
kvmap_check_obp:
sethi %hi(LOW_OBP_ADDRESS), %g5
cmp %g4, %g5
- blu,pn %xcc, kvmap_dtlb_vmalloc_addr
+ blu,pn %xcc, kvmap_vmalloc_addr
mov 0x1, %g5
sllx %g5, 32, %g5
cmp %g4, %g5
- blu,pn %xcc, kvmap_dtlb_obp
+ blu,pn %xcc, kvmap_do_obp
nop
- ba,pt %xcc, kvmap_dtlb_vmalloc_addr
- nop
-
-kvmap_dtlb_longpath:
-
-661: rdpr %pstate, %g5
- wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
- .section .sun4v_2insn_patch, "ax"
- .word 661b
- SET_GL(1)
- ldxa [%g0] ASI_SCRATCHPAD, %g5
- .previous
-
- rdpr %tl, %g3
- cmp %g3, 1
-661: mov TLB_TAG_ACCESS, %g4
- ldxa [%g4] ASI_DMMU, %g5
- .section .sun4v_2insn_patch, "ax"
- .word 661b
- ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
- nop
- .previous
-
- be,pt %xcc, sparc64_realfault_common
- mov FAULT_CODE_DTLB, %g4
- ba,pt %xcc, winfix_trampoline
+kvmap_vmalloc_addr:
+ /* If we get here, a vmalloc addr was accessed; load the kernel VPTE. */
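+ /* %g3 (VPTE base) and %g6 (VPTE offset) were set up by the first
+ * level miss handler in dtlb_base.S, so %g3 + %g6 addresses the
+ * kernel VPTE for the faulting page; a clear bit 63 means an
+ * invalid PTE, so fall back to longpath.
+ */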
+ ldxa [%g3 + %g6] ASI_N, %g5
+ brgez,pn %g5, longpath
nop
+
+kvmap_load:
+ /* PTE is valid, load into TLB and return from trap. */
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
+ retry