X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fsparc64%2Fmm%2Fultra.S;h=f8479fad4047de364454bf26ceb85bd18c9e6f78;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=7a0934321010afbd603fb46378102b17bcc01bba;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 7a0934321..f8479fad4 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -10,10 +10,12 @@ #include #include #include +#include #include #include #include #include +#include /* Basically, most of the Spitfire vs. Cheetah madness * has to do with the fact that Cheetah does not support @@ -28,15 +30,19 @@ .text .align 32 .globl __flush_tlb_mm -__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ +__flush_tlb_mm: /* 18 insns */ + /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ ldxa [%o1] ASI_DMMU, %g2 cmp %g2, %o0 bne,pn %icc, __spitfire_flush_tlb_mm_slow mov 0x50, %g3 stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP + sethi %hi(KERNBASE), %g3 + flush %g3 retl - flush %g6 + nop + nop nop nop nop @@ -48,7 +54,7 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ .align 32 .globl __flush_tlb_pending -__flush_tlb_pending: +__flush_tlb_pending: /* 26 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 @@ -69,13 +75,19 @@ __flush_tlb_pending: brnz,pt %o1, 1b nop stxa %g2, [%o4] ASI_DMMU - flush %g6 + sethi %hi(KERNBASE), %o4 + flush %o4 retl wrpr %g7, 0x0, %pstate + nop + nop + nop + nop .align 32 .globl __flush_tlb_kernel_range -__flush_tlb_kernel_range: /* %o0=start, %o1=end */ +__flush_tlb_kernel_range: /* 16 insns */ + /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sethi %hi(PAGE_SIZE), %o4 @@ -87,8 +99,11 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */ membar #Sync brnz,pt %o3, 1b sub %o3, %o4, %o3 -2: retl - flush %g6 +2: sethi %hi(KERNBASE), %o3 + flush %o3 + retl + nop + nop __spitfire_flush_tlb_mm_slow: rdpr %pstate, %g1 @@ -98,7 +113,8 @@ __spitfire_flush_tlb_mm_slow: stxa %g0, [%g3] ASI_IMMU_DEMAP flush %g6 stxa %g2, [%o1] ASI_DMMU - flush %g6 + sethi %hi(KERNBASE), %o1 + flush %o1 retl wrpr %g1, 0, %pstate @@ -112,6 +128,7 @@ __spitfire_flush_tlb_mm_slow: #else #error unsupported PAGE_SIZE #endif + .section .kprobes.text, "ax" .align 32 .globl __flush_icache_page __flush_icache_page: /* %o0 = phys_page */ @@ -136,42 +153,29 @@ __flush_icache_page: /* %o0 = phys_page */ #define DTAG_MASK 0x3 + /* This routine is Spitfire specific so the hardcoded + * D-cache size and line-size are OK. + */ .align 64 .globl __flush_dcache_page __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ sethi %uhi(PAGE_OFFSET), %g1 sllx %g1, 32, %g1 - sub %o0, %g1, %o0 - clr %o4 - srlx %o0, 11, %o0 - sethi %hi(1 << 14), %o2 -1: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group - add %o4, (1 << 5), %o4 ! IEU0 - ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group - add %o4, (1 << 5), %o4 ! IEU0 - ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available - add %o4, (1 << 5), %o4 ! IEU0 - andn %o3, DTAG_MASK, %o3 ! IEU1 - ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group - add %o4, (1 << 5), %o4 ! IEU0 - andn %g1, DTAG_MASK, %g1 ! IEU1 - cmp %o0, %o3 ! IEU1 Group - be,a,pn %xcc, dflush1 ! CTI - sub %o4, (4 << 5), %o4 ! IEU0 (Group) - cmp %o0, %g1 ! IEU1 Group - andn %g2, DTAG_MASK, %g2 ! IEU0 - be,a,pn %xcc, dflush2 ! CTI - sub %o4, (3 << 5), %o4 ! IEU0 (Group) - cmp %o0, %g2 ! IEU1 Group - andn %g3, DTAG_MASK, %g3 ! IEU0 - be,a,pn %xcc, dflush3 ! 
CTI - sub %o4, (2 << 5), %o4 ! IEU0 (Group) - cmp %o0, %g3 ! IEU1 Group - be,a,pn %xcc, dflush4 ! CTI - sub %o4, (1 << 5), %o4 ! IEU0 -2: cmp %o4, %o2 ! IEU1 Group - bne,pt %xcc, 1b ! CTI - nop ! IEU0 + sub %o0, %g1, %o0 ! physical address + srlx %o0, 11, %o0 ! make D-cache TAG + sethi %hi(1 << 14), %o2 ! D-cache size + sub %o2, (1 << 5), %o2 ! D-cache line size +1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG + andcc %o3, DTAG_MASK, %g0 ! Valid? + be,pn %xcc, 2f ! Nope, branch + andn %o3, DTAG_MASK, %o3 ! Clear valid bits + cmp %o3, %o0 ! TAG match? + bne,pt %xcc, 2f ! Nope, branch + nop + stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG + membar #Sync +2: brnz,pt %o2, 1b + sub %o2, (1 << 5), %o2 ! D-cache line size /* The I-cache does not snoop local stores so we * better flush that too when necessary. @@ -181,58 +185,12 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ retl nop -dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG - add %o4, (1 << 5), %o4 -dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG - add %o4, (1 << 5), %o4 -dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG - add %o4, (1 << 5), %o4 -dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG - add %o4, (1 << 5), %o4 - membar #Sync - ba,pt %xcc, 2b - nop #endif /* DCACHE_ALIASING_POSSIBLE */ - .align 32 -__prefill_dtlb: - rdpr %pstate, %g7 - wrpr %g7, PSTATE_IE, %pstate - mov TLB_TAG_ACCESS, %g1 - stxa %o5, [%g1] ASI_DMMU - stxa %o2, [%g0] ASI_DTLB_DATA_IN - flush %g6 - retl - wrpr %g7, %pstate -__prefill_itlb: - rdpr %pstate, %g7 - wrpr %g7, PSTATE_IE, %pstate - mov TLB_TAG_ACCESS, %g1 - stxa %o5, [%g1] ASI_IMMU - stxa %o2, [%g0] ASI_ITLB_DATA_IN - flush %g6 - retl - wrpr %g7, %pstate - - .globl __update_mmu_cache -__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */ - srlx %o1, PAGE_SHIFT, %o1 - andcc %o3, FAULT_CODE_DTLB, %g0 - sllx %o1, PAGE_SHIFT, %o5 - bne,pt %xcc, __prefill_dtlb - or %o5, %o0, %o5 - ba,a,pt %xcc, __prefill_itlb - - /* Cheetah specific versions, patched at boot time. - * - * This writes of the PRIMARY_CONTEXT register in this file are - * safe even on Cheetah+ and later wrt. the page size fields. - * The nucleus page size fields do not matter because we make - * no data references, and these instructions execute out of a - * locked I-TLB entry sitting in the fully assosciative I-TLB. - * This sequence should also never trap. - */ -__cheetah_flush_tlb_mm: /* 15 insns */ + .previous + + /* Cheetah specific versions, patched at boot time. 
*/ +__cheetah_flush_tlb_mm: /* 19 insns */ rdpr %pstate, %g7 andn %g7, PSTATE_IE, %g2 wrpr %g2, 0x0, %pstate @@ -240,16 +198,20 @@ __cheetah_flush_tlb_mm: /* 15 insns */ mov PRIMARY_CONTEXT, %o2 mov 0x40, %g3 ldxa [%o2] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1 + sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1 + or %o0, %o1, %o0 /* Preserve nucleus page size fields */ stxa %o0, [%o2] ASI_DMMU stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP stxa %g2, [%o2] ASI_DMMU - flush %g6 + sethi %hi(KERNBASE), %o2 + flush %o2 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate -__cheetah_flush_tlb_pending: /* 22 insns */ +__cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 @@ -258,6 +220,9 @@ __cheetah_flush_tlb_pending: /* 22 insns */ wrpr %g0, 1, %tl mov PRIMARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 + or %o0, %o3, %o0 /* Preserve nucleus page size fields */ stxa %o0, [%o4] ASI_DMMU 1: sub %o1, (1 << 3), %o1 ldx [%o2 + %o1], %o3 @@ -266,16 +231,18 @@ __cheetah_flush_tlb_pending: /* 22 insns */ andn %o3, 1, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 2: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync brnz,pt %o1, 1b - membar #Sync + nop stxa %g2, [%o4] ASI_DMMU - flush %g6 + sethi %hi(KERNBASE), %o4 + flush %o4 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate #ifdef DCACHE_ALIASING_POSSIBLE -flush_dcpage_cheetah: /* 11 insns */ +__cheetah_flush_dcache_page: /* 11 insns */ sethi %uhi(PAGE_OFFSET), %g1 sllx %g1, 32, %g1 sub %o0, %g1, %o0 @@ -289,7 +256,76 @@ flush_dcpage_cheetah: /* 11 insns */ nop #endif /* DCACHE_ALIASING_POSSIBLE */ -cheetah_patch_one: + /* Hypervisor specific versions, patched at boot time. */ +__hypervisor_tlb_tl0_error: + save %sp, -192, %sp + mov %i0, %o0 + call hypervisor_tlbop_error + mov %i1, %o1 + ret + restore + +__hypervisor_flush_tlb_mm: /* 10 insns */ + mov %o0, %o2 /* ARG2: mmu context */ + mov 0, %o0 /* ARG0: CPU lists unimplemented */ + mov 0, %o1 /* ARG1: CPU lists unimplemented */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_FAST_MMU_DEMAP_CTX, %o1 + retl + nop + +__hypervisor_flush_tlb_pending: /* 16 insns */ + /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ + sllx %o1, 3, %g1 + mov %o2, %g2 + mov %o0, %g3 +1: sub %g1, (1 << 3), %g1 + ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g3, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 + brnz,pt %g1, 1b + nop + retl + nop + +__hypervisor_flush_tlb_kernel_range: /* 16 insns */ + /* %o0=start, %o1=end */ + cmp %o0, %o1 + be,pn %xcc, 2f + sethi %hi(PAGE_SIZE), %g3 + mov %o0, %g1 + sub %o1, %g1, %g2 + sub %g2, %g3, %g2 +1: add %g1, %g2, %o0 /* ARG0: virtual address */ + mov 0, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 + brnz,pt %g2, 1b + sub %g2, %g3, %g2 +2: retl + nop + +#ifdef DCACHE_ALIASING_POSSIBLE + /* XXX Niagara and friends have an 8K cache, so no aliasing is + * XXX possible, but nothing explicit in the Hypervisor API + * XXX guarantees this. 
+ */ +__hypervisor_flush_dcache_page: /* 2 insns */ + retl + nop +#endif + +tlb_patch_one: 1: lduw [%o1], %g1 stw %g1, [%o0] flush %o0 @@ -308,22 +344,22 @@ cheetah_patch_cachetlbops: or %o0, %lo(__flush_tlb_mm), %o0 sethi %hi(__cheetah_flush_tlb_mm), %o1 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 - call cheetah_patch_one - mov 15, %o2 + call tlb_patch_one + mov 19, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 - call cheetah_patch_one - mov 22, %o2 + call tlb_patch_one + mov 27, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 or %o0, %lo(__flush_dcache_page), %o0 - sethi %hi(flush_dcpage_cheetah), %o1 - or %o1, %lo(flush_dcpage_cheetah), %o1 - call cheetah_patch_one + sethi %hi(__cheetah_flush_dcache_page), %o1 + or %o1, %lo(__cheetah_flush_dcache_page), %o1 + call tlb_patch_one mov 11, %o2 #endif /* DCACHE_ALIASING_POSSIBLE */ @@ -339,31 +375,46 @@ cheetah_patch_cachetlbops: * %g1 address arg 1 (tlb page and range flushes) * %g7 address arg 2 (tlb range flush only) * - * %g6 ivector table, don't touch - * %g2 scratch 1 - * %g3 scratch 2 - * %g4 scratch 3 - * - * TODO: Make xcall TLB range flushes use the tricks above... -DaveM + * %g6 scratch 1 + * %g2 scratch 2 + * %g3 scratch 3 + * %g4 scratch 4 */ .align 32 .globl xcall_flush_tlb_mm -xcall_flush_tlb_mm: +xcall_flush_tlb_mm: /* 21 insns */ mov PRIMARY_CONTEXT, %g2 - mov 0x40, %g4 ldxa [%g2] ASI_DMMU, %g3 + srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 + sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 + or %g5, %g4, %g5 /* Preserve nucleus page size fields */ stxa %g5, [%g2] ASI_DMMU + mov 0x40, %g4 stxa %g0, [%g4] ASI_DMMU_DEMAP stxa %g0, [%g4] ASI_IMMU_DEMAP stxa %g3, [%g2] ASI_DMMU retry + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: +xcall_flush_tlb_pending: /* 21 insns */ /* %g5=context, %g1=nr, %g7=vaddrs[] */ sllx %g1, 3, %g1 mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 + sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 + or %g5, %g4, %g5 + mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU 1: sub %g1, (1 << 3), %g1 ldx [%g7 + %g1], %g5 @@ -378,9 +429,10 @@ xcall_flush_tlb_pending: nop stxa %g2, [%g4] ASI_DMMU retry + nop .globl xcall_flush_tlb_kernel_range -xcall_flush_tlb_kernel_range: +xcall_flush_tlb_kernel_range: /* 25 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 @@ -397,14 +449,30 @@ xcall_flush_tlb_kernel_range: retry nop nop + nop + nop + nop + nop + nop + nop + nop + nop + nop /* This runs in a very controlled environment, so we do * not need to worry about BH races etc. 
*/ .globl xcall_sync_tick xcall_sync_tick: - rdpr %pstate, %g2 + +661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate + .section .sun4v_2insn_patch, "ax" + .word 661b + nop + nop + .previous + rdpr %pil, %g2 wrpr %g0, 15, %pil sethi %hi(109f), %g7 @@ -427,8 +495,15 @@ xcall_sync_tick: */ .globl xcall_report_regs xcall_report_regs: - rdpr %pstate, %g2 + +661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate + .section .sun4v_2insn_patch, "ax" + .word 661b + nop + nop + .previous + rdpr %pil, %g2 wrpr %g0, 15, %pil sethi %hi(109f), %g7 @@ -490,78 +565,96 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop - .globl xcall_promstop -xcall_promstop: - rdpr %pstate, %g2 - wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate - rdpr %pil, %g2 - wrpr %g0, 15, %pil - sethi %hi(109f), %g7 - b,pt %xcc, etrap_irq -109: or %g7, %lo(109b), %g7 - flushw - call prom_stopself - nop - /* We should not return, just spin if we do... */ -1: b,a,pt %xcc, 1b - nop - - .data - -errata32_hwbug: - .xword 0 - - .text - - /* These two are not performance critical... */ - .globl xcall_flush_tlb_all_spitfire -xcall_flush_tlb_all_spitfire: - /* Spitfire Errata #32 workaround. */ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - - clr %g2 - clr %g3 -1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4 - and %g4, _PAGE_L, %g5 - brnz,pn %g5, 2f - mov TLB_TAG_ACCESS, %g7 - - stxa %g0, [%g7] ASI_DMMU - membar #Sync - stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS + /* %g5: error + * %g6: tlb op + */ +__hypervisor_tlb_xcall_error: + mov %g5, %g4 + mov %g6, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o0 + call hypervisor_tlbop_error_xcall + mov %l5, %o1 + ba,a,pt %xcc, rtrap_clr_l6 + + .globl __hypervisor_xcall_flush_tlb_mm +__hypervisor_xcall_flush_tlb_mm: /* 21 insns */ + /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ + mov %o0, %g2 + mov %o1, %g3 + mov %o2, %g4 + mov %o3, %g1 + mov %o5, %g7 + clr %o0 /* ARG0: CPU lists unimplemented */ + clr %o1 /* ARG1: CPU lists unimplemented */ + mov %g5, %o2 /* ARG2: mmu context */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + mov HV_FAST_MMU_DEMAP_CTX, %g6 + brnz,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 + mov %g2, %o0 + mov %g3, %o1 + mov %g4, %o2 + mov %g1, %o3 + mov %g7, %o5 membar #Sync + retry - /* Spitfire Errata #32 workaround. */ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - -2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4 - and %g4, _PAGE_L, %g5 - brnz,pn %g5, 2f - mov TLB_TAG_ACCESS, %g7 - - stxa %g0, [%g7] ASI_IMMU - membar #Sync - stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS + .globl __hypervisor_xcall_flush_tlb_pending +__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ + /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ + sllx %g1, 3, %g1 + mov %o0, %g2 + mov %o1, %g3 + mov %o2, %g4 +1: sub %g1, (1 << 3), %g1 + ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ + mov %g5, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + mov HV_MMU_UNMAP_ADDR_TRAP, %g6 + brnz,a,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 + brnz,pt %g1, 1b + nop + mov %g2, %o0 + mov %g3, %o1 + mov %g4, %o2 membar #Sync - - /* Spitfire Errata #32 workaround. 
*/ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - -2: add %g2, 1, %g2 - cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT - ble,pt %icc, 1b - sll %g2, 3, %g3 - flush %g6 retry - .globl xcall_flush_tlb_all_cheetah -xcall_flush_tlb_all_cheetah: - mov 0x80, %g2 - stxa %g0, [%g2] ASI_DMMU_DEMAP - stxa %g0, [%g2] ASI_IMMU_DEMAP + .globl __hypervisor_xcall_flush_tlb_kernel_range +__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ + /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ + sethi %hi(PAGE_SIZE - 1), %g2 + or %g2, %lo(PAGE_SIZE - 1), %g2 + andn %g1, %g2, %g1 + andn %g7, %g2, %g7 + sub %g7, %g1, %g3 + add %g2, 1, %g2 + sub %g3, %g2, %g3 + mov %o0, %g2 + mov %o1, %g4 + mov %o2, %g7 +1: add %g1, %g3, %o0 /* ARG0: virtual address */ + mov 0, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + ta HV_MMU_UNMAP_ADDR_TRAP + mov HV_MMU_UNMAP_ADDR_TRAP, %g6 + brnz,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 + sethi %hi(PAGE_SIZE), %o2 + brnz,pt %g3, 1b + sub %g3, %o2, %g3 + mov %g2, %o0 + mov %g4, %o1 + mov %g7, %o2 + membar #Sync retry /* These just get rescheduled to PIL vectors. */ @@ -580,4 +673,70 @@ xcall_capture: wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint retry + .globl xcall_new_mmu_context_version +xcall_new_mmu_context_version: + wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint + retry + #endif /* CONFIG_SMP */ + + + .globl hypervisor_patch_cachetlbops +hypervisor_patch_cachetlbops: + save %sp, -128, %sp + + sethi %hi(__flush_tlb_mm), %o0 + or %o0, %lo(__flush_tlb_mm), %o0 + sethi %hi(__hypervisor_flush_tlb_mm), %o1 + or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 + call tlb_patch_one + mov 10, %o2 + + sethi %hi(__flush_tlb_pending), %o0 + or %o0, %lo(__flush_tlb_pending), %o0 + sethi %hi(__hypervisor_flush_tlb_pending), %o1 + or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 + call tlb_patch_one + mov 16, %o2 + + sethi %hi(__flush_tlb_kernel_range), %o0 + or %o0, %lo(__flush_tlb_kernel_range), %o0 + sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 + or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 16, %o2 + +#ifdef DCACHE_ALIASING_POSSIBLE + sethi %hi(__flush_dcache_page), %o0 + or %o0, %lo(__flush_dcache_page), %o0 + sethi %hi(__hypervisor_flush_dcache_page), %o1 + or %o1, %lo(__hypervisor_flush_dcache_page), %o1 + call tlb_patch_one + mov 2, %o2 +#endif /* DCACHE_ALIASING_POSSIBLE */ + +#ifdef CONFIG_SMP + sethi %hi(xcall_flush_tlb_mm), %o0 + or %o0, %lo(xcall_flush_tlb_mm), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 + call tlb_patch_one + mov 21, %o2 + + sethi %hi(xcall_flush_tlb_pending), %o0 + or %o0, %lo(xcall_flush_tlb_pending), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + call tlb_patch_one + mov 21, %o2 + + sethi %hi(xcall_flush_tlb_kernel_range), %o0 + or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 25, %o2 +#endif /* CONFIG_SMP */ + + ret + restore
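
A minimal C-style sketch of the boot-time patching scheme used by tlb_patch_one above, for readers following the cheetah_patch_cachetlbops and hypervisor_patch_cachetlbops callers: each generic entry point (for example __flush_tlb_mm) is overwritten, one 32-bit instruction word at a time, with the CPU- or hypervisor-specific routine, and the I-cache is flushed after every patched word so the new instructions are what gets fetched. The names tlb_patch_sketch and flush_insn below are illustrative stand-ins only; in the actual code the copy loop and the flush are the lduw/stw/flush sequence in tlb_patch_one, not C calls.

	#include <stdint.h>

	/* Hypothetical stand-in for the SPARC `flush` instruction
	 * (I-cache flush of the word just stored); illustration only. */
	static void flush_insn(volatile uint32_t *addr) { (void)addr; }

	/* Copy `count` instruction words from the replacement routine (src)
	 * over the generic entry point (dst), keeping the I-cache coherent
	 * after every store -- mirrors the lduw/stw/flush loop in tlb_patch_one. */
	static void tlb_patch_sketch(uint32_t *dst, const uint32_t *src,
				     unsigned int count)
	{
		while (count--) {
			*dst = *src;		/* stw   %g1, [%o0] */
			flush_insn(dst);	/* flush %o0        */
			dst++;			/* add   %o0, 4, %o0 */
			src++;			/* add   %o1, 4, %o1 */
		}
	}

	/* Example call, matching one of the hypervisor_patch_cachetlbops
	 * patch sites (10 is the instruction count passed in %o2):
	 *
	 *	tlb_patch_sketch((uint32_t *)__flush_tlb_mm,
	 *			 (uint32_t *)__hypervisor_flush_tlb_mm, 10);
	 */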