* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/config.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
/* we have the following possibilities to act on an interruption:
* - handle in assembly and use shadowed registers only
* - save registers to kernel stack and handle in assembly or C */
+#include <asm/psw.h>
+#include <asm/cache.h> /* for L1_CACHE_SHIFT */
#include <asm/assembly.h> /* for LDREG/STREG defines */
#include <asm/pgtable.h>
-#include <asm/psw.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
#define CMPIB cmpib,*
#define CMPB cmpb,*
+#define COND(x) *x
.level 2.0w
#else
#define CMPIB cmpib,
#define CMPB cmpb,
+#define COND(x) x
.level 2.0
#endif
/* Switch to virtual mapping, trashing only %r1 */
.macro virt_map
- rsm PSW_SM_Q,%r0
- tovirt_r1 %r29
- mfsp %sr7, %r1
- or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
- mtsp %r1, %sr3
+ /* pcxt_ssm_bug */
+ rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation" */
mtsp %r0, %sr4
mtsp %r0, %sr5
+ mfsp %sr7, %r1
+ or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
+ mtsp %r1, %sr3
+ tovirt_r1 %r29
+ load32 KERNEL_PSW, %r1
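+ /* %r1 now holds KERNEL_PSW; it is written to %ipsw below so it
+ * becomes the new PSW on the return from interruption that
+ * completes the switch to virtual mode */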
+
+ rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
mtsp %r0, %sr6
mtsp %r0, %sr7
- ldil L%KERNEL_PSW, %r1
- ldo R%KERNEL_PSW(%r1), %r1
- mtctl %r1, %cr22
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
- ldil L%4f, %r1
- ldo R%4f(%r1), %r1
+ mtctl %r1, %ipsw
+ load32 4f, %r1
mtctl %r1, %cr18 /* Set IIAOQ tail */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* Set IIAOQ head */
/* HPMC handler */
.macro hpmc code
nop /* must be a NOP, will be patched later */
- ldil L%PA(os_hpmc), %r3
- ldo R%PA(os_hpmc)(%r3), %r3
+ load32 PA(os_hpmc), %r3
bv,n 0(%r3)
nop
.word 0 /* checksum (will be patched) */
va = r8 /* virtual address for which the trap occurred */
spc = r24 /* space for which the trap occurred */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* itlb miss interruption handler (parisc 1.1 - 32 bit)
.macro itlb_20 code
mfctl %pcsq, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b itlb_miss_20w
#else
b itlb_miss_20
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* naitlb miss interruption handler (parisc 1.1 - 32 bit)
*
.macro naitlb_20 code
mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b itlb_miss_20w
#else
b itlb_miss_20
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* dtlb miss interruption handler (parisc 1.1 - 32 bit)
*/
.macro dtlb_20 code
mfctl %isr, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b dtlb_miss_20w
#else
b dtlb_miss_20
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
.macro nadtlb_11 code
.macro nadtlb_20 code
mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b nadtlb_miss_20w
#else
b nadtlb_miss_20
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* dirty bit trap interruption handler (parisc 1.1 - 32 bit)
*/
.macro dbit_20 code
mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b dbit_trap_20w
#else
b dbit_trap_20
.align 32
.endm
+ /* The following are simple 32 vs 64 bit instruction
+ * abstractions for the macros */
+ .macro EXTR reg1,start,length,reg2
+#ifdef CONFIG_64BIT
+ extrd,u \reg1,32+\start,\length,\reg2
+#else
+ extrw,u \reg1,\start,\length,\reg2
+#endif
+ .endm
+
+ .macro DEP reg1,start,length,reg2
+#ifdef CONFIG_64BIT
+ depd \reg1,32+\start,\length,\reg2
+#else
+ depw \reg1,\start,\length,\reg2
+#endif
+ .endm
+
+ .macro DEPI val,start,length,reg
+#ifdef CONFIG_64BIT
+ depdi \val,32+\start,\length,\reg
+#else
+ depwi \val,\start,\length,\reg
+#endif
+ .endm
+
+ /* In LP64, the space contains part of the upper 32 bits of the
+ * faulting address. We have to extract this and place it in the va,
+ * zeroing the corresponding bits in the space register */
+ .macro space_adjust spc,va,tmp
+#ifdef CONFIG_64BIT
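+ /* Roughly, in C (illustrative sketch only):
+ * tmp = spc & ((1UL << SPACEID_SHIFT) - 1);
+ * spc &= ~((1UL << SPACEID_SHIFT) - 1);
+ * va |= (unsigned long)tmp << 32;
+ */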
+ extrd,u \spc,63,SPACEID_SHIFT,\tmp
+ depd %r0,63,SPACEID_SHIFT,\spc
+ depd \tmp,31,SPACEID_SHIFT,\va
+#endif
+ .endm
+
+ .import swapper_pg_dir,code
+
+ /* Get the pgd. For faults on space zero (kernel space), this
+ * is simply swapper_pg_dir. For user space faults, the
+ * pgd is stored in %cr25 */
+ .macro get_pgd spc,reg
+ ldil L%PA(swapper_pg_dir),\reg
+ ldo R%PA(swapper_pg_dir)(\reg),\reg
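+ /* if spc is zero (a kernel fault) the conditional or below
+ * nullifies the mfctl, so \reg keeps swapper_pg_dir; otherwise
+ * the user pgd is read from %cr25 */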
+ or,COND(=) %r0,\spc,%r0
+ mfctl %cr25,\reg
+ .endm
+
+ /*
+ space_check(spc,tmp,fault)
+
+ spc - The space we saw the fault with.
+ tmp - The place to store the current space.
+ fault - Function to call on failure.
+
+ Only allow faults on different spaces from the
+ currently active one if we're the kernel
+
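+ Roughly, in C (illustrative sketch only; "current" is the
+ space read from %sr7):
+
+ tmp = current;
+ if (spc == 0)
+ tmp = spc;
+ if (tmp != 0 && tmp != spc)
+ goto fault;
+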
+ */
+ .macro space_check spc,tmp,fault
+ mfsp %sr7,\tmp
+ or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
+ * as kernel, so defeat the space
+ * check if it is */
+ copy \spc,\tmp
+ or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
+ cmpb,COND(<>),n \tmp,\spc,\fault
+ .endm
+
+ /* Look up a PTE in a 2-Level scheme (faulting at each
+ * level if the entry isn't present)
+ *
+ * NOTE: we use ldw even for LP64, since the short pointers
+ * can address up to 1TB
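+ *
+ * Rough C equivalent of the 3-level variant (illustrative sketch
+ * only; the names are the ones used by the macro, pmd_base and
+ * pte_tbl are hypothetical locals):
+ *
+ * pmd = ((u32 *)(pmd_base & PAGE_MASK))
+ * [(va >> ASM_PMD_SHIFT) & ((1 << ASM_BITS_PER_PMD) - 1)];
+ * if (!(pmd & PxD_FLAG_PRESENT))
+ * goto fault;
+ * pte_tbl = (pmd & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT;
+ * pte = ((pte_t *)pte_tbl)
+ * [(va >> PAGE_SHIFT) & ((1 << ASM_BITS_PER_PTE) - 1)];
+ * if (!(pte & _PAGE_PRESENT))
+ * goto fault;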
+ */
+ .macro L2_ptep pmd,pte,index,va,fault
+#if PT_NLEVELS == 3
+ EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+#else
+ EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+#endif
+ DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ copy %r0,\pte
+ ldw,s \index(\pmd),\pmd
+ bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
+ DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+ copy \pmd,%r9
+ SHLREG %r9,PxD_VALUE_SHIFT,\pmd
+ EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
+ LDREG %r0(\pmd),\pte /* pmd is now pte */
+ bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
+ .endm
+
+ /* Look up PTE in a 3-Level scheme.
+ *
+ * Here we implement a Hybrid L2/L3 scheme: we allocate the
+ * first pmd adjacent to the pgd. This means that we can
+ * subtract a constant offset to get to it. The pmd and pgd
+ * sizes are arranged so that a single pmd covers 4GB (giving
+ * a full LP64 process access to 8TB) so our lookups are
+ * effectively L2 for the first 4GB of the kernel (i.e. for
+ * all ILP32 processes and all the kernel for machines with
+ * under 4GB of memory) */
+ .macro L3_ptep pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
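+ /* The repeated extrd,u,*= below nullify the following insn when
+ * the va has no bits above the first pgd entry, so low addresses
+ * skip the pgd dereference entirely and only execute the final
+ * ldo, which points \pgd at the pmd allocated adjacent to the
+ * pgd; high addresses walk the pgd and skip that ldo instead. */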
+ extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+ copy %r0,\pte
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ ldw,s \index(\pgd),\pgd
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ shld \pgd,PxD_VALUE_SHIFT,\index
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ copy \index,\pgd
+ extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
+#endif
+ L2_ptep \pgd,\pte,\index,\va,\fault
+ .endm
+
+ /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
+ * don't needlessly dirty the cache line if it was already set */
+ .macro update_ptep ptep,pte,tmp,tmp1
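+ /* the and,COND(<>) nullifies the store when _PAGE_ACCESSED is
+ * already set in \pte, so an already-accessed pte is left alone */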
+ ldi _PAGE_ACCESSED,\tmp1
+ or \tmp1,\pte,\tmp
+ and,COND(<>) \tmp1,\pte,%r0
+ STREG \tmp,0(\ptep)
+ .endm
+
+ /* Set the dirty bit (and accessed bit). No need to be
+ * clever, this is only used from the dirty fault */
+ .macro update_dirty ptep,pte,tmp
+ ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
+ or \tmp,\pte,\pte
+ STREG \pte,0(\ptep)
+ .endm
+
+ /* Convert the pte and prot to tlb insertion values. How
+ * this happens is quite subtle, read below */
+ .macro make_insert_tlb spc,pte,prot
+ space_to_prot \spc \prot /* create prot id from space */
+ /* The following is the real subtlety. This is depositing
+ * T <-> _PAGE_REFTRAP
+ * D <-> _PAGE_DIRTY
+ * B <-> _PAGE_DMB (memory break)
+ *
+ * Then incredible subtlety: The access rights are
+ * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
+ * See 3-14 of the parisc 2.0 manual
+ *
+ * Finally, _PAGE_READ goes in the top bit of PL1 (so we
+ * trigger an access rights trap in user space if the user
+ * tries to read an unreadable page) */
+ depd \pte,8,7,\prot
+
+ /* PAGE_USER indicates the page can be read with user privileges,
+ * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
+ * contains _PAGE_READ) */
+ extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
+ depdi 7,11,3,\prot
+ /* If we're a gateway page, drop PL2 back to zero for promotion
+ * to kernel privilege (so we can execute the page as kernel).
+ * Any privilege promotion page always denies read and write */
+ extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
+ depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
+
+ /* Enforce uncacheable pages.
+ * This should ONLY be used for MMIO on PA 2.0 machines.
+ * Memory/DMA is cache coherent on all PA2.0 machines we support
+ * (that means T-class is NOT supported) and the memory controllers
+ * on most of those machines only handle cache transactions.
+ */
+ extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+ depi 1,12,1,\prot
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
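+
+ /* At this point \pte holds the physical page number in the form
+ * expected by idtlbt/iitlbt, with the default page size encoding
+ * in its low bits, and \prot carries the space-derived protection
+ * id, access rights and privilege levels */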
+ .endm
+
+ /* Identical macro to make_insert_tlb above, except it
+ * makes the tlb entry for the differently formatted pa11
+ * insertion instructions */
+ .macro make_insert_tlb_11 spc,pte,prot
+ zdep \spc,30,15,\prot
+ dep \pte,8,7,\prot
+ extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
+ depi 1,12,1,\prot
+ extru,= \pte,_PAGE_USER_BIT,1,%r0
+ depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
+ extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
+ depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
+
+ /* Get rid of prot bits and convert to page addr for iitlba */
+
+ depi _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
+ extru \pte,24,25,\pte
+ .endm
+
+ /* This is for ILP32 PA2.0 only. The TLB insertion needs
+ * to extend into I/O space if the address is 0xfXXXXXXX
+ * so we extend the f's into the top word of the pte in
+ * this case */
+ .macro f_extend pte,tmp
+ extrd,s \pte,42,4,\tmp
+ addi,<> 1,\tmp,%r0
+ extrd,s \pte,63,25,\pte
+ .endm
+
+ /* The alias region is an 8MB-aligned, 16MB region used to clear
+ * and copy user pages at addresses congruent with the user
+ * virtual address.
+ *
+ * To use the alias page, you set %r26 up with the "to" TLB
+ * entry (identifying the physical page) and %r23 up with
+ * the "from" TLB entry (or nothing if there is only a "to"
+ * entry---as for clear_user_page_asm) */
+ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
+ cmpib,COND(<>),n 0,\spc,\fault
+ ldil L%(TMPALIAS_MAP_START),\tmp
+#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
+ /* on LP64, ldi will sign extend into the upper 32 bits,
+ * which is behaviour we don't want */
+ depdi 0,31,32,\tmp
+#endif
+ copy \va,\tmp1
+ DEPI 0,31,23,\tmp1
+ cmpb,COND(<>),n \tmp,\tmp1,\fault
+ ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
+ depd,z \prot,8,7,\prot
+ /*
+ * OK, it is in the temp alias region, check whether "from" or "to".
+ * Check "subtle" note in pacache.S re: r23/r26.
+ */
+#ifdef CONFIG_64BIT
+ extrd,u,*= \va,41,1,%r0
+#else
+ extrw,u,= \va,9,1,%r0
+#endif
+ or,COND(tr) %r23,%r0,\pte
+ or %r26,%r0,\pte
+ .endm
+
+
/*
* Align fault_vector_20 on 4K boundary so that both
* fault_vector_11 and fault_vector_20 are on the
def 30
def 31
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
.export fault_vector_11
#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000
+#define CLONE_KTHREAD 0x10000000
.export __kernel_thread, code
.import do_fork
copy %r30, %r1
ldo PT_SZ_ALGN(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Yo, function pointers in wide mode are little structs... -PB */
ldd 24(%r26), %r2
STREG %r2, PT_GR27(%r1) /* Store child's %dp */
or %r26, %r24, %r26 /* will have kernel mappings. */
ldi 1, %r25 /* stack_start, signals kernel thread */
stw %r0, -52(%r30) /* user_tid */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl do_fork, %r2
+ BL do_fork, %r2
copy %r1, %r24 /* pt_regs */
/* Parent Returns here */
ret_from_kernel_thread:
/* Call schedule_tail first though */
- bl schedule_tail, %r2
+ BL schedule_tail, %r2
nop
LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
LDREG TASK_PT_GR25(%r1), %r26
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
LDREG TASK_PT_GR27(%r1), %r27
LDREG TASK_PT_GR22(%r1), %r22
#endif
ble 0(%sr7, %r1)
copy %r31, %r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
loadgp /* Thread could have been in a module */
#endif
+#ifndef CONFIG_64BIT
b sys_exit
+#else
+ load32 sys_exit, %r1
+ bv %r0(%r1)
+#endif
ldi 0, %r26
.import sys_execve, code
STREG %r26, PT_GR26(%r16)
STREG %r25, PT_GR25(%r16)
STREG %r24, PT_GR24(%r16)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl sys_execve, %r2
+ BL sys_execve, %r2
copy %r16, %r26
cmpib,=,n 0,%r28,intr_return /* forward */
_switch_to:
STREG %r2, -RP_OFFSET(%r30)
+ callee_save_float
callee_save
- ldil L%_switch_to_ret, %r2
- ldo R%_switch_to_ret(%r2), %r2
+ load32 _switch_to_ret, %r2
STREG %r2, TASK_PT_KPC(%r26)
LDREG TASK_PT_KPC(%r25), %r2
_switch_to_ret:
mtctl %r0, %cr0 /* Needed for single stepping */
callee_rest
+ callee_rest_float
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
* this way, then we will need to copy %sr3 in to PT_SR[3..7], and
* adjust IASQ[0..1].
*
- * Note that the following code uses a "relied upon translation".
- * See the parisc ACD for details. The ssm is necessary due to a
- * PCXT bug.
*/
.align 4096
depi 3,31,2,%r19
STREG %r19,PT_IAOQ1(%r16)
LDREG PT_PSW(%r16),%r19
- ldil L%USER_PSW_MASK,%r1
- ldo R%USER_PSW_MASK(%r1),%r1
-#ifdef __LP64__
- ldil L%USER_PSW_HI_MASK,%r20
- ldo R%USER_PSW_HI_MASK(%r20),%r20
+ load32 USER_PSW_MASK,%r1
+#ifdef CONFIG_64BIT
+ load32 USER_PSW_HI_MASK,%r20
depd %r20,31,32,%r1
#endif
and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
- ldil L%USER_PSW,%r1
- ldo R%USER_PSW(%r1),%r1
+ load32 USER_PSW,%r1
or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
STREG %r19,PT_PSW(%r16)
* to "proper" values now (otherwise we'll wind up restoring
* whatever was last stored in the task structure, which might
* be inconsistent if an interrupt occurred while on the gateway
- * page) Note that we may be "trashing" values the user put in
- * them, but we don't support the the user changing them.
+ * page). Note that we may be "trashing" values the user put in
+ * them, but we don't support the user changing them.
*/
STREG %r0,PT_SR2(%r16)
STREG %r19,PT_SR7(%r16)
intr_return:
+ /* NOTE: Need to enable interrupts in case we schedule. */
ssm PSW_SM_I, %r0
/* Check for software interrupts */
.import irq_stat,data
- ldil L%irq_stat,%r19
- ldo R%irq_stat(%r19),%r19
+ load32 irq_stat,%r19
#ifdef CONFIG_SMP
mfctl %cr30,%r1
ldw TI_CPU(%r1),%r1 /* get cpu # - int */
/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
** irq_stat[] is defined using ____cacheline_aligned.
*/
-#ifdef __LP64__
- shld %r1, 6, %r20
-#else
- shlw %r1, 5, %r20
-#endif
+ SHLREG %r1,L1_CACHE_SHIFT,%r20
add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */
- LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
- cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
-
intr_check_resched:
/* check for reschedule */
ldo PT_FR31(%r29),%r1
rest_fp %r1
rest_general %r29
- ssm 0,%r0
- nop
- nop
- nop
- nop
- nop
- nop
- nop
+
+ /* inverse of virt_map */
+ pcxt_ssm_bug
+ rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
tophys_r1 %r29
- rsm (PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0
+
+ /* Restore space id's and special cr's from PT_REGS
+ * structure pointed to by r29
+ */
rest_specials %r29
+
+ /* IMPORTANT: rest_stack restores r29 last (we are using it)!
+ * It also restores r1 and r30.
+ */
rest_stack
+
rfi
nop
nop
nop
nop
- .import do_softirq,code
-intr_do_softirq:
- bl do_softirq,%r2
-#ifdef __LP64__
- ldo -16(%r30),%r29 /* Reference param save area */
-#else
- nop
-#endif
- b intr_check_resched
- nop
+#ifndef CONFIG_PREEMPT
+# define intr_do_preempt intr_restore
+#endif /* !CONFIG_PREEMPT */
.import schedule,code
intr_do_resched:
- /* Only do reschedule if we are returning to user space */
+ /* Only call schedule on return to userspace. If we're returning
+ * to kernel space, we may schedule if CONFIG_PREEMPT; otherwise
+ * we jump back to intr_restore.
+ */
LDREG PT_IASQ0(%r16), %r20
- CMPIB= 0,%r20,intr_restore /* backward */
+ CMPIB= 0, %r20, intr_do_preempt
nop
LDREG PT_IASQ1(%r16), %r20
- CMPIB= 0,%r20,intr_restore /* backward */
+ CMPIB= 0, %r20, intr_do_preempt
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldil L%intr_check_sig, %r2
+#ifndef CONFIG_64BIT
b schedule
+#else
+ load32 schedule, %r20
+ bv %r0(%r20)
+#endif
ldo R%intr_check_sig(%r2), %r2
+ /* preempt the current task on returning to kernel
+ * mode from an interrupt, iff need_resched is set,
+ * and preempt_count is 0. Otherwise, we continue on
+ * our merry way back to the currently running task.
+ */
+#ifdef CONFIG_PREEMPT
+ .import preempt_schedule_irq,code
+intr_do_preempt:
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+
+ /* current_thread_info()->preempt_count */
+ mfctl %cr30, %r1
+ LDREG TI_PRE_COUNT(%r1), %r19
+ CMPIB<> 0, %r19, intr_restore /* if preempt_count > 0 */
+ nop /* prev insn branched backwards */
+
+ /* check if we interrupted a critical path */
+ LDREG PT_PSW(%r16), %r20
+ bb,<,n %r20, 31 - PSW_SM_I, intr_restore
+ nop
+
+ BL preempt_schedule_irq, %r2
+ nop
+
+ b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
+#endif /* CONFIG_PREEMPT */
.import do_signal,code
intr_do_signal:
- /* Only do signals if we are returning to user space */
+ /*
+ This check is critical to having LWS
+ working. The IASQ is zero on the gateway
+ page and we cannot deliver any signals until
+ we get off the gateway page.
+
+ Only do signals if we are returning to user space
+ */
LDREG PT_IASQ0(%r16), %r20
CMPIB= 0,%r20,intr_restore /* backward */
nop
copy %r0, %r24 /* unsigned long in_syscall */
copy %r16, %r25 /* struct pt_regs *regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl do_signal,%r2
+ BL do_signal,%r2
copy %r0, %r26 /* sigset_t *oldset = NULL */
- b intr_restore
+ b intr_check_sig
nop
/*
mfctl %cr31,%r1
copy %r30,%r17
/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi 0,63,15,%r17
#else
depi 0,31,15,%r17
ldil L%intr_return, %r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
CMPIB=,n 6,%r26,skip_save_ior
- /* save_specials left ipsw value in r8 for us to test */
mfctl %cr20, %r16 /* isr */
+ nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
mfctl %cr21, %r17 /* ior */
-#ifdef __LP64__
+
+#ifdef CONFIG_64BIT
/*
* If the interrupted code was running with W bit off (32 bit),
* clear the b bits (bits 0 & 1) in the ior.
+ * save_specials left ipsw value in r8 for us to test.
*/
extrd,u,*<> %r8,PSW_W_BIT,1,%r0
depdi 0,1,2,%r17
*/
/* adjust isr/ior. */
-
- extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
- depd %r1,31,7,%r17 /* deposit them into ior */
- depdi 0,63,7,%r16 /* clear them from isr */
+ extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
+ depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
+ depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
#endif
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
loadgp
copy %r29, %r25 /* arg1 is pt_regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
spc = r24 /* space for which the trap occurred */
ptp = r25 /* page directory/page table pointer */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
dtlb_miss_20w:
- extrd,u spc,63,7,t1 /* adjust va */
- depd t1,31,7,va /* adjust va */
- depdi 0,63,7,spc /* adjust space */
- mfctl %cr25,ptp /* Assume user space miss */
- or,*<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extrd,u va,33,9,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,*<>,n t0,spc,dtlb_fault /* forward */
-
- /* First level page table lookup */
-
- ldd,s t1(ptp),ptp
- extrd,u va,42,9,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
- depdi 0,63,12,ptp /* clear prot bits */
-
- /* Second level page table lookup */
-
- ldd,s t0(ptp),ptp
- extrd,u va,51,9,t0 /* get third-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
- depdi 0,63,12,ptp /* clear prot bits */
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dtlb_fault
- /* Third level page table lookup */
+ L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- shladd t0,3,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldd 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
+ update_ptep ptp,pte,t0,t1
- /* Check whether the "accessed" bit was set, otherwise do so */
-
- or t1,pte,t0 /* t0 has R bit set */
- and,*<> t1,pte,%r0 /* test and nullify if already set */
- std t0,0(ptp) /* write back pte */
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
-
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
+ make_insert_tlb spc,pte,prot
+
idtlbt pte,prot
rfir
nop
dtlb_check_alias_20w:
-
- /* Check to see if fault is in the temporary alias region */
-
- cmpib,*<>,n 0,spc,dtlb_fault /* forward */
- ldil L%(TMPALIAS_MAP_START),t0
- copy va,t1
- depdi 0,63,23,t1
- cmpb,*<>,n t0,t1,dtlb_fault /* forward */
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
- depd,z prot,8,7,prot
-
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
-
- extrd,u,*= va,41,1,r0
- or,*tr %r23,%r0,pte /* If "from" use "from" page */
- or,* %r26,%r0,pte /* else "to", use "to" page */
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault
idtlbt pte,prot
nop
nadtlb_miss_20w:
- extrd,u spc,63,7,t1 /* adjust va */
- depd t1,31,7,va /* adjust va */
- depdi 0,63,7,spc /* adjust space */
- mfctl %cr25,ptp /* Assume user space miss */
- or,*<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extrd,u va,33,9,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,*<>,n t0,spc,nadtlb_fault /* forward */
-
- /* First level page table lookup */
-
- ldd,s t1(ptp),ptp
- extrd,u va,42,9,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
- depdi 0,63,12,ptp /* clear prot bits */
-
- /* Second level page table lookup */
-
- ldd,s t0(ptp),ptp
- extrd,u va,51,9,t0 /* get third-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
- depdi 0,63,12,ptp /* clear prot bits */
-
- /* Third level page table lookup */
-
- shladd t0,3,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldd 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_20w
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,nadtlb_fault
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
+ L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
+ update_ptep ptp,pte,t0,t1
- /* Get rid of prot bits and convert to page addr for idtlbt */
+ make_insert_tlb spc,pte,prot
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
idtlbt pte,prot
rfir
#else
dtlb_miss_11:
- mfctl %cr25,ptp /* Assume user space miss */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,dtlb_fault /* forward */
-
- /* First level page table lookup */
+ get_pgd spc,ptp
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_11
- depi 0,31,12,ptp /* clear prot bits */
+ space_check spc,t0,dtlb_fault
- /* Second level page table lookup */
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dtlb_check_alias_11
+ update_ptep ptp,pte,t0,t1
- /* Check whether the "accessed" bit was set, otherwise do so */
-
- or t1,pte,t0 /* t0 has R bit set */
- and,<> t1,pte,%r0 /* test and nullify if already set */
- stw t0,0(ptp) /* write back pte */
-
- zdep spc,30,15,prot /* create prot id from space */
- dep pte,8,7,prot /* add in prot bits from pte */
-
- extru,= pte,_PAGE_NO_CACHE_BIT,1,r0
- depi 1,12,1,prot
- extru,= pte,_PAGE_USER_BIT,1,r0
- depi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extru,= pte,_PAGE_GATEWAY_BIT,1,r0
- depi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlba */
-
- depi 0,31,12,pte
- extru pte,24,25,pte
+ make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
nop
nadtlb_miss_11:
- mfctl %cr25,ptp /* Assume user space miss */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,nadtlb_fault /* forward */
+ get_pgd spc,ptp
- /* First level page table lookup */
+ space_check spc,t0,nadtlb_fault
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
- depi 0,31,12,ptp /* clear prot bits */
+ L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
- /* Second level page table lookup */
+ update_ptep ptp,pte,t0,t1
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_11
+ make_insert_tlb_11 spc,pte,prot
- zdep spc,30,15,prot /* create prot id from space */
- dep pte,8,7,prot /* add in prot bits from pte */
-
- extru,= pte,_PAGE_NO_CACHE_BIT,1,r0
- depi 1,12,1,prot
- extru,= pte,_PAGE_USER_BIT,1,r0
- depi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extru,= pte,_PAGE_GATEWAY_BIT,1,r0
- depi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlba */
-
- depi 0,31,12,pte
- extru pte,24,25,pte
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
nop
dtlb_miss_20:
- mfctl %cr25,ptp /* Assume user space miss */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,dtlb_fault /* forward */
-
- /* First level page table lookup */
-
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20
- depi 0,31,12,ptp /* clear prot bits */
-
- /* Second level page table lookup */
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dtlb_fault
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dtlb_check_alias_20
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- /* Check whether the "accessed" bit was set, otherwise do so */
+ update_ptep ptp,pte,t0,t1
- or t1,pte,t0 /* t0 has R bit set */
- and,<> t1,pte,%r0 /* test and nullify if already set */
- stw t0,0(ptp) /* write back pte */
+ make_insert_tlb spc,pte,prot
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
+ f_extend pte,t0
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- extrd,s pte,35,4,t0
- depdi 0,63,12,pte /* clear lower 12 bits */
- addi,= 1,t0,0
- extrd,u,*tr pte,56,25,pte
- extrd,s pte,56,25,pte /* bit 31:8 >> 8 */
idtlbt pte,prot
rfir
nop
dtlb_check_alias_20:
-
- /* Check to see if fault is in the temporary alias region */
-
- cmpib,<>,n 0,spc,dtlb_fault /* forward */
- ldil L%(TMPALIAS_MAP_START),t0
- copy va,t1
- depwi 0,31,23,t1
- cmpb,<>,n t0,t1,dtlb_fault /* forward */
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
- depd,z prot,8,7,prot
-
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
-
- extrw,u,= va,9,1,r0
- or,tr %r23,%r0,pte /* If "from" use "from" page */
- or %r26,%r0,pte /* else "to", use "to" page */
-
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+
idtlbt pte,prot
rfir
nop
nadtlb_miss_20:
- mfctl %cr25,ptp /* Assume user space miss */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
+ get_pgd spc,ptp
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,nadtlb_fault /* forward */
+ space_check spc,t0,nadtlb_fault
- /* First level page table lookup */
+ L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
- depi 0,31,12,ptp /* clear prot bits */
+ update_ptep ptp,pte,t0,t1
- /* Second level page table lookup */
+ make_insert_tlb spc,pte,prot
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_20
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
-
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- extrd,s pte,35,4,t0
- depdi 0,63,12,pte /* clear lower 12 bits */
- addi,= 1,t0,0
- extrd,u,*tr pte,56,25,pte
- extrd,s pte,56,25,pte /* bit 31:8 >> 8 */
+ f_extend pte,t0
+
idtlbt pte,prot
rfir
* of the instruction. Since we don't insert a translation
* we can get a lot of faults during a flush loop, so it makes
* sense to try to do it here with minimum overhead. We only
- * emulate fdc,fic & pdc instructions whose base and index
- * registers are not shadowed. We defer everything else to the
- * "slow" path.
+ * emulate fdc,fic,pdc,probew,prober instructions whose base
+ * and index registers are not shadowed. We defer everything
+ * else to the "slow" path.
*/
mfctl %cr19,%r9 /* Get iir */
+
+ /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
+ Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
+
+ /* Checks for fdc,fdce,pdc,"fic,4f" only */
ldi 0x280,%r16
and %r9,%r16,%r17
- cmpb,<>,n %r16,%r17,nadtlb_fault /* Not fdc,fic,pdc */
+ cmpb,<>,n %r16,%r17,nadtlb_probe_check
bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
- b,l get_register,%r25
+ BL get_register,%r25
extrw,u %r9,15,5,%r8 /* Get index register # */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
copy %r1,%r24
- b,l get_register,%r25
+ BL get_register,%r25
extrw,u %r9,10,5,%r8 /* Get base register # */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
- b,l set_register,%r25
+ BL set_register,%r25
add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
nadtlb_nullify:
- mfctl %cr22,%r8 /* Get ipsw */
+ mfctl %ipsw,%r8
ldil L%PSW_N,%r9
or %r8,%r9,%r8 /* Set PSW_N */
- mtctl %r8,%cr22
+ mtctl %r8,%ipsw
rfir
nop
-#ifdef __LP64__
-itlb_miss_20w:
-
- /*
- * I miss is a little different, since we allow users to fault
- * on the gateway page which is in the kernel address space.
- */
-
- extrd,u spc,63,7,t1 /* adjust va */
- depd t1,31,7,va /* adjust va */
- depdi 0,63,7,spc /* adjust space */
- cmpib,*= 0,spc,itlb_miss_kernel_20w
- extrd,u va,33,9,t1 /* Get pgd index */
-
- mfctl %cr25,ptp /* load user pgd */
-
- mfsp %sr7,t0 /* Get current space */
- or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,*<>,n t0,spc,itlb_fault /* forward */
-
- /* First level page table lookup */
-
-itlb_miss_common_20w:
- ldd,s t1(ptp),ptp
- extrd,u va,42,9,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,itlb_fault
- depdi 0,63,12,ptp /* clear prot bits */
+ /*
+ When there is no translation for the probe address then we
+ must nullify the insn and return zero in the target register.
+ This will indicate to the calling code that it does not have
+ write/read privileges to this address.
- /* Second level page table lookup */
+ This should technically work for prober and probew in PA 1.1,
+ and also probe,r and probe,w in PA 2.0
- ldd,s t0(ptp),ptp
- extrd,u va,51,9,t0 /* get third-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,itlb_fault
- depdi 0,63,12,ptp /* clear prot bits */
+ WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
+ THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
- /* Third level page table lookup */
-
- shladd t0,3,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldd 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,itlb_fault
-
- /* Check whether the "accessed" bit was set, otherwise do so */
-
- or t1,pte,t0 /* t0 has R bit set */
- and,*<> t1,pte,%r0 /* test and nullify if already set */
- std t0,0(ptp) /* write back pte */
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
-
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for iitlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,32,pte
- iitlbt pte,prot
-
- rfir
+ */
+nadtlb_probe_check:
+ ldi 0x80,%r16
+ and %r9,%r16,%r17
+ cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
+ BL get_register,%r25 /* Find the target register */
+ extrw,u %r9,31,5,%r8 /* Get target register */
+ CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ BL set_register,%r25
+ copy %r0,%r1 /* Write zero to target register */
+ b nadtlb_nullify /* Nullify return insn */
nop
-itlb_miss_kernel_20w:
- b itlb_miss_common_20w
- mfctl %cr24,ptp /* Load kernel pgd */
-#else
-itlb_miss_11:
+#ifdef CONFIG_64BIT
+itlb_miss_20w:
/*
* I miss is a little different, since we allow users to fault
* on the gateway page which is in the kernel address space.
*/
- cmpib,= 0,spc,itlb_miss_kernel_11
- extru va,9,10,t1 /* Get pgd index */
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,itlb_fault
- mfctl %cr25,ptp /* load user pgd */
+ L3_ptep ptp,pte,t0,va,itlb_fault
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,itlb_fault /* forward */
+ update_ptep ptp,pte,t0,t1
- /* First level page table lookup */
-
-itlb_miss_common_11:
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,itlb_fault
- depi 0,31,12,ptp /* clear prot bits */
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
- /* Second level page table lookup */
+ rfir
+ nop
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,itlb_fault
+#else
- /* Check whether the "accessed" bit was set, otherwise do so */
+itlb_miss_11:
+ get_pgd spc,ptp
- or t1,pte,t0 /* t0 has R bit set */
- and,<> t1,pte,%r0 /* test and nullify if already set */
- stw t0,0(ptp) /* write back pte */
+ space_check spc,t0,itlb_fault
- zdep spc,30,15,prot /* create prot id from space */
- dep pte,8,7,prot /* add in prot bits from pte */
+ L2_ptep ptp,pte,t0,va,itlb_fault
- extru,= pte,_PAGE_NO_CACHE_BIT,1,r0
- depi 1,12,1,prot
- extru,= pte,_PAGE_USER_BIT,1,r0
- depi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extru,= pte,_PAGE_GATEWAY_BIT,1,r0
- depi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
+ update_ptep ptp,pte,t0,t1
- /* Get rid of prot bits and convert to page addr for iitlba */
-
- depi 0,31,12,pte
- extru pte,24,25,pte
+ make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
rfir
nop
-itlb_miss_kernel_11:
- b itlb_miss_common_11
- mfctl %cr24,ptp /* Load kernel pgd */
-
itlb_miss_20:
+ get_pgd spc,ptp
- /*
- * I miss is a little different, since we allow users to fault
- * on the gateway page which is in the kernel address space.
- */
-
- cmpib,= 0,spc,itlb_miss_kernel_20
- extru va,9,10,t1 /* Get pgd index */
-
- mfctl %cr25,ptp /* load user pgd */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,itlb_fault /* forward */
+ space_check spc,t0,itlb_fault
- /* First level page table lookup */
+ L2_ptep ptp,pte,t0,va,itlb_fault
-itlb_miss_common_20:
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,itlb_fault
- depi 0,31,12,ptp /* clear prot bits */
+ update_ptep ptp,pte,t0,t1
- /* Second level page table lookup */
+ make_insert_tlb spc,pte,prot
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,itlb_fault
+ f_extend pte,t0
- /* Check whether the "accessed" bit was set, otherwise do so */
-
- or t1,pte,t0 /* t0 has R bit set */
- and,<> t1,pte,%r0 /* test and nullify if already set */
- stw t0,0(ptp) /* write back pte */
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
-
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for iitlbt */
-
- extrd,s pte,35,4,t0
- depdi 0,63,12,pte /* clear lower 12 bits */
- addi,= 1,t0,0
- extrd,u,*tr pte,56,25,pte
- extrd,s pte,56,25,pte /* bit 31:8 >> 8 */
iitlbt pte,prot
rfir
nop
-
-itlb_miss_kernel_20:
- b itlb_miss_common_20
- mfctl %cr24,ptp /* Load kernel pgd */
#endif
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
dbit_trap_20w:
- extrd,u spc,63,7,t1 /* adjust va */
- depd t1,31,7,va /* adjust va */
- depdi 0,1,2,va /* adjust va */
- depdi 0,63,7,spc /* adjust space */
- mfctl %cr25,ptp /* Assume user space miss */
- or,*<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extrd,u va,33,9,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,*<>,n t0,spc,dbit_fault /* forward */
-
- /* First level page table lookup */
-
- ldd,s t1(ptp),ptp
- extrd,u va,42,9,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dbit_fault
- depdi 0,63,12,ptp /* clear prot bits */
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dbit_fault
- /* Second level page table lookup */
+ L3_ptep ptp,pte,t0,va,dbit_fault
- ldd,s t0(ptp),ptp
- extrd,u va,51,9,t0 /* get third-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dbit_fault
- depdi 0,63,12,ptp /* clear prot bits */
-
- /* Third level page table lookup */
-
- shladd t0,3,ptp,ptp
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nolock_20w
- ldil L%PA(pa_dbit_lock),t0
- ldo R%PA(pa_dbit_lock)(t0),t0
+ load32 PA(pa_dbit_lock),t0
dbit_spin_20w:
- ldcw 0(t0),t1
+ LDCW 0(t0),t1
cmpib,= 0,t1,dbit_spin_20w
nop
dbit_nolock_20w:
#endif
- ldi (_PAGE_ACCESSED|_PAGE_DIRTY),t1
- ldd 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dbit_fault
-
- /* Set Accessed and Dirty bits in the pte */
-
- or t1,pte,pte
- std pte,0(ptp) /* write back pte */
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
+ update_dirty ptp,pte,t1
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
+ make_insert_tlb spc,pte,prot
+
idtlbt pte,prot
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nounlock_20w
#else
dbit_trap_11:
- mfctl %cr25,ptp /* Assume user space trap */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,dbit_fault /* forward */
- /* First level page table lookup */
+ get_pgd spc,ptp
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dbit_fault
- depi 0,31,12,ptp /* clear prot bits */
+ space_check spc,t0,dbit_fault
- /* Second level page table lookup */
+ L2_ptep ptp,pte,t0,va,dbit_fault
- sh2addl t0,ptp,ptp
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nolock_11
- ldil L%PA(pa_dbit_lock),t0
- ldo R%PA(pa_dbit_lock)(t0),t0
+ load32 PA(pa_dbit_lock),t0
dbit_spin_11:
- ldcw 0(t0),t1
+ LDCW 0(t0),t1
cmpib,= 0,t1,dbit_spin_11
nop
dbit_nolock_11:
#endif
- ldi (_PAGE_ACCESSED|_PAGE_DIRTY),t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dbit_fault
-
- /* Set Accessed and Dirty bits in the pte */
-
- or t1,pte,pte
- stw pte,0(ptp) /* write back pte */
-
- zdep spc,30,15,prot /* create prot id from space */
- dep pte,8,7,prot /* add in prot bits from pte */
+ update_dirty ptp,pte,t1
- extru,= pte,_PAGE_NO_CACHE_BIT,1,r0
- depi 1,12,1,prot
- extru,= pte,_PAGE_USER_BIT,1,r0
- depi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extru,= pte,_PAGE_GATEWAY_BIT,1,r0
- depi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlba */
-
- depi 0,31,12,pte
- extru pte,24,25,pte
+ make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
nop
dbit_trap_20:
- mfctl %cr25,ptp /* Assume user space trap */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
+ get_pgd spc,ptp
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,dbit_fault /* forward */
+ space_check spc,t0,dbit_fault
- /* First level page table lookup */
+ L2_ptep ptp,pte,t0,va,dbit_fault
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dbit_fault
- depi 0,31,12,ptp /* clear prot bits */
-
- /* Second level page table lookup */
-
- sh2addl t0,ptp,ptp
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nolock_20
- ldil L%PA(pa_dbit_lock),t0
- ldo R%PA(pa_dbit_lock)(t0),t0
+ load32 PA(pa_dbit_lock),t0
dbit_spin_20:
- ldcw 0(t0),t1
+ LDCW 0(t0),t1
cmpib,= 0,t1,dbit_spin_20
nop
dbit_nolock_20:
#endif
- ldi (_PAGE_ACCESSED|_PAGE_DIRTY),t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dbit_fault
-
- /* Set Accessed and Dirty bits in the pte */
-
- or t1,pte,pte
- stw pte,0(ptp) /* write back pte */
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
+ update_dirty ptp,pte,t1
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
+ make_insert_tlb spc,pte,prot
- extrd,s pte,35,4,t0
- depdi 0,63,12,pte /* clear lower 12 bits */
- addi,= 1,t0,0
- extrd,u,*tr pte,56,25,pte
- extrd,s pte,56,25,pte /* bit 31:8 >> 8 */
+ f_extend pte,t1
+
idtlbt pte,prot
#ifdef CONFIG_SMP
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
LDREG PT_GR30(%r1),%r25
copy %r1,%r24
- bl sys_clone,%r2
+ BL sys_clone,%r2
ldi SIGCHLD,%r26
LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
/* Set the return value for the child */
child_return:
- bl schedule_tail, %r2
+ BL schedule_tail, %r2
nop
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
+ /* WARNING - Clobbers r19 and r21, userspace must save these! */
STREG %r2,PT_GR19(%r1) /* save for child */
STREG %r30,PT_GR21(%r1)
- bl sys_clone,%r2
+ BL sys_clone,%r2
copy %r1,%r24
b wrapper_exit
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
STREG %r2,PT_GR19(%r1) /* save for child */
STREG %r30,PT_GR21(%r1)
- bl sys_vfork,%r2
+ BL sys_vfork,%r2
copy %r1,%r26
b wrapper_exit
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl \execve,%r2
+ BL \execve,%r2
copy %r1,%arg0
ldo -FRAME_SIZE(%r30),%r30
sys_execve_wrapper:
execve_wrapper sys_execve
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
.export sys32_execve_wrapper
.import sys32_execve
ldo TASK_REGS(%r26),%r26 /* get pt regs */
/* Don't save regs, we are going to restore them from sigcontext. */
STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
- bl sys_rt_sigreturn,%r2
+ BL sys_rt_sigreturn,%r2
ldo -16(%r30),%r29 /* Reference param save area */
#else
- bl sys_rt_sigreturn,%r2
+ BL sys_rt_sigreturn,%r2
ldo FRAME_SIZE(%r30), %r30
#endif
ldo TASK_REGS(%r1),%r24 /* get pt regs */
LDREG TASK_PT_GR30(%r24),%r24
STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
- bl do_sigaltstack,%r2
+ b,l do_sigaltstack,%r2
ldo -16(%r30),%r29 /* Reference param save area */
#else
bl do_sigaltstack,%r2
bv %r0(%r2)
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
.export sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
/* Get the user stack pointer */
LDREG TASK_PT_GR30(%r24),%r24
STREG %r2, -RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30), %r30
- bl do_sigaltstack32,%r2
+ b,l do_sigaltstack32,%r2
ldo -16(%r30),%r29 /* Reference param save area */
ldo -FRAME_SIZE(%r30), %r30
reg_save %r24
STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
- bl sys_rt_sigsuspend,%r2
+ b,l sys_rt_sigsuspend,%r2
ldo -16(%r30),%r29 /* Reference param save area */
#else
bl sys_rt_sigsuspend,%r2
.export syscall_exit
syscall_exit:
+
/* NOTE: HP-UX syscalls also come through here
- after hpux_syscall_exit fixes up return
- values. */
+ * after hpux_syscall_exit fixes up return
+ * values. */
+
/* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
* via syscall_exit_rfi if the signal was received while the process
* was running.
.import irq_stat,data
- ldil L%irq_stat,%r19
- ldo R%irq_stat(%r19),%r19
+ load32 irq_stat,%r19
#ifdef CONFIG_SMP
/* sched.h: int processor */
ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef __LP64__
- shld %r26, 6, %r20
-#else
- shlw %r26, 5, %r20
-#endif
+ SHLREG %r26,L1_CACHE_SHIFT,%r20
add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */
- LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
- cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
-
syscall_check_resched:
/* check for reschedule */
bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
syscall_restore:
- LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */
- bb,< %r19, 31-TIF_SYSCALL_TRACE,syscall_restore_rfi
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* delay slot! */
+ /* Are we being ptraced? */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+
+ LDREG TASK_PTRACE(%r1), %r19
+ bb,< %r19,31,syscall_restore_rfi
+ nop
+
ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
rest_fp %r19
LDREG TASK_PT_GR29(%r1),%r29
LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
+ /* NOTE: We use rsm/ssm pair to make this operation atomic */
rsm PSW_SM_I, %r0
LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
mfsp %sr3,%r1 /* Get users space id */
mtsp %r1,%sr7 /* Restore sr7 */
ssm PSW_SM_I, %r0
+
+ /* Set sr2 to zero for userspace syscalls to work. */
+ mtsp %r0,%sr2
mtsp %r1,%sr4 /* Restore sr4 */
mtsp %r1,%sr5 /* Restore sr5 */
mtsp %r1,%sr6 /* Restore sr6 */
depi 3,31,2,%r31 /* ensure return to user mode. */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* decide whether to reset the wide mode bit
*
* For a syscall, the W bit is stored in the lowest bit
* the most efficient way of doing things, but it works.
*/
syscall_restore_rfi:
- LDREG TASK_PTRACE(%r1), %r19
ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
mtctl %r2,%cr0 /* for immediate trap */
LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
bb,< %r2,30,pt_regs_ok /* Branch if D set */
ldo TASK_REGS(%r1),%r25
reg_save %r25 /* Save r3 to r18 */
+
+ /* Save the current sr */
mfsp %sr0,%r2
STREG %r2,TASK_PT_SR0(%r1)
+
+ /* Save the scratch sr */
mfsp %sr1,%r2
STREG %r2,TASK_PT_SR1(%r1)
- mfsp %sr2,%r2
- STREG %r2,TASK_PT_SR2(%r1)
+
+ /* sr2 should be set to zero for userspace syscalls */
+ STREG %r0,TASK_PT_SR2(%r1)
+
pt_regs_ok:
LDREG TASK_PT_GR31(%r1),%r2
depi 3,31,2,%r2 /* ensure return to user mode. */
b intr_restore
nop
- .import do_softirq,code
-syscall_do_softirq:
- bl do_softirq,%r2
- nop
- b syscall_check_resched
- ssm PSW_SM_I, %r0 /* do_softirq returns with I bit off */
-
.import schedule,code
syscall_do_resched:
- bl schedule,%r2
-#ifdef __LP64__
+ BL schedule,%r2
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#else
nop
ldi 1, %r24 /* unsigned long in_syscall */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl do_signal,%r2
+ BL do_signal,%r2
copy %r0, %r26 /* sigset_t *oldset = NULL */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
reg_restore %r20
- b,n syscall_restore
+ b,n syscall_check_sig
/*
* get_register is used by the non access tlb miss handlers to