X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fparisc%2Fkernel%2Fentry.S;h=f7607a39403935fd0c2c3a716d9a4b657acf9178;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=9c284947db7c93ac99720fe23b96e0f07243e1d9;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 9c284947d..f7607a394 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -22,22 +22,22 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/config.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 
 /* we have the following possibilities to act on an interruption:
  *  - handle in assembly and use shadowed registers only
  *  - save registers to kernel stack and handle in assembly or C */
 
+#include <asm/psw.h>
+#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
 #include <asm/assembly.h>	/* for LDREG/STREG defines */
 #include <asm/pgtable.h>
-#include <asm/psw.h>
 #include <asm/signal.h>
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #define CMPIB		cmpib,*
 #define CMPB		cmpb,*
 #define COND(x)		*x
@@ -67,22 +67,23 @@
 
	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
-	rsm	PSW_SM_Q,%r0
-	tovirt_r1 %r29
-	mfsp	%sr7, %r1
-	or,=	%r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
-	mtsp	%r1, %sr3
+	/* pcxt_ssm_bug */
+	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
+	mfsp	%sr7, %r1
+	or,=	%r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
+	mtsp	%r1, %sr3
+	tovirt_r1 %r29
+	load32	KERNEL_PSW, %r1
+
+	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
-	ldil	L%KERNEL_PSW, %r1
-	ldo	R%KERNEL_PSW(%r1), %r1
-	mtctl	%r1, %cr22
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
-	ldil	L%4f, %r1
-	ldo	R%4f(%r1), %r1
+	mtctl	%r1, %ipsw
+	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
@@ -197,8 +198,7 @@
	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
-	ldil	L%PA(os_hpmc), %r3
-	ldo	R%PA(os_hpmc)(%r3), %r3
+	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
@@ -217,7 +217,7 @@
	va  = r8	/* virtual address for which the trap occured */
	spc = r24	/* space for which the trap occured */
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 
	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
@@ -239,7 +239,7 @@
 
	.macro	itlb_20 code
	mfctl	%pcsq, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	b	itlb_miss_20w
 #else
	b	itlb_miss_20
@@ -249,7 +249,7 @@
	.align	32
	.endm
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
@@ -286,7 +286,7 @@
 
	.macro	naitlb_20 code
	mfctl	%isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	b	itlb_miss_20w
 #else
	b	itlb_miss_20
@@ -299,7 +299,7 @@
	.align	32
	.endm
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */
@@ -321,7 +321,7 @@
 
	.macro	dtlb_20 code
	mfctl	%isr, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
 #else
	b	dtlb_miss_20
@@ -331,7 +331,7 @@
	.align	32
	.endm
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
 
	.macro	nadtlb_11 code
@@ -349,7 +349,7 @@
 
	.macro	nadtlb_20 code
	mfctl	%isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
 #else
	b	nadtlb_miss_20
@@ -359,7 +359,7 @@
	.align	32
	.endm
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */
@@ -381,7 +381,7 @@
 
	.macro	dbit_20 code
	mfctl	%isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	b	dbit_trap_20w
 #else
	b	dbit_trap_20
@@ -394,7 +394,7 @@
	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros */
	.macro	EXTR	reg1,start,length,reg2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	extrd,u	\reg1,32+\start,\length,\reg2
 #else
	extrw,u	\reg1,\start,\length,\reg2
@@ -402,7 +402,7 @@
	.endm
 
	.macro	DEP	reg1,start,length,reg2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	depd	\reg1,32+\start,\length,\reg2
 #else
	depw	\reg1,\start,\length,\reg2
@@ -410,7 +410,7 @@
	.endm
 
	.macro	DEPI	val,start,length,reg
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	depdi	\val,32+\start,\length,\reg
 #else
	depwi	\val,\start,\length,\reg
@@ -421,7 +421,7 @@
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
@@ -440,8 +440,17 @@
	mfctl	%cr25,\reg
	.endm
 
-	/* Only allow faults on different spaces from the
-	 * currently active one if we're the kernel */
+	/* 
+		space_check(spc,tmp,fault)
+
+		spc - The space we saw the fault with.
+		tmp - The place to store the current space.
+		fault - Function to call on failure.
+
+		Only allow faults on different spaces from the
+		currently active one if we're the kernel 
+
+	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
@@ -455,20 +464,24 @@
	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present 
	 *
-	 * NOTE: we use ldw even for LP64 because our pte
-	 * and pmd are allocated <4GB */
+	 * NOTE: we use ldw even for LP64, since the short pointers
+	 * can address up to 1TB
+	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
 #if PT_NLEVELS == 3
	EXTR	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
	EXTR	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
-	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
+	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
+	DEP	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
+	copy	\pmd,%r9
+	SHLREG	%r9,PxD_VALUE_SHIFT,\pmd
	EXTR	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
-	bb,>=,n	\pmd,_PAGE_PRESENT_BIT,\fault
-	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
@@ -485,14 +498,20 @@
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep	pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy	%r0,\pte
-	extrd,u,*=	\va,31,32,%r0
	ldw,s	\index(\pgd),\pgd
-	extrd,u,*<>	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+	shld	\pgd,PxD_VALUE_SHIFT,\index
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+	copy	\index,\pgd
+	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
-	extrd,u,*=	\va,31,32,%r0
-	bb,>=,n	\pgd,_PAGE_PRESENT_BIT,\fault
+#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm
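The L2_ptep rewrite above switches the walk to the PxD entry encoding: a pmd entry now carries flag bits in its low bits and a shifted page-table address above them, so the handler tests _PxD_PRESENT_BIT, strips the flags, and shifts by PxD_VALUE_SHIFT before indexing the PTE level. A rough C equivalent of that walk (not part of the patch; the constants, types and the helper name are illustrative only):

    #include <stdint.h>
    #include <stddef.h>

    #define PxD_FLAG_SHIFT    4               /* assumed width of the flag field */
    #define PxD_VALUE_SHIFT   8               /* entry stores (table addr >> 8) */
    #define _PxD_PRESENT_BIT  (1u << 0)

    typedef uint32_t pmd_entry_t;  /* "short" pointers: ldw works even on LP64 */
    typedef uint64_t pte_t;

    /* Mirrors: bb,>=,n ...,_PxD_PRESENT_BIT,\fault ; DEP (clear flags) ;
     * SHLREG ...,PxD_VALUE_SHIFT ; shladd ; LDREG. */
    static pte_t *pmd_walk(const pmd_entry_t *pmd_base,
                           unsigned pmd_index, unsigned pte_index)
    {
        pmd_entry_t e = pmd_base[pmd_index];

        if (!(e & _PxD_PRESENT_BIT))
            return NULL;                         /* branch to \fault */

        e &= ~((1u << PxD_FLAG_SHIFT) - 1);      /* clear the flag bits */
        pte_t *table = (pte_t *)((uintptr_t)e << PxD_VALUE_SHIFT);

        return &table[pte_index];                /* "pmd is now pte" */
    }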
@@ -507,7 +526,7 @@
 
	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
-	.macro	update_dirty	ptep,pte,tmp,tmp1
+	.macro	update_dirty	ptep,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptep)
@@ -542,10 +561,18 @@
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
 
-	/* Get rid of prot bits and convert to page addr for iitlbt */
+	/* Enforce uncacheable pages.
+	 * This should ONLY be use for MMIO on PA 2.0 machines.
+	 * Memory/DMA is cache coherent on all PA2.0 machines we support
+	 * (that means T-class is NOT supported) and the memory controllers
+	 * on most of those machines only handles cache transactions.
+	 */
+	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+	depi	1,12,1,\prot
 
-	depd	%r0,63,PAGE_SHIFT,\pte
-	extrd,u	\pte,56,32,\pte
+	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
	.endm
 
	/* Identical macro to make_insert_tlb above, except it
@@ -563,9 +590,8 @@
 
	/* Get rid of prot bits and convert to page addr for iitlba */
 
-	depi	0,31,12,\pte
+	depi	_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
	extru	\pte,24,25,\pte
-
	.endm
 
	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
@@ -589,7 +615,7 @@
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
-#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
+#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
@@ -603,7 +629,7 @@
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
 #else
	extrw,u,=	\va,9,1,%r0
@@ -670,7 +696,7 @@ fault_vector_20:
	def	30
	def	31
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 
	.export fault_vector_11
 
@@ -735,6 +761,7 @@ fault_vector_11:
 
 #define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
 #define CLONE_UNTRACED 0x00800000
+#define CLONE_KTHREAD 0x10000000
 
	.export __kernel_thread, code
	.import do_fork
@@ -743,7 +770,7 @@ __kernel_thread:
 
	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
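The "function pointers in wide mode are little structs" comment above refers to 64-bit PA-RISC function descriptors: an indirect call goes through a small record holding the entry address and the callee's global-data pointer, which is why __kernel_thread picks the child's %dp out of the descriptor at 24(%r26) instead of branching to %r26 directly. A sketch of the idea (not part of the patch; only the gp-at-offset-24 detail is visible in the hunk, the rest of the layout is assumed):

    #include <stdint.h>

    /* Simplified wide-mode function descriptor. */
    struct fdesc {
        uint64_t reserved[2];   /* bytes 0..15 */
        uint64_t addr;          /* entry point of the function */
        uint64_t gp;            /* 24(%r26) above: callee's %dp/%r27 */
    };

    /* What an indirect call conceptually expands to: load the target
     * address AND install the target's data pointer before branching. */
    static void call_through(const struct fdesc *fp)
    {
        void (*entry)(void) = (void (*)(void))fp->addr;
        /* real code also moves fp->gp into %r27 here */
        entry();
    }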
@@ -759,10 +786,10 @@ __kernel_thread:
	or	%r26, %r24, %r26	/* will have kernel mappings.	*/
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)		/* user_tid */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
-	bl	do_fork, %r2
+	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */
 
	/* Parent Returns here */
@@ -783,12 +810,12 @@ __kernel_thread:
 ret_from_kernel_thread:
 
	/* Call schedule_tail first though */
-	bl	schedule_tail, %r2
+	BL	schedule_tail, %r2
	nop
 
	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
 #endif
@@ -796,11 +823,16 @@ ret_from_kernel_thread:
	ble	0(%sr7, %r1)
	copy	%r31, %r2
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
 #endif
+#ifndef CONFIG_64BIT
	b	sys_exit
+#else
+	load32	sys_exit, %r1
+	bv	%r0(%r1)
+#endif
	ldi	0, %r26
 
	.import sys_execve, code
@@ -812,10 +844,10 @@ __execve:
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
-	bl	sys_execve, %r2
+	BL	sys_execve, %r2
	copy	%r16, %r26
 
	cmpib,=,n 0,%r28,intr_return	/* forward */
@@ -837,10 +869,10 @@ __execve:
 _switch_to:
	STREG	%r2, -RP_OFFSET(%r30)
 
+	callee_save_float
	callee_save
 
-	ldil	L%_switch_to_ret, %r2
-	ldo	R%_switch_to_ret(%r2), %r2
+	load32	_switch_to_ret, %r2
	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2
@@ -854,6 +886,7 @@ _switch_to:
 _switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
+	callee_rest_float
 
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
@@ -871,9 +904,6 @@ _switch_to_ret:
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
-	 * Note that the following code uses a "relied upon translation".
-	 * See the parisc ACD for details. The ssm is necessary due to a
-	 * PCXT bug.
	 */
 
	.align 4096
@@ -893,16 +923,13 @@ syscall_exit_rfi:
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
-	ldil	L%USER_PSW_MASK,%r1
-	ldo	R%USER_PSW_MASK(%r1),%r1
-#ifdef __LP64__
-	ldil	L%USER_PSW_HI_MASK,%r20
-	ldo	R%USER_PSW_HI_MASK(%r20),%r20
+	load32	USER_PSW_MASK,%r1
+#ifdef CONFIG_64BIT
+	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
 #endif
	and	%r19,%r1,%r19	/* Mask out bits that user shouldn't play with */
-	ldil	L%USER_PSW,%r1
-	ldo	R%USER_PSW(%r1),%r1
+	load32	USER_PSW,%r1
	or	%r19,%r1,%r19	/* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)
@@ -912,8 +939,8 @@ syscall_exit_rfi:
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occured while on the gateway
-	 * page) Note that we may be "trashing" values the user put in
-	 * them, but we don't support the the user changing them.
+	 * page). Note that we may be "trashing" values the user put in
+	 * them, but we don't support the user changing them.
	 */
 
	STREG	%r0,PT_SR2(%r16)
@@ -927,31 +954,24 @@ syscall_exit_rfi:
	STREG	%r19,PT_SR7(%r16)
 
 intr_return:
+	/* NOTE: Need to enable interrupts incase we schedule. */
	ssm	PSW_SM_I, %r0
 
	/* Check for software interrupts */
 
	.import irq_stat,data
 
-	ldil	L%irq_stat,%r19
-	ldo	R%irq_stat(%r19),%r19
+	load32	irq_stat,%r19
 
 #ifdef CONFIG_SMP
	mfctl	%cr30,%r1
	ldw	TI_CPU(%r1),%r1	/* get cpu # - int */
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
-#ifdef __LP64__
-	shld	%r1, 6, %r20
-#else
-	shlw	%r1, 5, %r20
-#endif
+	SHLREG	%r1,L1_CACHE_SHIFT,%r20
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
 #endif /* CONFIG_SMP */
 
-	LDREG	IRQSTAT_SIRQ_PEND(%r19),%r20	/* hardirq.h: unsigned long */
-	cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
-
 intr_check_resched:
 
	/* check for reschedule */
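The SHLREG line above folds the old 32/64-bit #ifdef pair (shlw by 5 / shld by 6) into one shift by L1_CACHE_SHIFT. It works because each irq_stat[] entry is padded to a whole cache line, so element cpu sits at base + (cpu << L1_CACHE_SHIFT). In C (not part of the patch; the shift value is an assumption matching the old 64-bit constant):

    #include <stdint.h>

    #define L1_CACHE_SHIFT 6                    /* assumed: 64-byte lines */
    #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

    /* One per-cpu entry, padded out to a full cache line. */
    struct irq_cpustat {
        unsigned long __softirq_pending;
    } __attribute__((aligned(L1_CACHE_BYTES)));

    extern struct irq_cpustat irq_stat[];

    /* &irq_stat[cpu], computed the way the assembly computes it: the
     * alignment attribute makes sizeof(entry) == L1_CACHE_BYTES. */
    static struct irq_cpustat *irq_stat_for(unsigned int cpu)
    {
        return (struct irq_cpustat *)
            ((uintptr_t)irq_stat + ((uintptr_t)cpu << L1_CACHE_SHIFT));
    }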
@@ -970,18 +990,22 @@ intr_restore:
	ldo	PT_FR31(%r29),%r1
	rest_fp	%r1
	rest_general %r29
-	ssm	0,%r0
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
+
+	/* inverse of virt_map */
+	pcxt_ssm_bug
+	rsm	PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1 %r29
-	rsm	(PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0
+
+	/* Restore space id's and special cr's from PT_REGS
+	 * structure pointed to by r29
+	 */
	rest_specials %r29
+
+	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
+	 * It also restores r1 and r30.
+	 */
	rest_stack
+
	rfi
	nop
	nop
@@ -992,39 +1016,73 @@ intr_restore:
	nop
	nop
 
-	.import do_softirq,code
-intr_do_softirq:
-	bl	do_softirq,%r2
-#ifdef __LP64__
-	ldo	-16(%r30),%r29		/* Reference param save area */
-#else
-	nop
-#endif
-	b	intr_check_resched
-	nop
+#ifndef CONFIG_PREEMPT
+# define intr_do_preempt	intr_restore
+#endif /* !CONFIG_PREEMPT */
 
	.import schedule,code
 intr_do_resched:
-	/* Only do reschedule if we are returning to user space */
+	/* Only call schedule on return to userspace. If we're returning
+	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+	 * we jump back to intr_restore.
+	 */
	LDREG	PT_IASQ0(%r16), %r20
-	CMPIB=	0,%r20,intr_restore /* backward */
+	CMPIB=	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
-	CMPIB=	0,%r20,intr_restore /* backward */
+	CMPIB=	0, %r20, intr_do_preempt
	nop
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
	ldil	L%intr_check_sig, %r2
+#ifndef CONFIG_64BIT
	b	schedule
+#else
+	load32	schedule, %r20
+	bv	%r0(%r20)
+#endif
	ldo	R%intr_check_sig(%r2), %r2
 
+	/* preempt the current task on returning to kernel
+	 * mode from an interrupt, iff need_resched is set,
+	 * and preempt_count is 0. otherwise, we continue on
+	 * our merry way back to the current running task.
+	 */
+#ifdef CONFIG_PREEMPT
+	.import preempt_schedule_irq,code
+intr_do_preempt:
+	rsm	PSW_SM_I, %r0		/* disable interrupts */
+
+	/* current_thread_info()->preempt_count */
+	mfctl	%cr30, %r1
+	LDREG	TI_PRE_COUNT(%r1), %r19
+	CMPIB<>	0, %r19, intr_restore	/* if preempt_count > 0 */
+	nop				/* prev insn branched backwards */
+
+	/* check if we interrupted a critical path */
+	LDREG	PT_PSW(%r16), %r20
+	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
+	nop
+
+	BL	preempt_schedule_irq, %r2
+	nop
+
+	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
+#endif /* CONFIG_PREEMPT */
 
	.import do_signal,code
 intr_do_signal:
-	/* Only do signals if we are returning to user space */
+	/*
+		This check is critical to having LWS
+		working. The IASQ is zero on the gateway
+		page and we cannot deliver any signals until
+		we get off the gateway page.
+
+		Only do signals if we are returning to user space
+	*/
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0,%r20,intr_restore /* backward */
	nop
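The intr_do_preempt path added above only preempts when the interrupt hit kernel mode with preempt_count equal to zero and with interrupts enabled in the interrupted context (the PSW_SM_I test on the saved PSW). The gate in C, roughly (not part of the patch; the helpers are hypothetical stand-ins for the %cr30 thread-info and pt_regs accesses):

    /* Sketch of the intr_do_preempt checks; not kernel API. */
    #define PSW_I 0x1UL    /* assumed external-interrupt enable bit */

    extern unsigned int current_preempt_count(void); /* TI_PRE_COUNT(%cr30) */
    extern void preempt_schedule_irq(void);

    static void maybe_preempt(unsigned long saved_psw)
    {
        if (current_preempt_count() != 0)
            return;                    /* CMPIB<> 0,%r19,intr_restore */

        if (!(saved_psw & PSW_I))
            return;                    /* interrupts were off: critical path */

        /* need_resched was already established by intr_do_resched */
        preempt_schedule_irq();        /* BL preempt_schedule_irq,%r2 */
    }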
@@ -1034,14 +1092,14 @@ intr_do_signal:
	copy	%r0, %r24			/* unsigned long in_syscall */
	copy	%r16, %r25			/* struct pt_regs *regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
 
-	bl	do_signal,%r2
+	BL	do_signal,%r2
	copy	%r0, %r26			/* sigset_t *oldset = NULL */
 
-	b	intr_restore
+	b	intr_check_sig
	nop
 
 /*
@@ -1058,7 +1116,7 @@ intr_extint:
	mfctl	%cr31,%r1
	copy	%r30,%r17
	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	depdi	0,63,15,%r17
 #else
	depi	0,31,15,%r17
@@ -1085,7 +1143,7 @@ intr_extint:
 
	ldil	L%intr_return, %r2
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
 #endif
 
@@ -1123,15 +1181,17 @@ intr_save:
 
	CMPIB=,n	6,%r26,skip_save_ior
 
-	/* save_specials left ipsw value in r8 for us to test */
 
	mfctl	%cr20, %r16	/* isr */
+	nop	/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%cr21, %r17	/* ior */
+
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
+	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi	0,1,2,%r17
@@ -1143,10 +1203,9 @@ intr_save:
	 */
 
	/* adjust isr/ior. */
-
-	extrd,u	%r16,63,7,%r1		/* get high bits from isr for ior */
-	depd	%r1,31,7,%r17		/* deposit them into ior */
-	depdi	0,63,7,%r16		/* clear them from isr */
+	extrd,u	%r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
+	depd	%r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
+	depdi	0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
 #endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)
@@ -1162,7 +1221,7 @@ skip_save_ior:
	loadgp
 
	copy	%r29, %r25	/* arg1 is pt_regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
 #endif
 
@@ -1200,7 +1259,7 @@ skip_save_ior:
	spc = r24	/* space for which the trap occured */
	ptp = r25	/* page directory/page table pointer */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 
 dtlb_miss_20w:
	space_adjust	spc,va,t0
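The intr_save hunk above keeps the long-standing trick, now parameterized by SPACEID_SHIFT instead of a hard-coded 7: the low space-id bits of the ISR are deposited into the upper half of the IOR (from bit 32 up) and cleared from the ISR, so the pair can be handed on as one clean 64-bit address. The same arithmetic in C (not part of the patch; 7 is just the value the replaced constants used):

    #include <stdint.h>

    #define SPACEID_SHIFT 7    /* width taken from the replaced constants */

    /* extrd,u %r16,63,SPACEID_SHIFT,%r1 ; depd %r1,31,SPACEID_SHIFT,%r17 ;
     * depdi 0,63,SPACEID_SHIFT,%r16 */
    static void fold_isr_into_ior(uint64_t *isr, uint64_t *ior)
    {
        uint64_t mask = (1ULL << SPACEID_SHIFT) - 1;

        *ior |= (*isr & mask) << 32;   /* deposit low isr bits above bit 31 */
        *isr &= ~mask;                 /* and clear them from the isr */
    }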
@@ -1431,36 +1490,67 @@ nadtlb_emulate:
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
-	 * emulate fdc,fic & pdc instructions whose base and index
-	 * registers are not shadowed. We defer everything else to the
-	 * "slow" path.
+	 * emulate fdc,fic,pdc,probew,prober instructions whose base
+	 * and index registers are not shadowed. We defer everything
+	 * else to the "slow" path.
	 */
 
	mfctl	%cr19,%r9	/* Get iir */
+
+	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
+	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
+
+	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi	0x280,%r16
	and	%r9,%r16,%r17
-	cmpb,<>,n	%r16,%r17,nadtlb_fault	/* Not fdc,fic,pdc */
+	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n	%r9,26,nadtlb_nullify	/* m bit not set, just nullify */
-	b,l	get_register,%r25
+	BL	get_register,%r25
	extrw,u	%r9,15,5,%r8		/* Get index register # */
	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
	copy	%r1,%r24
-	b,l	get_register,%r25
+	BL	get_register,%r25
	extrw,u	%r9,10,5,%r8		/* Get base register # */
	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
-	b,l	set_register,%r25
+	BL	set_register,%r25
	add,l	%r1,%r24,%r1		/* doesn't affect c/b bits */
 
 nadtlb_nullify:
-	mfctl	%cr22,%r8		/* Get ipsw */
+	mfctl	%ipsw,%r8
	ldil	L%PSW_N,%r9
	or	%r8,%r9,%r8		/* Set PSW_N */
-	mtctl	%r8,%cr22
+	mtctl	%r8,%ipsw
 
	rfir
	nop
 
+	/*
+		When there is no translation for the probe address then we
+		must nullify the insn and return zero in the target regsiter.
+		This will indicate to the calling code that it does not have
+		write/read privileges to this address.
+
+		This should technically work for prober and probew in PA 1.1,
+		and also probe,r and probe,w in PA 2.0
+
+		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
+		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
+
+	*/
+nadtlb_probe_check:
+	ldi	0x80,%r16
+	and	%r9,%r16,%r17
+	cmpb,<>,n	%r16,%r17,nadtlb_fault	/* Must be probe,[rw]*/
+	BL	get_register,%r25		/* Find the target register */
+	extrw,u	%r9,31,5,%r8			/* Get target register */
+	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
+	BL	set_register,%r25
+	copy	%r0,%r1				/* Write zero to target register */
+	b	nadtlb_nullify			/* Nullify return insn */
+	nop
+
+
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 itlb_miss_20w:
 
	/*
@@ -1527,7 +1617,7 @@ itlb_miss_20:
 
 #endif
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 
 dbit_trap_20w:
	space_adjust	spc,va,t0
@@ -1538,17 +1628,16 @@ dbit_trap_20w:
 
 #ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_20w
-	ldil	L%PA(pa_dbit_lock),t0
-	ldo	R%PA(pa_dbit_lock)(t0),t0
+	load32	PA(pa_dbit_lock),t0
 
 dbit_spin_20w:
-	ldcw	0(t0),t1
+	LDCW	0(t0),t1
	cmpib,=	0,t1,dbit_spin_20w
	nop
 
 dbit_nolock_20w:
 #endif
-	update_dirty	ptp,pte,t0,t1
+	update_dirty	ptp,pte,t1
 
	make_insert_tlb	spc,pte,prot
 
@@ -1575,17 +1664,16 @@ dbit_trap_11:
 
 #ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_11
-	ldil	L%PA(pa_dbit_lock),t0
-	ldo	R%PA(pa_dbit_lock)(t0),t0
+	load32	PA(pa_dbit_lock),t0
 
 dbit_spin_11:
-	ldcw	0(t0),t1
+	LDCW	0(t0),t1
	cmpib,=	0,t1,dbit_spin_11
	nop
 
 dbit_nolock_11:
 #endif
-	update_dirty	ptp,pte,t0,t1
+	update_dirty	ptp,pte,t1
 
	make_insert_tlb_11	spc,pte,prot
 
@@ -1616,21 +1704,20 @@ dbit_trap_20:
 
 #ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_20
-	ldil	L%PA(pa_dbit_lock),t0
-	ldo	R%PA(pa_dbit_lock)(t0),t0
+	load32	PA(pa_dbit_lock),t0
 
 dbit_spin_20:
-	ldcw	0(t0),t1
+	LDCW	0(t0),t1
	cmpib,=	0,t1,dbit_spin_20
	nop
 
 dbit_nolock_20:
 #endif
-	update_dirty	ptp,pte,t0,t1
+	update_dirty	ptp,pte,t1
 
	make_insert_tlb	spc,pte,prot
 
-	f_extend	pte,t0
+	f_extend	pte,t1
 
	idtlbt	pte,prot
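The dbit_spin_* loops above are the classic PA-RISC ldcw lock: ldcw atomically loads a word and zeroes it, so a zero result means somebody else already holds the lock and the handler retries. Conceptually, in C (not part of the patch; __ldcw() stands for the load-and-clear primitive the LDCW macro expands to, and the real lock word needs the alignment the instruction requires):

    typedef volatile unsigned int pa_lock_t;      /* nonzero = free, 0 = held */

    extern unsigned int __ldcw(pa_lock_t *addr);  /* atomic load-and-clear */

    /* dbit_spin_20w:  LDCW 0(t0),t1 ; cmpib,= 0,t1,dbit_spin_20w ; nop */
    static void pa_dbit_lock_acquire(pa_lock_t *lock)
    {
        while (__ldcw(lock) == 0)
            ;          /* got 0: lock was held, spin and retry the ldcw */
    }

    static void pa_dbit_lock_release(pa_lock_t *lock)
    {
        *lock = 1;     /* storing nonzero frees the lock */
    }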
@@ -1739,7 +1826,7 @@ sys_fork_wrapper:
	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
@@ -1750,7 +1837,7 @@ sys_fork_wrapper:
	LDREG	PT_GR30(%r1),%r25
	copy	%r1,%r24
 
-	bl	sys_clone,%r2
+	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26
 
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
@@ -1770,7 +1857,7 @@ wrapper_exit:
 
	/* Set the return value for the child */
 child_return:
-	bl	schedule_tail, %r2
+	BL	schedule_tail, %r2
	nop
 
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
@@ -1789,13 +1876,14 @@ sys_clone_wrapper:
	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
+	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
-	bl	sys_clone,%r2
+	BL	sys_clone,%r2
	copy	%r1,%r24
 
	b	wrapper_exit
@@ -1811,14 +1899,14 @@ sys_vfork_wrapper:
	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
 
-	bl	sys_vfork,%r2
+	BL	sys_vfork,%r2
	copy	%r1,%r26
 
	b	wrapper_exit
@@ -1839,10 +1927,10 @@ sys_vfork_wrapper:
	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
-	bl	\execve,%r2
+	BL	\execve,%r2
	copy	%r1,%arg0
 
	ldo	-FRAME_SIZE(%r30),%r30
@@ -1865,7 +1953,7 @@ error_\execve:
 sys_execve_wrapper:
	execve_wrapper sys_execve
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	.export sys32_execve_wrapper
	.import sys32_execve
 
@@ -1879,12 +1967,12 @@ sys_rt_sigreturn_wrapper:
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
-	bl	sys_rt_sigreturn,%r2
+	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
-	bl	sys_rt_sigreturn,%r2
+	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
 #endif
 
@@ -1910,9 +1998,9 @@ sys_sigaltstack_wrapper:
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
-	bl	do_sigaltstack,%r2
+	b,l	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
	bl	do_sigaltstack,%r2
@@ -1924,7 +2012,7 @@ sys_sigaltstack_wrapper:
	bv	%r0(%r2)
	nop
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	.export sys32_sigaltstack_wrapper
 sys32_sigaltstack_wrapper:
	/* Get the user stack pointer */
@@ -1932,7 +2020,7 @@ sys32_sigaltstack_wrapper:
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
-	bl	do_sigaltstack32,%r2
+	b,l	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
 
	ldo	-FRAME_SIZE(%r30), %r30
@@ -1948,9 +2036,9 @@ sys_rt_sigsuspend_wrapper:
	reg_save %r24
 
	STREG	%r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
-	bl	sys_rt_sigsuspend,%r2
+	b,l	sys_rt_sigsuspend,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
	bl	sys_rt_sigsuspend,%r2
@@ -1969,9 +2057,11 @@ sys_rt_sigsuspend_wrapper:
	.export syscall_exit
 syscall_exit:
+
	/* NOTE: HP-UX syscalls also come through here
-	   after hpux_syscall_exit fixes up return
-	   values. */
+	 * after hpux_syscall_exit fixes up return
+	 * values. */
+
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
@@ -2011,8 +2101,7 @@ syscall_check_bh:
 
	.import irq_stat,data
 
-	ldil	L%irq_stat,%r19
-	ldo	R%irq_stat(%r19),%r19
+	load32	irq_stat,%r19
 
 #ifdef CONFIG_SMP
	/* sched.h: int processor */
@@ -2020,17 +2109,10 @@ syscall_check_bh:
	ldw	TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
 
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef __LP64__
-	shld	%r26, 6, %r20
-#else
-	shlw	%r26, 5, %r20
-#endif
+	SHLREG	%r26,L1_CACHE_SHIFT,%r20
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
 #endif /* CONFIG_SMP */
 
-	LDREG	IRQSTAT_SIRQ_PEND(%r19),%r20	/* hardirq.h: unsigned long */
-	cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
-
 syscall_check_resched:
 
	/* check for reschedule */
@@ -2070,18 +2152,22 @@ syscall_restore:
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	/* restore syscall rp */
 
+	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm	PSW_SM_I, %r0
	LDREG	TASK_PT_GR30(%r1),%r30		/* restore user sp */
	mfsp	%sr3,%r1			/* Get users space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0
+
+	/* Set sr2 to zero for userspace syscalls to work. */
+	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */
 
	depi	3,31,2,%r31			/* ensure return to user mode. */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
@@ -2142,12 +2228,18 @@ syscall_restore_rfi:
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */
+
+	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)
+
+	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)
-	mfsp	%sr2,%r2
-	STREG	%r2,TASK_PT_SR2(%r1)
+
+	/* sr2 should be set to zero for userspace syscalls */
+	STREG	%r0,TASK_PT_SR2(%r1)
+
 pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			/* ensure return to user mode. */
@@ -2158,17 +2250,10 @@ pt_regs_ok:
	b	intr_restore
	nop
 
-	.import do_softirq,code
-syscall_do_softirq:
-	bl	do_softirq,%r2
-	nop
-	b	syscall_check_resched
-	ssm	PSW_SM_I, %r0	/* do_softirq returns with I bit off */
-
	.import schedule,code
 syscall_do_resched:
-	bl	schedule,%r2
-#ifdef __LP64__
+	BL	schedule,%r2
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
	nop
@@ -2188,17 +2273,17 @@ syscall_do_signal:
 
	ldi	1, %r24			/* unsigned long in_syscall */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
-	bl	do_signal,%r2
+	BL	do_signal,%r2
	copy	%r0, %r26		/* sigset_t *oldset = NULL */
 
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20	/* reload pt_regs */
	reg_restore %r20
 
-	b,n	syscall_restore
+	b,n	syscall_check_sig
 
 /*
 * get_register is used by the non access tlb miss handlers to