fedora core 6 1.2949 + vserver 2.2.0 [linux-2.6.git]
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 9af4b22..f7607a3 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -22,7 +22,6 @@
  *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/config.h>
 #include <asm/asm-offsets.h>
 
 /* we have the following possibilities to act on an interruption:
@@ -31,6 +30,7 @@
 
 
 #include <asm/psw.h>
+#include <asm/cache.h>         /* for L1_CACHE_SHIFT */
 #include <asm/assembly.h>      /* for LDREG/STREG defines */
 #include <asm/pgtable.h>
 #include <asm/signal.h>
        bb,>=,n         \pmd,_PxD_PRESENT_BIT,\fault
        DEP             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
        copy            \pmd,%r9
-#ifdef CONFIG_64BIT
-       shld            %r9,PxD_VALUE_SHIFT,\pmd
-#else
-       shlw            %r9,PxD_VALUE_SHIFT,\pmd
-#endif
+       SHLREG          %r9,PxD_VALUE_SHIFT,\pmd
        EXTR            \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
        DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
         * all ILP32 processes and all the kernel for machines with
         * under 4GB of memory) */
        .macro          L3_ptep pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-level scheme, e.g. with 16KB page size */
        extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
        copy            %r0,\pte
-       extrd,u,*=      \va,31,32,%r0
+       extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        ldw,s           \index(\pgd),\pgd
-       extrd,u,*=      \va,31,32,%r0
+       extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        bb,>=,n         \pgd,_PxD_PRESENT_BIT,\fault
-       extrd,u,*=      \va,31,32,%r0
+       extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        shld            \pgd,PxD_VALUE_SHIFT,\index
-       extrd,u,*=      \va,31,32,%r0
+       extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        copy            \index,\pgd
-       extrd,u,*<>     \va,31,32,%r0
+       extrd,u,*<>     \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        ldo             ASM_PGD_PMD_OFFSET(\pgd),\pgd
+#endif
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm
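
For readers who don't speak PA-RISC assembly, the walk that L3_ptep and L2_ptep implement is, in rough C, the classic three-level descent below. This is an illustrative sketch only, not the macros' expansion: pxd_present(), pxd_addr() and the *_index() helpers are stand-in names for the present-bit test, the "clear flags and shift by PxD_VALUE_SHIFT" step, and the va bit-field extractions.

	/* sketch of the 64-bit, PT_NLEVELS == 3 case */
	static unsigned long *lookup_pte(unsigned long *pgd, unsigned long va)
	{
		unsigned long ent;

		ent = pgd[pgd_index(va)];		/* va bits above ASM_PGDIR_SHIFT */
		if (!pxd_present(ent))
			return NULL;			/* -> \fault in the macro */
		ent = ((unsigned long *)pxd_addr(ent))[pmd_index(va)];
		if (!pxd_present(ent))
			return NULL;
		return (unsigned long *)pxd_addr(ent) + pte_index(va);
	}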
 
        extrd,u,*=      \pte,_PAGE_GATEWAY_BIT+32,1,%r0
        depd            %r0,11,2,\prot  /* If Gateway, Set PL2 to 0 */
 
-       /* Get rid of prot bits and convert to page addr for iitlbt */
+       /* Enforce uncacheable pages.
+        * This should ONLY be used for MMIO on PA 2.0 machines.
+        * Memory/DMA is cache coherent on all PA2.0 machines we support
+        * (that means T-class is NOT supported) and the memory controllers
+        * on most of those machines only handle cache transactions.
+        */
+       extrd,u,*=      \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+       depi            1,12,1,\prot
 
-       depd            %r0,63,PAGE_SHIFT,\pte
-       extrd,u         \pte,56,32,\pte
+       /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+       extrd,u         \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+       depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
        .endm
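
In C terms, the nullified extrd/depi pair added above amounts to the sketch below; TLB_UNCACHEABLE is a stand-in name for the protection bit set by `depi 1,12,1,\prot`.

	if (pte & _PAGE_NO_CACHE)		/* extrd,u,*= skips the depi otherwise */
		prot |= TLB_UNCACHEABLE;	/* force the page uncacheable for MMIO */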
 
        /* Identical macro to make_insert_tlb above, except it
 
        /* Get rid of prot bits and convert to page addr for iitlba */
 
-       depi            0,31,12,\pte
+       depi            _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
        extru           \pte,24,25,\pte
-
        .endm
 
        /* This is for ILP32 PA2.0 only.  The TLB insertion needs
@@ -756,6 +761,7 @@ fault_vector_11:
 
 #define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
 #define CLONE_UNTRACED 0x00800000
+#define CLONE_KTHREAD 0x10000000
 
        .export __kernel_thread, code
        .import do_fork
@@ -933,8 +939,8 @@ syscall_exit_rfi:
         * to "proper" values now (otherwise we'll wind up restoring
         * whatever was last stored in the task structure, which might
         * be inconsistent if an interrupt occurred while on the gateway
-        * page) Note that we may be "trashing" values the user put in
-        * them, but we don't support the the user changing them.
+        * page). Note that we may be "trashing" values the user put in
+        * them, but we don't support the user changing them.
         */
 
        STREG   %r0,PT_SR2(%r16)
@@ -962,11 +968,7 @@ intr_return:
        /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
        ** irq_stat[] is defined using ____cacheline_aligned.
        */
-#ifdef CONFIG_64BIT
-       shld    %r1, 6, %r20
-#else
-       shlw    %r1, 5, %r20
-#endif
+       SHLREG  %r1,L1_CACHE_SHIFT,%r20
        add     %r19,%r20,%r19  /* now have &irq_stat[smp_processor_id()] */
 #endif /* CONFIG_SMP */
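
Expressed in C, the SHLREG/add pair computes the address below. The sketch relies on the assumption stated in the comment above, i.e. that ____cacheline_aligned pads each irq_stat[] entry to exactly one L1 cache line:

	/* &irq_stat[cpu] without a multiply: one cache line per entry */
	irq_cpustat_t *stat = (irq_cpustat_t *)
		((unsigned long)irq_stat + ((unsigned long)cpu << L1_CACHE_SHIFT));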
 
@@ -1014,14 +1016,21 @@ intr_restore:
        nop
        nop
 
+#ifndef CONFIG_PREEMPT
+# define intr_do_preempt       intr_restore
+#endif /* !CONFIG_PREEMPT */
+
        .import schedule,code
 intr_do_resched:
-       /* Only do reschedule if we are returning to user space */
+       /* Only call schedule on return to userspace. If we're returning
+        * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+        * we jump back to intr_restore.
+        */
        LDREG   PT_IASQ0(%r16), %r20
-       CMPIB= 0,%r20,intr_restore /* backward */
+       CMPIB=  0, %r20, intr_do_preempt
        nop
        LDREG   PT_IASQ1(%r16), %r20
-       CMPIB= 0,%r20,intr_restore /* backward */
+       CMPIB=  0, %r20, intr_do_preempt
        nop
 
 #ifdef CONFIG_64BIT
@@ -1037,6 +1046,32 @@ intr_do_resched:
 #endif
        ldo     R%intr_check_sig(%r2), %r2
 
+       /* preempt the current task on returning to kernel
+        * mode from an interrupt, iff need_resched is set,
+        * and preempt_count is 0. Otherwise, we continue on
+        * our merry way back to the currently running task.
+        */
+#ifdef CONFIG_PREEMPT
+       .import preempt_schedule_irq,code
+intr_do_preempt:
+       rsm     PSW_SM_I, %r0           /* disable interrupts */
+
+       /* current_thread_info()->preempt_count */
+       mfctl   %cr30, %r1
+       LDREG   TI_PRE_COUNT(%r1), %r19
+       CMPIB<> 0, %r19, intr_restore   /* if preempt_count > 0 */
+       nop                             /* prev insn branched backwards */
+
+       /* check if we interrupted a critical path */
+       LDREG   PT_PSW(%r16), %r20
+       bb,<,n  %r20, 31 - PSW_SM_I, intr_restore
+       nop
+
+       BL      preempt_schedule_irq, %r2
+       nop
+
+       b,n     intr_restore            /* ssm PSW_SM_I done by intr_restore */
+#endif /* CONFIG_PREEMPT */
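
The control flow of the intr_do_preempt path reads roughly as follows in C. This is a sketch only: interrupted_critical_path() is a stand-in for the saved-PSW test above, and by the time this code runs need_resched is already known to be set and we are returning to kernel mode.

	local_irq_disable();			/* rsm PSW_SM_I, %r0 */
	if (preempt_count() != 0)		/* TI_PRE_COUNT(%cr30) non-zero */
		goto restore;			/* inside a preempt-disabled section */
	if (interrupted_critical_path())	/* the PT_PSW check above */
		goto restore;
	preempt_schedule_irq();			/* may switch tasks; returns with
						 * interrupts still disabled */
	goto restore;				/* intr_restore re-enables them */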
 
        .import do_signal,code
 intr_do_signal:
@@ -1168,10 +1203,9 @@ intr_save:
         */
 
        /* adjust isr/ior. */
-
-       extrd,u         %r16,63,7,%r1    /* get high bits from isr for ior */
-       depd            %r1,31,7,%r17    /* deposit them into ior */
-       depdi           0,63,7,%r16      /* clear them from isr */
+       extrd,u         %r16,63,SPACEID_SHIFT,%r1       /* get high bits from isr for ior */
+       depd            %r1,31,SPACEID_SHIFT,%r17       /* deposit them into ior */
+       depdi           0,63,SPACEID_SHIFT,%r16         /* clear them from isr */
 #endif
        STREG           %r16, PT_ISR(%r29)
        STREG           %r17, PT_IOR(%r29)
@@ -1597,7 +1631,7 @@ dbit_trap_20w:
        load32          PA(pa_dbit_lock),t0
 
 dbit_spin_20w:
-       ldcw            0(t0),t1
+       LDCW            0(t0),t1
        cmpib,=         0,t1,dbit_spin_20w
        nop
 
@@ -1633,7 +1667,7 @@ dbit_trap_11:
        load32          PA(pa_dbit_lock),t0
 
 dbit_spin_11:
-       ldcw            0(t0),t1
+       LDCW            0(t0),t1
        cmpib,=         0,t1,dbit_spin_11
        nop
 
@@ -1673,7 +1707,7 @@ dbit_trap_20:
        load32          PA(pa_dbit_lock),t0
 
 dbit_spin_20:
-       ldcw            0(t0),t1
+       LDCW            0(t0),t1
        cmpib,=         0,t1,dbit_spin_20
        nop
 
@@ -2075,11 +2109,7 @@ syscall_check_bh:
        ldw     TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
 
        /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef CONFIG_64BIT
-       shld    %r26, 6, %r20
-#else
-       shlw    %r26, 5, %r20
-#endif
+       SHLREG  %r26,L1_CACHE_SHIFT,%r20
        add     %r19,%r20,%r19  /* now have &irq_stat[smp_processor_id()] */
 #endif /* CONFIG_SMP */