linux 2.6.16.38 w/ vs2.0.3-rc1
[linux-2.6.git] / arch/arm/mm/proc-v6.S
index 6f72549..92f3ca3 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -2,7 +2,6 @@
  *  linux/arch/arm/mm/proc-v6.S
  *
  *  Copyright (C) 2001 Deep Blue Solutions Ltd.
- *  Modified by Catalin Marinas for noMMU support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <asm/asm-offsets.h>
 #include <asm/hardware/arm_scu.h>
 #include <asm/procinfo.h>
-#include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 
 #include "proc-macros.S"
 
 #define D_CACHE_LINE_SIZE      32
 
-#define TTB_C          (1 << 0)
-#define TTB_S          (1 << 1)
-#define TTB_IMP                (1 << 2)
-#define TTB_RGN_NC     (0 << 3)
-#define TTB_RGN_WBWA   (1 << 3)
-#define TTB_RGN_WT     (2 << 3)
-#define TTB_RGN_WB     (3 << 3)
+       .macro  cpsie, flags
+       .ifc \flags, f
+       .long   0xf1080040
+       .exitm
+       .endif
+       .ifc \flags, i
+       .long   0xf1080080
+       .exitm
+       .endif
+       .ifc \flags, if
+       .long   0xf10800c0
+       .exitm
+       .endif
+       .err
+       .endm
+
+       .macro  cpsid, flags
+       .ifc \flags, f
+       .long   0xf10c0040
+       .exitm
+       .endif
+       .ifc \flags, i
+       .long   0xf10c0080
+       .exitm
+       .endif
+       .ifc \flags, if
+       .long   0xf10c00c0
+       .exitm
+       .endif
+       .err
+       .endm
 
 ENTRY(cpu_v6_proc_init)
        mov     pc, lr
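For reference, a small C sketch (illustration only, not part of the patch) of how the hand-assembled opcodes in the cpsie/cpsid macros above are built, presumably emitted as .long values so the file still assembles with toolchains that lack the ARMv6 cps mnemonic. The ARMv6 CPS encoding uses base 0xf1000000, the enable/disable imod field in bits [19:18], and the A/I/F mask bits in [8:6]:

#include <stdint.h>
#include <stdio.h>

#define CPS_BASE    0xf1000000u
#define CPS_ENABLE  (0x2u << 18)   /* imod = 0b10: cpsie */
#define CPS_DISABLE (0x3u << 18)   /* imod = 0b11: cpsid */
#define CPS_A       (1u << 8)
#define CPS_I       (1u << 7)
#define CPS_F       (1u << 6)

int main(void)
{
	printf("cpsie f:  0x%08x\n", CPS_BASE | CPS_ENABLE  | CPS_F);          /* 0xf1080040 */
	printf("cpsie i:  0x%08x\n", CPS_BASE | CPS_ENABLE  | CPS_I);          /* 0xf1080080 */
	printf("cpsid if: 0x%08x\n", CPS_BASE | CPS_DISABLE | CPS_I | CPS_F);  /* 0xf10c00c0 */
	return 0;
}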
@@ -89,17 +111,15 @@ ENTRY(cpu_v6_dcache_clean_area)
  *     - we are not using split page tables
  */
 ENTRY(cpu_v6_switch_mm)
-#ifdef CONFIG_MMU
        mov     r2, #0
        ldr     r1, [r1, #MM_CONTEXT_ID]        @ get mm->context.id
 #ifdef CONFIG_SMP
-       orr     r0, r0, #TTB_RGN_WBWA|TTB_S     @ mark PTWs shared, outer cacheable
+       orr     r0, r0, #2                      @ set shared pgtable
 #endif
        mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
        mcr     p15, 0, r2, c7, c10, 4          @ drain write buffer
        mcr     p15, 0, r0, c2, c0, 0           @ set TTB 0
        mcr     p15, 0, r1, c13, c0, 1          @ set context ID
-#endif
        mov     pc, lr
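A minimal C sketch (illustration only) of the TTBR0 attribute bits that the deleted TTB_* defines described: the SMP path above now ORs in only the shared bit (literal #2) instead of TTB_RGN_WBWA|TTB_S (0x0a, shared plus outer write-back/write-allocate page-table walks).

#include <stdint.h>

/* ARMv6 TTBR0 low-order attribute bits, as in the deleted defines. */
#define TTB_C        (1u << 0)  /* inner-cacheable page-table walks */
#define TTB_S        (1u << 1)  /* shared */
#define TTB_RGN_WBWA (1u << 3)  /* outer write-back, write-allocate */

/* What the patched SMP path now loads into TTBR0: pgd base plus only TTB_S. */
static inline uint32_t ttbr0_smp(uint32_t pgd_phys)
{
	return pgd_phys | TTB_S;   /* previously: pgd_phys | TTB_RGN_WBWA | TTB_S */
}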
 
 /*
@@ -122,7 +142,6 @@ ENTRY(cpu_v6_switch_mm)
  *       1111   0   1   1      r/w     r/w
  */
 ENTRY(cpu_v6_set_pte)
-#ifdef CONFIG_MMU
        str     r1, [r0], #-2048                @ linux version
 
        bic     r2, r1, #0x000003f0
@@ -141,15 +160,14 @@ ENTRY(cpu_v6_set_pte)
        tst     r1, #L_PTE_YOUNG
        biceq   r2, r2, #PTE_EXT_APX | PTE_EXT_AP_MASK
 
-       tst     r1, #L_PTE_EXEC
-       orreq   r2, r2, #PTE_EXT_XN
+@      tst     r1, #L_PTE_EXEC
+@      orreq   r2, r2, #PTE_EXT_XN
 
        tst     r1, #L_PTE_PRESENT
        moveq   r2, #0
 
        str     r2, [r0]
        mcr     p15, 0, r0, c7, c10, 1 @ flush_pte
-#endif
        mov     pc, lr
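A small C sketch (illustration only; the constant values are assumed from that kernel generation's pgtable headers) of what the two commented-out instructions in cpu_v6_set_pte used to do. With them disabled, PTE_EXT_XN is never set in the hardware PTE, so every present page is left executable by the MMU.

#include <stdint.h>

#define L_PTE_EXEC  (1u << 6)   /* Linux PTE "executable" bit (assumed value) */
#define PTE_EXT_XN  (1u << 0)   /* ARMv6 hardware PTE execute-never bit (assumed value) */

/* Original behaviour of the now-disabled instruction pair. */
static inline uint32_t apply_xn(uint32_t linux_pte, uint32_t hw_pte)
{
	if (!(linux_pte & L_PTE_EXEC))   /* tst   r1, #L_PTE_EXEC     */
		hw_pte |= PTE_EXT_XN;    /* orreq r2, r2, #PTE_EXT_XN */
	return hw_pte;
}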
 
 
@@ -199,24 +217,22 @@ __v6_setup:
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        mcr     p15, 0, r0, c7, c15, 0          @ clean+invalidate cache
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
-#ifdef CONFIG_MMU
        mcr     p15, 0, r0, c8, c7, 0           @ invalidate I + D TLBs
        mcr     p15, 0, r0, c2, c0, 2           @ TTB control register
 #ifdef CONFIG_SMP
-       orr     r4, r4, #TTB_RGN_WBWA|TTB_S     @ mark PTWs shared, outer cacheable
+       orr     r4, r4, #2                      @ set shared pgtable
 #endif
        mcr     p15, 0, r4, c2, c0, 1           @ load TTB1
-#endif /* CONFIG_MMU */
 #ifdef CONFIG_VFP
        mrc     p15, 0, r0, c1, c0, 2
        orr     r0, r0, #(0xf << 20)
        mcr     p15, 0, r0, c1, c0, 2           @ Enable full access to VFP
 #endif
-       adr     r5, v6_crval
-       ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0, 0           @ read control register
+       ldr     r5, v6_cr1_clear                @ get mask for bits to clear
        bic     r0, r0, r5                      @ clear them
-       orr     r0, r0, r6                      @ set them
+       ldr     r5, v6_cr1_set                  @ get mask for bits to set
+       orr     r0, r0, r5                      @ set them
        mov     pc, lr                          @ return to head.S:__ret
 
        /*
@@ -225,9 +241,12 @@ __v6_setup:
         * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
         *         0 110       0011 1.00 .111 1101 < we want
         */
-       .type   v6_crval, #object
-v6_crval:
-       crval   clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c
+       .type   v6_cr1_clear, #object
+       .type   v6_cr1_set, #object
+v6_cr1_clear:
+       .word   0x01e0fb7f
+v6_cr1_set:
+       .word   0x00c0387d
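A short C sketch (illustration only) of the read-modify-write that __v6_setup performs on the control register using these two words, which replace the crval macro's clear/mmuset masks:

#include <stdint.h>

static inline uint32_t v6_cr1_fixup(uint32_t cr)
{
	const uint32_t v6_cr1_clear = 0x01e0fb7f;  /* bits to clear */
	const uint32_t v6_cr1_set   = 0x00c0387d;  /* bits to set   */

	cr &= ~v6_cr1_clear;   /* bic r0, r0, r5 */
	cr |=  v6_cr1_set;     /* orr r0, r0, r5 */
	return cr;
}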
 
        .type   v6_processor_functions, #object
 ENTRY(v6_processor_functions)
@@ -266,10 +285,6 @@ __v6_proc_info:
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
-       .long   PMD_TYPE_SECT | \
-               PMD_SECT_XN | \
-               PMD_SECT_AP_WRITE | \
-               PMD_SECT_AP_READ
        b       __v6_setup
        .long   cpu_arch_name
        .long   cpu_elf_name