/*
 * Imported from linux-2.6.git: include/asm-xtensa/fixmap.h
 * (commit manufactured by cvs2svn to create branch 'vserver')
 */
1 /*
2  * include/asm-xtensa/fixmap.h
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 2001 - 2005 Tensilica Inc.
9  */
10
11 #ifndef _XTENSA_FIXMAP_H
12 #define _XTENSA_FIXMAP_H
13
14 #include <asm/processor.h>
15
16 #ifdef CONFIG_MMU
17
18 /*
19  * Here we define all the compile-time virtual addresses.
20  */
21
22 #if XCHAL_SEG_MAPPABLE_VADDR != 0
23 # error "Current port requires virtual user space starting at 0"
24 #endif
25 #if XCHAL_SEG_MAPPABLE_SIZE < 0x80000000
26 # error "Current port requires at least 0x8000000 bytes for user space"
27 #endif
28
29 /* Verify instruction/data ram/rom and xlmi don't overlay vmalloc space. */
30
/* True iff address a lies within [VMALLOC_START, VMALLOC_END). */
#define __IN_VMALLOC(a)                                                 \
        ((a) >= VMALLOC_START && (a) < VMALLOC_END)

/* True iff [s, e) strictly contains the whole vmalloc window. */
#define __SPAN_VMALLOC(s, e)                                            \
        ((s) < VMALLOC_START && (e) >= VMALLOC_END)

/* True iff the range [s, e) overlaps the vmalloc window in any way. */
#define INSIDE_VMALLOC(s, e)                                            \
        (__IN_VMALLOC(s) || __IN_VMALLOC(e) || __SPAN_VMALLOC(s, e))
37
/*
 * Check both device 0 and device 1.  The original "== 2" test skipped
 * instrom0 entirely whenever two devices were configured; ">=" checks
 * every device up to the supported count of two.
 */
#if XCHAL_NUM_INSTROM >= 1
# if INSIDE_VMALLOC(XCHAL_INSTROM0_VADDR,XCHAL_INSTROM0_VADDR+XCHAL_INSTROM0_SIZE)
#  error vmalloc range conflicts with instrom0
# endif
#endif
#if XCHAL_NUM_INSTROM >= 2
# if INSIDE_VMALLOC(XCHAL_INSTROM1_VADDR,XCHAL_INSTROM1_VADDR+XCHAL_INSTROM1_SIZE)
#  error vmalloc range conflicts with instrom1
# endif
#endif
50
/*
 * Check both device 0 and device 1.  The original "== 2" test skipped
 * instram0 entirely whenever two devices were configured.
 */
#if XCHAL_NUM_INSTRAM >= 1
# if INSIDE_VMALLOC(XCHAL_INSTRAM0_VADDR,XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE)
#  error vmalloc range conflicts with instram0
# endif
#endif
#if XCHAL_NUM_INSTRAM >= 2
# if INSIDE_VMALLOC(XCHAL_INSTRAM1_VADDR,XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE)
#  error vmalloc range conflicts with instram1
# endif
#endif
63
/*
 * Check both device 0 and device 1.  The original "== 2" test skipped
 * datarom0 entirely whenever two devices were configured.
 */
#if XCHAL_NUM_DATAROM >= 1
# if INSIDE_VMALLOC(XCHAL_DATAROM0_VADDR,XCHAL_DATAROM0_VADDR+XCHAL_DATAROM0_SIZE)
#  error vmalloc range conflicts with datarom0
# endif
#endif
#if XCHAL_NUM_DATAROM >= 2
# if INSIDE_VMALLOC(XCHAL_DATAROM1_VADDR,XCHAL_DATAROM1_VADDR+XCHAL_DATAROM1_SIZE)
#  error vmalloc range conflicts with datarom1
# endif
#endif
76
/*
 * Check both device 0 and device 1.  The original "== 2" test skipped
 * dataram0 entirely whenever two devices were configured.
 */
#if XCHAL_NUM_DATARAM >= 1
# if INSIDE_VMALLOC(XCHAL_DATARAM0_VADDR,XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE)
#  error vmalloc range conflicts with dataram0
# endif
#endif
#if XCHAL_NUM_DATARAM >= 2
# if INSIDE_VMALLOC(XCHAL_DATARAM1_VADDR,XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE)
#  error vmalloc range conflicts with dataram1
# endif
#endif
89
/*
 * Check both device 0 and device 1.  The original "== 2" test skipped
 * xlmi0 entirely whenever two devices were configured.
 */
#if XCHAL_NUM_XLMI >= 1
# if INSIDE_VMALLOC(XCHAL_XLMI0_VADDR,XCHAL_XLMI0_VADDR+XCHAL_XLMI0_SIZE)
#  error vmalloc range conflicts with xlmi0
# endif
#endif
#if XCHAL_NUM_XLMI >= 2
# if INSIDE_VMALLOC(XCHAL_XLMI1_VADDR,XCHAL_XLMI1_VADDR+XCHAL_XLMI1_SIZE)
#  error vmalloc range conflicts with xlmi1
# endif
#endif
102
/* The per-device overlap checks above cover at most devices 0 and 1. */
#if (XCHAL_NUM_INSTROM > 2) || (XCHAL_NUM_INSTRAM > 2) ||               \
    (XCHAL_NUM_DATARAM > 2) || (XCHAL_NUM_DATAROM > 2) ||               \
    (XCHAL_NUM_XLMI    > 2)
# error Insufficient checks on vmalloc above for more than 2 devices
#endif
110
/*
 * USER_VM_SIZE does not necessarily equal TASK_SIZE.  TASK_SIZE was
 * lowered to 0x40000000 to simplify the handling of windowed call
 * instructions (currently limited to a 1 GByte range).  User tasks may
 * well reclaim 0x40000000..0x7fffffff in the future, so the kernel
 * must not grow accustomed to keeping anything (e.g. page tables)
 * there.  That VM region is no-man's land for now.
 */

/* Bottom and size of the user virtual address space. */
#define USER_VM_START           XCHAL_SEG_MAPPABLE_VADDR
#define USER_VM_SIZE            0x80000000

/* Page-table size, derived from the smallest PTE page size. */
#define PGTABLE_SIZE_BITS       (32 - XCHAL_MMU_MIN_PTE_PAGE_SIZE + 2)
#define PGTABLE_SIZE            (1L << PGTABLE_SIZE_BITS)
128
/* Everything the kernel can map lies above the user region. */
#define KERNEL_ALLMAP_START     (USER_VM_START + USER_VM_SIZE)
#define KERNEL_ALLMAP_SIZE      (XCHAL_SEG_MAPPABLE_SIZE - KERNEL_ALLMAP_START)

/* The page table is carved out at the bottom of the kernel-mappable area. */
#if KERNEL_ALLMAP_SIZE < PGTABLE_SIZE
# error "Gimme some space for page table!"
#endif
#define PGTABLE_START           KERNEL_ALLMAP_START
140
/* Kernel-mappable space left over after the page-table carve-out. */
#define KERNEL_MAPPED_START     (KERNEL_ALLMAP_START + PGTABLE_SIZE)
#define KERNEL_MAPPED_SIZE      (KERNEL_ALLMAP_SIZE - PGTABLE_SIZE)
145
146 #if KERNEL_MAPPED_SIZE < 0x01000000     /* 16 MB is arbitrary for now */
147 # error "Shouldn't the kernel have at least *some* mappable space?"
148 #endif
149
150 #define MAX_LOW_MEMORY          XCHAL_KSEG_CACHED_SIZE
151
#endif /* CONFIG_MMU */
153
/*
 * Constants used elsewhere, though perhaps only in Xtensa header
 * files; some could likely be dropped in favor of direct access to
 * the compile-time HAL.
 *
 * Note: we assume system RAM sits at the very start of the kernel
 *       segments!
 */
#define KERNEL_VM_LOW           XCHAL_KSEG_CACHED_VADDR
#define KERNEL_VM_HIGH          XCHAL_KSEG_BYPASS_VADDR
#define KERNEL_SPACE            XCHAL_KSEG_CACHED_VADDR
165
/*
 * Translate kernel-space addresses between their physical, bypass
 * (uncached) virtual, and cached virtual forms.  Only the cached
 * kernel segment (the one normally used for kernel operation) is
 * handled; anything outside the known windows passes through
 * unchanged.
 *
 *                      PHYSICAL        BYPASS          CACHED
 *
 *  bypass vaddr        bypass paddr    *               cached vaddr
 *  cached vaddr        cached paddr    bypass vaddr    *
 *  bypass paddr        *               bypass vaddr    cached vaddr
 *  cached paddr        *               bypass vaddr    cached vaddr
 *  other               *               *               *
 */

#define PHYSADDR(a)                                                           \
        ((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR &&                          \
         (unsigned)(a) <  XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE    \
         ? (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR  \
         : (unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR &&                        \
           (unsigned)(a) <  XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE  \
           ? (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR\
           : (unsigned)(a))
189
/* Map a kernel physical or cached virtual address to its bypass vaddr. */
#define BYPASS_ADDR(a)                                                        \
        ((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR &&                          \
         (unsigned)(a) <  XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE    \
         ? (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR  \
         : (unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR &&                        \
           (unsigned)(a) <  XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE  \
           ? (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_BYPASS_VADDR\
           : (unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR &&                      \
             (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE \
             ? (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR                        \
                             + XCHAL_KSEG_BYPASS_VADDR                        \
             : (unsigned)(a))
201
/* Map a kernel physical or bypass virtual address to its cached vaddr. */
#define CACHED_ADDR(a)                                                        \
        ((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR &&                          \
         (unsigned)(a) <  XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE    \
         ? (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_CACHED_VADDR  \
         : (unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR &&                        \
           (unsigned)(a) <  XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE  \
           ? (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR\
           : (unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR &&                      \
             (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE \
             ? (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR                        \
                             + XCHAL_KSEG_CACHED_VADDR                        \
             : (unsigned)(a))
213
/* Map a KIO (I/O region) virtual address to its physical address. */
#define PHYSADDR_IO(a)                                                        \
        ((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR &&                           \
         (unsigned)(a) <  XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE      \
         ? (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_PADDR    \
         : (unsigned)(a) >= XCHAL_KIO_CACHED_VADDR &&                         \
           (unsigned)(a) <  XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE    \
           ? (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_PADDR  \
           : (unsigned)(a))
222
/* Map a KIO physical or cached virtual address to its bypass vaddr. */
#define BYPASS_ADDR_IO(a)                                                     \
        ((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR &&                           \
         (unsigned)(a) <  XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE      \
         ? (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_VADDR    \
         : (unsigned)(a) >= XCHAL_KIO_CACHED_PADDR &&                         \
           (unsigned)(a) <  XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE    \
           ? (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_BYPASS_VADDR  \
           : (unsigned)(a) >= XCHAL_KIO_CACHED_VADDR &&                       \
             (unsigned)(a) <  XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE  \
             ? (unsigned)(a) - XCHAL_KIO_CACHED_VADDR                         \
                             + XCHAL_KIO_BYPASS_VADDR                         \
             : (unsigned)(a))
234
/* Map a KIO physical or bypass virtual address to its cached vaddr. */
#define CACHED_ADDR_IO(a)                                                     \
        ((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR &&                           \
         (unsigned)(a) <  XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE      \
         ? (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_CACHED_VADDR    \
         : (unsigned)(a) >= XCHAL_KIO_CACHED_PADDR &&                         \
           (unsigned)(a) <  XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE    \
           ? (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_VADDR  \
           : (unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR &&                       \
             (unsigned)(a) <  XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE  \
             ? (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR                         \
                             + XCHAL_KIO_CACHED_VADDR                         \
             : (unsigned)(a))
246
#endif /* _XTENSA_FIXMAP_H */
248
249
250
251
252