#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <linux/config.h>
#include <asm/pal.h>
#include <asm/page.h>

/*
 * System defines.  Note that this is included from both .c and .S
 * files, so it should contain only defines, not any C code.
 */

/*
 * We leave one page for the initial stack page, and one page for
 * the initial process structure.  Also, the console eats 3 MB for
 * the initial bootloader (one of which we can reclaim later).
 */
#define BOOT_PCB	0x20000000
#define BOOT_ADDR	0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE	(16*1024)
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS	0x300000  /* Old bootloaders hardcoded this.  */
#else
#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD	KERNEL_START
#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
/*
 * This is set up by the secondary bootstrap loader.  Because
 * the zero page is zeroed out as soon as the vm system is
 * initialized, we need to copy things out into a more permanent
 * place.  (A minimal copy-out sketch follows the defines below.)
 */
#define PARAM			ZERO_PGE
#define COMMAND_LINE		((char*)(PARAM + 0x0000))
#define COMMAND_LINE_SIZE	256
#define INITRD_START		(*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x108))
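/*
 * Illustrative sketch (not part of the original header), showing how the
 * bootloader-supplied command line might be copied out of ZERO_PGE before
 * the zero page is reclaimed.  The buffer and function names below are
 * hypothetical; only PARAM/COMMAND_LINE/COMMAND_LINE_SIZE come from above.
 */
#if 0	/* example only, not compiled */
static char example_saved_command_line[COMMAND_LINE_SIZE];

static inline void example_save_boot_params(void)
{
	/* Preserve the command line in kernel memory before ZERO_PGE is zeroed. */
	strlcpy(example_saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
}
#endif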
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
/*
 * This is the logout header that should be common to all platforms
 * (assuming they are running OSF/1 PALcode, I guess).
 */
struct el_common {
	unsigned int	size;		/* size in bytes of logout area */
	unsigned int	sbz1	: 30;	/* should be zero */
	unsigned int	err2	:  1;	/* second error */
	unsigned int	retry	:  1;	/* retry flag */
	unsigned int	proc_offset;	/* processor-specific offset */
	unsigned int	sys_offset;	/* system-specific offset */
	unsigned int	code;		/* machine check code */
	unsigned int	frame_rev;	/* frame revision */
};
/*
 * Machine Check Frame for uncorrectable errors (Large format)
 *	--- This is used to log uncorrectable errors such as
 *	    double bit ECC errors.
 *	--- These errors are detected by both processor and systems.
 */
struct el_common_EV5_uncorrectable_mcheck {
	unsigned long	shadow[8];	/* Shadow reg. 8-14, 25 */
	unsigned long	paltemp[24];	/* PAL TEMP REGS. */
	unsigned long	exc_addr;	/* Address of excepting instruction */
	unsigned long	exc_sum;	/* Summary of arithmetic traps. */
	unsigned long	exc_mask;	/* Exception mask (from exc_sum). */
	unsigned long	pal_base;	/* Base address for PALcode. */
	unsigned long	isr;		/* Interrupt Status Reg. */
	unsigned long	icsr;		/* CURRENT SETUP OF EV5 IBOX */
	unsigned long	ic_perr_stat;	/* I-CACHE Reg. <11> set Data parity,
						       <12> set Tag parity. */
	unsigned long	dc_perr_stat;	/* D-CACHE error Reg. Bits set to 1:
						<2> Data error in bank 0
						<3> Data error in bank 1
						<4> Tag error in bank 0
						<5> Tag error in bank 1 */
	unsigned long	va;		/* Effective VA of fault or miss. */
	unsigned long	mm_stat;	/* Holds the reason for D-stream
					   fault or D-cache parity errors */
	unsigned long	sc_addr;	/* Address that was being accessed
					   when EV5 detected secondary cache
					   failure. */
	unsigned long	sc_stat;	/* Helps determine if the error was
					   TAG/Data parity (secondary cache). */
	unsigned long	bc_tag_addr;	/* Contents of EV5 BC_TAG_ADDR */
	unsigned long	ei_addr;	/* Physical address of any transfer
					   that is logged in EV5 EI_STAT */
	unsigned long	fill_syndrome;	/* For correcting ECC errors. */
	unsigned long	ei_stat;	/* Helps identify the reason for any
					   processor uncorrectable error
					   at its external interface. */
	unsigned long	ld_lock;	/* Contents of EV5 LD_LOCK register */
};
struct el_common_EV6_mcheck {
	unsigned int FrameSize;		/* Bytes, including this field */
	unsigned int FrameFlags;	/* <31> = Retry, <30> = Second Error */
	unsigned int CpuOffset;		/* Offset to CPU-specific info */
	unsigned int SystemOffset;	/* Offset to system-specific info */
	unsigned int MCHK_Code;
	unsigned int MCHK_Frame_Rev;
	unsigned long I_STAT;		/* EV6 Internal Processor Registers */
	unsigned long DC_STAT;		/* (See the 21264 Spec) */
	unsigned long C_ADDR;
	unsigned long DC1_SYNDROME;
	unsigned long DC0_SYNDROME;
	unsigned long C_STAT;
	unsigned long MM_STAT;
	unsigned long EXC_ADDR;
	unsigned long IER_CM;
	unsigned long RESERVED0;
	unsigned long PAL_BASE;
};
extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
#define switch_to(P,N,L)						\
do {									\
	(L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P)); \
	check_mmu_context();						\
} while (0)

extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct *);
#define mb() \
__asm__ __volatile__("mb": : :"memory")

#define rmb() \
__asm__ __volatile__("mb": : :"memory")

#define wmb() \
__asm__ __volatile__("wmb": : :"memory")

#define read_barrier_depends() \
__asm__ __volatile__("mb": : :"memory")
#ifdef CONFIG_SMP
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	barrier()
#endif
#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)

#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
enum implver_enum { IMPLVER_EV4, IMPLVER_EV5, IMPLVER_EV6 };

#ifdef CONFIG_ALPHA_GENERIC
#define implver()				\
({ unsigned long __implver;			\
   __asm__ ("implver %0" : "=r"(__implver));	\
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code. */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif
enum amask_enum {
	AMASK_BWX = (1UL << 0),
	AMASK_FIX = (1UL << 1),
	AMASK_CIX = (1UL << 2),
	AMASK_MAX = (1UL << 8),
	AMASK_PRECISE_TRAP = (1UL << 9),
};

#define amask(mask)						\
({ unsigned long __amask, __input = (mask);			\
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
   __amask; })
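/*
 * Illustrative sketch (not part of the original header): using amask() for
 * run-time CPU feature detection.  amask() returns the *unsupported* bits of
 * the requested mask, so a clear bit means the feature is implemented.  The
 * helper name below is hypothetical.
 */
#if 0	/* example only, not compiled */
static inline int example_cpu_has_bwx(void)
{
	/* The byte/word extension is present iff its bit comes back clear. */
	return (amask(AMASK_BWX) & AMASK_BWX) == 0;
}
#endif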
#define __CALL_PAL_R0(NAME, TYPE)				\
static inline TYPE NAME(void)					\
{								\
	register TYPE __r0 __asm__("$0");			\
	__asm__ __volatile__(					\
		"call_pal %1 # " #NAME				\
		:"=r" (__r0)					\
		:"i" (PAL_ ## NAME)				\
		:"$1", "$16", "$22", "$23", "$24", "$25");	\
	return __r0;						\
}
#define __CALL_PAL_W1(NAME, TYPE0)				\
static inline void NAME(TYPE0 arg0)				\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %1 # "#NAME				\
		: "=r"(__r16)					\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
}
#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r17)			\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
}
#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
static inline RTYPE NAME(TYPE0 arg0)				\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r0)			\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}
#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %3 # "#NAME				\
		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}
__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);
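/*
 * Illustrative sketch (not part of the original header): the macros above
 * expand into ordinary static inline wrappers, so PALcode services such as
 * whami() and rdps() are called like plain C functions.  The function name
 * below is hypothetical.
 */
#if 0	/* example only, not compiled */
static inline unsigned long example_report_cpu(void)
{
	unsigned long cpu = whami();	/* PAL "who am I": current CPU number */
	unsigned long ps  = rdps();	/* processor status; IPL in the low bits */

	return (cpu << 8) | (ps & 7);
}
#endif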
#define IPL_POWERFAIL	6

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN		__min_ipl
extern int __min_ipl;
#endif

#define getipl()		(rdps() & 7)
#define setipl(ipl)		((void) swpipl(ipl))
#define local_irq_disable()		do { setipl(IPL_MAX); barrier(); } while(0)
#define local_irq_enable()		do { barrier(); setipl(IPL_MIN); } while(0)
#define local_save_flags(flags)		((flags) = rdps())
#define local_irq_save(flags)		do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
#define local_irq_restore(flags)	do { barrier(); setipl(flags); barrier(); } while(0)

#define irqs_disabled()	(getipl() == IPL_MAX)
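/*
 * Illustrative sketch (not part of the original header): the usual
 * save/restore pattern built from the macros above.  The IPL is raised to
 * IPL_MAX around the protected region and the previous level is restored
 * afterwards.  The function and parameter names are hypothetical.
 */
#if 0	/* example only, not compiled */
static inline void example_update_counter(unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* old IPL -> flags, then IPL -> IPL_MAX */
	(*counter)++;			/* not interrupted on this CPU */
	local_irq_restore(flags);	/* back to the saved IPL */
}
#endif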
#define __tbi(nr,arg,arg1...)					\
({								\
	register unsigned long __r16 __asm__("$16") = (nr);	\
	register unsigned long __r17 __asm__("$17"); arg;	\
	__asm__ __volatile__(					\
		"call_pal %3 #__tbi"				\
		:"=r" (__r16),"=r" (__r17)			\
		:"0" (__r16),"i" (PAL_tbi) ,##arg1		\
		:"$0", "$1", "$22", "$23", "$24", "$25");	\
})

#define tbi(x,y)	__tbi(x,__r17=(y),"1" (__r17))
#define tbisi(x)	__tbi(1,__r17=(x),"1" (__r17))
#define tbisd(x)	__tbi(2,__r17=(x),"1" (__r17))
#define tbis(x)		__tbi(3,__r17=(x),"1" (__r17))
#define tbiap()		__tbi(-1, /* no second argument */)
#define tbia()		__tbi(-2, /* no second argument */)
/*
 * Atomic exchange.
 * Since it can be used to implement critical sections
 * it must clobber "memory" (also for interrupts in UP).
 * (A usage sketch appears after the xchg()/tas() macros below.)
 */

static inline unsigned long
__xchg_u8(volatile char *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;
	__asm__ __volatile__(
	"1:	ldq_l	%2,0(%3)\n"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");
	return ret;
}
static inline unsigned long
__xchg_u16(volatile short *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;
	__asm__ __volatile__(
	"1:	ldq_l	%2,0(%3)\n"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");
	return ret;
}
static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;
	__asm__ __volatile__(
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");
	return val;
}
static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;
	__asm__ __volatile__(
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");
	return val;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 1: return __xchg_u8(ptr, x);
	case 2: return __xchg_u16(ptr, x);
	case 4: return __xchg_u32(ptr, x);
	case 8: return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})

#define tas(ptr) (xchg((ptr),1))
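/*
 * Illustrative sketch (not part of the original header): a minimal
 * test-and-set style lock built on xchg(), showing why xchg() must act as a
 * memory clobber around critical sections.  The function names and the
 * caller-provided lock word are hypothetical.
 */
#if 0	/* example only, not compiled */
static inline void example_lock(volatile unsigned long *lock)
{
	/* Spin until the previous holder has stored 0. */
	while (xchg(lock, 1UL) != 0UL)
		barrier();
}

static inline void example_unlock(volatile unsigned long *lock)
{
	mb();			/* order critical-section stores before the release */
	*lock = 0UL;
}
#endif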
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change.  If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far as I can tell.
 * (A usage sketch appears after the cmpxchg() macro below.)
 */

#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long
__cmpxchg_u8(volatile char *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;
	__asm__ __volatile__(
	"1:	ldq_l	%2,0(%4)\n"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
	return prev;
}
static inline unsigned long
__cmpxchg_u16(volatile short *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;
	__asm__ __volatile__(
	"1:	ldq_l	%2,0(%4)\n"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
	return prev;
}
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;
	__asm__ __volatile__(
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");
	return prev;
}
static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;
	__asm__ __volatile__(
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");
	return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 1: return __cmpxchg_u8(ptr, old, new);
	case 2: return __cmpxchg_u16(ptr, old, new);
	case 4: return __cmpxchg_u32(ptr, old, new);
	case 8: return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
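/*
 * Illustrative sketch (not part of the original header): the standard
 * compare-and-swap retry loop built on cmpxchg(), matching the comment
 * above.  The function name is hypothetical; only cmpxchg() itself comes
 * from this header.
 */
#if 0	/* example only, not compiled */
static inline unsigned long example_atomic_add_return(unsigned long *p,
						      unsigned long delta)
{
	unsigned long old, new;

	do {
		old = *p;		/* snapshot the current value */
		new = old + delta;	/* compute the desired value */
		/* retry if someone else changed *p since the snapshot */
	} while (cmpxchg(p, old, new) != old);

	return new;
}
#endif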
#endif /* __ASSEMBLY__ */

#endif /* __ALPHA_SYSTEM_H */