1 #ifndef _ASM_IA64_XEN_PRIVOP_H
2 #define _ASM_IA64_XEN_PRIVOP_H
/*
 * Copyright (C) 2005 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 *
 * Paravirtualizations of privileged operations for Xen/ia64
 */
13 #include <xen/interface/arch-ia64.h>
15 #define IA64_PARAVIRTUALIZED
/* At 1 MB, before per-cpu space but still addressable using addl instead
   of movl. */
21 #define XSI_BASE 0xfffffffffff00000
24 /* Address of mapped regs. */
25 #define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
/*
 * Hyperprivop encodings for assembly code: each expands to a "break"
 * instruction whose immediate operand tells Xen which privileged
 * operation to emulate.  NOTE(review): presumably these are inside an
 * __ASSEMBLY__ section (the guard lines are not visible in this chunk;
 * the text form "break HYPERPRIVOP_*" is only valid in .S files) --
 * confirm against the full file.
 */
#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
#define XEN_HYPER_GET_TPR break HYPERPRIVOP_GET_TPR
#define XEN_HYPER_SET_TPR break HYPERPRIVOP_SET_TPR
#define XEN_HYPER_EOI break HYPERPRIVOP_EOI
#define XEN_HYPER_SET_ITM break HYPERPRIVOP_SET_ITM
#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
#define XEN_HYPER_PTC_GA break HYPERPRIVOP_PTC_GA
#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
#define XEN_HYPER_GET_RR break HYPERPRIVOP_GET_RR
#define XEN_HYPER_SET_RR break HYPERPRIVOP_SET_RR
#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
#define XEN_HYPER_FC break HYPERPRIVOP_FC
#define XEN_HYPER_GET_CPUID break HYPERPRIVOP_GET_CPUID
#define XEN_HYPER_GET_PMD break HYPERPRIVOP_GET_PMD
#define XEN_HYPER_GET_EFLAG break HYPERPRIVOP_GET_EFLAG
#define XEN_HYPER_SET_EFLAG break HYPERPRIVOP_SET_EFLAG
#define XEN_HYPER_RSM_BE break HYPERPRIVOP_RSM_BE
#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
/*
 * Addresses of individual fields inside the Xen shared-info area:
 * each is XSI_BASE (the fixed 1 MB mapping above) plus a per-field
 * offset.  NOTE(review): the XSI_*_OFS constants are not defined in
 * this chunk -- presumably generated elsewhere (e.g. asm-offsets);
 * confirm before relying on them.
 */
#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
#define XSI_INCOMPL_REGFR (XSI_BASE + XSI_INCOMPL_REGFR_OFS)
#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
/* C-callable forms of the two most frequent hyperprivops: emit the
 * "break" with the hyperprivop number as an immediate via inline asm.
 * NOTE(review): these redefine the assembly-text versions above --
 * presumably this pair sits in the !__ASSEMBLY__ section (guard lines
 * not visible in this chunk); confirm against the full file. */
#define XEN_HYPER_SSM_I asm("break %0" : : "i" (HYPERPRIVOP_SSM_I))
#define XEN_HYPER_GET_IVR asm("break %0" : : "i" (HYPERPRIVOP_GET_IVR))
74 /************************************************/
75 /* Instructions paravirtualized for correctness */
76 /************************************************/
78 /* "fc" and "thash" are privilege-sensitive instructions, meaning they
79 * may have different semantics depending on whether they are executed
80 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
81 * be allowed to execute directly, lest incorrect semantics result. */
/* Stubs replacing the privilege-sensitive "fc" and "thash" instructions
 * (see comment above); defined elsewhere -- presumably they issue
 * XEN_HYPER_FC / XEN_HYPER_THASH, but the definitions are not visible
 * in this chunk. */
extern unsigned long xen_fc(unsigned long addr);
#define ia64_fc(addr) xen_fc((unsigned long)(addr))
extern unsigned long xen_thash(unsigned long addr);
#define ia64_thash(addr) xen_thash((unsigned long)(addr))
86 /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
87 * is not currently used (though it may be in a long-format VHPT system!)
88 * and the semantics of cover only change if psr.ic is off which is very
 * rare (and currently non-existent outside of assembly code) */
91 /* There are also privilege-sensitive registers. These registers are
92 * readable at any privilege level but only writable at PL0. */
/* Accessors for the privilege-sensitive registers described above;
 * reads/writes are routed through Xen stubs defined elsewhere. */
extern unsigned long xen_get_cpuid(int index);
#define ia64_get_cpuid(i) xen_get_cpuid(i)
extern unsigned long xen_get_pmd(int index);
#define ia64_get_pmd(i) xen_get_pmd(i)
extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
100 /************************************************/
101 /* Instructions paravirtualized for performance */
102 /************************************************/
104 /* Xen uses memory-mapped virtual privileged registers for access to many
105 * performance-sensitive privileged registers. Some, like the processor
106 * status register (psr), are broken up into multiple memory locations.
107 * Others, like "pend", are abstractions based on privileged registers.
108 * "Pend" is guaranteed to be set if reading cr.ivr would return a
109 * (non-spurious) interrupt. */
/* The mapped_regs structure lives immediately after the XSI area
 * (XMAPPEDREGS_BASE == XSI_BASE + XSI_SIZE, defined above). */
#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
112 (*XEN_MAPPEDREGS->interrupt_mask_addr)
113 #define xen_get_virtual_psr_i() \
#define xen_set_virtual_psr_i(_val) \
({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; }) /* stored sense is inverted: 0 means psr.i is set */
#define xen_set_virtual_psr_ic(_val) \
({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
/* set iff reading cr.ivr would return a (non-spurious) interrupt -- see comment above */
#define xen_get_virtual_pend() (XEN_MAPPEDREGS->pending_interruption)
121 /* Hyperprivops are "break" instructions with a well-defined API.
122 * In particular, the virtual psr.ic bit must be off; in this way
123 * it is guaranteed to never conflict with a linux break instruction.
124 * Normally, this is done in a xen stub but this one is frequent enough
125 * that we inline it */
126 #define xen_hyper_ssm_i() \
128 xen_set_virtual_psr_i(0); \
129 xen_set_virtual_psr_ic(0); \
133 /* turning off interrupts can be paravirtualized simply by writing
134 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
#define xen_rsm_i() xen_set_virtual_psr_i(0) /* mask interrupts: clear the virtual psr.i bit */
137 /* turning on interrupts is a bit more complicated.. write to the
138 * memory-mapped virtual psr.i bit first (to avoid race condition),
139 * then if any interrupts were pending, we have to execute a hyperprivop
140 * to ensure the pending interrupt gets delivered; else we're done! */
141 #define xen_ssm_i() \
143 int old = xen_get_virtual_psr_i(); \
144 xen_set_virtual_psr_i(1); \
145 if (!old && xen_get_virtual_pend()) xen_hyper_ssm_i(); \
148 #define xen_ia64_intrin_local_irq_restore(x) \
150 if (is_running_on_xen()) { \
151 if ((x) & IA64_PSR_I) { xen_ssm_i(); } \
152 else { xen_rsm_i(); } \
154 else __ia64_intrin_local_irq_restore((x)); \
157 #define xen_get_psr_i() \
159 (is_running_on_xen()) ? \
160 (xen_get_virtual_psr_i() ? IA64_PSR_I : 0) \
161 : __ia64_get_psr_i() \
164 #define xen_ia64_ssm(mask) \
166 if ((mask)==IA64_PSR_I) { \
167 if (is_running_on_xen()) { xen_ssm_i(); } \
168 else { __ia64_ssm(mask); } \
170 else { __ia64_ssm(mask); } \
173 #define xen_ia64_rsm(mask) \
175 if ((mask)==IA64_PSR_I) { \
176 if (is_running_on_xen()) { xen_rsm_i(); } \
177 else { __ia64_rsm(mask); } \
179 else { __ia64_rsm(mask); } \
183 /* Although all privileged operations can be left to trap and will
184 * be properly handled by Xen, some are frequent enough that we use
185 * hyperprivops for performance. */
/* Stubs for the performance-critical privileged operations; defined
 * elsewhere -- presumably each issues the matching XEN_HYPER_*
 * hyperprivop (definitions not visible in this chunk). */
extern unsigned long xen_get_ivr(void);
extern unsigned long xen_get_tpr(void);
extern void xen_set_itm(unsigned long);
extern void xen_set_tpr(unsigned long);
extern void xen_eoi(void);
extern void xen_set_rr(unsigned long index, unsigned long val);
extern unsigned long xen_get_rr(unsigned long index);
extern void xen_set_kr(unsigned long index, unsigned long val);
extern void xen_ptcga(unsigned long addr, unsigned long size);
197 /* Note: It may look wrong to test for is_running_on_xen() in each case.
198 * However regnum is always a constant so, as written, the compiler
199 * eliminates the switch statement, whereas is_running_on_xen() must be
200 * tested dynamically. */
201 #define xen_ia64_getreg(regnum) \
203 __u64 ia64_intri_res; \
206 case _IA64_REG_CR_IVR: \
207 ia64_intri_res = (is_running_on_xen()) ? \
209 __ia64_getreg(regnum); \
211 case _IA64_REG_CR_TPR: \
212 ia64_intri_res = (is_running_on_xen()) ? \
214 __ia64_getreg(regnum); \
216 case _IA64_REG_AR_EFLAG: \
217 ia64_intri_res = (is_running_on_xen()) ? \
219 __ia64_getreg(regnum); \
222 ia64_intri_res = __ia64_getreg(regnum); \
228 #define xen_ia64_setreg(regnum,val) \
231 case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: \
232 (is_running_on_xen()) ? \
233 xen_set_kr((regnum-_IA64_REG_AR_KR0), val) : \
234 __ia64_setreg(regnum,val); \
236 case _IA64_REG_CR_ITM: \
237 (is_running_on_xen()) ? \
239 __ia64_setreg(regnum,val); \
241 case _IA64_REG_CR_TPR: \
242 (is_running_on_xen()) ? \
244 __ia64_setreg(regnum,val); \
246 case _IA64_REG_CR_EOI: \
247 (is_running_on_xen()) ? \
249 __ia64_setreg(regnum,val); \
251 case _IA64_REG_AR_EFLAG: \
252 (is_running_on_xen()) ? \
253 xen_set_eflag(val) : \
254 __ia64_setreg(regnum,val); \
257 __ia64_setreg(regnum,val); \
#define ia64_ssm xen_ia64_ssm /* from here, generic ia64_* intrinsics resolve to the xen_* versions above */
#define ia64_rsm xen_ia64_rsm
#define ia64_intrin_local_irq_restore xen_ia64_intrin_local_irq_restore
#define ia64_ptcga xen_ptcga
#define ia64_set_rr(index,val) xen_set_rr(index,val)
#define ia64_get_rr(index) xen_get_rr(index)
#define ia64_getreg xen_ia64_getreg
#define ia64_setreg xen_ia64_setreg
#define ia64_get_psr_i xen_get_psr_i
/* the remainder of these are not performance-sensitive so it's
273 * OK to not paravirtualize and just take a privop trap and emulate */
/* Each maps straight to the native (__ia64_*) implementation; under
 * Xen the privileged instruction simply traps and is emulated (see
 * comment above). */
#define ia64_hint __ia64_hint
#define ia64_set_pmd __ia64_set_pmd
#define ia64_itci __ia64_itci
#define ia64_itcd __ia64_itcd
#define ia64_itri __ia64_itri
#define ia64_itrd __ia64_itrd
#define ia64_tpa __ia64_tpa
#define ia64_set_ibr __ia64_set_ibr
#define ia64_set_pkr __ia64_set_pkr
#define ia64_set_pmc __ia64_set_pmc
#define ia64_get_ibr __ia64_get_ibr
#define ia64_get_pkr __ia64_get_pkr
#define ia64_get_pmc __ia64_get_pmc
#define ia64_ptce __ia64_ptce
#define ia64_ptcl __ia64_ptcl
#define ia64_ptri __ia64_ptri
#define ia64_ptrd __ia64_ptrd
292 #endif /* !__ASSEMBLY__ */
294 /* these routines utilize privilege-sensitive or performance-sensitive
295 * privileged instructions so the code must be replaced with
296 * paravirtualized versions */
/* These sit outside the !__ASSEMBLY__ guard above, so assembly code
 * also picks up the Xen replacements for these routines. */
#define ia64_pal_halt_light xen_pal_halt_light
#define ia64_leave_kernel xen_leave_kernel
#define ia64_leave_syscall xen_leave_syscall
#define ia64_trace_syscall xen_trace_syscall
#define ia64_switch_to xen_switch_to
#define ia64_pal_call_static xen_pal_call_static
304 #endif /* _ASM_IA64_XEN_PRIVOP_H */