1 /******************************************************************************
4 * Guest OS interface to x86 32-bit Xen.
6 * Copyright (c) 2004, K A Fraser
9 #ifndef __XEN_PUBLIC_ARCH_X86_32_H__
10 #define __XEN_PUBLIC_ARCH_X86_32_H__
13 * Hypercall interface:
14 * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
16 * Access is via hypercall page (set up by guest loader or via a Xen MSR):
17 * call hypercall_page + hypercall-number * 32
18 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
21 #if __XEN_INTERFACE_VERSION__ < 0x00030203
23 * Legacy hypercall interface:
24 * As above, except the entry sequence to the hypervisor is:
25 * mov $hypercall-number*32,%eax ; int $0x82
27 #define TRAP_INSTR "int $0x82"
30 /* Structural guest handles introduced in 0x00030201. */
31 #if __XEN_INTERFACE_VERSION__ >= 0x00030201
32 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \
33 typedef struct { type *p; } __guest_handle_ ## name
35 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \
36 typedef type * __guest_handle_ ## name
/* Define a guest-handle type whose handle name matches the type name. */
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
/* Spell the handle type previously defined for 'name'. */
#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
/* Store pointer 'val' into handle 'hnd' (writes the structural .p member). */
#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
43 #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
/* Guest handles for primitive C types. */
/*
 * The two-argument __DEFINE form is needed where the handle name cannot be
 * the type name itself (e.g. "uchar" for "unsigned char"); the one-argument
 * DEFINE form reuses the type name directly.
 */
__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
DEFINE_XEN_GUEST_HANDLE(char);
DEFINE_XEN_GUEST_HANDLE(int);
DEFINE_XEN_GUEST_HANDLE(long);
DEFINE_XEN_GUEST_HANDLE(void);
/* Page-frame-number type: unsigned long (32 bits on x86-32). */
typedef unsigned long xen_pfn_t;
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
61 * SEGMENT DESCRIPTOR TABLES
64 * A number of GDT entries are reserved by Xen. These are not situated at the
65 * start of the GDT because some stupid OSes export hard-coded selector values
66 * in their ABI. These hard-coded values are always near the start of the GDT,
67 * so Xen places itself out of the way, at the far end of the GDT.
#define FIRST_RESERVED_GDT_PAGE 14
#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) /* 57344 */
/* 8 bytes per descriptor, so the first reserved entry index is 7168. */
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
74 * These flat segments are in the Xen-private section of every GDT. Since these
75 * are also present in the initial GDT, many OSes will be able to avoid
76 * installing their own GDT.
/*
 * Selector layout: GDT index in bits 15:3, RPL (requested privilege level)
 * in bits 1:0.  The indices below follow from FIRST_RESERVED_GDT_ENTRY
 * (7168) above, e.g. 0xe019 >> 3 == 7171.  (The old "GDT index 259"
 * comments were stale, left over from earlier selector values.)
 */
#define FLAT_RING1_CS 0xe019 /* GDT index 7171, RPL 1 (ring-1 code) */
#define FLAT_RING1_DS 0xe021 /* GDT index 7172, RPL 1 (ring-1 data) */
#define FLAT_RING1_SS 0xe021 /* GDT index 7172, RPL 1 (stack = data) */
#define FLAT_RING3_CS 0xe02b /* GDT index 7173, RPL 3 (ring-3 code) */
#define FLAT_RING3_DS 0xe033 /* GDT index 7174, RPL 3 (ring-3 data) */
#define FLAT_RING3_SS 0xe033 /* GDT index 7174, RPL 3 (stack = data) */
/* PV guest kernels execute in ring 1; guest applications in ring 3. */
#define FLAT_KERNEL_CS FLAT_RING1_CS
#define FLAT_KERNEL_DS FLAT_RING1_DS
#define FLAT_KERNEL_SS FLAT_RING1_SS
#define FLAT_USER_CS FLAT_RING3_CS
#define FLAT_USER_DS FLAT_RING3_DS
#define FLAT_USER_SS FLAT_RING3_SS
93 * Virtual addresses beyond this are not modifiable by guest OSes. The
94 * machine->physical mapping table starts at this address, read-only.
/*
 * Layout with a 16MB (0xF5800000-0xF6800000) machine->physical table,
 * i.e. 4M entries.  NOTE(review): selected by a build-time conditional not
 * visible in this excerpt -- presumably the PAE configuration; confirm
 * against the full header.
 */
#define __HYPERVISOR_VIRT_START 0xF5800000
#define __MACH2PHYS_VIRT_START 0xF5800000
#define __MACH2PHYS_VIRT_END 0xF6800000
/* Alternative layout: 4MB (0xFC000000-0xFC400000) table, 1M entries. */
#define __HYPERVISOR_VIRT_START 0xFC000000
#define __MACH2PHYS_VIRT_START 0xFC000000
#define __MACH2PHYS_VIRT_END 0xFC400000
106 #ifndef HYPERVISOR_VIRT_START
107 #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
/* One 4-byte (unsigned long) entry per machine frame, hence the ">>2". */
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
#ifndef machine_to_phys_mapping
/* Read-only table translating machine frame numbers to pseudo-physical. */
#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
117 /* Maximum number of virtual CPUs in multi-processor guests. */
118 #define MAX_VIRT_CPUS 32
122 typedef unsigned long xen_ulong_t;
125 * Send an array of these to HYPERVISOR_set_trap_table()
/*
 * Accessors for trap_info.flags: bits 1:0 hold the privilege level (DPL);
 * bit 2 is the "IF" flag (per the field comment: clear event enable).
 */
#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
#define TI_GET_IF(_ti) ((_ti)->flags & 4)
/* NOTE(review): the SET macros OR bits in; they never clear existing bits. */
#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
132 uint8_t vector; /* exception vector */
133 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
134 uint16_t cs; /* code selector */
135 unsigned long address; /* code offset */
137 typedef struct trap_info trap_info_t;
138 DEFINE_XEN_GUEST_HANDLE(trap_info_t);
140 struct cpu_user_regs {
148 uint16_t error_code; /* private */
149 uint16_t entry_vector; /* private */
152 uint8_t saved_upcall_mask;
154 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
162 typedef struct cpu_user_regs cpu_user_regs_t;
163 DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
165 typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
168 * The following is all CPU context. Note that the fpu_ctxt block is filled
169 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
struct vcpu_guest_context {
    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
    struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
/* Legacy flag names -- identical bit values to the VGCF_* forms below. */
#define VGCF_I387_VALID (1<<0)
#define VGCF_HVM_GUEST (1<<1)
#define VGCF_IN_KERNEL (1<<2)
/* Preferred names: _VGCF_* is the bit number, VGCF_* the corresponding mask. */
#define _VGCF_i387_valid 0
#define VGCF_i387_valid (1<<_VGCF_i387_valid)
#define _VGCF_hvm_guest 1
#define VGCF_hvm_guest (1<<_VGCF_hvm_guest)
#define _VGCF_in_kernel 2
#define VGCF_in_kernel (1<<_VGCF_in_kernel)
#define _VGCF_failsafe_disables_events 3
#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
    unsigned long flags; /* VGCF_* flags */
    struct cpu_user_regs user_regs; /* User-level CPU registers */
    struct trap_info trap_ctxt[256]; /* Virtual IDT */
    unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
    unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
    unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
    unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
    unsigned long event_callback_cs; /* CS:EIP of event callback */
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
    unsigned long failsafe_callback_eip;
    unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
199 typedef struct vcpu_guest_context vcpu_guest_context_t;
200 DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
203 * Page-directory addresses above 4GB do not fit into architectural %cr3.
204 * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
205 * must use the following accessor macros to pack/unpack valid MFNs.
/*
 * Pack a page-frame number into the 32-bit %cr3 image and back again.
 * With PAE the page-directory base may lie above 4GB, so the MFN's low 20
 * bits occupy cr3 bits 31:12 while its upper bits wrap into cr3 bits 11:0
 * -- a 12-bit rotate within a 32-bit word.  xen_cr3_to_pfn applies the
 * inverse rotation.
 */
#define xen_pfn_to_cr3(pfn) ((((unsigned)(pfn)) >> 20) | (((unsigned)(pfn)) << 12))
#define xen_cr3_to_pfn(cr3) ((((unsigned)(cr3)) << 20) | (((unsigned)(cr3)) >> 12))
struct arch_shared_info {
    unsigned long max_pfn; /* max pfn that appears in table */
    /* Frame containing list of mfns containing list of mfns containing p2m. */
    xen_pfn_t pfn_to_mfn_frame_list_list;
    /* NOTE(review): NMI reason code reported to the guest; the bit meanings
     * are defined elsewhere in the interface -- confirm before relying. */
    unsigned long nmi_reason;
217 typedef struct arch_shared_info arch_shared_info_t;
219 struct arch_vcpu_info {
221 unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
223 typedef struct arch_vcpu_info arch_vcpu_info_t;
225 struct xen_callback {
229 typedef struct xen_callback xen_callback_t;
231 #endif /* !__ASSEMBLY__ */
234 * Prefix forces emulation of some non-trapping instructions.
235 * Currently only CPUID.
/*
 * Assembler-source form.  Bytes 0x0f,0x0b encode ud2 (invalid opcode);
 * 0x78,0x65,0x6e is ASCII "xen".  Xen recognises this marker and emulates
 * the instruction that follows (currently only CPUID, per the note above).
 */
#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
/* C-string form of the emulation prefix, for use inside inline asm. */
#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
253 * indent-tabs-mode: nil