/******************************************************************************
 * arch-x86_32.h
 *
 * Guest OS interface to x86 32-bit Xen.
 *
 * Copyright (c) 2004, K A Fraser
 */

#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
#define __XEN_PUBLIC_ARCH_X86_32_H__

/*
 * Hypercall interface:
 *  Input:  %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
 *  Output: %eax
 * Access is via hypercall page (set up by guest loader or via a Xen MSR):
 *  call hypercall_page + hypercall-number * 32
 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
 */

#if __XEN_INTERFACE_VERSION__ < 0x00030203
/*
 * Legacy hypercall interface:
 * As above, except the entry sequence to the hypervisor is:
 *  mov $hypercall-number*32,%eax ; int $0x82
 */
#define TRAP_INSTR "int $0x82"
#endif

/* Structural guest handles introduced in 0x00030201. */
#if __XEN_INTERFACE_VERSION__ >= 0x00030201
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
    typedef struct { type *p; } __guest_handle_ ## name
#else
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
    typedef type * __guest_handle_ ## name
#endif

#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
#define XEN_GUEST_HANDLE(name)          __guest_handle_ ## name
#define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
#endif

#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
DEFINE_XEN_GUEST_HANDLE(char);
DEFINE_XEN_GUEST_HANDLE(int);
DEFINE_XEN_GUEST_HANDLE(long);
DEFINE_XEN_GUEST_HANDLE(void);

typedef unsigned long xen_pfn_t;
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#endif

/*
 * SEGMENT DESCRIPTOR TABLES
 */
/*
 * A number of GDT entries are reserved by Xen. These are not situated at the
 * start of the GDT because some stupid OSes export hard-coded selector values
 * in their ABI. These hard-coded values are always near the start of the GDT,
 * so Xen places itself out of the way, at the far end of the GDT.
 */
#define FIRST_RESERVED_GDT_PAGE  14
#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)

/*
 * These flat segments are in the Xen-private section of every GDT. Since these
 * are also present in the initial GDT, many OSes will be able to avoid
 * installing their own GDT.
 */
#define FLAT_RING1_CS 0xe019    /* GDT index FIRST_RESERVED_GDT_ENTRY + 3 */
#define FLAT_RING1_DS 0xe021    /* GDT index FIRST_RESERVED_GDT_ENTRY + 4 */
#define FLAT_RING1_SS 0xe021    /* GDT index FIRST_RESERVED_GDT_ENTRY + 4 */
#define FLAT_RING3_CS 0xe02b    /* GDT index FIRST_RESERVED_GDT_ENTRY + 5 */
#define FLAT_RING3_DS 0xe033    /* GDT index FIRST_RESERVED_GDT_ENTRY + 6 */
#define FLAT_RING3_SS 0xe033    /* GDT index FIRST_RESERVED_GDT_ENTRY + 6 */

#define FLAT_KERNEL_CS FLAT_RING1_CS
#define FLAT_KERNEL_DS FLAT_RING1_DS
#define FLAT_KERNEL_SS FLAT_RING1_SS
#define FLAT_USER_CS   FLAT_RING3_CS
#define FLAT_USER_DS   FLAT_RING3_DS
#define FLAT_USER_SS   FLAT_RING3_SS
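/*
 * Illustrative sketch (not part of the interface): how a guest might pass an
 * array to the hypervisor through a typed guest handle. The operation struct
 * and fill routine below are hypothetical; only XEN_GUEST_HANDLE() and
 * set_xen_guest_handle() come from this header, and the sketch assumes the
 * structural-handle layout (interface version >= 0x00030201).
 */
#if 0
struct example_op {
    uint32_t nr_frames;                   /* number of entries in the list */
    XEN_GUEST_HANDLE(ulong) frame_list;   /* typed wrapper around ulong *  */
};

static void example_set_list(struct example_op *op,
                             unsigned long *frames, uint32_t n)
{
    op->nr_frames = n;
    /* Stores 'frames' into op->frame_list.p without an unchecked cast. */
    set_xen_guest_handle(op->frame_list, frames);
}
#endif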
/*
 * Virtual addresses beyond this are not modifiable by guest OSes. The
 * machine->physical mapping table starts at this address, read-only.
 */
#ifdef CONFIG_X86_PAE
#define __HYPERVISOR_VIRT_START 0xF5800000
#define __MACH2PHYS_VIRT_START  0xF5800000
#define __MACH2PHYS_VIRT_END    0xF6800000
#else
#define __HYPERVISOR_VIRT_START 0xFC000000
#define __MACH2PHYS_VIRT_START  0xFC000000
#define __MACH2PHYS_VIRT_END    0xFC400000
#endif

#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#endif

#define MACH2PHYS_VIRT_START  mk_unsigned_long(__MACH2PHYS_VIRT_START)
#define MACH2PHYS_VIRT_END    mk_unsigned_long(__MACH2PHYS_VIRT_END)
#define MACH2PHYS_NR_ENTRIES  ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
#endif

/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32

#ifndef __ASSEMBLY__

typedef unsigned long xen_ulong_t;

/*
 * Send an array of these to HYPERVISOR_set_trap_table()
 */
#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
struct trap_info {
    uint8_t       vector;  /* exception vector                              */
    uint8_t       flags;   /* bits 0-1: privilege level; bit 2: clear event
                              enable on entry                               */
    uint16_t      cs;      /* code selector                                 */
    unsigned long address; /* code offset                                   */
};
typedef struct trap_info trap_info_t;
DEFINE_XEN_GUEST_HANDLE(trap_info_t);

struct cpu_user_regs {
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
    uint32_t esi;
    uint32_t edi;
    uint32_t ebp;
    uint32_t eax;
    uint16_t error_code;    /* private */
    uint16_t entry_vector;  /* private */
    uint32_t eip;
    uint16_t cs;
    uint8_t  saved_upcall_mask;
    uint8_t  _pad0;
    uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
    uint32_t esp;
    uint16_t ss, _pad1;
    uint16_t es, _pad2;
    uint16_t ds, _pad3;
    uint16_t fs, _pad4;
    uint16_t gs, _pad5;
};
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);

typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
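/*
 * Illustrative sketch (not part of the interface): registering one virtual
 * IDT entry with the TI_* accessors above. The handler symbol and the
 * HYPERVISOR_set_trap_table() wrapper are assumed to be provided by the
 * guest; the table passed to Xen ends with a zeroed sentinel entry.
 */
#if 0
extern void do_int3(void);                /* hypothetical guest #BP handler */

static struct trap_info example_traps[2]; /* [1] stays zeroed: terminator   */

static void example_register_traps(void)
{
    example_traps[0].vector  = 3;                       /* breakpoint (#BP) */
    example_traps[0].cs      = FLAT_KERNEL_CS;
    example_traps[0].address = (unsigned long)do_int3;
    TI_SET_DPL(&example_traps[0], 3); /* DPL 3: reachable from guest ring 3 */
    TI_SET_IF(&example_traps[0], 1);  /* clear event-enable on entry        */
    /* HYPERVISOR_set_trap_table(example_traps);  -- wrapper assumed */
}
#endif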
/*
 * The following is all CPU context. Note that the fpu_ctxt block is filled
 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
 */
struct vcpu_guest_context {
    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers   */
#define VGCF_I387_VALID                (1<<0)
#define VGCF_HVM_GUEST                 (1<<1)
#define VGCF_IN_KERNEL                 (1<<2)
#define _VGCF_i387_valid               0
#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
#define _VGCF_hvm_guest                1
#define VGCF_hvm_guest                 (1<<_VGCF_hvm_guest)
#define _VGCF_in_kernel                2
#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
#define _VGCF_failsafe_disables_events 3
#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
    unsigned long flags;                    /* VGCF_* flags                 */
    struct cpu_user_regs user_regs;         /* User-level CPU registers     */
    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
    unsigned long debugreg[8];              /* DR0-DR7 (debug registers)    */
    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
    unsigned long failsafe_callback_eip;
    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);

/*
 * Page-directory addresses above 4GB do not fit into architectural %cr3.
 * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
 * must use the following accessor macros to pack/unpack valid MFNs.
 */
#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))

struct arch_shared_info {
    unsigned long max_pfn;                  /* max pfn that appears in table */
    /* Frame containing list of mfns containing list of mfns containing p2m. */
    xen_pfn_t     pfn_to_mfn_frame_list_list;
    unsigned long nmi_reason;
    uint64_t      pad[32];
};
typedef struct arch_shared_info arch_shared_info_t;

struct arch_vcpu_info {
    unsigned long cr2;
    unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
};
typedef struct arch_vcpu_info arch_vcpu_info_t;

struct xen_callback {
    unsigned long cs;
    unsigned long eip;
};
typedef struct xen_callback xen_callback_t;

#endif /* !__ASSEMBLY__ */

/*
 * Prefix forces emulation of some non-trapping instructions.
 * Currently only CPUID.
 */
#ifdef __ASSEMBLY__
#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
#define XEN_CPUID          XEN_EMULATE_PREFIX cpuid
#else
#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
#define XEN_CPUID          XEN_EMULATE_PREFIX "cpuid"
#endif

#endif /* __XEN_PUBLIC_ARCH_X86_32_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
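/*
 * Worked example of the %cr3 packing macros above (illustrative values,
 * 32-bit unsigned arithmetic): for a page directory at MFN 0x123456, i.e. a
 * machine address above 4GB,
 *   xen_pfn_to_cr3(0x123456) == (0x123456 << 12) | (0x123456 >> 20)
 *                            == 0x23456000 | 0x1 == 0x23456001
 * and the rotation is exactly undone by
 *   xen_cr3_to_pfn(0x23456001) == (0x23456001 >> 12) | (0x23456001 << 20)
 *                              == 0x23456 | 0x100000 == 0x123456
 */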