1 /******************************************************************************
4 * Interface to privileged domain-0 commands.
6 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
9 #include <linux/config.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/string.h>
14 #include <linux/errno.h>
16 #include <linux/mman.h>
17 #include <linux/swap.h>
18 #include <linux/smp_lock.h>
19 #include <linux/highmem.h>
20 #include <linux/pagemap.h>
21 #include <linux/seq_file.h>
22 #include <linux/kthread.h>
23 #include <asm/hypervisor.h>
25 #include <asm/pgalloc.h>
26 #include <asm/pgtable.h>
27 #include <asm/uaccess.h>
29 #include <asm/hypervisor.h>
30 #include <xen/public/privcmd.h>
31 #include <xen/interface/xen.h>
32 #include <xen/interface/dom0_ops.h>
33 #include <xen/xen_proc.h>
/* /proc entries created in privcmd_init(): the privileged-command device
 * node and the read-only "capabilities" file. */
35 static struct proc_dir_entry *privcmd_intf;
36 static struct proc_dir_entry *capabilities_intf;
/* Whitelist of hypercall numbers that user space is allowed to issue via
 * IOCTL_PRIVCMD_HYPERCALL; bits are set in privcmd_init(). */
38 #define NR_HYPERCALLS 64
39 static DECLARE_BITMAP(hypercall_permission_map, NR_HYPERCALLS);
/*
 * ioctl handler for /proc/xen/privcmd.
 *
 * Dispatches the privileged-domain control ioctls:
 *   IOCTL_PRIVCMD_HYPERCALL  - forward a whitelisted hypercall from user
 *                              space straight to Xen (per-arch inline asm);
 *   IOCTL_PRIVCMD_MMAP       - install foreign machine-frame mappings,
 *                              described by an array of (va, mfn, npages)
 *                              entries, into the caller's address space;
 *   IOCTL_PRIVCMD_MMAPBATCH  - like MMAP, but takes one flat MFN array and
 *                              flags per-page failures back in that array.
 *
 * NOTE(review): this view of the file is an elided excerpt -- several
 * original lines (returns, braces, error paths) are not visible here.
 */
41 static int privcmd_ioctl(struct inode *inode, struct file *file,
42 unsigned int cmd, unsigned long data)
/* 'data' is an opaque user pointer; its layout depends on 'cmd'. */
45 void __user *udata = (void __user *) data;
48 case IOCTL_PRIVCMD_HYPERCALL: {
49 privcmd_hypercall_t hypercall;
/* Snapshot the hypercall descriptor (op + up to 5 args) from user space. */
51 if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
54 /* Check hypercall number for validity. */
55 if (hypercall.op >= NR_HYPERCALLS)
/* Only hypercalls whitelisted in privcmd_init() may be forwarded. */
57 if (!test_bit(hypercall.op, hypercall_permission_map))
/* i386: %eax points at the hypercall struct.  Load args[0..4] from the
 * struct into ebx/ecx/edx/esi/edi (8-byte stride -> args appear to be
 * 64-bit slots; TODO confirm against privcmd_hypercall_t), load the op
 * into %eax last, and jump into the per-op slot of hypercall_page.
 * Caller-visible registers are saved/restored around the call. */
61 __asm__ __volatile__ (
62 "pushl %%ebx; pushl %%ecx; pushl %%edx; "
63 "pushl %%esi; pushl %%edi; "
64 "movl 8(%%eax),%%ebx ;"
65 "movl 16(%%eax),%%ecx ;"
66 "movl 24(%%eax),%%edx ;"
67 "movl 32(%%eax),%%esi ;"
68 "movl 40(%%eax),%%edi ;"
69 "movl (%%eax),%%eax ;"
71 "addl $hypercall_page,%%eax ;"
73 "popl %%edi; popl %%esi; popl %%edx; "
74 "popl %%ecx; popl %%ebx"
75 : "=a" (ret) : "0" (&hypercall) : "memory" );
76 #elif defined (__x86_64__)
/* x86-64 hypercall ABI: op in %rax, args 0-2 in %rdi/%rsi/%rdx (via the
 * "=D"/"=S"/"=d" constraints), args 3-4 moved into %r10/%r8; result
 * returned in %rax.  Entry is again via hypercall_page + op offset. */
78 long ign1, ign2, ign3;
79 __asm__ __volatile__ (
80 "movq %8,%%r10; movq %9,%%r8;"
82 "addq $hypercall_page,%%rax ;"
84 : "=a" (ret), "=D" (ign1),
85 "=S" (ign2), "=d" (ign3)
86 : "0" ((unsigned long)hypercall.op),
87 "1" ((unsigned long)hypercall.arg[0]),
88 "2" ((unsigned long)hypercall.arg[1]),
89 "3" ((unsigned long)hypercall.arg[2]),
90 "g" ((unsigned long)hypercall.arg[3]),
91 "g" ((unsigned long)hypercall.arg[4])
92 : "r8", "r10", "memory" );
94 #elif defined (__ia64__)
/* ia64: op in r2, args in r14-r18, hypervisor trap via "break 0x1000",
 * result read back from r8. */
95 __asm__ __volatile__ (
96 ";; mov r14=%2; mov r15=%3; "
97 "mov r16=%4; mov r17=%5; mov r18=%6;"
98 "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
100 : "r" (hypercall.op),
101 "r" (hypercall.arg[0]),
102 "r" (hypercall.arg[1]),
103 "r" (hypercall.arg[2]),
104 "r" (hypercall.arg[3]),
105 "r" (hypercall.arg[4])
106 : "r14","r15","r16","r17","r18","r2","r8","memory");
111 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
112 case IOCTL_PRIVCMD_MMAP: {
/* Entries are copied in from user space in bounded chunks of 32 so the
 * on-stack staging buffer 'msg' stays small. */
113 #define PRIVCMD_MMAP_SZ 32
114 privcmd_mmap_t mmapcmd;
115 privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ];
116 privcmd_mmap_entry_t __user *p;
119 if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
/* Walk the user-supplied entry array chunk by chunk. */
124 for (i = 0; i < mmapcmd.num;
125 i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
/* n = size of this chunk: a full PRIVCMD_MMAP_SZ or the remainder. */
126 int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
127 PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
129 if (copy_from_user(&msg, p,
130 n*sizeof(privcmd_mmap_entry_t)))
133 for (j = 0; j < n; j++) {
/* Each entry must land inside a VMA the caller already mmap()ed. */
134 struct vm_area_struct *vma =
135 find_vma( current->mm, msg[j].va );
/* Reject kernel-space target addresses (consequent line elided;
 * presumably an error bail-out -- TODO confirm in full source). */
140 if (msg[j].va > PAGE_OFFSET)
/* Range check: the whole [va, va + npages pages) span must fit
 * (comparison target elided; presumably vma->vm_end). */
143 if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
/* Point the PTEs at the foreign machine frames. */
147 if ((rc = direct_remap_pfn_range(
151 msg[j].npages<<PAGE_SHIFT,
161 case IOCTL_PRIVCMD_MMAPBATCH: {
162 privcmd_mmapbatch_t m;
163 struct vm_area_struct *vma = NULL;
165 unsigned long addr, mfn;
168 if (copy_from_user(&m, udata, sizeof(m))) {
/* Mapping our own frames through this path is rejected (error path
 * elided in this excerpt). */
173 if (m.dom == DOMID_SELF) {
178 vma = find_vma(current->mm, m.addr);
/* Same sanity checks as MMAP: user-space address, within the VMA. */
184 if (m.addr > PAGE_OFFSET) {
189 if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
/* Map one MFN per page; on per-page failure, flag the entry in the
 * user array in place by OR-ing 0xF0000000 into the MFN. */
196 for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
197 if (get_user(mfn, p))
200 ret = direct_remap_pfn_range(vma, addr & PAGE_MASK,
202 vma->vm_page_prot, m.dom);
204 put_user(0xF0000000 | mfn, p);
/* Diagnostic dump for the batch error path. */
211 printk("batch_err ret=%d vma=%p addr=%lx "
212 "num=%d arr=%p %lx-%lx\n",
213 ret, vma, (unsigned long)m.addr, m.num, m.arr,
214 vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
228 #ifndef HAVE_ARCH_PRIVCMD_MMAP
/*
 * mmap handler for /proc/xen/privcmd.
 * Only tags the VMA with the right flags; the actual foreign-frame PTEs
 * are installed later by the MMAP/MMAPBATCH ioctls via
 * direct_remap_pfn_range().
 */
229 static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
231 /* DONTCOPY is essential for Xen as copy_page_range is broken. */
232 vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
/* File operations for /proc/xen/privcmd (attached in privcmd_init()). */
238 static struct file_operations privcmd_file_ops = {
239 .ioctl = privcmd_ioctl,
240 .mmap = privcmd_mmap,
/*
 * read_proc handler for /proc/xen/capabilities.
 * Emits "control_d\n" when this kernel is running as the initial
 * (control) domain; otherwise nothing is written here (the remaining
 * lines of the function are elided in this excerpt).
 */
243 static int capabilities_read(char *page, char **start, off_t off,
244 int count, int *eof, void *data)
249 if (xen_start_info->flags & SIF_INITDOMAIN)
250 len = sprintf( page, "control_d\n" );
/*
 * Module init: bail out unless running under Xen, populate the hypercall
 * permission bitmap, then create the /proc/xen/privcmd and
 * /proc/xen/capabilities entries (mode 0400, root-readable only).
 */
256 static int __init privcmd_init(void)
258 if (!is_running_on_xen())
261 /* Set of hypercalls that privileged applications may execute. */
262 set_bit(__HYPERVISOR_acm_op, hypercall_permission_map);
263 set_bit(__HYPERVISOR_dom0_op, hypercall_permission_map);
264 set_bit(__HYPERVISOR_event_channel_op, hypercall_permission_map);
265 set_bit(__HYPERVISOR_memory_op, hypercall_permission_map);
266 set_bit(__HYPERVISOR_mmu_update, hypercall_permission_map);
267 set_bit(__HYPERVISOR_mmuext_op, hypercall_permission_map);
268 set_bit(__HYPERVISOR_xen_version, hypercall_permission_map);
269 set_bit(__HYPERVISOR_sched_op, hypercall_permission_map);
270 set_bit(__HYPERVISOR_sched_op_compat, hypercall_permission_map);
271 set_bit(__HYPERVISOR_event_channel_op_compat,
272 hypercall_permission_map);
273 set_bit(__HYPERVISOR_hvm_op, hypercall_permission_map);
/* proc entry creation is best-effort: a NULL return is tolerated and
 * simply leaves the corresponding interface absent. */
275 privcmd_intf = create_xen_proc_entry("privcmd", 0400);
276 if (privcmd_intf != NULL)
277 privcmd_intf->proc_fops = &privcmd_file_ops;
279 capabilities_intf = create_xen_proc_entry("capabilities", 0400 );
280 if (capabilities_intf != NULL)
281 capabilities_intf->read_proc = capabilities_read;
/* Run during kernel boot as a standard initcall. */
286 __initcall(privcmd_init);