1 /******************************************************************************
4 * Interface to privileged domain-0 commands.
6 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/string.h>
13 #include <linux/errno.h>
15 #include <linux/mman.h>
16 #include <linux/swap.h>
17 #include <linux/smp_lock.h>
18 #include <linux/highmem.h>
19 #include <linux/pagemap.h>
20 #include <linux/seq_file.h>
21 #include <linux/kthread.h>
22 #include <asm/hypervisor.h>
24 #include <asm/pgalloc.h>
25 #include <asm/pgtable.h>
26 #include <asm/uaccess.h>
28 #include <asm/hypervisor.h>
29 #include <xen/public/privcmd.h>
30 #include <xen/interface/xen.h>
31 #include <xen/interface/dom0_ops.h>
32 #include <xen/xen_proc.h>
/* /proc entries created in privcmd_init(): the privileged-command
 * interface node and the read-only "capabilities" node. */
34 static struct proc_dir_entry *privcmd_intf;
35 static struct proc_dir_entry *capabilities_intf;
/* Forward declaration: the generic single-shot-mapping guard below is
 * only built when the architecture does not provide its own privcmd
 * mmap implementation. */
37 #ifndef HAVE_ARCH_PRIVCMD_MMAP
38 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
/*
 * ioctl handler for /proc/xen/privcmd.  Dispatches on 'cmd':
 *   IOCTL_PRIVCMD_HYPERCALL - forward an arbitrary hypercall to Xen,
 *                             with per-architecture inline assembly;
 *   IOCTL_PRIVCMD_MMAP      - map contiguous ranges of foreign machine
 *                             frames into the caller's address space;
 *   IOCTL_PRIVCMD_MMAPBATCH - map a batch of individual machine frames.
 * 'data' is a user-space pointer to the command-specific argument
 * structure.  NOTE(review): several statements of this function
 * (returns, braces, the actual call instructions in the asm blocks)
 * are not visible in this view; comments below describe only what the
 * visible lines establish.
 */
41 static int privcmd_ioctl(struct inode *inode, struct file *file,
42 unsigned int cmd, unsigned long data)
45 void __user *udata = (void __user *) data;
48 case IOCTL_PRIVCMD_HYPERCALL: {
49 privcmd_hypercall_t hypercall;
/* Copy the hypercall number and its five arguments from user space. */
51 if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
/* i386: %eax points at 'hypercall'; load the five arguments into
 * ebx/ecx/edx/esi/edi from 8-byte slots in the struct, then load the
 * op number into %eax and offset it into the hypercall_page
 * trampoline.  The push/pop pairs preserve the argument registers
 * around the call (the call instruction itself is in elided lines). */
55 __asm__ __volatile__ (
56 "pushl %%ebx; pushl %%ecx; pushl %%edx; "
57 "pushl %%esi; pushl %%edi; "
58 "movl 8(%%eax),%%ebx ;"
59 "movl 16(%%eax),%%ecx ;"
60 "movl 24(%%eax),%%edx ;"
61 "movl 32(%%eax),%%esi ;"
62 "movl 40(%%eax),%%edi ;"
63 "movl (%%eax),%%eax ;"
65 "addl $hypercall_page,%%eax ;"
67 "popl %%edi; popl %%esi; popl %%edx; "
68 "popl %%ecx; popl %%ebx"
69 : "=a" (ret) : "0" (&hypercall) : "memory" );
70 #elif defined (__x86_64__)
/* x86_64: rdi/rsi/rdx carry arg[0..2] via the "=D"/"=S"/"=d"
 * constraints, args 3 and 4 are moved into r10/r8 explicitly, and
 * rax holds the op number offset into hypercall_page.  ign1..ign3
 * absorb the clobbered argument registers. */
72 long ign1, ign2, ign3;
73 __asm__ __volatile__ (
74 "movq %8,%%r10; movq %9,%%r8;"
76 "addq $hypercall_page,%%rax ;"
78 : "=a" (ret), "=D" (ign1),
79 "=S" (ign2), "=d" (ign3)
80 : "0" ((unsigned long)hypercall.op),
81 "1" ((unsigned long)hypercall.arg[0]),
82 "2" ((unsigned long)hypercall.arg[1]),
83 "3" ((unsigned long)hypercall.arg[2]),
84 "g" ((unsigned long)hypercall.arg[3]),
85 "g" ((unsigned long)hypercall.arg[4])
86 : "r8", "r10", "memory" );
88 #elif defined (__ia64__)
/* ia64: arguments go in r14-r18, the op in r2; 'break 0x1000' traps
 * into the hypervisor and the result comes back in r8. */
89 __asm__ __volatile__ (
90 ";; mov r14=%2; mov r15=%3; "
91 "mov r16=%4; mov r17=%5; mov r18=%6;"
92 "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
95 "r" (hypercall.arg[0]),
96 "r" (hypercall.arg[1]),
97 "r" (hypercall.arg[2]),
98 "r" (hypercall.arg[3]),
99 "r" (hypercall.arg[4])
100 : "r14","r15","r16","r17","r18","r2","r8","memory");
105 case IOCTL_PRIVCMD_MMAP: {
106 privcmd_mmap_t mmapcmd;
107 privcmd_mmap_entry_t msg;
108 privcmd_mmap_entry_t __user *p;
109 struct mm_struct *mm = current->mm;
110 struct vm_area_struct *vma;
/* Foreign-frame mapping is restricted to the privileged dom0. */
114 if (!is_initial_xendomain())
117 if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
121 if (copy_from_user(&msg, p, sizeof(msg)))
124 down_read(&mm->mmap_sem);
/* The target VMA must start exactly at the first entry's va and must
 * not have been used for a foreign mapping before (single-shot). */
126 vma = find_vma(mm, msg.va);
128 if (!vma || (msg.va != vma->vm_start) ||
129 !privcmd_enforce_singleshot_mapping(vma))
/* Map each entry in turn; entries must be contiguous in va space. */
134 for (i = 0; i < mmapcmd.num; i++) {
136 if (copy_from_user(&msg, p, sizeof(msg)))
139 /* Do not allow range to wrap the address space. */
141 if ((msg.npages > (LONG_MAX >> PAGE_SHIFT)) ||
142 ((unsigned long)(msg.npages << PAGE_SHIFT) >= -va))
145 /* Range chunks must be contiguous in va space. */
146 if ((msg.va != va) ||
147 ((msg.va+(msg.npages<<PAGE_SHIFT)) > vma->vm_end))
150 if ((rc = direct_remap_pfn_range(
154 msg.npages << PAGE_SHIFT,
/* Advance to where the next chunk must begin. */
160 va += msg.npages << PAGE_SHIFT;
166 up_read(&mm->mmap_sem);
171 case IOCTL_PRIVCMD_MMAPBATCH: {
172 privcmd_mmapbatch_t m;
173 struct mm_struct *mm = current->mm;
174 struct vm_area_struct *vma;
176 unsigned long addr, mfn;
/* Again dom0-only. */
179 if (!is_initial_xendomain())
182 if (copy_from_user(&m, udata, sizeof(m)))
/* Reject empty batches and counts that would overflow a page shift. */
185 if ((m.num <= 0) || (m.num > (LONG_MAX >> PAGE_SHIFT)))
188 down_read(&mm->mmap_sem);
/* The batch must exactly cover a fresh (single-shot) VMA. */
190 vma = find_vma(mm, m.addr);
192 (m.addr != vma->vm_start) ||
193 ((m.addr + ((unsigned long)m.num<<PAGE_SHIFT)) !=
195 !privcmd_enforce_singleshot_mapping(vma)) {
196 up_read(&mm->mmap_sem);
/* Map one machine frame per page; on per-frame failure the mfn is
 * written back to the user array tagged with 0xF0000000 so the caller
 * can see which frames failed. */
202 for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
203 if (get_user(mfn, p)) {
204 up_read(&mm->mmap_sem);
208 ret = direct_remap_pfn_range(vma, addr & PAGE_MASK,
210 vma->vm_page_prot, m.dom);
212 put_user(0xF0000000 | mfn, p);
215 up_read(&mm->mmap_sem);
228 #ifndef HAVE_ARCH_PRIVCMD_MMAP
/* Fault handler for privcmd VMAs.  These mappings are populated
 * explicitly via the mmap ioctls, so any page fault on them is an
 * error: always deliver SIGBUS. */
229 static struct page *privcmd_nopage(struct vm_area_struct *vma,
230 unsigned long address,
233 return NOPAGE_SIGBUS;
/* VM operations for privcmd mappings; only the fault path needs
 * special handling. */
236 static struct vm_operations_struct privcmd_vm_ops = {
237 .nopage = privcmd_nopage
/* mmap handler for /proc/xen/privcmd: prepare a VMA for later foreign
 * mappings performed by the ioctl path.  No pages are mapped here;
 * vm_private_data is cleared so privcmd_enforce_singleshot_mapping()
 * can later claim the VMA exactly once. */
240 static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
242 /* Unsupported for auto-translate guests. */
243 if (xen_feature(XENFEAT_auto_translated_physmap))
246 /* DONTCOPY is essential for Xen as copy_page_range is broken. */
247 vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
248 vma->vm_ops = &privcmd_vm_ops;
249 vma->vm_private_data = NULL;
/* Atomically claim a VMA for a single foreign mapping: vm_private_data
 * flips NULL -> (void *)1 via xchg, so only the first caller on a
 * given VMA gets a non-zero (success) result. */
254 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
256 return (xchg(&vma->vm_private_data, (void *)1) == NULL);
/* File operations wired onto the /proc/xen/privcmd entry. */
260 static struct file_operations privcmd_file_ops = {
261 .ioctl = privcmd_ioctl,
262 .mmap = privcmd_mmap,
/* read_proc handler for /proc/xen/capabilities: reports "control_d\n"
 * when running as the privileged initial domain (dom0), otherwise the
 * file reads as empty. */
265 static int capabilities_read(char *page, char **start, off_t off,
266 int count, int *eof, void *data)
271 if (is_initial_xendomain())
272 len = sprintf( page, "control_d\n" );
/* Module init: create the /proc/xen "privcmd" and "capabilities"
 * entries (mode 0400, root-readable only) and attach their handlers.
 * Bails out early when not running under Xen.  Creation failures are
 * tolerated silently (the NULL checks simply skip the wiring). */
278 static int __init privcmd_init(void)
280 if (!is_running_on_xen())
283 privcmd_intf = create_xen_proc_entry("privcmd", 0400);
284 if (privcmd_intf != NULL)
285 privcmd_intf->proc_fops = &privcmd_file_ops;
287 capabilities_intf = create_xen_proc_entry("capabilities", 0400 );
288 if (capabilities_intf != NULL)
289 capabilities_intf->read_proc = capabilities_read;
294 __initcall(privcmd_init);