- for ( j = 0; j < n; j++ )
- {
- struct vm_area_struct *vma =
- find_vma( current->mm, msg[j].va );
-
- if ( !vma )
- return -EINVAL;
-
- if ( msg[j].va > PAGE_OFFSET )
- return -EINVAL;
-
- if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
- return -EINVAL;
-
- if ( (rc = direct_remap_area_pages(vma->vm_mm,
- msg[j].va&PAGE_MASK,
- msg[j].mfn<<PAGE_SHIFT,
- msg[j].npages<<PAGE_SHIFT,
- vma->vm_page_prot,
- mmapcmd.dom)) < 0 )
- return rc;
- }
- }
- ret = 0;
- }
- break;
-
- case IOCTL_PRIVCMD_MMAPBATCH:
- {
- mmu_update_t u;
- privcmd_mmapbatch_t m;
- struct vm_area_struct *vma = NULL;
- unsigned long *p, addr;
- unsigned long mfn;
- int i;
-
- if ( copy_from_user(&m, (void *)data, sizeof(m)) )
- { ret = -EFAULT; goto batch_err; }
-
- vma = find_vma( current->mm, m.addr );
-
- if ( !vma )
- { ret = -EINVAL; goto batch_err; }
-
- if ( m.addr > PAGE_OFFSET )
- { ret = -EFAULT; goto batch_err; }
-
- if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
- { ret = -EFAULT; goto batch_err; }
-
- p = m.arr;
- addr = m.addr;
- for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
- {
- if ( get_user(mfn, p) )
- return -EFAULT;
-
- u.val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
-
- __direct_remap_area_pages(vma->vm_mm,
- addr,
- PAGE_SIZE,
- &u);
-
- if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
- put_user(0xF0000000 | mfn, p);
- }
-
- ret = 0;
- break;
-
- batch_err:
- printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
- ret, vma, m.addr, m.num, m.arr,
- vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
- break;
- }
- break;
+ /*
+  * IOCTL_PRIVCMD_MMAP (loop tail): for each privcmd mmap entry,
+  * validate the target virtual-address range and remap the foreign
+  * machine-frame range into it via direct_remap_pfn_range().
+  */
+ for (j = 0; j < n; j++) {
+ struct vm_area_struct *vma =
+ find_vma( current->mm, msg[j].va );
+
+ /* Target VA must lie inside an existing user mapping. */
+ if (!vma)
+ return -EINVAL;
+
+ /* Reject kernel-space addresses.  NOTE(review): this check runs
+  * after find_vma() has already been called on the address --
+  * confirm the ordering is intentional. */
+ if (msg[j].va > PAGE_OFFSET)
+ return -EINVAL;
+
+ /* The whole requested range must fit within this single VMA.
+  * NOTE(review): npages << PAGE_SHIFT could overflow for very
+  * large npages -- verify the caller bounds it upstream. */
+ if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
+ > vma->vm_end )
+ return -EINVAL;
+
+ /* Map npages frames starting at msg[j].mfn from domain
+  * mmapcmd.dom at the page-aligned VA; abort the whole request
+  * on the first failure, propagating the error code. */
+ if ((rc = direct_remap_pfn_range(
+ vma,
+ msg[j].va&PAGE_MASK,
+ msg[j].mfn,
+ msg[j].npages<<PAGE_SHIFT,
+ vma->vm_page_prot,
+ mmapcmd.dom)) < 0)
+ return rc;
+ }
+ }
+ ret = 0;
+ }
+ break;
+
+ /*
+  * IOCTL_PRIVCMD_MMAPBATCH: map a batch of individual machine frames
+  * (listed in the user-supplied xen_pfn_t array m.arr) into
+  * consecutive pages starting at m.addr, all owned by foreign
+  * domain m.dom.  Per-page failures are reported back in-place in
+  * the user array rather than aborting the whole batch.
+  */
+ case IOCTL_PRIVCMD_MMAPBATCH: {
+ privcmd_mmapbatch_t m;
+ struct vm_area_struct *vma = NULL;
+ xen_pfn_t __user *p;
+ unsigned long addr, mfn;
+ int i;
+
+ if (copy_from_user(&m, udata, sizeof(m))) {
+ ret = -EFAULT;
+ goto batch_err;
+ }
+
+ /* Batch-mapping one's own frames through this path is refused. */
+ if (m.dom == DOMID_SELF) {
+ ret = -EINVAL;
+ goto batch_err;
+ }
+
+ /* Destination range must lie inside an existing user VMA... */
+ vma = find_vma(current->mm, m.addr);
+ if (!vma) {
+ ret = -EINVAL;
+ goto batch_err;
+ }
+
+ /* ...below the kernel boundary.  NOTE(review): checked after
+  * find_vma(), mirroring the MMAP path -- confirm intentional. */
+ if (m.addr > PAGE_OFFSET) {
+ ret = -EFAULT;
+ goto batch_err;
+ }
+
+ /* ...and fit entirely within that single VMA.  NOTE(review):
+  * m.num << PAGE_SHIFT may overflow for huge m.num. */
+ if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
+ ret = -EFAULT;
+ goto batch_err;
+ }
+
+ /* Map one frame per page.  NOTE(review): a get_user() fault here
+  * returns directly, skipping the batch_err diagnostic printk. */
+ p = m.arr;
+ addr = m.addr;
+ for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
+ if (get_user(mfn, p))
+ return -EFAULT;
+
+ ret = direct_remap_pfn_range(vma, addr & PAGE_MASK,
+ mfn, PAGE_SIZE,
+ vma->vm_page_prot, m.dom);
+ /* Per-page failure: flag this entry back to userspace by
+  * OR-ing 0xF0000000 into the frame number, then continue
+  * with the rest of the batch. */
+ if (ret < 0)
+ put_user(0xF0000000 | mfn, p);
+ }
+
+ /* The batch as a whole reports success even if individual
+  * entries were flagged above. */
+ ret = 0;
+ break;
+
+ batch_err:
+ printk("batch_err ret=%d vma=%p addr=%lx "
+ "num=%d arr=%p %lx-%lx\n",
+ ret, vma, (unsigned long)m.addr, m.num, m.arr,
+ vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
+ break;
+ }
+ break;