3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/vmalloc.h>
6 #include <asm/uaccess.h>
7 #include <xen/driver_util.h>
9 static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
11 /* apply_to_page_range() does all the hard work. */
15 struct vm_struct *alloc_vm_area(unsigned long size)
17 struct vm_struct *area;
19 area = get_vm_area(size, VM_IOREMAP);
24 * This ensures that page tables are constructed for this region
25 * of kernel virtual address space and mapped into init_mm.
27 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
28 area->size, f, NULL)) {
35 EXPORT_SYMBOL_GPL(alloc_vm_area);
37 void free_vm_area(struct vm_struct *area)
39 struct vm_struct *ret;
40 ret = remove_vm_area(area->addr);
44 EXPORT_SYMBOL_GPL(free_vm_area);
46 void lock_vm_area(struct vm_struct *area)
52 * Prevent context switch to a lazy mm that doesn't have this area
53 * mapped into its page tables.
58 * Ensure that the page tables are mapped into the current mm. The
59 * page-fault path will copy the page directory pointers from init_mm.
61 for (i = 0; i < area->size; i += PAGE_SIZE)
62 (void)__get_user(c, (char __user *)area->addr + i);
64 EXPORT_SYMBOL_GPL(lock_vm_area);
/*
 * unlock_vm_area - undo lock_vm_area()
 * @area: the region passed to the matching lock_vm_area() call
 *
 * Re-enables preemption; @area itself needs no per-page teardown.
 */
void unlock_vm_area(struct vm_struct *area)
{
	preempt_enable();
}
EXPORT_SYMBOL_GPL(unlock_vm_area);