+
+
+static struct page *
+special_mapping_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ struct page **pages;
+
+ BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
+ address -= vma->vm_start;
+ for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
+ address -= PAGE_SIZE;
+
+ if (*pages) {
+ get_page(*pages);
+ return *pages;
+ }
+
+ return NOPAGE_SIGBUS;
+}
+
/*
 * VM operations for special mappings: only faulting needs handling;
 * the page array in vm_private_data backs every fault.
 */
static struct vm_operations_struct special_mapping_vmops = {
	.nopage = special_mapping_nopage,
};
+
+unsigned int vdso_populate = 1;
+
+/*
+ * Insert a new vma covering the given region, with the given flags and
+ * protections. Its pages are supplied by the given null-terminated array.
+ * The region past the last page supplied will always produce SIGBUS.
+ * The array pointer and the pages it points to are assumed to stay alive
+ * for as long as this mapping might exist.
+ */
+int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long vm_flags, pgprot_t pgprot,
+ struct page **pages)
+{
+ struct vm_area_struct *vma;
+ int err;
+
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (unlikely(vma == NULL))
+ return -ENOMEM;
+ memset(vma, 0, sizeof(*vma));
+
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+
+ vma->vm_flags = vm_flags;
+ vma->vm_page_prot = pgprot;
+
+ vma->vm_ops = &special_mapping_vmops;
+ vma->vm_private_data = pages;
+
+ insert_vm_struct(mm, vma);
+ mm->total_vm += len >> PAGE_SHIFT;
+
+ if (!vdso_populate)
+ return 0;
+
+ err = 0;
+ while (*pages) {
+ struct page *page = *pages++;
+ get_page(page);
+ err = install_page(mm, vma, addr, page, vma->vm_page_prot);
+ if (err) {
+ put_page(page);
+ break;
+ }
+ addr += PAGE_SIZE;
+ }
+
+ return err;
+}