static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
- return 1;
+ return 0;
}
__setup("norandmaps", disable_randmaps);
anon_vma_unlink(vma);
unlink_file_vma(vma);
- if (is_vm_hugetlb_page(vma)) {
+ if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
} else {
* Optimization: gather nearby vmas into one call down
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
- && !is_vm_hugetlb_page(next)) {
+ && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
+ HPAGE_SIZE)) {
vma = next;
next = vma->vm_next;
anon_vma_unlink(vma);
{
unsigned long pfn = pte_pfn(pte);
- if (unlikely(vma->vm_flags & VM_PFNMAP)) {
+ if (vma->vm_flags & VM_PFNMAP) {
unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
* we should just do "return pfn_to_page(pfn)", but
* in the meantime we check that we get a valid pfn,
* and that the resulting page looks ok.
+ *
+ * Remove this test eventually!
*/
if (unlikely(!pfn_valid(pfn))) {
- if (!(vma->vm_flags & VM_RESERVED))
- print_bad_pte(vma, pte, addr);
+ print_bad_pte(vma, pte, addr);
return NULL;
}
continue;
}
-#ifdef CONFIG_XEN
- if (vma && (vma->vm_flags & VM_FOREIGN)) {
- struct page **map = vma->vm_private_data;
- int offset = (start - vma->vm_start) >> PAGE_SHIFT;
- if (map[offset] != NULL) {
- if (pages) {
- struct page *page = map[offset];
-
- pages[i] = page;
- get_page(page);
- }
- if (vmas)
- vmas[i] = vma;
- i++;
- start += PAGE_SIZE;
- len--;
- continue;
- }
- }
-#endif
if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
|| !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
}
if (pages) {
pages[i] = page;
-
- flush_anon_page(page, start);
flush_dcache_page(page);
}
if (vmas)
* The page has to be a nice clean _individual_ kernel allocation.
* If you allocate a compound page, you need to have marked it as
* such (__GFP_COMP), or manually just split the page up yourself
- * (see split_page()).
+ * (which is mainly an issue of doing "set_page_count(page, 1)" for
+ * each sub-page, and then freeing them one by one when you free
+ * them rather than freeing it as a compound page).
*
* NOTE! Traditionally this was done with "remap_pfn_range()" which
* took an arbitrary page protection parameter. This doesn't allow
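For reference, a hypothetical sketch (not part of the patch) of the manual split the comment above describes; the function name and the order-2 size are invented, and newer trees wrap these same steps in split_page():

#include <linux/mm.h>

/*
 * Illustration only: give each sub-page of a non-compound higher-order
 * allocation its own reference count, so the pieces can be inserted and
 * freed individually, as the comment above requires.
 */
static struct page *alloc_split_order2(gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, 2);	/* 4 contiguous pages */
	int i;

	if (!page)
		return NULL;
	for (i = 1; i < (1 << 2); i++)
		set_page_count(page + i, 1);		/* the manual "split" */
	return page;
}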
}
EXPORT_SYMBOL(remap_pfn_range);
-#ifdef CONFIG_XEN
-static inline int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data)
-{
- pte_t *pte;
- int err;
- struct page *pmd_page;
- spinlock_t *ptl;
-
- pte = (mm == &init_mm) ?
- pte_alloc_kernel(pmd, addr) :
- pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (!pte)
- return -ENOMEM;
-
- BUG_ON(pmd_huge(*pmd));
-
- pmd_page = pmd_page(*pmd);
-
- do {
- err = fn(pte, pmd_page, addr, data);
- if (err)
- break;
- } while (pte++, addr += PAGE_SIZE, addr != end);
-
- if (mm != &init_mm)
- pte_unmap_unlock(pte-1, ptl);
- return err;
-}
-
-static inline int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
- unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data)
-{
- pmd_t *pmd;
- unsigned long next;
- int err;
-
- pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
- err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
- if (err)
- break;
- } while (pmd++, addr = next, addr != end);
- return err;
-}
-
-static inline int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
- unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data)
-{
- pud_t *pud;
- unsigned long next;
- int err;
-
- pud = pud_alloc(mm, pgd, addr);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
- err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
- if (err)
- break;
- } while (pud++, addr = next, addr != end);
- return err;
-}
-
-/*
- * Scan a region of virtual memory, filling in page tables as necessary
- * and calling a provided function on each leaf page table.
- */
-int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
- unsigned long size, pte_fn_t fn, void *data)
-{
- pgd_t *pgd;
- unsigned long next;
- unsigned long end = addr + size;
- int err;
-
- BUG_ON(addr >= end);
- pgd = pgd_offset(mm, addr);
- do {
- next = pgd_addr_end(addr, end);
- err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
- if (err)
- break;
- } while (pgd++, addr = next, addr != end);
- return err;
-}
-EXPORT_SYMBOL_GPL(apply_to_page_range);
-#endif
-
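To show what the block removed above provided, here is a hypothetical caller (the helper names and the counting policy are invented for illustration); the callback signature follows the fn(pte, pmd_page, addr, data) call made by the removed apply_to_pte_range():

#include <linux/mm.h>

/*
 * Illustration only, not part of the patch: count the present leaf ptes in
 * a range by letting apply_to_page_range() walk pgd->pud->pmd->pte,
 * allocating missing intermediate levels and, for a user mm, taking the
 * pte lock before invoking the callback on each entry.
 */
static int count_present_pte(pte_t *pte, struct page *pmd_page,
			     unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_present_pages(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long size)
{
	unsigned long count = 0;

	if (apply_to_page_range(mm, addr, size, count_present_pte, &count))
		return 0;	/* allocation failure or callback error */
	return count;
}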
/*
* handle_pte_fault chooses page fault handler according to an entry
* which was read non-atomically. Before making any commitment, on
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
memset(kaddr, 0, PAGE_SIZE);
kunmap_atomic(kaddr, KM_USER0);
+ flush_dcache_page(dst);
return;
}
if (!vma)
return -1;
write = (vma->vm_flags & VM_WRITE) != 0;
- BUG_ON(addr >= end);
- BUG_ON(end > vma->vm_end);
+ if (addr >= end)
+ BUG();
+ if (end > vma->vm_end)
+ BUG();
len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
ret = get_user_pages(current, current->mm, addr,
len, write, 0, NULL, NULL);
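A quick worked example of the rounded-up page count computed above, assuming PAGE_SIZE is 4096 (the addresses are made up):

/*
 * addr = 0x1000, end = 0x3001:
 *   len = (0x3001 + 0x1000 - 1) / 0x1000  -  0x1000 / 0x1000
 *       = 0x4000 / 0x1000 - 1
 *       = 4 - 1 = 3
 * i.e. the three pages at 0x1000, 0x2000 and 0x3000 touched by [addr, end).
 */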