/*
 *	linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_none(pte))
		return;
	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);

		flush_cache_page(vma, addr);
		pte = ptep_clear_flush(vma, addr, ptep);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)) {
				if (pte_dirty(pte))
					set_page_dirty(page);
				page_remove_rmap(page, ptep);
				page_cache_release(page);
				mm->rss--;
			}
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(ptep);
	}
}
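/*
 * Note: a nonlinear ("file") pte is a not-present pte that encodes a
 * file page offset directly (see the per-arch pgoff_to_pte() and
 * pte_to_pgoff() helpers); the pte_file() test above is what keeps
 * zap_pte() from misreading such an entry as a swap entry.
 */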
/*
 * Install a page to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t pte_val;
	struct pte_chain *pte_chain;

	pte_chain = pte_chain_alloc(GFP_KERNEL);
	if (!pte_chain)
		goto err;
	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pmd = pmd_alloc(mm, pgd, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	zap_pte(mm, vma, addr, pte);

	mm->rss++;
	flush_icache_page(vma, page);
	set_pte(pte, mk_pte(page, prot));
	pte_chain = page_add_rmap(page, pte, pte_chain);
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);
	spin_unlock(&mm->page_table_lock);
	pte_chain_free(pte_chain);
	return 0;

err_unlock:
	spin_unlock(&mm->page_table_lock);
	pte_chain_free(pte_chain);
err:
	return err;
}
EXPORT_SYMBOL(install_page);
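/*
 * Illustrative caller sketch (not part of this file): a vma's
 * ->populate() method, e.g. filemap_populate() in mm/filemap.c, walks
 * the remapped range page by page, looks each page up in (or reads it
 * into) the page cache, then hands it to install_page():
 *
 *	err = install_page(mm, vma, addr, page, prot);
 *	if (err)
 *		page_cache_release(page);	// on error, drop our own reference
 */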
/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t pte_val;

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pmd = pmd_alloc(mm, pgd, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	zap_pte(mm, vma, addr, pte);

	set_pte(pte, pgoff_to_pte(pgoff));
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);
	spin_unlock(&mm->page_table_lock);
	return 0;

err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
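/*
 * Illustrative contrast with install_page() (sketch, cf.
 * filemap_populate(); page_not_in_cache is a stand-in condition):
 * under MAP_NONBLOCK a ->populate() method must not start IO, so when
 * the page is not yet present it can install a file pte instead and
 * let the fault path do the read later:
 *
 *	if (nonblock && page_not_in_cache)
 *		err = install_file_pte(mm, vma, addr, pgoff, prot);
 */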
/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
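/*
 * Illustrative userspace usage (sketch, error handling elided): map a
 * window once with mmap(MAP_SHARED), then rebind single pages of that
 * window to arbitrary file offsets; note that pgoff is counted in pages.
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	int fd = open("bigfile", O_RDWR);
 *	char *win = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// rebind the first window page to file page 42:
 *	remap_file_pages(win, psz, 0, 42, 0);
 */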
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
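	/*
	 * Worked example for the check above (illustrative numbers): with
	 * 4K pages and PTE_FILE_MAX_BITS == 29, a file pte can hold page
	 * offsets up to 2^29 - 1, i.e. file offsets below 2^29 * 2^12 = 2TB,
	 * so a remap whose last page sits at or past pgoff 2^29 must fail.
	 */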
	/* We need down_write() to change vma->vm_flags. */
	down_write(&mm->mmap_sem);
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma:
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
				end <= vma->vm_end) {

		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff)
			vma->vm_flags |= VM_NONLINEAR;

		/* ->populate can take a long time, so downgrade the lock. */
		downgrade_write(&mm->mmap_sem);
		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock. (Locks can't be upgraded.)
		 */
		up_read(&mm->mmap_sem);
	} else
		up_write(&mm->mmap_sem);

	return err;
}