/*
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_none(pte))
		return;
	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);

		flush_cache_page(vma, addr);
		pte = ptep_clear_flush(vma, addr, ptep);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)) {
				if (pte_dirty(pte))
					set_page_dirty(page);
				page_remove_rmap(page);
				page_cache_release(page);
				mm->rss--;
			}
		}
	} else {
		/* File pte or swap pte: no struct page to release. */
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(ptep);
	}
}

/*
 * Install a file page to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	int err = -ENOMEM;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pte_val;

	/*
	 * We use page_add_file_rmap below: if install_page is
	 * ever extended to anonymous pages, this will warn us.
	 */
	BUG_ON(!page_mapping(page));

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pmd = pmd_alloc(mm, pgd, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	zap_pte(mm, vma, addr, pte);

	mm->rss++;
	flush_icache_page(vma, page);
	set_pte(pte, mk_pte(page, prot));
	page_add_file_rmap(page);
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);

	err = 0;
err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
EXPORT_SYMBOL(install_page);
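
/*
 * Caller sketch (illustrative only, not part of this file): this is
 * roughly how a filesystem's ->populate implementation, such as
 * filemap_populate() in mm/filemap.c, uses install_page() once it has a
 * pagecache page in hand. On success the new pte consumes the page
 * reference, so the caller only drops it on error.
 *
 *	struct page *page = filemap_getpage(file, pgoff, nonblock);
 *
 *	if (page) {
 *		err = install_page(mm, vma, addr, page, prot);
 *		if (err) {
 *			page_cache_release(page);
 *			return err;
 *		}
 *	}
 */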

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pte_val;

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pmd = pmd_alloc(mm, pgd, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	zap_pte(mm, vma, addr, pte);

	set_pte(pte, pgoff_to_pte(pgoff));
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);
	spin_unlock(&mm->page_table_lock);
	return 0;

err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
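
/*
 * Caller sketch (illustrative only): when MAP_NONBLOCK forbids IO and the
 * page is not yet in the pagecache, a ->populate implementation installs
 * a file pte recording the offset instead, so a later fault knows which
 * file page to read:
 *
 *	if (!page) {
 *		err = install_file_pte(mm, vma, addr, pgoff, prot);
 *		if (err)
 *			return err;
 *	}
 */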

/**
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/* We need down_write() to change vma->vm_flags. */
	down_write(&mm->mmap_sem);
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
	 * or VM_LOCKED, but VM_LOCKED could be revoked later on).
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data || (vma->vm_flags & VM_RESERVED)) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
			end <= vma->vm_end) {

		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != linear_page_index(vma, start) &&
			!(vma->vm_flags & VM_NONLINEAR)) {
			mapping = vma->vm_file->f_mapping;
			spin_lock(&mapping->i_mmap_lock);
			flush_dcache_mmap_lock(mapping);
			vma->vm_flags |= VM_NONLINEAR;
			vma_prio_tree_remove(vma, &mapping->i_mmap);
			vma_prio_tree_init(vma);
			list_add_tail(&vma->shared.vm_set.list,
					&mapping->i_mmap_nonlinear);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		/* ->populate can take a long time, so downgrade the lock. */
		downgrade_write(&mm->mmap_sem);
		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock. (Locks can't be upgraded).
		 */
		up_read(&mm->mmap_sem);
	} else
		up_write(&mm->mmap_sem);

	return err;
}
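
/*
 * Usage sketch (userspace, illustrative only): driving this syscall
 * through the remap_file_pages() libc wrapper. The file name and window
 * size are hypothetical; assumes "data.bin" is a regular file at least
 * two pages long. Note prot must be 0, pgoff is in units of pages, and
 * error checking is omitted for brevity.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		int fd = open("data.bin", O_RDWR);
 *		char *win = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, 0);
 *
 *		// Rewire the first virtual page of the window to file
 *		// page 1: the mapping is now nonlinear, yet no new vma
 *		// was created.
 *		remap_file_pages(win, psz, 0, 1, 0);
 *
 *		close(fd);
 *		return 0;
 *	}
 */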