/*
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/vs_base.h>
#include <linux/vs_memory.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page = NULL;

	if (pte_present(pte)) {
		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page, vma);
			page_cache_release(page);
		}
	} else {
		/* A non-present, non-file entry is a swap entry. */
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	return !!page;
}
/*
 * Install a file page to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	struct inode *inode;
	pgoff_t size;
	int err = -ENOMEM;
	pte_t *pte;
	pte_t pte_val;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	/*
	 * This page may have been truncated. Tell the
	 * caller about it.
	 */
	err = -EINVAL;
	inode = vma->vm_file->f_mapping->host;
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (!page->mapping || page->index >= size)
		goto unlock;

	err = -ENOMEM;
	if (page_mapcount(page) > INT_MAX/2)
		goto unlock;

	if (!vx_rsspages_avail(mm, 1))
		goto unlock;
	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
		inc_mm_counter(mm, file_rss);

	flush_icache_page(vma, page);
	pte_val = mk_pte(page, prot);
	set_pte_at(mm, addr, pte, pte_val);
	page_add_file_rmap(page);
	update_mmu_cache(vma, addr, pte_val);
	lazy_mmu_prot_update(pte_val);
	err = 0;
unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return err;
}
EXPORT_SYMBOL(install_page);
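
/*
 * Illustrative sketch (not part of this file): a ->populate
 * implementation drives install_page() roughly like this, modeled
 * loosely on filemap_populate().  my_find_or_read_page() is a
 * hypothetical stand-in for the page-cache lookup:
 *
 *	struct page *page = my_find_or_read_page(mapping, pgoff);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	err = install_page(mm, vma, addr, page, prot);
 *	if (err)
 *		page_cache_release(page);
 *	return err;
 *
 * On success the reference taken at lookup time is kept for the new
 * mapping; on failure the caller must drop it itself.
 */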
/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
		update_hiwater_rss(mm);
		dec_mm_counter(mm, file_rss);
	}

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}
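
/*
 * Illustrative sketch: the "file pte" written by install_file_pte() is
 * a software-only encoding of a file offset.  The per-architecture
 * pgoff_to_pte()/pte_to_pgoff() pair is expected to round-trip, and
 * pte_file() identifies such entries:
 *
 *	pte_t entry = pgoff_to_pte(pgoff);
 *
 *	BUG_ON(pte_present(entry));		- never valid to the MMU
 *	BUG_ON(!pte_file(entry));		- recognizable as a file pte
 *	BUG_ON(pte_to_pgoff(entry) != pgoff);	- offset recovered at fault time
 */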
/**
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * this syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;
	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);
	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
				end <= vma->vm_end) {
		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != linear_page_index(vma, start) &&
			!(vma->vm_flags & VM_NONLINEAR)) {
			if (!has_write_lock) {
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				has_write_lock = 1;
				goto retry;
			}
			mapping = vma->vm_file->f_mapping;
			spin_lock(&mapping->i_mmap_lock);
			flush_dcache_mmap_lock(mapping);
			vma->vm_flags |= VM_NONLINEAR;
			vma_prio_tree_remove(vma, &mapping->i_mmap);
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}
		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);
		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock. (Locks can't be upgraded).
		 */
	}
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
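
/*
 * Userspace usage sketch (illustrative; error handling omitted).  The
 * file is first mmap()ed MAP_SHARED, then a page of the window is
 * rebound to another file offset without creating a new vma.  Note
 * that prot must be 0 and pgoff is given in units of pages:
 *
 *	fd = open("/some/file", O_RDONLY);
 *	addr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	remap_file_pages(addr, page_size, 0, 3, 0);
 *
 * Afterwards the first page of the window shows file page 3 instead
 * of file page 0.
 */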