#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
+#include <linux/syscalls.h>
+#include <linux/vs_base.h>
+#include <linux/vs_memory.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
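+ /*
+ * Clear a (known non-none) pte at @addr.  Returns 1 if a normal
+ * page was unmapped, so the caller can balance the file_rss
+ * accounting; returns 0 for swap and file ptes.
+ */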
-static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
+ struct page *page = NULL;
- if (pte_none(pte))
- return;
if (pte_present(pte)) {
- unsigned long pfn = pte_pfn(pte);
-
- flush_cache_page(vma, addr);
+ flush_cache_page(vma, addr, pte_pfn(pte));
pte = ptep_clear_flush(vma, addr, ptep);
- if (pfn_valid(pfn)) {
- struct page *page = pfn_to_page(pfn);
- if (!PageReserved(page)) {
- if (pte_dirty(pte))
- set_page_dirty(page);
- page_remove_rmap(page);
- page_cache_release(page);
- mm->rss--;
- }
+ page = vm_normal_page(vma, addr, pte);
+ if (page) {
+ if (pte_dirty(pte))
+ set_page_dirty(page);
+ page_remove_rmap(page);
+ page_cache_release(page);
}
} else {
if (!pte_file(pte))
free_swap_and_cache(pte_to_swp_entry(pte));
- pte_clear(ptep);
+ pte_clear(mm, addr, ptep);
}
+ return !!page;
}
/*
* Install a file page to a given virtual memory address, release any
* previously existing mapping.
*/
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, struct page *page, pgprot_t prot)
{
+ struct inode *inode;
+ pgoff_t size;
int err = -ENOMEM;
pte_t *pte;
- pgd_t *pgd;
- pmd_t *pmd;
pte_t pte_val;
+ spinlock_t *ptl;
- pgd = pgd_offset(mm, addr);
- spin_lock(&mm->page_table_lock);
-
- pmd = pmd_alloc(mm, pgd, addr);
- if (!pmd)
- goto err_unlock;
-
- pte = pte_alloc_map(mm, pmd, addr);
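+ /* Allocate page tables as needed; returns the pte mapped and locked. */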
+ pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
- goto err_unlock;
+ goto out;
- zap_pte(mm, vma, addr, pte);
+ /*
+ * This page may have been truncated. Tell the
+ * caller about it.
+ */
+ err = -EINVAL;
+ inode = vma->vm_file->f_mapping->host;
+ size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (!page->mapping || page->index >= size)
+ goto unlock;
+ err = -ENOMEM;
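+ /* Keep the page's mapcount well away from overflow. */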
+ if (page_mapcount(page) > INT_MAX/2)
+ goto unlock;
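+ /* Linux-VServer: check that the context may account another RSS page. */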
+ if (!vx_rsspages_avail(mm, 1))
+ goto unlock;
+
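+ /* Charge a new file_rss page unless zap_pte just released one. */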
+ if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
+ inc_mm_counter(mm, file_rss);
- mm->rss++;
flush_icache_page(vma, page);
- set_pte(pte, mk_pte(page, prot));
+ set_pte_at(mm, addr, pte, mk_pte(page, prot));
page_add_file_rmap(page);
pte_val = *pte;
- pte_unmap(pte);
update_mmu_cache(vma, addr, pte_val);
-
err = 0;
-err_unlock:
- spin_unlock(&mm->page_table_lock);
+unlock:
+ pte_unmap_unlock(pte, ptl);
+out:
return err;
}
EXPORT_SYMBOL(install_page);
-
/*
* Install a file pte to a given virtual memory address, release any
* previously existing mapping.
*/
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
int err = -ENOMEM;
pte_t *pte;
- pgd_t *pgd;
- pmd_t *pmd;
pte_t pte_val;
+ spinlock_t *ptl;
- pgd = pgd_offset(mm, addr);
- spin_lock(&mm->page_table_lock);
-
- pmd = pmd_alloc(mm, pgd, addr);
- if (!pmd)
- goto err_unlock;
-
- pte = pte_alloc_map(mm, pmd, addr);
+ pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
- goto err_unlock;
+ goto out;
- zap_pte(mm, vma, addr, pte);
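+ /*
+ * zap_pte freed a normal page: note the RSS high-water mark
+ * and give back the file_rss charge that page held.
+ */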
+ if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
+ update_hiwater_rss(mm);
+ dec_mm_counter(mm, file_rss);
+ }
- set_pte(pte, pgoff_to_pte(pgoff));
+ set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
pte_val = *pte;
- pte_unmap(pte);
update_mmu_cache(vma, addr, pte_val);
- spin_unlock(&mm->page_table_lock);
- return 0;
-
-err_unlock:
- spin_unlock(&mm->page_table_lock);
+ pte_unmap_unlock(pte, ptl);
+ err = 0;
+out:
return err;
}
-
/***
* sys_remap_file_pages - remap arbitrary pages of a shared backing store
* file within an existing vma.
unsigned long end = start + size;
struct vm_area_struct *vma;
int err = -EINVAL;
+ int has_write_lock = 0;
if (__prot)
return err;
#endif
/* We need down_write() to change vma->vm_flags. */
- down_write(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
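+ /*
+ * Take mmap_sem for reading first; it is only upgraded below
+ * when VM_NONLINEAR still has to be set on the vma.
+ */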
+ retry:
vma = find_vma(mm, start);
/*
* Make sure the vma is shared, that it supports prefaulting,
* and that the remapped range is valid and fully within
* the single existing vma. vm_private_data is used as a
- * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
- * or VM_LOCKED, but VM_LOCKED could be revoked later on).
+ * swapout cursor in a VM_NONLINEAR vma.
*/
if (vma && (vma->vm_flags & VM_SHARED) &&
- (!vma->vm_private_data || (vma->vm_flags & VM_RESERVED)) &&
+ (!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
vma->vm_ops && vma->vm_ops->populate &&
end > start && start >= vma->vm_start &&
end <= vma->vm_end) {
/* Must set VM_NONLINEAR before any pages are populated. */
if (pgoff != linear_page_index(vma, start) &&
!(vma->vm_flags & VM_NONLINEAR)) {
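+ /*
+ * rw_semaphores can't be upgraded in place, so drop the
+ * read lock, take the write lock and revalidate the vma
+ * from scratch via the retry path.
+ */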
+ if (!has_write_lock) {
+ up_read(&mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ has_write_lock = 1;
+ goto retry;
+ }
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
flush_dcache_mmap_lock(mapping);
vma->vm_flags |= VM_NONLINEAR;
vma_prio_tree_remove(vma, &mapping->i_mmap);
- vma_prio_tree_init(vma);
- list_add_tail(&vma->shared.vm_set.list,
- &mapping->i_mmap_nonlinear);
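+ /* Move the vma from the prio tree over to the nonlinear list. */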
+ vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
flush_dcache_mmap_unlock(mapping);
spin_unlock(&mapping->i_mmap_lock);
}
- /* ->populate can take a long time, so downgrade the lock. */
- downgrade_write(&mm->mmap_sem);
err = vma->vm_ops->populate(vma, start, size,
vma->vm_page_prot,
pgoff, flags & MAP_NONBLOCK);
* it after ->populate completes, and that would prevent
* downgrading the lock. (Locks can't be upgraded).
*/
+ }
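+ /* Drop whichever mmap_sem mode the retry logic left us holding. */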
+ if (likely(!has_write_lock))
up_read(&mm->mmap_sem);
- } else {
+ else
up_write(&mm->mmap_sem);
- }
return err;
}