page = pmd_page(*dir);
pmd_clear(dir);
dec_page_state(nr_page_table_pages);
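+ /* keep the new per-mm count of page table pages in step */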
+ tlb->mm->nr_ptes--;
pte_free_tlb(tlb, page);
}
spin_lock(&mm->page_table_lock);
if (!new)
return NULL;
-
/*
* Because we dropped the lock, we should re-check the
* entry, as somebody else could have populated it..
*/
if (pmd_present(*pmd)) {
pte_free(new);
goto out;
}
+ mm->nr_ptes++;
inc_page_state(nr_page_table_pages);
pmd_populate(mm, pmd, new);
}
struct page *page;
unsigned long pfn;
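+ /*
+  * Linux-VServer accounting hook: bail out early if the
+  * destination context has no RSS pages left to account.
+  */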
+ if (!vx_rsspages_avail(dst, 1)) {
+ spin_unlock(&src->page_table_lock);
+ goto nomem;
+ }
/* copy_one_pte */
if (pte_none(pte))
goto cont_copy_pte_range_noset;
/* pte contains position in swap, so copy. */
if (!pte_present(pte)) {
- if (!pte_file(pte))
+ if (!pte_file(pte)) {
swap_duplicate(pte_to_swp_entry(pte));
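+ /*
+  * Make sure dst is on the mmlist: swapoff walks that
+  * list, so a forked mm holding swap ptes must be
+  * visible on it.
+  */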
+ if (list_empty(&dst->mmlist)) {
+ spin_lock(&mmlist_lock);
+ list_add(&dst->mmlist,
+ &src->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ }
set_pte(dst_pte, pte);
goto cont_copy_pte_range_noset;
}
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
get_page(page);
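+ /*
+  * RSS accounting: vx_rsspages_inc() wraps the old
+  * dst->rss++, and anonymous pages are now counted
+  * separately in anon_rss.
+  */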
- dst->rss++;
+ // dst->rss++;
+ vx_rsspages_inc(dst);
+ if (PageAnon(page))
+ dst->anon_rss++;
set_pte(dst_pte, pte);
page_dup_rmap(page);
cont_copy_pte_range_noset:
set_pte(ptep, pgoff_to_pte(page->index));
if (pte_dirty(pte))
set_page_dirty(page);
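+ /* dropping an anonymous page: keep anon_rss in step */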
- if (pte_young(pte) && !PageAnon(page))
+ if (PageAnon(page))
+ tlb->mm->anon_rss--;
+ else if (pte_young(pte))
mark_page_accessed(page);
tlb->freed++;
page_remove_rmap(page);
pte_t *pte;
if (write) /* user gate pages are read-only */
return i ? : -EFAULT;
- pgd = pgd_offset_gate(mm, pg);
- if (!pgd)
- return i ? : -EFAULT;
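+ /*
+  * The gate area is always mapped, so a missing entry at
+  * any level is a kernel bug rather than a user error:
+  * BUG_ON() replaces the old -EFAULT returns.
+  */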
+ if (pg > TASK_SIZE)
+ pgd = pgd_offset_k(pg);
+ else
+ pgd = pgd_offset_gate(mm, pg);
+ BUG_ON(pgd_none(*pgd));
pmd = pmd_offset(pgd, pg);
- if (!pmd)
- return i ? : -EFAULT;
+ BUG_ON(pmd_none(*pmd));
pte = pte_offset_map(pmd, pg);
- if (!pte)
- return i ? : -EFAULT;
- if (!pte_present(*pte)) {
- pte_unmap(pte);
- return i ? : -EFAULT;
- }
+ BUG_ON(pte_none(*pte));
if (pages) {
pages[i] = pte_page(*pte);
get_page(pages[i]);
continue;
}
- if (!vma || (pages && (vma->vm_flags & VM_IO))
+ if (!vma || (vma->vm_flags & VM_IO)
|| !(flags & vma->vm_flags))
return i ? : -EFAULT;
* in null mappings (currently treated as "copy-on-access")
*/
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, pgprot_t prot)
+ unsigned long pfn, pgprot_t prot)
{
unsigned long end;
- unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
- pfn = phys_addr >> PAGE_SHIFT;
do {
BUG_ON(!pte_none(*pte));
if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
set_pte(pte, pfn_pte(pfn, prot));
address += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, pgprot_t prot)
+ unsigned long pfn, pgprot_t prot)
{
unsigned long base, end;
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
- phys_addr -= address;
+ pfn -= address >> PAGE_SHIFT;
do {
pte_t * pte = pte_alloc_map(mm, pmd, base + address);
if (!pte)
return -ENOMEM;
- remap_pte_range(pte, base + address, end - address, address + phys_addr, prot);
+ remap_pte_range(pte, base + address, end - address, pfn + (address >> PAGE_SHIFT), prot);
pte_unmap(pte);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
}
/* Note: this is only safe if the mm semaphore is held when called. */
-int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot)
{
int error = 0;
pgd_t * dir;
unsigned long beg = from;
unsigned long end = from + size;
struct mm_struct *mm = vma->vm_mm;
- phys_addr -= from;
+ pfn -= from >> PAGE_SHIFT;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
if (from >= end)
BUG();
+ /*
+ * Physically remapped pages are special. Tell the
+ * rest of the world about it:
+ * VM_IO tells people not to look at these pages
+ * (accesses can have side effects).
+ * VM_RESERVED tells swapout not to try to touch
+ * this region.
+ */
+ vma->vm_flags |= VM_IO | VM_RESERVED;
spin_lock(&mm->page_table_lock);
do {
pmd_t *pmd = pmd_alloc(mm, dir, from);
error = -ENOMEM;
if (!pmd)
break;
- error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
+ error = remap_pmd_range(mm, pmd, from, end - from, pfn + (from >> PAGE_SHIFT), prot);
if (error)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
spin_unlock(&mm->page_table_lock);
return error;
}
-
-EXPORT_SYMBOL(remap_page_range);
+EXPORT_SYMBOL(remap_pfn_range);
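+ /*
+  * Example (sketch, not part of this patch): a driver ->mmap()
+  * that used to call remap_page_range(vma, start, phys, size,
+  * prot) now passes a page frame number instead, where "phys"
+  * is the driver's physical base address:
+  *
+  *	remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
+  *			vma->vm_end - vma->vm_start,
+  *			vma->vm_page_prot);
+  */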
/*
* Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
spin_lock(&mm->page_table_lock);
page_table = pte_offset_map(pmd, address);
if (likely(pte_same(*page_table, pte))) {
+ if (PageAnon(old_page))
+ mm->anon_rss--;
if (PageReserved(old_page))
- ++mm->rss;
+ // ++mm->rss;
+ vx_rsspages_inc(mm);
else
page_remove_rmap(old_page);
break_cow(vma, new_page, address, page_table);
static inline void unmap_mapping_range_list(struct prio_tree_root *root,
struct zap_details *details)
{
- struct vm_area_struct *vma = NULL;
+ struct vm_area_struct *vma;
struct prio_tree_iter iter;
pgoff_t vba, vea, zba, zea;
- while ((vma = vma_prio_tree_next(vma, root, &iter,
- details->first_index, details->last_index)) != NULL) {
+ vma_prio_tree_foreach(vma, &iter, root,
+ details->first_index, details->last_index) {
vba = vma->vm_pgoff;
vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
goto out_truncate;
do_expand:
- limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
if (limit != RLIM_INFINITY && offset > limit)
goto out_sig;
if (offset > inode->i_sb->s_maxbytes)
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
inc_page_state(pgmajfault);
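+ /* a major fault makes this task a candidate for the swap token */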
+ grab_swap_token();
}
+ if (!vx_rsspages_avail(mm, 1)) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
mark_page_accessed(page);
lock_page(page);
if (vm_swap_full())
remove_exclusive_swap_page(page);
- mm->rss++;
+ // mm->rss++;
+ vx_rsspages_inc(mm);
pte = mk_pte(page, vma->vm_page_prot);
if (write_access && can_share_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
if (unlikely(anon_vma_prepare(vma)))
goto no_mem;
+ if (!vx_rsspages_avail(mm, 1))
+ goto no_mem;
+
page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
if (!page)
goto no_mem;
spin_unlock(&mm->page_table_lock);
goto out;
}
- mm->rss++;
+ // mm->rss++;
+ vx_rsspages_inc(mm);
entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
vma->vm_page_prot)),
vma);
return VM_FAULT_SIGBUS;
if (new_page == NOPAGE_OOM)
return VM_FAULT_OOM;
+ if (!vx_rsspages_avail(mm, 1))
+ return VM_FAULT_OOM;
/*
* Should we do an early C-O-W break?
*/
/* Only go through if we didn't race with anybody else... */
if (pte_none(*page_table)) {
- if (!PageReserved(new_page))
- ++mm->rss;
+ if (!PageReserved(new_page))
+ //++mm->rss;
+ vx_rsspages_inc(mm);
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access)
* We need the page table lock to synchronize with kswapd
* and the SMP-safe atomic PTE updates.
*/
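+ /* delay-accounting hook: note that this task may block on memory I/O */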
+ set_delay_flag(current, PF_MEMIO);
spin_lock(&mm->page_table_lock);
pmd = pmd_alloc(mm, pgd, address);
if (pmd) {
pte_t * pte = pte_alloc_map(mm, pmd, address);
- if (pte)
- return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+ if (pte) {
+ int rc = handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+ clear_delay_flag(current, PF_MEMIO);
+ return rc;
+ }
}
spin_unlock(&mm->page_table_lock);
+ clear_delay_flag(current, PF_MEMIO);
return VM_FAULT_OOM;
}
struct vm_area_struct * vma;
vma = find_vma(current->mm, addr);
+ if (!vma)
+ return -1;
write = (vma->vm_flags & VM_WRITE) != 0;
if (addr >= end)
BUG();
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, addr);
if (!pmd_none(*pmd)) {
- preempt_disable();
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
if (pte_present(pte))
page = pte_page(pte);
pte_unmap(ptep);
- preempt_enable();
}
}
return page;
EXPORT_SYMBOL(vmalloc_to_page);
+/*
+ * Map a vmalloc()-space virtual address to the physical page frame number.
+ */
+unsigned long vmalloc_to_pfn(void * vmalloc_addr)
+{
+ return page_to_pfn(vmalloc_to_page(vmalloc_addr));
+}
+
+EXPORT_SYMBOL(vmalloc_to_pfn);
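+ /*
+  * Example (sketch): translate a vmalloc()ed buffer address to a
+  * page frame number ("kbuf" is a hypothetical driver pointer):
+  *
+  *	unsigned long pfn = vmalloc_to_pfn(kbuf);
+  */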
+
#if !defined(CONFIG_ARCH_GATE_AREA)
#if defined(AT_SYSINFO_EHDR)