struct page *page;
unsigned long pfn;
+ if (!vx_rsspages_avail(dst, 1)) {
+ spin_unlock(&src->page_table_lock);
+ goto nomem;
+ }
/* copy_one_pte */
if (pte_none(pte))
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
get_page(page);
- dst->rss++;
+ // dst->rss++;
+ vx_rsspages_inc(dst);
set_pte(dst_pte, pte);
page_dup_rmap(page);
cont_copy_pte_range_noset:
return NULL;
}
+/*
+ * follow_page_pfn - translate a user virtual address in @mm to the
+ * struct page backing it, or to a raw pfn when no struct page exists.
+ *
+ * Walks the page tables (pgd -> pmd -> pte) by hand, handling huge
+ * mappings via follow_huge_addr()/follow_huge_pmd().
+ *
+ * @mm:      address space to walk
+ * @address: user virtual address to look up
+ * @write:   non-zero if the caller intends to write through the mapping;
+ *           a present but non-writable pte then fails the lookup
+ * @pfn_ptr: out-parameter; zeroed on entry, and set to the pfn when the
+ *           pte is present but pfn_valid() is false (e.g. a raw /dev/mem
+ *           style mapping with no struct page)
+ *
+ * Returns the struct page on success (after mark_page_accessed()), or
+ * NULL on failure OR when only a raw pfn is available — callers must
+ * check *pfn_ptr to distinguish the two NULL cases.
+ *
+ * NOTE(review): presumably the caller must hold mm->page_table_lock, as
+ * with follow_page() in this kernel — confirm at the call sites.
+ */
+struct page *
+follow_page_pfn(struct mm_struct *mm, unsigned long address, int write,
+		unsigned long *pfn_ptr)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+	unsigned long pfn;
+	struct page *page;
+
+	/* Default: no raw pfn to report. */
+	*pfn_ptr = 0;
+	/* hugetlbfs VMAs are handled entirely by the huge-page helper. */
+	page = follow_huge_addr(mm, address, write);
+	if (!IS_ERR(page))
+		return page;
+
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || pgd_bad(*pgd))
+		goto out;
+
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none(*pmd))
+		goto out;
+	/* Huge pmd: delegate; no pte level exists below it. */
+	if (pmd_huge(*pmd))
+		return follow_huge_pmd(mm, address, pmd, write);
+	if (pmd_bad(*pmd))
+		goto out;
+
+	ptep = pte_offset_map(pmd, address);
+	if (!ptep)
+		goto out;
+
+	/* Snapshot the pte, then drop the kmap before inspecting it. */
+	pte = *ptep;
+	pte_unmap(ptep);
+	if (pte_present(pte)) {
+		/* Writer needs a writable mapping; otherwise fail. */
+		if (write && !pte_write(pte))
+			goto out;
+		/* Writer will dirty the page behind the pte's back, so
+		 * make the struct page dirty up front. */
+		if (write && !pte_dirty(pte)) {
+			struct page *page = pte_page(pte);
+			if (!PageDirty(page))
+				set_page_dirty(page);
+		}
+		pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			struct page *page = pfn_to_page(pfn);
+
+			mark_page_accessed(page);
+			return page;
+		} else {
+			/* Present mapping with no struct page (raw pfn):
+			 * report it via *pfn_ptr and return NULL. */
+			*pfn_ptr = pfn;
+			return NULL;
+		}
+	}
+
+out:
+	return NULL;
+}
+
+
/*
* Given a physical address, is there a useful struct page pointing to
* it? This may become more complex in the future if we start dealing
page_table = pte_offset_map(pmd, address);
if (likely(pte_same(*page_table, pte))) {
if (PageReserved(old_page))
- ++mm->rss;
+ // ++mm->rss;
+ vx_rsspages_inc(mm);
else
page_remove_rmap(old_page);
break_cow(vma, new_page, address, page_table);
inc_page_state(pgmajfault);
}
+ if (!vx_rsspages_avail(mm, 1)) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
mark_page_accessed(page);
lock_page(page);
if (vm_swap_full())
remove_exclusive_swap_page(page);
- mm->rss++;
+ // mm->rss++;
+ vx_rsspages_inc(mm);
pte = mk_pte(page, vma->vm_page_prot);
if (write_access && can_share_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
pte_t entry;
struct page * page = ZERO_PAGE(addr);
+ if (!vx_rsspages_avail(mm, 1)) {
+ spin_unlock(&mm->page_table_lock);
+ return VM_FAULT_OOM;
+ }
+
/* Read-only mapping of ZERO_PAGE. */
entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
spin_unlock(&mm->page_table_lock);
goto out;
}
- mm->rss++;
+ // mm->rss++;
+ vx_rsspages_inc(mm);
entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
vma->vm_page_prot)),
vma);
return VM_FAULT_SIGBUS;
if (new_page == NOPAGE_OOM)
return VM_FAULT_OOM;
+ if (!vx_rsspages_avail(mm, 1))
+ return VM_FAULT_OOM;
/*
* Should we do an early C-O-W break?
/* Only go through if we didn't race with anybody else... */
if (pte_none(*page_table)) {
if (!PageReserved(new_page))
- ++mm->rss;
+ // ++mm->rss;
+ vx_rsspages_inc(mm);
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access)
* We need the page table lock to synchronize with kswapd
* and the SMP-safe atomic PTE updates.
*/
+ set_delay_flag(current,PF_MEMIO);
spin_lock(&mm->page_table_lock);
pmd = pmd_alloc(mm, pgd, address);
if (pmd) {
pte_t * pte = pte_alloc_map(mm, pmd, address);
- if (pte)
- return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+ if (pte) {
+ int rc = handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+ clear_delay_flag(current,PF_MEMIO);
+ return rc;
+ }
}
spin_unlock(&mm->page_table_lock);
+ clear_delay_flag(current,PF_MEMIO);
return VM_FAULT_OOM;
}