page = pmd_page(*dir);
pmd_clear(dir);
dec_page_state(nr_page_table_pages);
- tlb->mm->nr_ptes--;
pte_free_tlb(tlb, page);
}
spin_lock(&mm->page_table_lock);
if (!new)
return NULL;
+
/*
* Because we dropped the lock, we should re-check the
 * entry, as somebody else could have populated it..
 */
if (pmd_present(*pmd)) {
pte_free(new);
goto out;
}
- mm->nr_ptes++;
inc_page_state(nr_page_table_pages);
pmd_populate(mm, pmd, new);
}
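The hunk above is the tail of pte_alloc_map(): the allocation can sleep, so page_table_lock is dropped around it and the pmd must be re-tested afterwards. In outline (a sketch of the pattern, not the verbatim function):

	spin_unlock(&mm->page_table_lock);
	new = pte_alloc_one(mm, address);	/* may sleep */
	spin_lock(&mm->page_table_lock);
	if (!new)
		return NULL;
	if (pmd_present(*pmd)) {		/* raced: someone else populated it */
		pte_free(new);
		goto out;
	}
	pmd_populate(mm, pmd, new);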
goto cont_copy_pte_range_noset;
/* pte contains position in swap, so copy. */
if (!pte_present(pte)) {
- if (!pte_file(pte)) {
+ if (!pte_file(pte))
swap_duplicate(pte_to_swp_entry(pte));
- if (list_empty(&dst->mmlist)) {
- spin_lock(&mmlist_lock);
- list_add(&dst->mmlist,
- &src->mmlist);
- spin_unlock(&mmlist_lock);
- }
- }
set_pte(dst_pte, pte);
goto cont_copy_pte_range_noset;
}
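A non-present, non-file pte encodes a position in swap rather than a page frame, so copying it just takes another reference on the swap slot. Roughly what the retained call does:

	swp_entry_t entry = pte_to_swp_entry(pte);	/* decode swap type + offset */
	swap_duplicate(entry);				/* dst mm now references the slot too */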
tlb_end_vma(tlb, vma);
}
+#ifdef CONFIG_PREEMPT_VOLUNTARY
+# define ZAP_BLOCK_SIZE (128 * PAGE_SIZE)
+#else
+
/* Dispose of an entire struct mmu_gather per rescheduling point */
#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
#define ZAP_BLOCK_SIZE (FREE_PTE_NR * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
#define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
#endif
+#endif
+
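With the new CONFIG_PREEMPT_VOLUNTARY case, the unmap granularity per configuration works out to (a summary of the defines above; FREE_PTE_NR is the mmu_gather batch size):

/* CONFIG_PREEMPT_VOLUNTARY:       128 pages between reschedule points
 * CONFIG_SMP && CONFIG_PREEMPT:   FREE_PTE_NR pages (one mmu_gather batch)
 * otherwise:                      1024 pages (straight-line efficiency)
 */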
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
* @tlbp: address of the caller's struct mmu_gather
start += block;
zap_bytes -= block;
- if ((long)zap_bytes > 0)
- continue;
if (!atomic && need_resched()) {
int fullmm = tlb_is_full_mm(*tlbp);
tlb_finish_mmu(*tlbp, tlb_start, start);
*tlbp = tlb_gather_mmu(mm, fullmm);
tlb_start_valid = 0;
}
+ if ((long)zap_bytes > 0)
+ continue;
zap_bytes = ZAP_BLOCK_SIZE;
}
}
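For context, callers bracket unmap_vmas() with an mmu_gather; a sketch modelled on zap_page_range() (hugetlb and accounting details elided):

	struct mmu_gather *tlb;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);			/* start gathering pages */
	unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
	tlb_finish_mmu(tlb, address, end);		/* flush TLB, free gathered pages */
	spin_unlock(&mm->page_table_lock);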
* in null mappings (currently treated as "copy-on-access")
*/
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long pfn, pgprot_t prot)
+ unsigned long phys_addr, pgprot_t prot)
{
unsigned long end;
+ unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
+ pfn = phys_addr >> PAGE_SHIFT;
do {
BUG_ON(!pte_none(*pte));
if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
	set_pte(pte, pfn_pte(pfn, prot));
address += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long pfn, pgprot_t prot)
+ unsigned long phys_addr, pgprot_t prot)
{
unsigned long base, end;
base = address & PGDIR_MASK;
address &= ~PGDIR_MASK;
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
- pfn -= address >> PAGE_SHIFT;
+ phys_addr -= address;
do {
pte_t * pte = pte_alloc_map(mm, pmd, base + address);
if (!pte)
return -ENOMEM;
- remap_pte_range(pte, base + address, end - address, pfn + (address >> PAGE_SHIFT), prot);
+ remap_pte_range(pte, base + address, end - address, address + phys_addr, prot);
pte_unmap(pte);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address && (address < end));
return 0;
}
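The offset arithmetic is easy to misread: remap_page_range() below subtracts the virtual start from phys_addr exactly once, after which address + phys_addr recovers the physical address backing address at every level. A worked example with made-up numbers:

/* from = 0x40000000 (virtual), phys_addr = 0x80000000 (physical)
 * after 'phys_addr -= from':   phys_addr = 0x40000000
 * for address = 0x40042000:    address + phys_addr = 0x80042000,
 * i.e. the physical page at the same offset into the mapping.
 */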
/* Note: this is only safe if the mm semaphore is held when called. */
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot)
+int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
int error = 0;
pgd_t * dir;
unsigned long beg = from;
unsigned long end = from + size;
struct mm_struct *mm = vma->vm_mm;
- pfn -= from >> PAGE_SHIFT;
+ phys_addr -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
if (from >= end)
	BUG();
* this region.
*/
vma->vm_flags |= VM_IO | VM_RESERVED;
+
spin_lock(&mm->page_table_lock);
do {
pmd_t *pmd = pmd_alloc(mm, dir, from);
error = -ENOMEM;
if (!pmd)
break;
- error = remap_pmd_range(mm, pmd, from, end - from, pfn + (from >> PAGE_SHIFT), prot);
+ error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
if (error)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (from && (from < end));
spin_unlock(&mm->page_table_lock);
return error;
}
-EXPORT_SYMBOL(remap_pfn_range);
+
+EXPORT_SYMBOL(remap_page_range);
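A typical user of this export is a driver's mmap method, which runs with mmap_sem held and so satisfies the locking note above. A hypothetical sketch (mydev_mmap and MYDEV_PHYS_BASE are made up):

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (remap_page_range(vma, vma->vm_start, MYDEV_PHYS_BASE,
			     size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}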
/*
* Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
goto out_truncate;
do_expand:
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
if (limit != RLIM_INFINITY && offset > limit)
goto out_sig;
if (offset > inode->i_sb->s_maxbytes)
	goto out_big;
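Note the ordering: the per-process RLIMIT_FSIZE check comes before the filesystem's s_maxbytes check, and exceeding the rlimit lands at out_sig, which raises SIGXFSZ. For example:

/* rlim_cur = 1 MiB, offset = 2 MiB:
 *   limit != RLIM_INFINITY && offset > limit   ->   goto out_sig (SIGXFSZ)
 */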
*/
/* Only go through if we didn't race with anybody else... */
if (pte_none(*page_table)) {
- if (!PageReserved(new_page))
- // ++mm->rss;
- vx_rsspages_inc(mm);
+ if (!PageReserved(new_page))
+ //++mm->rss;
+ vx_rsspages_inc(mm);
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access)
	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
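A descriptive note on the surrounding check: this is the same drop-lock/re-check discipline as in pte_alloc_map() above.

/* The new page was faulted in without page_table_lock held, so it is
 * only installed if the pte is still none; a racing fault means the
 * page must be released instead (that branch is elided from the hunk).
 */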
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, addr);
if (!pmd_none(*pmd)) {
+ preempt_disable();
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
if (pte_present(pte))
page = pte_page(pte);
pte_unmap(ptep);
+ preempt_enable();
}
}
return page;
EXPORT_SYMBOL(vmalloc_to_page);
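The preempt_disable()/preempt_enable() pair added above pins the task across the pte_offset_map()/pte_unmap() window (with CONFIG_HIGHPTE that mapping is an atomic kmap). Typical use of the export, as a sketch:

	/* e.g. find the struct page backing one page of a vmalloc'ed buffer */
	struct page *pg = vmalloc_to_page(vbuf + offset);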
-/*
- * Map a vmalloc()-space virtual address to the physical page frame number.
- */
-unsigned long vmalloc_to_pfn(void * vmalloc_addr)
-{
- return page_to_pfn(vmalloc_to_page(vmalloc_addr));
-}
-
-EXPORT_SYMBOL(vmalloc_to_pfn);
-
#if !defined(CONFIG_ARCH_GATE_AREA)
#if defined(AT_SYSINFO_EHDR)