static struct list_head df_list = LIST_HEAD_INIT(df_list);
-static inline pte_t *lookup_address(unsigned long address)
+pte_t *lookup_address(unsigned long address)
{
pgd_t *pgd = pgd_offset_k(address);
pmd_t *pmd;
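This first hunk only drops the static inline qualifier, giving lookup_address() an externally visible definition that other kernel code can call; the remainder of the function body falls outside the diff context. A minimal sketch of an outside caller (the caller itself is illustrative, not part of the patch):

	/* illustrative only: resolve a kernel virtual address to its
	 * pte and test whether it is currently mapped */
	static int addr_is_mapped(void *addr)
	{
		pte_t *pte = lookup_address((unsigned long)addr);

		return pte && pte_present(*pte);
	}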
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
- struct page *page;
- unsigned long flags;
-
set_pte_atomic(kpte, pte); /* change init_mm */
- if (PTRS_PER_PMD > 1)
- return;
-
- spin_lock_irqsave(&pgd_lock, flags);
- for (page = pgd_list; page; page = (struct page *)page->index) {
- pgd_t *pgd;
- pmd_t *pmd;
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
- pmd = pmd_offset(pgd, address);
- set_pte_atomic((pte_t *)pmd, pte);
+#ifndef CONFIG_X86_PAE
+ {
+ struct list_head *l;
+ if (TASK_SIZE > PAGE_OFFSET)
+ return;
+ spin_lock(&mmlist_lock);
+ list_for_each(l, &init_mm.mmlist) {
+ struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist);
+ pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);
+ set_pte_atomic((pte_t *)pmd, pte);
+ }
+ spin_unlock(&mmlist_lock);
}
- spin_unlock_irqrestore(&pgd_lock, flags);
+#endif
}
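This hunk changes how an updated kernel pmd entry is propagated on two-level (non-PAE) pagetables, where every process page directory carries its own copy of the kernel mappings: instead of walking all page directories through the pgd_list chain (linked via page->index) under pgd_lock with interrupts off, the new code walks the mm_structs on init_mm.mmlist under mmlist_lock and recomputes each mm's pmd slot, bailing out early when TASK_SIZE exceeds PAGE_OFFSET (presumably a layout where user page directories do not mirror the kernel area). Under CONFIG_X86_PAE the kernel pmd page is shared by all page directories, so updating init_mm alone is enough and the whole block compiles away. Annotated, the per-mm update at the heart of the loop reads (comments are mine, assuming the folded two-level layout):

	/* on two-level paging the pmd is folded into the pgd, so the
	 * pmd slot has pte layout and can be rewritten atomically */
	pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);

	set_pte_atomic((pte_t *)pmd, pte);	/* mirror init_mm's entry */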
pte_t standard = mk_pte(page, PAGE_KERNEL);
set_pte_atomic(kpte, mk_pte(page, prot));
if (pte_same(old,standard))
- atomic_inc(&kpte_page->count);
+ get_page(kpte_page);
} else {
struct page *split = split_large_page(address, prot);
if (!split)
return -ENOMEM;
- atomic_inc(&kpte_page->count);
+ get_page(kpte_page);
set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
}
} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
- atomic_dec(&kpte_page->count);
+ __put_page(kpte_page);
}
- if (cpu_has_pse && (atomic_read(&kpte_page->count) == 1)) {
+ if (cpu_has_pse && (page_count(kpte_page) == 1)) {
list_add(&kpte_page->lru, &df_list);
revert_page(kpte_page, address);
}
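The remaining hunks are a mechanical conversion from open-coded arithmetic on page->count to the refcounting helpers: get_page() for atomic_inc(), __put_page() for the bare atomic_dec() (the variant without put_page()'s free-on-zero path, safe here because the caller knows another reference remains), and page_count() for atomic_read(). The logic is unchanged: each pte in the split 2/4MB region carrying a non-default protection pins the pte page once, so a count of exactly 1 means no special protections remain, and the page is queued on df_list (declared at the top of this excerpt) for deferred freeing after the global TLB flush while revert_page() re-establishes the large mapping. The equivalences, as a sketch (names and framing are mine, not from the patch):

	/* refcount helper equivalences used by these hunks */
	static void refcount_helpers(struct page *page)
	{
		get_page(page);			/* atomic_inc(&page->count)  */
		__put_page(page);		/* atomic_dec(&page->count);
						 * no free-on-zero check, so
						 * another reference must be
						 * known to exist	     */
		if (page_count(page) == 1)	/* atomic_read(&page->count) */
			;	/* only the base mapping remains */
	}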