* Thanks to Ben LaHaise for precious feedback.
*/
+#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
if (!base)
return NULL;
- /*
- * page_private is used to track the number of entries in
- * the page table page that have non standard attributes.
- */
- SetPagePrivate(base);
- page_private(base) = 0;
-
address = __pa(address);
addr = address & LARGE_PAGE_MASK;
pbase = (pte_t *)page_address(base);
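
The dropped SetPagePrivate()/page_private() setup needs no replacement because the revert leans on the refcount the allocator already provides: a page fresh out of alloc_pages() carries page_count() == 1, and that baseline reference stands in for the explicit zeroed counter. A minimal sketch of the invariant being relied on (illustration only, not patch text):

	struct page *base = alloc_pages(GFP_KERNEL, 0);	/* order-0 pte page */

	if (base)
		BUG_ON(page_count(base) != 1);	/* fresh page: exactly one reference */
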
unsigned long flags;
set_pte_atomic(kpte, pte); /* change init_mm */
- if (HAVE_SHARED_KERNEL_PMD)
+ if (PTRS_PER_PMD > 1)
return;
spin_lock_irqsave(&pgd_lock, flags);
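
Reverting HAVE_SHARED_KERNEL_PMD to the older PTRS_PER_PMD > 1 test restores the original spelling of the same idea: under PAE (PTRS_PER_PMD > 1) the kernel PMD is shared by every pagetable, so updating init_mm above already covers them all; only with two-level paging must the new entry be copied into each pgd under pgd_lock. A sketch of the propagation loop this early return skips (assuming the 2.6-era pgd_list, which chains struct page via page->index; illustration only):

	struct page *pg;

	for (pg = pgd_list; pg; pg = (struct page *)pg->index) {
		pgd_t *pgd = (pgd_t *)page_address(pg) + pgd_index(address);
		pmd_t *pmd = pmd_offset(pud_offset(pgd, address), address);

		set_pte_atomic((pte_t *)pmd, pte);	/* mirror init_mm's entry */
	}
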
BUG_ON(PageHighMem(page));
address = (unsigned long)page_address(page);
- if (address >= (unsigned long)__start_rodata && address <= (unsigned long)__end_rodata &&
- (pgprot_val(prot) & _PAGE_RW)) {
- pgprot_val(prot) &= ~(_PAGE_RW);
- add_taint(TAINT_MACHINE_CHECK);
- }
-
kpte = lookup_address(address);
if (!kpte)
return -EINVAL;
return -ENOMEM;
set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
kpte_page = split;
- }
- page_private(kpte_page)++;
+ }
+ get_page(kpte_page);
} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
- BUG_ON(page_private(kpte_page) == 0);
- page_private(kpte_page)--;
+ __put_page(kpte_page);
} else
BUG();
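
The get_page()/__put_page() pair moves the bookkeeping onto the real page refcount: every pte in the split page that is given nonstandard protections takes a reference, and restoring a pte to PAGE_KERNEL drops one. __put_page() is a bare decrement with no free path, which is sufficient here because the allocation reference keeps the count at one or more for as long as the pte page is installed. The pairing, spelled out (illustration only, not patch text):

	get_page(kpte_page);			/* pte gains nonstandard protections */
	BUG_ON(page_count(kpte_page) < 2);	/* allocation ref + at least ours */
	__put_page(kpte_page);			/* pte restored to PAGE_KERNEL */
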
* replace it with a largepage.
*/
if (!PageReserved(kpte_page)) {
- if (cpu_has_pse && (page_private(kpte_page) == 0)) {
- ClearPagePrivate(kpte_page);
+ /* a zero count here means a memleak and a lost chance to regenerate the 2M page */
+ BUG_ON(!page_count(kpte_page));
+
+ if (cpu_has_pse && (page_count(kpte_page) == 1)) {
list_add(&kpte_page->lru, &df_list);
revert_page(kpte_page, address);
}
}
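
Taken together, page_count(kpte_page) == 1 becomes the signal that every pte in the split page is back to standard protections: only the allocator's baseline reference remains, so the 4K table can be queued on df_list and the 2M mapping regenerated. The BUG_ON above catches an underflow, which would both leak the page and silently forfeit that regeneration. A self-contained userspace model of the accounting (hypothetical names, illustration only):

	#include <assert.h>
	#include <stdio.h>

	static int count = 1;	/* page_count() right after alloc_pages() */

	static void make_pte_nonstandard(void) { count++; }	/* models get_page() */
	static void restore_pte(void) { count--; }		/* models __put_page() */

	int main(void)
	{
		make_pte_nonstandard();
		make_pte_nonstandard();
		restore_pte();
		restore_pte();
		assert(count > 0);	/* models BUG_ON(!page_count(kpte_page)) */
		if (count == 1)
			printf("all ptes standard again: revert to 2M page\n");
		return 0;
	}
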
void global_flush_tlb(void)
-{
- struct list_head l;
+{
+ LIST_HEAD(l);
struct page *pg, *next;
BUG_ON(irqs_disabled());
spin_lock_irq(&cpa_lock);
- list_replace_init(&df_list, &l);
+ list_splice_init(&df_list, &l);
spin_unlock_irq(&cpa_lock);
flush_map();
list_for_each_entry_safe(pg, next, &l, lru)
__free_page(pg);
-}
+}
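
Going back from list_replace_init() to list_splice_init() only works because the declaration changes too: list_replace_init() always writes the destination's links, so the newer code could get away with a bare struct list_head l, whereas list_splice_init() leaves the destination completely untouched when the source list is empty. With an empty df_list, an uninitialized l would then be walked as garbage by list_for_each_entry_safe(), hence LIST_HEAD(l). A sketch of what the initializer buys (paraphrased semantics, illustration only):

	struct list_head l = { .next = &l, .prev = &l };	/* what LIST_HEAD(l) expands to */

	/* list_splice_init() is roughly: */
	if (!list_empty(&df_list)) {
		__list_splice(&df_list, &l);	/* rewrites l's links */
		INIT_LIST_HEAD(&df_list);	/* df_list becomes empty */
	}
	/* if df_list was empty, l is untouched and must already be a valid empty list */
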
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
if (PageHighMem(page))
return;
if (!enable)
- debug_check_no_locks_freed(page_address(page),
- numpages * PAGE_SIZE);
+ mutex_debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
/* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.