* Thanks to Ben LaHaise for precious feedback.
*/
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
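+
+/*
+ * cpa_lock serializes attribute changes; df_list collects pte pages
+ * made redundant when a 2M/4M mapping is restored, so that
+ * global_flush_tlb() can free them once every CPU has flushed.
+ */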
-static spinlock_t cpa_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);
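+
+/*
+ * Walk init_mm's page tables and return the pte mapping @address,
+ * the pmd itself for a large-page mapping, or NULL when the address
+ * is not mapped at all.
+ */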
pte_t *lookup_address(unsigned long address)
{
pgd_t *pgd = pgd_offset_k(address);
+ pud_t *pud;
pmd_t *pmd;
if (pgd_none(*pgd))
return NULL;
- pmd = pmd_offset(pgd, address);
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud))
+ return NULL;
+ pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
return NULL;
if (pmd_large(*pmd))
return (pte_t *)pmd;
return pte_offset_kernel(pmd, address);
}
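+
+/*
+ * Allocate a pte page that replaces one large kernel mapping: every
+ * slot is filled with ref_prot, except the single pte covering
+ * @address, which gets @prot. The caller installs and accounts it.
+ */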
-static struct page *split_large_page(unsigned long address, pgprot_t prot)
+static struct page *split_large_page(unsigned long address, pgprot_t prot,
+ pgprot_t ref_prot)
{
int i;
unsigned long addr;
struct page *base;
pte_t *pbase;

spin_unlock_irq(&cpa_lock);
base = alloc_pages(GFP_KERNEL, 0);
spin_lock_irq(&cpa_lock);
if (!base)
return NULL;
+ /*
+ * page_private is used to track the number of entries in
+ * the page table page that have non-standard attributes.
+ */
+ SetPagePrivate(base);
+ page_private(base) = 0;
+
address = __pa(address);
addr = address & LARGE_PAGE_MASK;
pbase = (pte_t *)page_address(base);
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
- pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
- addr == address ? prot : PAGE_KERNEL);
+ set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
+ addr == address ? prot : ref_prot));
}
return base;
}
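+
+/*
+ * Runs on every CPU via on_each_cpu(). Given a page address on a CPU
+ * with CLFLUSH support, the cache flush is limited to that page;
+ * otherwise fall back to a full WBINVD.
+ */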
-static void flush_kernel_map(void *dummy)
+static void flush_kernel_map(void *arg)
{
- /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
- if (boot_cpu_data.x86_model >= 4)
- asm volatile("wbinvd":::"memory");
+ unsigned long adr = (unsigned long)arg;
+
+ if (adr && cpu_has_clflush) {
+ int i;
+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ asm volatile("clflush (%0)" :: "r" (adr + i));
+ } else if (boot_cpu_data.x86_model >= 4)
+ wbinvd();
+
/* Flush all to work around errata in early Athlons regarding
* large page flushing.
*/
__flush_tlb_all();
}
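+
+/*
+ * Update init_mm and, when kernel pmds are not shared across pgds,
+ * propagate the new pmd entry to every pgd on pgd_list.
+ */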
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
+ struct page *page;
+ unsigned long flags;
+
set_pte_atomic(kpte, pte); /* change init_mm */
-#ifndef CONFIG_X86_PAE
- {
- struct list_head *l;
- if (TASK_SIZE > PAGE_OFFSET)
- return;
- spin_lock(&mmlist_lock);
- list_for_each(l, &init_mm.mmlist) {
- struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist);
- pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);
- set_pte_atomic((pte_t *)pmd, pte);
- }
- spin_unlock(&mmlist_lock);
+ if (HAVE_SHARED_KERNEL_PMD)
+ return;
+
+ spin_lock_irqsave(&pgd_lock, flags);
+ for (page = pgd_list; page; page = (struct page *)page->index) {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ set_pte_atomic((pte_t *)pmd, pte);
}
-#endif
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
* No more special protections in this 2/4MB area - revert to a
* large page again.
*/
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
- pte_t *linear = (pte_t *)
- pmd_offset(pgd_offset(&init_mm, address), address);
+ pgprot_t ref_prot;
+ pte_t *linear;
+
+ ref_prot =
+ ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+ ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
+
+ linear = (pte_t *)
+ pmd_offset(pud_offset(pgd_offset_k(address), address), address);
set_pmd_pte(linear, address,
pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
- PAGE_KERNEL_LARGE));
+ ref_prot));
}
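+
+/*
+ * Apply the new protection to one page of the kernel linear map,
+ * splitting a large page if necessary and using page_private() to
+ * count non-default entries, so a fully reverted pte page can be
+ * folded back into a large page and freed.
+ */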
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
pte_t *kpte;
unsigned long address;
struct page *kpte_page;
-#ifdef CONFIG_HIGHMEM
- if (page >= highmem_start_page)
- BUG();
-#endif
+ BUG_ON(PageHighMem(page));
address = (unsigned long)page_address(page);
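+
+ /*
+ * Never make the kernel's rodata section writable through
+ * change_page_attr(): strip _PAGE_RW and taint instead.
+ */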
+ if (address >= (unsigned long)__start_rodata && address <= (unsigned long)__end_rodata &&
+ (pgprot_val(prot) & _PAGE_RW)) {
+ pgprot_val(prot) &= ~(_PAGE_RW);
+ add_taint(TAINT_MACHINE_CHECK);
+ }
+
kpte = lookup_address(address);
if (!kpte)
return -EINVAL;
- kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
+ kpte_page = virt_to_page(kpte);
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
- pte_t old = *kpte;
- pte_t standard = mk_pte(page, PAGE_KERNEL);
set_pte_atomic(kpte, mk_pte(page, prot));
- if (pte_same(old,standard))
- get_page(kpte_page);
} else {
- struct page *split = split_large_page(address, prot);
+ pgprot_t ref_prot;
+ struct page *split;
+
+ ref_prot =
+ ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+ ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
+ split = split_large_page(address, prot, ref_prot);
if (!split)
return -ENOMEM;
- get_page(kpte_page);
- set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
- }
+ set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
+ kpte_page = split;
+ }
+ page_private(kpte_page)++;
} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
- __put_page(kpte_page);
- }
+ BUG_ON(page_private(kpte_page) == 0);
+ page_private(kpte_page)--;
+ } else
+ BUG();
- if (cpu_has_pse && (page_count(kpte_page) == 1)) {
- list_add(&kpte_page->lru, &df_list);
- revert_page(kpte_page, address);
- }
+ /*
+ * If the pte was reserved, it means it was created at boot
+ * time (not via split_large_page) and in turn we must not
+ * replace it with a largepage.
+ */
+ if (!PageReserved(kpte_page)) {
+ if (cpu_has_pse && (page_private(kpte_page) == 0)) {
+ ClearPagePrivate(kpte_page);
+ list_add(&kpte_page->lru, &df_list);
+ revert_page(kpte_page, address);
+ }
+ }
return 0;
}
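+
+/*
+ * IPI every CPU: a non-NULL address asks flush_kernel_map() for a
+ * targeted CLFLUSH, NULL forces the full flush.
+ */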
-static inline void flush_map(void)
+static inline void flush_map(void *adr)
{
- on_each_cpu(flush_kernel_map, NULL, 1, 1);
+ on_each_cpu(flush_kernel_map, adr, 1, 1);
}
/*
* Change the page attributes of a page in the kernel linear mapping.
*
* The caller needs to ensure that there are no conflicting mappings
* elsewhere and must call global_flush_tlb() afterwards.
*/
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
int err = 0;
int i;
unsigned long flags;

spin_lock_irqsave(&cpa_lock, flags);
for (i = 0; i < numpages; i++, page++) {
err = __change_page_attr(page, prot);
if (err)
break;
}
spin_unlock_irqrestore(&cpa_lock, flags);
return err;
}
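+
+/*
+ * Flush caches/TLBs after a batch of attribute changes and free the
+ * pte pages queued on df_list. With CLFLUSH each freed page is
+ * flushed individually instead of issuing a global WBINVD.
+ */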
void global_flush_tlb(void)
{
- LIST_HEAD(l);
- struct list_head* n;
+ struct list_head l;
+ struct page *pg, *next;
BUG_ON(irqs_disabled());
spin_lock_irq(&cpa_lock);
- list_splice_init(&df_list, &l);
+ list_replace_init(&df_list, &l);
spin_unlock_irq(&cpa_lock);
- flush_map();
- n = l.next;
- while (n != &l) {
- struct page *pg = list_entry(n, struct page, lru);
- n = n->next;
+ if (!cpu_has_clflush)
+ flush_map(NULL);
+ list_for_each_entry_safe(pg, next, &l, lru) {
+ if (cpu_has_clflush)
+ flush_map(page_address(pg));
__free_page(pg);
}
}
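+
+/*
+ * Typical use, as a sketch (assumes a struct page *pg obtained
+ * elsewhere, e.g. from virt_to_page()):
+ *
+ * change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE);
+ * global_flush_tlb();
+ */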
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
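+
+ /*
+ * On unmap, let lockdep check that no held locks live in the
+ * pages that are about to become inaccessible.
+ */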
+ if (!enable)
+ debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
+
/* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.
*/
change_page_attr(page, numpages,
enable ? PAGE_KERNEL : __pgprot(0));
/* we should perform an IPI and flush all tlbs,
* but that can deadlock->flush only current cpu.
*/
__flush_tlb_all();
}
-EXPORT_SYMBOL(kernel_map_pages);
#endif
EXPORT_SYMBOL(change_page_attr);