#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/slab.h>
-#include <asm/page.h>
#include <asm/spitfire.h>
-#include <asm/pgtable.h>
#include <asm/cpudata.h>
+#include <asm/cacheflush.h>
+#include <asm/page.h>
/* Page table allocation/freeing. */
-#ifdef CONFIG_SMP
-/* Sliiiicck */
-#define pgt_quicklists cpu_data(smp_processor_id())
-#else
-extern struct pgtable_cache_struct {
- unsigned long *pgd_cache;
- unsigned long *pte_cache[2];
- unsigned int pgcache_size;
- unsigned int pgdcache_size;
-} pgt_quicklists;
-#endif
-#define pgd_quicklist (pgt_quicklists.pgd_cache)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (pgt_quicklists.pte_cache)
-#define pgtable_cache_size (pgt_quicklists.pgcache_size)
-#define pgd_cache_size (pgt_quicklists.pgdcache_size)
-
-#ifndef CONFIG_SMP
-
-static __inline__ void free_pgd_fast(pgd_t *pgd)
+extern struct kmem_cache *pgtable_cache;
+
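+/* Every page-table level is now a PAGE_SIZE object handed out by a
+ * single slab cache.  Note there is no clear_page() here: this
+ * presumes the cache constructor (and entry clearing on the free
+ * paths) keeps objects zeroed. */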
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- struct page *page = virt_to_page(pgd);
-
- preempt_disable();
- if (!page->lru.prev) {
- page->lru.next = (void *) pgd_quicklist;
- pgd_quicklist = (unsigned long *)page;
- }
- page->lru.prev = (void *)
- (((unsigned long)page->lru.prev) |
- (((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1));
- pgd_cache_size++;
- preempt_enable();
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}
-static __inline__ pgd_t *get_pgd_fast(void)
+static inline void pgd_free(pgd_t *pgd)
{
- struct page *ret;
-
- preempt_disable();
- if ((ret = (struct page *)pgd_quicklist) != NULL) {
- unsigned long mask = (unsigned long)ret->lru.prev;
- unsigned long off = 0;
-
- if (mask & 1)
- mask &= ~1;
- else {
- off = PAGE_SIZE / 2;
- mask &= ~2;
- }
- ret->lru.prev = (void *) mask;
- if (!mask)
- pgd_quicklist = (unsigned long *)ret->lru.next;
- ret = (struct page *)(__page_address(ret) + off);
- pgd_cache_size--;
- preempt_enable();
- } else {
- struct page *page;
-
- preempt_enable();
- page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
- if (page) {
- ret = (struct page *)page_address(page);
- clear_page(ret);
- page->lru.prev = (void *) 2UL;
-
- preempt_disable();
- page->lru.next = (void *) pgd_quicklist;
- pgd_quicklist = (unsigned long *)page;
- pgd_cache_size++;
- preempt_enable();
- }
- }
- return (pgd_t *)ret;
+ kmem_cache_free(pgtable_cache, pgd);
}
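
For context (not part of this header): the allocators above presume that
pgtable_cache is created once at boot. A minimal sketch, assuming the
2.6-era six-argument kmem_cache_create(); the init-function name, the
zero-filling constructor, and the error handling shown here are
illustrative assumptions, not taken from this patch:

	/* Sketch: one PAGE_SIZE slab backs pgd, pmd and pte pages.
	 * zero_ctor() runs when the slab allocates fresh pages, so
	 * newly created objects start out zeroed. */
	static void zero_ctor(void *addr, struct kmem_cache *cache,
			      unsigned long flags)
	{
		clear_page(addr);
	}

	void pgtable_cache_init(void)
	{
		pgtable_cache = kmem_cache_create("pgtable_cache",
						  PAGE_SIZE, PAGE_SIZE,
						  SLAB_HWCACHE_ALIGN,
						  zero_ctor, NULL);
		if (!pgtable_cache)
			panic("Could not create pgtable_cache");
	}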
-#else /* CONFIG_SMP */
+#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
-static __inline__ void free_pgd_fast(pgd_t *pgd)
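+/* __GFP_REPEAT asks the page allocator to retry harder; a failure
+ * here propagates back up as an out-of-memory fault. */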
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- preempt_disable();
- *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
- preempt_enable();
+ return kmem_cache_alloc(pgtable_cache,
+ GFP_KERNEL|__GFP_REPEAT);
}
-static __inline__ pgd_t *get_pgd_fast(void)
+static inline void pmd_free(pmd_t *pmd)
{
- unsigned long *ret;
-
- preempt_disable();
- if((ret = pgd_quicklist) != NULL) {
- pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- preempt_enable();
- } else {
- preempt_enable();
- ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
- if(ret)
- memset(ret, 0, PAGE_SIZE);
- }
- return (pgd_t *)ret;
+ kmem_cache_free(pgtable_cache, pmd);
}
-static __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
{
- free_page((unsigned long)pgd);
+ return kmem_cache_alloc(pgtable_cache,
+ GFP_KERNEL|__GFP_REPEAT);
}
-#endif /* CONFIG_SMP */
-
-#if (L1DCACHE_SIZE > PAGE_SIZE) /* is there D$ aliasing problem */
-#define VPTE_COLOR(address) (((address) >> (PAGE_SHIFT + 10)) & 1UL)
-#define DCACHE_COLOR(address) (((address) >> PAGE_SHIFT) & 1UL)
-#else
-#define VPTE_COLOR(address) 0
-#define DCACHE_COLOR(address) 0
-#endif
-
-#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
-
-static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
{
- unsigned long *ret;
- int color = 0;
-
- preempt_disable();
- if (pte_quicklist[color] == NULL)
- color = 1;
-
- if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
- pte_quicklist[color] = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- }
- preempt_enable();
-
- return (pmd_t *)ret;
+ pte_t *pte = pte_alloc_one_kernel(mm, address);
+
+ if (!pte)
+   return NULL;
+ return virt_to_page(pte);
}
-
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+
+static inline void pte_free_kernel(pte_t *pte)
{
- pmd_t *pmd;
-
- pmd = pmd_alloc_one_fast(mm, address);
- if (!pmd) {
- pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
- if (pmd)
- memset(pmd, 0, PAGE_SIZE);
- }
- return pmd;
+ kmem_cache_free(pgtable_cache, pte);
}
-static __inline__ void free_pmd_fast(pmd_t *pmd)
+static inline void pte_free(struct page *ptepage)
{
- unsigned long color = DCACHE_COLOR((unsigned long)pmd);
-
- preempt_disable();
- *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
- pte_quicklist[color] = (unsigned long *) pmd;
- pgtable_cache_size++;
- preempt_enable();
+ pte_free_kernel(page_address(ptepage));
}
-static __inline__ void free_pmd_slow(pmd_t *pmd)
-{
- free_page((unsigned long)pmd);
-}
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE) \
pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
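
As a hedged usage sketch (the mm, pmd and address variables stand in for
a hypothetical caller's state, not anything defined in this header),
this is how a fault path would pair the allocator with the populate
macro above:

	/* Allocate a pte page and hook it under a pmd entry. */
	struct page *ptepage = pte_alloc_one(mm, address);

	if (!ptepage)
		return -ENOMEM;		/* caller turns this into VM_FAULT_OOM */
	pmd_populate(mm, pmd, ptepage);	/* expands to pmd_set() on the new page */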
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- pte_t *pte = pte_alloc_one_kernel(mm, addr);
- if (pte)
- return virt_to_page(pte);
- return 0;
-}
-
-static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
-{
- unsigned long color = VPTE_COLOR(address);
- unsigned long *ret;
-
- preempt_disable();
- if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
- pte_quicklist[color] = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- }
- preempt_enable();
- return (pte_t *)ret;
-}
-
-static __inline__ void free_pte_fast(pte_t *pte)
-{
- unsigned long color = DCACHE_COLOR((unsigned long)pte);
-
- preempt_disable();
- *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
- pte_quicklist[color] = (unsigned long *) pte;
- pgtable_cache_size++;
- preempt_enable();
-}
-
-static __inline__ void free_pte_slow(pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-#define pte_free_kernel(pte) free_pte_fast(pte)
-#define pte_free(pte) free_pte_fast(page_address(pte))
-#define pmd_free(pmd) free_pmd_fast(pmd)
-#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc(mm) get_pgd_fast()
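+/* With slab backing the page tables there is no arch quicklist left
+ * to trim, so check_pgt_cache() reduces to a no-op. */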
+#define check_pgt_cache() do { } while (0)
#define arch_add_exec_range(mm, limit) do { ; } while (0)
#define arch_flush_exec_range(mm) do { ; } while (0)

#endif /* _SPARC64_PGALLOC_H */