1 #ifndef _PPC64_PGALLOC_H
2 #define _PPC64_PGALLOC_H
5 #include <linux/slab.h>
6 #include <linux/cpumask.h>
7 #include <linux/percpu.h>
8 #include <asm/processor.h>
11 extern kmem_cache_t *zero_cache;
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
/*
 * pgd_alloc - allocate a page global directory for @mm.
 *
 * All page-table levels on this platform are carved from the shared
 * zero_cache slab, whose objects are presumably pre-zeroed pages
 * (NOTE(review): zeroing is implied by the cache name; the constructor
 * is defined elsewhere — confirm).
 */
21 pgd_alloc(struct mm_struct *mm)
23 	return kmem_cache_alloc(zero_cache, GFP_KERNEL);
/* pgd_free counterpart: return the PGD object to the zero_cache slab. */
29 	kmem_cache_free(zero_cache, pgd);
/* Install a newly allocated PMD table into the given PGD entry. */
32 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
/*
 * pmd_alloc_one - allocate a page middle directory.
 * __GFP_REPEAT asks the allocator to retry harder before failing, since
 * page-table allocation failure is expensive for the caller to unwind.
 */
35 pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
37 	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
/* pmd_free counterpart: return the PMD object to the zero_cache slab. */
43 	kmem_cache_free(zero_cache, pmd);
/*
 * Wire a PTE table into a PMD entry. The kernel variant takes the PTE
 * table's virtual address directly; the user variant takes its struct
 * page and converts it via page_address() before delegating.
 */
46 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
47 #define pmd_populate(mm, pmd, pte_page) \
48 	pmd_populate_kernel(mm, pmd, page_address(pte_page))
/*
 * pte_alloc_one_kernel - allocate a PTE page for a kernel mapping,
 * returned as a virtual address. Uses the same zero_cache slab as the
 * other page-table levels, with __GFP_REPEAT for stronger retry.
 */
51 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
53 	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
/*
 * pte_alloc_one - allocate a PTE page for a user mapping, returned as a
 * struct page. Delegates to the kernel variant and converts the virtual
 * address. NOTE(review): the NULL-check on the intermediate pte (and
 * the corresponding failure return) is elided in this excerpt — the
 * original presumably guards virt_to_page(); confirm against full file.
 */
56 static inline struct page *
57 pte_alloc_one(struct mm_struct *mm, unsigned long address)
59 	pte_t *pte = pte_alloc_one_kernel(mm, address);
62 		return virt_to_page(pte);
/*
 * pte_free_kernel - return a PTE page (by virtual address) to the slab.
 * pte_free is the struct-page variant, converting with page_address().
 */
67 static inline void pte_free_kernel(pte_t *pte)
69 	kmem_cache_free(zero_cache, pte);
72 #define pte_free(pte_page)	pte_free_kernel(page_address(pte_page))
/*
 * pte_freelist_batch - one page's worth of pointers to page-table pages
 * whose freeing is deferred until it is safe (see __pte_free_tlb below).
 * NOTE(review): the leading members (an index counter is referenced as
 * ->index elsewhere in this file; likely also an RCU/list head) are
 * elided in this excerpt — confirm layout against the full file.
 */
74 struct pte_freelist_batch
/* Trailing variable-length array filling the rest of the page. */
78 	struct page *	pages[0];
/*
 * Number of page pointers that fit in the remainder of the page that
 * holds a struct pte_freelist_batch (the batch is one page, allocated
 * with __get_free_page below).
 */
81 #define PTE_FREELIST_SIZE	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
82 				  sizeof(struct page *))
/* Free a page-table page immediately (no deferral); defined in arch mm code. */
84 extern void pte_free_now(struct page *ptepage);
/* Hand a full batch off for deferred freeing; defined in arch mm code. */
85 extern void pte_free_submit(struct pte_freelist_batch *batch);
/* Per-CPU pointer to the batch currently being filled (NULL when none). */
87 DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
/*
 * __pte_free_tlb - free a page-table page during a TLB gather.
 *
 * When the mm is effectively private to this CPU (a single user, or a
 * cpu_vm_mask covering only the local CPU) no other CPU can be walking
 * these page tables, so the page can be freed straight away. Otherwise
 * it is queued on the per-CPU pte_freelist_cur batch and released later
 * via pte_free_submit(), so remote CPUs never see a freed PTE page.
 *
 * NOTE(review): several original lines are elided in this excerpt (the
 * fast-path free+return, closing braces, and the batch-pointer reset
 * after submit) — comments below cover only the visible code.
 */
89 static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
91 	/* This is safe as we are holding page_table_lock */
92 	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
93 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
/* Fast path: no other CPU can hold a live reference to this mm's tables. */
95 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
96 	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
/* Lazily allocate the per-CPU batch page; GFP_ATOMIC — we cannot sleep here. */
101 	if (*batchp == NULL) {
102 		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
/* Allocation failed: fall back to freeing the page immediately. */
103 		if (*batchp == NULL) {
104 			pte_free_now(ptepage);
107 		(*batchp)->index = 0;
109 	(*batchp)->pages[(*batchp)->index++] = ptepage;
/* Batch full: hand it off for deferred freeing. */
110 	if ((*batchp)->index == PTE_FREELIST_SIZE) {
111 		pte_free_submit(*batchp);
/* PMD pages use the same deferred-free path as PTE pages. */
116 #define __pmd_free_tlb(tlb, pmd)	__pte_free_tlb(tlb, virt_to_page(pmd))
/* No page-table cache to trim on this architecture. */
118 #define check_pgt_cache()	do { } while (0)
120 #endif /* _PPC64_PGALLOC_H */