#ifndef _PPC64_PGALLOC_H
#define _PPC64_PGALLOC_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/processor.h>

extern kmem_cache_t *zero_cache;
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
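
/*
 * pgds, pmds and ptes all come from zero_cache, a slab cache of
 * pre-zeroed page-table pages, so new tables never need clearing.
 */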

static inline pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(zero_cache, GFP_KERNEL);
}

static inline void
pgd_free(pgd_t *pgd)
{
	kmem_cache_free(zero_cache, pgd);
}

#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)

static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
}

static inline void
pmd_free(pmd_t *pmd)
{
	kmem_cache_free(zero_cache, pmd);
}

#define pmd_populate_kernel(mm, pmd, pte)	pmd_set(pmd, pte)
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
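
/*
 * Each PTE page is tagged with its owning mm and the (pmd-aligned)
 * address it maps, so the hash-flush code can recover the virtual
 * address of an entry from its struct page alone.
 */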

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
	if (pte) {
		struct page *ptepage = virt_to_page(pte);
		ptepage->mapping = (void *) mm;
		ptepage->index = address & PMD_MASK;
	}
	return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
	if (pte) {
		struct page *ptepage = virt_to_page(pte);
		ptepage->mapping = (void *) mm;
		ptepage->index = address & PMD_MASK;
		return ptepage;
	}
	return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
	virt_to_page(pte)->mapping = NULL;
	kmem_cache_free(zero_cache, pte);
}

static inline void pte_free(struct page *ptepage)
{
	ptepage->mapping = NULL;
	kmem_cache_free(zero_cache, page_address(ptepage));
}
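
/*
 * On SMP, other CPUs may still be walking a PTE page through stale
 * translations when it is unmapped, so freed page-table pages are
 * queued on a per-CPU batch and only handed back once no CPU can
 * still reference them.
 */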

struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;		/* next free slot in pages[] */
	struct page *	pages[0];
};

#define PTE_FREELIST_SIZE	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
				  sizeof(struct page *))

extern void pte_free_now(struct page *ptepage);
extern void pte_free_submit(struct pte_freelist_batch *batch);

DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
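
/*
 * Called from the mmu_gather code for each PTE page being torn down:
 * free it immediately if no other CPU can be using this mm, otherwise
 * queue it on this CPU's pending batch.
 */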
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
{
	/* This is safe as we are holding page_table_lock */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
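
	/*
	 * Fast path: a single-user mm, or one that has only ever run on
	 * this CPU, cannot have its page tables walked by another CPU.
	 */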
	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pte_free(ptepage);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			/* No batch available: fall back to an immediate free */
			pte_free_now(ptepage);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->pages[(*batchp)->index++] = ptepage;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

#define __pmd_free_tlb(tlb, pmd)	__pte_free_tlb(tlb, virt_to_page(pmd))

#define check_pgt_cache()	do { } while (0)

#endif /* _PPC64_PGALLOC_H */