#ifndef _PPC64_PGALLOC_H
#define _PPC64_PGALLOC_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/processor.h>
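
/*
 * Every level of the page tables in this file is allocated from
 * zero_cache, a slab cache of pre-zeroed objects (hence the name),
 * so callers always receive already-cleared tables.
 */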
extern kmem_cache_t *zero_cache;

/* No-op stubs since we don't support execshield on ppc */
#define arch_add_exec_range(mm, limit) do { ; } while (0)
#define arch_flush_exec_range(mm) do { ; } while (0)
#define arch_remove_exec_range(mm, limit) do { ; } while (0)

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

static inline pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(zero_cache, GFP_KERNEL);
}

static inline void
pgd_free(pgd_t *pgd)
{
	kmem_cache_free(zero_cache, pgd);
}

#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)

static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
}

static inline void
pmd_free(pmd_t *pmd)
{
	kmem_cache_free(zero_cache, pmd);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
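
/*
 * Each pte page records, in its struct page, which mm and which
 * PMD-sized virtual region it maps (->mapping and ->index below);
 * the ppc64 hash-MMU flush path uses these fields to recover the
 * owning address space for a pte.
 */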
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
	if (pte) {
		struct page *ptepage = virt_to_page(pte);
		ptepage->mapping = (void *) mm;
		ptepage->index = address & PMD_MASK;
	}
	return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
	if (pte) {
		struct page *ptepage = virt_to_page(pte);
		ptepage->mapping = (void *) mm;
		ptepage->index = address & PMD_MASK;
		return ptepage;
	}
	return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
	virt_to_page(pte)->mapping = NULL;
	kmem_cache_free(zero_cache, pte);
}

static inline void pte_free(struct page *ptepage)
{
	ptepage->mapping = NULL;
	kmem_cache_free(zero_cache, page_address(ptepage));
}

struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;		/* next free slot in pages[] */
	struct page *	pages[0];
};
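
/*
 * A batch occupies exactly one page: the header above followed by as
 * many struct page pointers as fit in the remainder of that page.
 */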
#define PTE_FREELIST_SIZE	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
				  sizeof(struct page *))

extern void pte_free_now(struct page *ptepage);
extern void pte_free_submit(struct pte_freelist_batch *batch);

DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
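
/*
 * Freeing a pte page under a TLB gather: if this mm can only be live
 * on the current CPU, the page is freed immediately.  Otherwise it is
 * queued on a per-cpu batch that is handed to pte_free_submit() once
 * full; if no batch page can be allocated, the page is passed to
 * pte_free_now() instead.  Deferring the free keeps the page from
 * being reused while another CPU may still be walking the old tables.
 */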
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
{
	/* This is safe as we are holding page_table_lock */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pte_free(ptepage);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pte_free_now(ptepage);
			return;
		}
		(*batchp)->index = 0;
	}

	(*batchp)->pages[(*batchp)->index++] = ptepage;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
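
/* pmd pages freed under a gather go through the same deferred path. */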
#define __pmd_free_tlb(tlb, pmd)	__pte_free_tlb(tlb, virt_to_page(pmd))

#define check_pgt_cache()	do { } while (0)

#endif /* _PPC64_PGALLOC_H */