/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat->node_mem_map + i;
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%d pages of RAM\n", total);
	printk("%d pages of HIGHMEM\n", highmem);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
EXPORT_SYMBOL_GPL(show_mem);

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

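/*
 * Illustrative note (not part of the original file): the "stored
 * as-is" behaviour above is what allows entries to be cleared again.
 * clear_fixmap() in asm/fixmap.h removes a mapping by passing an
 * empty pgprot, so pfn_pte() produces a zero pte:
 *
 *	#define clear_fixmap(idx)  __set_fixmap(idx, 0, __pgprot(0))
 */
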
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk("set_pmd_pfn: vaddr misaligned\n");
		return;
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk("set_pmd_pfn: pfn misaligned\n");
		return;
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk("set_pmd_pfn: pgd_none\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

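#if 0	/* illustrative only -- not part of the original file */
/*
 * Example call under the PAE assumptions stated above: map the first
 * large page of physical memory (pfn 0) at a hypothetical
 * PMD_SIZE-aligned virtual address. _PAGE_PSE in the flags marks the
 * entry as a large page; without it the pmd entry would be taken as a
 * pointer to a pagetable.
 */
static void __init example_set_pmd_pfn(void)
{
	set_pmd_pfn(0xffc00000UL, 0, __pgprot(_KERNPG_TABLE | _PAGE_PSE));
}
#endif
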
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

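/*
 * Illustrative note (not part of the original file): callers normally
 * reach __set_fixmap() through the set_fixmap()/set_fixmap_nocache()
 * macros in asm/fixmap.h, which supply PAGE_KERNEL or
 * PAGE_KERNEL_NOCACHE as the protection, e.g. when mapping the local
 * APIC registers:
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE);
 */
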
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}

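/*
 * Illustrative note (not part of the original file): with
 * CONFIG_HIGHPTE the page allocated above may live in highmem and so
 * has no permanent kernel mapping. Generic code therefore accesses
 * user pte pages through pte_offset_map()/pte_unmap(), which
 * kmap_atomic() the page (slots KM_PTE0/KM_PTE1) around each use:
 *
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	...
 *	pte_unmap(pte);
 */
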
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

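#if 0	/* illustrative sketch -- the real version lives later in this file */
/*
 * The pmd_cache and pgd_cache used below are created at boot by
 * pgtable_cache_init(), which registers pmd_ctor()/pgd_ctor() as slab
 * constructors, and pgd_dtor() as a destructor only in the non-PAE
 * case (PTRS_PER_PMD == 1). Roughly:
 */
void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1)
		pmd_cache = kmem_cache_create("pmd", PTRS_PER_PMD*sizeof(pmd_t),
				PTRS_PER_PMD*sizeof(pmd_t), 0, pmd_ctor, NULL);
	pgd_cache = kmem_cache_create("pgd", PTRS_PER_PGD*sizeof(pgd_t),
			PTRS_PER_PGD*sizeof(pgd_t), 0, pgd_ctor,
			PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache || (PTRS_PER_PMD > 1 && !pmd_cache))
		panic("pgtable_cache_init(): cannot create pgd/pmd cache");
}
#endif
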
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		pgd_list->private = (unsigned long)&page->index;
	pgd_list = page;
	page->private = (unsigned long)&pgd_list;
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	next = (struct page *)page->index;
	pprev = (struct page **)page->private;
	*pprev = next;
	if (next)
		next->private = (unsigned long)pprev;
}

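#if 0	/* illustrative sketch -- modelled on mm/pageattr.c */
/*
 * The consumer of this list: when a kernel mapping changes, pageattr
 * code walks every cached pgd under pgd_lock and patches the affected
 * entry. The list is threaded through page->index (next pointer) and
 * page->private (address of the previous next-link), as maintained by
 * pgd_list_add()/pgd_list_del() above.
 */
static void example_fixup_all_pgds(unsigned long address)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(address);
		/* ... sync *pgd with swapper_pg_dir's entry here ... */
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#endif
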
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;

out_oom:
	for (i--; i >= 0; i--)
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}

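/*
 * Illustrative note (not part of the original file): the "1 +" in
 * __pgd(1 + __pa(pmd)) sets the _PAGE_PRESENT bit, i.e. it is an
 * equivalent spelling of __pgd(_PAGE_PRESENT | __pa(pmd)). That is
 * also why the error path above and pgd_free() below can recover the
 * pmd's virtual address with __va(pgd_val(pgd[i]) - 1).
 */
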
void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}
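
/*
 * Illustrative note (not part of the original file): pgd_alloc() and
 * pgd_free() bracket an mm's lifetime; kernel/fork.c calls them from
 * mm_alloc_pgd() when a new mm is set up and from mm_free_pgd() when
 * the last reference goes away in __mmdrop().
 */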