/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include <asm-xen/foreign_page.h>
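
/*
 * Print a summary of physical memory usage: total pages of RAM and how
 * many are highmem, reserved, shared, or sitting in the swap cache.
 */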
void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat->node_mem_map + i;
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a virtual page frame with a given machine page frame
 * and protection flags for that frame. Unlike set_pte_pfn(), the
 * frame number is a Xen machine frame and is stored untranslated
 * via pfn_pte_ma().
 */
static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
                           pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte_ma(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk("set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk("set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk("set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
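
/*
 * Install a fixmap entry: translate the fixmap index to its fixed
 * virtual address and point the kernel page tables at the given frame.
 * __set_fixmap() takes a physical address; __set_fixmap_ma() takes a
 * Xen machine address.
 */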
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
}
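
/*
 * Page-table pages must be mapped read-only under Xen: write-protect
 * each newly allocated PTE page before it can be hooked into a live
 * page table.
 */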
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        if (pte)
                make_page_readonly(pte);
        return pte;
}
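
/*
 * Slab constructor for PTE pages: mark the page foreign so the page
 * allocator releases it through pte_free(), write-protect it, and pin
 * it as a page-table page with the hypervisor. pte_dtor() below
 * reverses these steps.
 */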
void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
{
        struct page *page = virt_to_page(pte);
        SetPageForeign(page, pte_free);
        set_page_count(page, 1);

        clear_page(pte);
        make_page_readonly(pte);
        xen_pte_pin(__pa(pte));
}

void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
{
        struct page *page = virt_to_page(pte);
        ClearPageForeign(page);

        xen_pte_unpin(__pa(pte));
        make_page_writable(pte);
}
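
/*
 * With CONFIG_HIGHPTE, PTE pages may live in highmem: a highmem page
 * is returned as-is, while a lowmem page is released and replaced by
 * one from the pinned, read-only pte_cache.
 */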
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pte_t *ptep;

#ifdef CONFIG_HIGHPTE
        struct page *pte;

        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
        if (pte == NULL)
                return pte;
        if (PageHighMem(pte))
                return pte;
        /* not a highmem page -- free page and grab one from the cache */
        __free_page(pte);
#endif
        ptep = kmem_cache_alloc(pte_cache, GFP_KERNEL);
        if (ptep == NULL)
                return NULL;
        return virt_to_page(ptep);
}
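
/*
 * PTE pages are PageForeign, so this is also invoked as the foreign-
 * page destructor when the final reference is dropped: slab-cache
 * pages go back to pte_cache, highmem pages back to the page
 * allocator.
 */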
void pte_free(struct page *pte)
{
        set_page_count(pte, 1);
#ifdef CONFIG_HIGHPTE
        if (!PageHighMem(pte))
#endif
                kmem_cache_free(pte_cache,
                                phys_to_virt(page_to_pseudophys(pte)));
#ifdef CONFIG_HIGHPTE
        else
                __free_page(pte);
#endif
}

void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
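
/*
 * The pgd list is threaded through struct page itself: page->index
 * holds the pointer to the next element, and page->private points back
 * at the previous element's next pointer, so pgd_list_del() unlinks in
 * O(1) without walking the list.
 */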
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                pgd_list->private = (unsigned long)&page->index;
        pgd_list = page;
        page->private = (unsigned long)&pgd_list;
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);
        next = (struct page *)page->index;
        pprev = (struct page **)page->private;
        *pprev = next;
        if (next)
                next->private = (unsigned long)pprev;
}

void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags;

        if (PTRS_PER_PMD == 1)
                spin_lock_irqsave(&pgd_lock, flags);

        memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
               swapper_pg_dir + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        if (PTRS_PER_PMD > 1)
                goto out;

        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 out:
        make_page_readonly(pgd);
        xen_pgd_pin(__pa(pgd));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags; /* can be called from interrupt context */

        xen_pgd_unpin(__pa(pgd));
        make_page_writable(pgd);

        if (PTRS_PER_PMD > 1)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
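
/*
 * For PAE (PTRS_PER_PMD > 1), the user pmds are allocated up front and
 * installed in the pgd here; the kernel pmd is shared among all pgds,
 * as noted above, and its pgd entries were already copied in by
 * pgd_ctor().
 */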
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                if (!pmd)
                        goto out_oom;
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
        return pgd;

out_oom:
        for (i--; i >= 0; i--)
                kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        kmem_cache_free(pgd_cache, pgd);
        return NULL;
}
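
/*
 * The +1/-1 arithmetic here and in pgd_alloc() accounts for the low
 * _PAGE_PRESENT bit: __pa(pmd)+1 forms a present pgd entry, and
 * pgd_val()-1 recovers the pmd's physical address for freeing.
 */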
void pgd_free(pgd_t *pgd)
{
        int i;

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < USER_PTRS_PER_PGD; ++i)
                        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        /* in the non-PAE case, clear_page_range() clears user pgd entries */
        kmem_cache_free(pgd_cache, pgd);
}
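
/*
 * Writability helpers for kernel mappings, needed only on writable
 * (non-shadow) page tables: the guest must write-protect any page it
 * hands to Xen as a page table. For addresses above high_memory the
 * frame's permanent lowmem mapping (if any) is adjusted too, since the
 * hypervisor refuses to pin a frame that remains writable elsewhere.
 */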
#ifndef CONFIG_XEN_SHADOW_MODE
void make_lowmem_page_readonly(void *va)
{
        pgd_t *pgd = pgd_offset_k((unsigned long)va);
        pud_t *pud = pud_offset(pgd, (unsigned long)va);
        pmd_t *pmd = pmd_offset(pud, (unsigned long)va);
        pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
        set_pte(pte, pte_wrprotect(*pte));
}

void make_lowmem_page_writable(void *va)
{
        pgd_t *pgd = pgd_offset_k((unsigned long)va);
        pud_t *pud = pud_offset(pgd, (unsigned long)va);
        pmd_t *pmd = pmd_offset(pud, (unsigned long)va);
        pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
        set_pte(pte, pte_mkwrite(*pte));
}

void make_page_readonly(void *va)
{
        pgd_t *pgd = pgd_offset_k((unsigned long)va);
        pud_t *pud = pud_offset(pgd, (unsigned long)va);
        pmd_t *pmd = pmd_offset(pud, (unsigned long)va);
        pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
        set_pte(pte, pte_wrprotect(*pte));
        if ( (unsigned long)va >= (unsigned long)high_memory )
        {
                unsigned long phys;
                phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
#ifdef CONFIG_HIGHMEM
                if ( (phys >> PAGE_SHIFT) < highstart_pfn )
#endif
                        make_lowmem_page_readonly(phys_to_virt(phys));
        }
}

void make_page_writable(void *va)
{
        pgd_t *pgd = pgd_offset_k((unsigned long)va);
        pud_t *pud = pud_offset(pgd, (unsigned long)va);
        pmd_t *pmd = pmd_offset(pud, (unsigned long)va);
        pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
        set_pte(pte, pte_mkwrite(*pte));
        if ( (unsigned long)va >= (unsigned long)high_memory )
        {
                unsigned long phys;
                phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
#ifdef CONFIG_HIGHMEM
                if ( (phys >> PAGE_SHIFT) < highstart_pfn )
#endif
                        make_lowmem_page_writable(phys_to_virt(phys));
        }
}

void make_pages_readonly(void *va, unsigned int nr)
{
        while ( nr-- != 0 )
        {
                make_page_readonly(va);
                va = (void *)((unsigned long)va + PAGE_SIZE);
        }
}

void make_pages_writable(void *va, unsigned int nr)
{
        while ( nr-- != 0 )
        {
                make_page_writable(va);
                va = (void *)((unsigned long)va + PAGE_SIZE);
        }
}
#endif /* CONFIG_XEN_SHADOW_MODE */