/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/hypervisor.h>
#include <asm/swiotlb.h>

extern unsigned long *contiguous_bitmap;

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

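/*
 * XEN: a page that is about to be used as a page table must be mapped
 * read-only in the guest; make_lowmem_page_readonly() above does that.
 * The XENFEAT_writable_page_tables argument names the feature which,
 * when advertised by the hypervisor, makes the explicit write-protection
 * unnecessary (the hypervisor then emulates writable page tables).
 */
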
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		make_lowmem_page_readonly(page_table,
					  XENFEAT_writable_page_tables);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (vaddr < HYPERVISOR_VIRT_START && pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

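/*
 * Note the HYPERVISOR_VIRT_START guard above: the top of the virtual
 * address space belongs to Xen, and the guest must not install
 * mappings there.
 */
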
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	unsigned long max_ram_pfn = xen_start_info->nr_pages;
	if (max_ram_pfn > max_low_pfn)
		max_ram_pfn = max_low_pfn;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;
	pmd_idx = pmd_index(PAGE_OFFSET);
	pte_ofs = pte_index(PAGE_OFFSET);

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
#ifdef CONFIG_XEN
		/*
		 * Native linux doesn't have PAE paging enabled yet at
		 * this point.  When running as a xen domain we are in
		 * PAE mode already, thus we can't simply hook in an
		 * empty pmd - that would kill the mappings we are
		 * currently providing.
		 */
		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
#else
		pmd = one_md_table_init(pgd);
#endif
		if (pfn >= max_low_pfn)
			continue;
		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
			if (address >= HYPERVISOR_VIRT_START)
				continue;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				pte += pte_ofs;
				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
					/* XEN: Only map initial RAM allocation. */
					if ((pfn >= max_ram_pfn) || pte_present(*pte))
						continue;
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
				pte_ofs = 0;
			}
		}
		pmd_idx = 0;
	}
}

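/*
 * Net effect of the loop above: kernel text is mapped executable
 * (PAGE_KERNEL(_LARGE)_EXEC), everything else gets PAGE_KERNEL, which
 * is non-executable where the processor supports NX.  Under Xen only
 * the initial allocation (xen_start_info->nr_pages) is mapped here;
 * pfns between that and max_low_pfn are skipped.
 */
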
#ifndef CONFIG_XEN
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}
#else
#define page_kills_ppro(p)	0
#endif

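/*
 * pfns 0x70000-0x7003F correspond to physical 0x70000000-0x7003FFFF,
 * the region that trips the Pentium Pro erratum detected by
 * ppro_with_ram_bug().  Under Xen, pseudo-physical page numbers have
 * no fixed relation to machine addresses, so the check is stubbed out.
 */
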
int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(page_is_ram);

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address is
 * valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

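/*
 * The "pagenr <= 256" test above: 256 4K pages == the first megabyte,
 * which is always allowed for the legacy BIOS/X/dosemu uses described
 * in the comment above the function.
 */
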
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

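/*
 * kmap_atomic() depends on the FIX_KMAP_* ptes living in one
 * contiguous page table, so caching the pte for FIX_KMAP_BEGIN in
 * kmap_pte is enough to reach every atomic-kmap slot by pointer
 * arithmetic.
 */
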
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page, int pfn)
{
	init_page_count(page);
	if (pfn < xen_start_info->nr_pages)
		__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page, pfn);
	} else
		SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page, pfn);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assuming single node and all memory that
 * has been added dynamically that would be
 * onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else /* !CONFIG_HIGHMEM */
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

pgd_t *swapper_pg_dir;

static void __init pagetable_init (void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;

	swapper_pg_dir = pgd_base;
	init_mm.pgd    = pgd_base;

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);
}

#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings (void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

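/*
 * On native PAE the low entries are not cleared but left pointing at
 * empty_zero_page with the present bit set (the "1 +"), apparently
 * because the processor caches the PAE top-level entries at %cr3 load
 * time, so a non-present entry cannot be relied upon to take effect
 * without a reload.  Xen and non-PAE kernels can simply write zero.
 */
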
static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL(__supported_pte_mask);

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable (disables exec-shield too)
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else
		return -EINVAL;

	return 0;
}
early_param("noexec", noexec_setup);

#ifdef CONFIG_X86_PAE

int nx_enabled = 0;

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

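/*
 * CPUID leaf 0x80000001 EDX bit 20 is the architectural NX feature
 * bit, and EFER_NX (bit 11 of MSR_EFER) is what actually turns the
 * feature on; neither is Xen-specific.
 */
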
/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	pte_update_defer(&init_mm, vaddr, pte);
	__flush_tlb_all();
out:
	return ret;
}

#endif /* CONFIG_X86_PAE */

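/*
 * _PAGE_BIT_NX is bit 63 of a PAE pte, i.e. bit 31 of pte_high, which
 * is why set_kernel_exec() pokes pte_high directly rather than going
 * through the pte flag helpers.
 */
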
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	int i;

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
	else
#endif
		printk("Using x86 segment limits to approximate NX protection\n");

	pagetable_init();

#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 * When running as a xen domain we are already in PAE mode at
	 * this point.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();

	/* Switch to the real shared_info page, and clear the
	 * dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	/* Setup mapping of lower 1st MB */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;
	unsigned long pfn;

	contiguous_bitmap = alloc_bootmem_low_pages(
		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
	BUG_ON(!contiguous_bitmap);
	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);

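	/*
	 * XEN: one bit per low-memory pfn, rounded up to whole longs
	 * (hence the 2*BITS_PER_LONG slack before the >> 3).  The
	 * bitmap is presumably consulted by the Xen DMA code to track
	 * which pseudo-physical ranges are machine-contiguous.
	 */
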
#if defined(CONFIG_SWIOTLB)
	swiotlb_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
	       VMALLOC_START, VMALLOC_END, MAXMEM);
	BUG_ON(VMALLOC_START > VMALLOC_END);

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	/* XEN: init and count low-mem pages outside initial allocation. */
	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
		ClearPageReserved(&mem_map[pfn]);
		init_page_count(&mem_map[pfn]);
		totalram_pages++;
	}

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
	       FIXADDR_START, FIXADDR_TOP,
	       (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
	       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
	       (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

	       VMALLOC_START, VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (unsigned long)__va(0), (unsigned long)high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

	       (unsigned long)&_etext, (unsigned long)&_edata,
	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

	       (unsigned long)&_text, (unsigned long)&_etext,
	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif

	set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif

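/*
 * i386 hotplug only ever adds pages to ZONE_HIGHMEM, matching the
 * HIGHMEM-only assumption in online_page() above; removal is simply
 * unimplemented.
 */
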
struct kmem_cache *pgd_cache;
struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
#ifndef CONFIG_XEN
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
#else
				PAGE_SIZE,
				PAGE_SIZE,
#endif
				0,
				pgd_ctor,
				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

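/*
 * How the test works: flag starts out as 1 (the "2" (1) input), the
 * asm reads a byte from the write-protected FIX_WP_TEST mapping and
 * writes it straight back.  If WP is honoured the write faults, the
 * exception-table entry (1b -> 2b) skips the "xorl %2,%2", and flag
 * stays 1; if the write silently succeeds, flag is cleared to 0.
 */
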
#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %uk\n",
	       (__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif