1 /*
2  *  linux/arch/i386/mm/init.c
3  *
4  *  Copyright (C) 1995  Linus Torvalds
5  *
6  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
7  */
8
9 #include <linux/config.h>
10 #include <linux/module.h>
11 #include <linux/signal.h>
12 #include <linux/sched.h>
13 #include <linux/kernel.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/types.h>
17 #include <linux/ptrace.h>
18 #include <linux/mman.h>
19 #include <linux/mm.h>
20 #include <linux/hugetlb.h>
21 #include <linux/swap.h>
22 #include <linux/smp.h>
23 #include <linux/init.h>
24 #include <linux/highmem.h>
25 #include <linux/pagemap.h>
26 #include <linux/bootmem.h>
27 #include <linux/slab.h>
28 #include <linux/proc_fs.h>
29 #include <linux/efi.h>
30
31 #include <asm/processor.h>
32 #include <asm/system.h>
33 #include <asm/uaccess.h>
34 #include <asm/pgtable.h>
35 #include <asm/dma.h>
36 #include <asm/fixmap.h>
37 #include <asm/e820.h>
38 #include <asm/apic.h>
39 #include <asm/tlb.h>
40 #include <asm/tlbflush.h>
41 #include <asm/sections.h>
42 #include <asm/desc.h>
43
44 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
45 unsigned long highstart_pfn, highend_pfn;
46
47 static int do_test_wp_bit(void);
48
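/*
 * Pages 0x70000-0x7003F are physical 0x70000000-0x7003FFFF, i.e. the
 * 256KB just above 1792MB.  On Pentium Pro parts where
 * ppro_with_ram_bug() reports the erratum, these pages must stay
 * reserved, so one_highpage_init() never releases them to the allocator.
 */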
49 static inline int page_kills_ppro(unsigned long pagenr)
50 {
51         if (pagenr >= 0x70000 && pagenr <= 0x7003F)
52                 return 1;
53         return 0;
54 }
55
56 extern int is_available_memory(efi_memory_desc_t *);
57
58 static inline int page_is_ram(unsigned long pagenr)
59 {
60         int i;
61         unsigned long addr, end;
62
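        /*
         * Both the EFI and e820 walks below round each region's start up
         * and its end down to whole pages, so pages only partially covered
         * by a RAM region are not reported as RAM.
         */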
63         if (efi_enabled) {
64                 efi_memory_desc_t *md;
65
66                 for (i = 0; i < memmap.nr_map; i++) {
67                         md = &memmap.map[i];
68                         if (!is_available_memory(md))
69                                 continue;
70                         addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
71                         end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
72
73                         if ((pagenr >= addr) && (pagenr < end))
74                                 return 1;
75                 }
76                 return 0;
77         }
78
79         for (i = 0; i < e820.nr_map; i++) {
80
81                 if (e820.map[i].type != E820_RAM)       /* not usable memory */
82                         continue;
83                 /*
84                  *      !!!FIXME!!! Some BIOSes report areas as RAM that
85                  *      are not. Notably the 640K-1MB area. We need a sanity
86                  *      check here.
87                  */
88                 addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
89                 end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
90                 if  ((pagenr >= addr) && (pagenr < end))
91                         return 1;
92         }
93         return 0;
94 }
95
96 /* To enable modules to check if a page is in RAM */
97 int pfn_is_ram(unsigned long pfn)
98 {
99         return page_is_ram(pfn);
100 }
101
102
103 /*
104  * devmem_is_allowed() checks whether /dev/mem access to a certain address
105  * is valid. The argument is a physical page number.
106  *
107  * On x86, access has to be given to the first megabyte of RAM because that
108  * area contains BIOS code and data regions used by X, dosemu and similar
109  * apps. Access also has to be given to non-kernel-RAM areas, since these
110  * contain the PCI mmio resources as well as potential BIOS/ACPI data
111  * regions.
112  */
113 int devmem_is_allowed(unsigned long pagenr)
114 {
115         if (pagenr <= 256)
116                 return 1;
117         if (!page_is_ram(pagenr))
118                 return 1;
119         return 0;
120 }
121
122
123 pte_t *kmap_pte;
124
125 EXPORT_SYMBOL(kmap_pte);
126
127 #define kmap_get_fixmap_pte(vaddr)                                      \
128         pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
129
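/*
 * kmap_init() caches the kernel pte behind the first kmap fixmap slot so
 * that the atomic-kmap code can index from kmap_pte instead of walking
 * the page tables for every temporary mapping.
 */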
130 void __init kmap_init(void)
131 {
132         kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
133 }
134
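/*
 * Called for each highmem page at boot: RAM pages (minus the Pentium Pro
 * erratum range when bad_ppro is set) are un-reserved and released to the
 * buddy allocator; everything else stays reserved.
 */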
135 void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
136 {
137         if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
138                 ClearPageReserved(page);
139                 set_bit(PG_highmem, &page->flags);
140                 set_page_count(page, 1);
141                 __free_page(page);
142                 totalhigh_pages++;
143         } else
144                 SetPageReserved(page);
145 }
146
147 EXPORT_SYMBOL_GPL(page_is_ram);
148
149 #ifdef CONFIG_HIGHMEM
150
151 #ifndef CONFIG_DISCONTIGMEM
152 void __init set_highmem_pages_init(int bad_ppro) 
153 {
154         int pfn;
155         for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
156                 one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
157         totalram_pages += totalhigh_pages;
158 }
159 #else
160 extern void set_highmem_pages_init(int);
161 #endif /* !CONFIG_DISCONTIGMEM */
162 #else
163 # define set_highmem_pages_init(bad_ppro) do { } while (0)
164 #endif
165
166 unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
167 unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
168
169 #ifndef CONFIG_DISCONTIGMEM
170 #define remap_numa_kva() do {} while (0)
171 #else
172 extern void __init remap_numa_kva(void);
173 #endif
174
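/*
 * Make sure a pte page exists for the pmd entry covering @address.
 * fixrange_init() uses this to pre-build the page-table skeleton for
 * ranges (fixmap, pkmap) whose actual mappings are installed later.
 */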
175 static __init void prepare_pagetables(pgd_t *pgd_base, unsigned long address)
176 {
177         pgd_t *pgd;
178         pmd_t *pmd;
179         pte_t *pte;
180
181         pgd = pgd_base + pgd_index(address);
182         pmd = pmd_offset(pgd, address);
183         if (!pmd_present(*pmd)) {
184                 pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
185                 set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));
186         }
187 }
188
189 static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
190 {
191         unsigned long vaddr;
192
193         for (vaddr = start; vaddr != end; vaddr += PAGE_SIZE)
194                 prepare_pagetables(pgd_base, vaddr);
195 }
196
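/*
 * Map the virtual range [start, end) at pgd_base so that virtual address
 * vaddr resolves to physical address (vaddr - start).  Large PSE pages
 * (4MB, or 2MB with PAE) are used when the CPU supports them, otherwise
 * individual 4KB ptes.  With start == PAGE_OFFSET this creates the normal
 * lowmem linear mapping; with start == 0 it creates the flat identity
 * mapping that SMP startup needs.
 */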
197 void setup_identity_mappings(pgd_t *pgd_base, unsigned long start, unsigned long end)
198 {
199         unsigned long vaddr;
200         pgd_t *pgd;
201         int i, j, k;
202         pmd_t *pmd;
203         pte_t *pte, *pte_base;
204
205         pgd = pgd_base;
206
207         for (i = 0; i < PTRS_PER_PGD; pgd++, i++) {
208                 vaddr = i*PGDIR_SIZE;
209                 if (end && (vaddr >= end))
210                         break;
211                 pmd = pmd_offset(pgd, 0);
212                 for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
213                         vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
214                         if (end && (vaddr >= end))
215                                 break;
216                         if (vaddr < start)
217                                 continue;
218                         if (cpu_has_pse) {
219                                 unsigned long __pe;
220
221                                 set_in_cr4(X86_CR4_PSE);
222                                 boot_cpu_data.wp_works_ok = 1;
223                                 __pe = _KERNPG_TABLE + _PAGE_PSE + vaddr - start;
224                                 /* Make it "global" too if supported */
225                                 if (cpu_has_pge) {
226                                         set_in_cr4(X86_CR4_PGE);
227 #if !defined(CONFIG_X86_SWITCH_PAGETABLES)
228                                         __pe += _PAGE_GLOBAL;
229                                         __PAGE_KERNEL |= _PAGE_GLOBAL;
230 #endif
231                                 }
232                                 set_pmd(pmd, __pmd(__pe));
233                                 continue;
234                         }
235                         if (!pmd_present(*pmd))
236                                 pte_base = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
237                         else
238                                 pte_base = pte_offset_kernel(pmd, 0);
239                         pte = pte_base;
240                         for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
241                                 vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
242                                 if (end && (vaddr >= end))
243                                         break;
244                                 if (vaddr < start)
245                                         continue;
246                                 *pte = mk_pte_phys(vaddr-start, PAGE_KERNEL);
247                         }
248                         set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
249                 }
250         }
251 }
252
253 static void __init pagetable_init (void)
254 {
255         unsigned long vaddr, end;
256         pgd_t *pgd_base;
257 #ifdef CONFIG_X86_PAE
258         int i;
259 #endif
260
261         /*
262          * This can be zero as well - no problem, in that case we exit
263          * the loops anyway due to the PTRS_PER_* conditions.
264          */
265         end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);
266
267         pgd_base = swapper_pg_dir;
268 #ifdef CONFIG_X86_PAE
269         /*
270          * It causes too many problems if there's no proper pmd set up
271          * for all 4 entries of the PGD - so we allocate all of them.
272          * PAE systems will not miss this extra 4-8K anyway ...
273          */
274         for (i = 0; i < PTRS_PER_PGD; i++) {
275                 pmd_t *pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
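                /* the 0x1 is _PAGE_PRESENT; PAE top-level entries take few other flags */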
276                 set_pgd(pgd_base + i, __pgd(__pa(pmd) + 0x1));
277         }
278 #endif
279         /*
280          * Set up lowmem-sized identity mappings at PAGE_OFFSET:
281          */
282         setup_identity_mappings(pgd_base, PAGE_OFFSET, end);
283
284         /*
285          * Add flat-mode identity-mappings - SMP needs it when
286          * starting up on an AP from real-mode. (In the non-PAE
287          * case we already have these mappings through head.S.)
288          * All user-space mappings are explicitly cleared after
289          * SMP startup.
290          */
291 #if defined(CONFIG_SMP) && defined(CONFIG_X86_PAE)
292         setup_identity_mappings(pgd_base, 0, 16*1024*1024);
293 #endif
294         remap_numa_kva();
295
296         /*
297          * Fixed mappings, only the page table structure has to be
298          * created - mappings will be set by set_fixmap():
299          */
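        /*
         * Passing end == 0 makes fixrange_init() run until vaddr wraps,
         * i.e. right up to the top of the 32-bit address space.
         */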
300         vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
301         fixrange_init(vaddr, 0, pgd_base);
302
303 #ifdef CONFIG_HIGHMEM
304         {
305                 pgd_t *pgd;
306                 pmd_t *pmd;
307                 pte_t *pte;
308
309                 /*
310                  * Permanent kmaps:
311                  */
312                 vaddr = PKMAP_BASE;
313                 fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
314
315                 pgd = swapper_pg_dir + pgd_index(vaddr);
316                 pmd = pmd_offset(pgd, vaddr);
317                 pte = pte_offset_kernel(pmd, vaddr);
318                 pkmap_page_table = pte;
319         }
320 #endif
321 }
322
323 /*
324  * Clear kernel pagetables in a PMD_SIZE-aligned range.
325  */
326 static void clear_mappings(pgd_t *pgd_base, unsigned long start, unsigned long end)
327 {
328         unsigned long vaddr;
329         pgd_t *pgd;
330         pmd_t *pmd;
331         int i, j;
332
333         pgd = pgd_base;
334
335         for (i = 0; i < PTRS_PER_PGD; pgd++, i++) {
336                 vaddr = i*PGDIR_SIZE;
337                 if (end && (vaddr >= end))
338                         break;
339                 pmd = pmd_offset(pgd, 0);
340                 for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
341                         vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
342                         if (end && (vaddr >= end))
343                                 break;
344                         if (vaddr < start)
345                                 continue;
346                         pmd_clear(pmd);
347                 }
348         }
349         flush_tlb_all();
350 }
351
352 void zap_low_mappings(void)
353 {
354         printk(KERN_INFO "zapping low mappings.\n");
355         /*
356          * Zap initial low-memory mappings.
357          */
358         clear_mappings(swapper_pg_dir, 0, 16*1024*1024);
359 }
360
361 #ifndef CONFIG_DISCONTIGMEM
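/*
 * Carve memory into zones: ZONE_DMA covers RAM below MAX_DMA_ADDRESS
 * (the 16MB ISA DMA limit), ZONE_NORMAL the rest of lowmem up to
 * max_low_pfn, and ZONE_HIGHMEM everything above that.
 */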
362 void __init zone_sizes_init(void)
363 {
364         unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
365         unsigned int max_dma, high, low;
366         
367         max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
368         low = max_low_pfn;
369         high = highend_pfn;
370         
371         if (low < max_dma)
372                 zones_size[ZONE_DMA] = low;
373         else {
374                 zones_size[ZONE_DMA] = max_dma;
375                 zones_size[ZONE_NORMAL] = low - max_dma;
376 #ifdef CONFIG_HIGHMEM
377                 zones_size[ZONE_HIGHMEM] = high - low;
378 #endif
379         }
380         free_area_init(zones_size);     
381 }
382 #else
383 extern void zone_sizes_init(void);
384 #endif /* !CONFIG_DISCONTIGMEM */
385
386 static int disable_nx __initdata = 0;
387 u64 __supported_pte_mask = ~_PAGE_NX;
388
389 /*
390  * noexec = on|off
391  *
392  * Control non executable mappings.
393  *
394  * on      Enable
395  * off     Disable (disables exec-shield too)
396  */
397 static int __init noexec_setup(char *str)
398 {
399         if (!strncmp(str, "on",2) && cpu_has_nx) {
400                 __supported_pte_mask |= _PAGE_NX;
401                 disable_nx = 0;
402         } else if (!strncmp(str,"off",3)) {
403                 disable_nx = 1;
404                 __supported_pte_mask &= ~_PAGE_NX;
405                 exec_shield = 0;
406         }
407         return 1;
408 }
409
410 __setup("noexec=", noexec_setup);
411
412 #ifdef CONFIG_X86_PAE
413 int nx_enabled = 0;
414
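/*
 * Probe for the NX (no-execute) feature: CPUID leaf 0x80000001, EDX bit
 * 20.  If it is present and not disabled on the command line, enable it
 * through the EFER MSR and widen the supported pte mask accordingly.
 */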
415 static void __init set_nx(void)
416 {
417         unsigned int v[4], l, h;
418
419         if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
420                 cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
421                 if ((v[3] & (1 << 20)) && !disable_nx) {
422                         rdmsr(MSR_EFER, l, h);
423                         l |= EFER_NX;
424                         wrmsr(MSR_EFER, l, h);
425                         nx_enabled = 1;
426                         __supported_pte_mask |= _PAGE_NX;
427                 }
428         }
429 }
430 /*
431  * Enables/disables executability of a given kernel page and
432  * returns the previous setting.
433  */
434 int __init set_kernel_exec(unsigned long vaddr, int enable)
435 {
436         pte_t *pte;
437         int ret = 1;
438
439         if (!nx_enabled)
440                 goto out;
441
442         pte = lookup_address(vaddr);
443         BUG_ON(!pte);
444
445         if (!pte_exec_kernel(*pte))
446                 ret = 0;
447
448         if (enable)
449                 pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
450         else
451                 pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
452         __flush_tlb_all();
453 out:
454         return ret;
455 }
456
457 #endif
458
459 /*
460  * paging_init() sets up the page tables - note that the first 8MB are
461  * already mapped by head.S.
462  *
463  * This routine also unmaps the page at virtual kernel address 0, so
464  * that we can trap those pesky NULL-reference errors in the kernel.
465  */
466 void __init paging_init(void)
467 {
468 #ifdef CONFIG_X86_PAE
469         set_nx();
470         if (nx_enabled)
471                 printk("NX (Execute Disable) protection: active\n");
472 #endif
473
474         pagetable_init();
475
476         load_cr3(swapper_pg_dir);
477
478 #ifdef CONFIG_X86_PAE
479         /*
480          * We will bail out later - printk doesn't work right now so
481          * the user would just see a hanging kernel.
482          */
483         if (cpu_has_pae)
484                 set_in_cr4(X86_CR4_PAE);
485 #endif
486         __flush_tlb_all();
487         /*
488          * Subtle. SMP is doing its boot stuff late (because it has to
489          * fork idle threads) - but it also needs low mappings for the
490          * protected-mode entry to work. We zap these entries only after
491          * the WP-bit has been tested.
492          */
493 #ifndef CONFIG_SMP
494         zap_low_mappings();
495 #endif
496         kmap_init();
497         zone_sizes_init();
498 }
499
500 /*
501  * Test if the WP bit works in supervisor mode. It isn't supported on 386s
502  * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
503  * used to involve black magic jumps to work around some nasty CPU bugs,
504  * but fortunately the switch to using exceptions got rid of all that.
505  */
506
507 void __init test_wp_bit(void)
508 {
509         printk("Checking if this processor honours the WP bit even in supervisor mode... ");
510
511         /* Any page-aligned address will do, the test is non-destructive */
512         __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
513         boot_cpu_data.wp_works_ok = do_test_wp_bit();
514         clear_fixmap(FIX_WP_TEST);
515
516         if (!boot_cpu_data.wp_works_ok) {
517                 printk("No.\n");
518 #ifdef CONFIG_X86_WP_WORKS_OK
519                 panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
520 #endif
521         } else {
522                 printk("Ok.\n");
523         }
524 }
525
526 #ifndef CONFIG_DISCONTIGMEM
527 static void __init set_max_mapnr_init(void)
528 {
529 #ifdef CONFIG_HIGHMEM
530         highmem_start_page = pfn_to_page(highstart_pfn);
531         max_mapnr = num_physpages = highend_pfn;
532 #else
533         max_mapnr = num_physpages = max_low_pfn;
534 #endif
535 }
536 #define __free_all_bootmem() free_all_bootmem()
537 #else
538 #define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
539 extern void set_max_mapnr_init(void);
540 #endif /* !CONFIG_DISCONTIGMEM */
541
542 static struct kcore_list kcore_mem, kcore_vmalloc; 
543
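/*
 * mem_init() finishes boot-time memory setup: it releases the bootmem
 * allocator's pages into the buddy allocator, initialises highmem pages,
 * counts reserved pages and prints the "Memory: ..." boot banner.
 */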
544 void __init mem_init(void)
545 {
546         extern int ppro_with_ram_bug(void);
547         int codesize, reservedpages, datasize, initsize;
548         int tmp;
549         int bad_ppro;
550
551 #ifndef CONFIG_DISCONTIGMEM
552         if (!mem_map)
553                 BUG();
554 #endif
555         
556         bad_ppro = ppro_with_ram_bug();
557
558 #ifdef CONFIG_HIGHMEM
559         /* check that fixmap and pkmap do not overlap */
560         if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
561                 printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
562                 printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
563                                 PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
564                 BUG();
565         }
566 #endif
567  
568         set_max_mapnr_init();
569
570 #ifdef CONFIG_HIGHMEM
571         high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
572 #else
573         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
574 #endif
575
576         /* this will put all low memory onto the freelists */
577         totalram_pages += __free_all_bootmem();
578
579         reservedpages = 0;
580         for (tmp = 0; tmp < max_low_pfn; tmp++)
581                 /*
582                  * Only count reserved RAM pages
583                  */
584                 if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
585                         reservedpages++;
586
587         set_highmem_pages_init(bad_ppro);
588
589         codesize =  (unsigned long) &_etext - (unsigned long) &_text;
590         datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
591         initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
592
593         kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
594         kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
595                    VMALLOC_END-VMALLOC_START);
596
597         printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
598                 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
599                 num_physpages << (PAGE_SHIFT-10),
600                 codesize >> 10,
601                 reservedpages << (PAGE_SHIFT-10),
602                 datasize >> 10,
603                 initsize >> 10,
604                 (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
605                );
606
607 #ifdef CONFIG_X86_PAE
608         if (!cpu_has_pae)
609                 panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
610 #endif
611         if (boot_cpu_data.wp_works_ok < 0)
612                 test_wp_bit();
613
614         entry_trampoline_setup();
615         default_ldt_page = virt_to_page(default_ldt);
616         load_LDT(&init_mm.context);
617 }
618
619 kmem_cache_t *pgd_cache, *pmd_cache, *kpmd_cache;
620
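/*
 * Slab caches for page-table pages.  With PAE (PTRS_PER_PMD > 1) pmds
 * come from pmd_cache; the separate kpmd cache is only used when
 * TASK_SIZE > PAGE_OFFSET, i.e. (presumably) with the 4G/4G split
 * pagetable layout carried by this tree, where kernel pmds are managed
 * separately from user pmds.
 */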
621 void __init pgtable_cache_init(void)
622 {
623         void (*ctor)(void *, kmem_cache_t *, unsigned long);
624         void (*dtor)(void *, kmem_cache_t *, unsigned long);
625
626         if (PTRS_PER_PMD > 1) {
627                 pmd_cache = kmem_cache_create("pmd",
628                                         PTRS_PER_PMD*sizeof(pmd_t),
629                                         PTRS_PER_PMD*sizeof(pmd_t),
630                                         0,
631                                         pmd_ctor,
632                                         NULL);
633                 if (!pmd_cache)
634                         panic("pgtable_cache_init(): cannot create pmd cache");
635
636                 if (TASK_SIZE > PAGE_OFFSET) {
637                         kpmd_cache = kmem_cache_create("kpmd",
638                                         PTRS_PER_PMD*sizeof(pmd_t),
639                                         PTRS_PER_PMD*sizeof(pmd_t),
640                                         0,
641                                         kpmd_ctor,
642                                         NULL);
643                         if (!kpmd_cache)
644                                 panic("pgtable_cache_init(): "
645                                                 "cannot create kpmd cache");
646                 }
647         }
648
649         if (PTRS_PER_PMD == 1 || TASK_SIZE <= PAGE_OFFSET)
650                 ctor = pgd_ctor;
651         else
652                 ctor = NULL;
653
654         if (PTRS_PER_PMD == 1 && TASK_SIZE <= PAGE_OFFSET)
655                 dtor = pgd_dtor;
656         else
657                 dtor = NULL;
658
659         pgd_cache = kmem_cache_create("pgd",
660                                 PTRS_PER_PGD*sizeof(pgd_t),
661                                 PTRS_PER_PGD*sizeof(pgd_t),
662                                 0,
663                                 ctor,
664                                 dtor);
665         if (!pgd_cache)
666                 panic("pgtable_cache_init(): Cannot create pgd cache");
667 }
668
669 /*
670  * This function cannot be __init, since exceptions don't work in that
671  * section.  Put this after the callers, so that it cannot be inlined.
672  */
673 static int noinline do_test_wp_bit(void)
674 {
675         char tmp_reg;
676         int flag;
677
678         __asm__ __volatile__(
679                 "       movb %0,%1      \n"
680                 "1:     movb %1,%0      \n"
681                 "       xorl %2,%2      \n"
682                 "2:                     \n"
683                 ".section __ex_table,\"a\"\n"
684                 "       .align 4        \n"
685                 "       .long 1b,2b     \n"
686                 ".previous              \n"
687                 :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
688                  "=q" (tmp_reg),
689                  "=r" (flag)
690                 :"2" (1)
691                 :"memory");
692         
693         return flag;
694 }
695
696 void free_initmem(void)
697 {
698         unsigned long addr;
699
700         addr = (unsigned long)(&__init_begin);
701         for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
702                 ClearPageReserved(virt_to_page(addr));
703                 set_page_count(virt_to_page(addr), 1);
704                 free_page(addr);
705                 totalram_pages++;
706         }
707         printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
708 }
709
710 #ifdef CONFIG_BLK_DEV_INITRD
711 void free_initrd_mem(unsigned long start, unsigned long end)
712 {
713         if (start < end)
714                 printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
715         for (; start < end; start += PAGE_SIZE) {
716                 ClearPageReserved(virt_to_page(start));
717                 set_page_count(virt_to_page(start), 1);
718                 free_page(start);
719                 totalram_pages++;
720         }
721 }
722 #endif