/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995 Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *
 */
#include <linux/config.h>

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
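/* Per-CPU TLB-shootdown state consumed by the generic unmap code
 * (tlb_gather_mmu() and friends in <asm/tlb.h>). */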
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern char _text;	/* start of kernel code, defined by linker */
extern int  data_start;
extern char _end;	/* end of BSS, defined by linker */
extern char __init_begin, __init_end;
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES];
bootmem_data_t bmem_data[MAX_NUMNODES];
unsigned char pfnnid_map[PFNNID_MAP_MAX];
#endif
static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};
static struct resource sysram_resources[MAX_PHYSMEM_RANGES];

static unsigned long max_pfn;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
int npmem_ranges;
#ifdef __LP64__
#define MAX_MEM         (~0UL)
#else /* !__LP64__ */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit = MAX_MEM;
static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;
	extern char saved_command_line[];

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
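/* 0x40000000 bytes is 1 GB; shifting by PAGE_SHIFT expresses that gap
 * in page frames, the unit used by the range-merging code below. */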
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;
	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */
	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}
#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif
	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}
	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}
	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */
	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n",
				mem_limit >> 20);
			if (mem_max == mem_limit)
				pmem_ranges[i].pages = 0;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);
#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif
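	/* From here on the contiguous-memory case is treated as a single
	 * range; the recorded holes are reserved in the bootmap further
	 * down so the allocator never hands them out. */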
	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bmem_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++)
		node_set_online(i);
#endif
	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do are to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */
	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}
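	/* Every bootmap page predicted by bootmem_bootmap_pages() above
	 * should have been consumed by init_bootmem_node(); anything else
	 * means the reservation below would cover the wrong area. */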
	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}
	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
			(unsigned long)(&_end - &_text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
				__pa(initrd_start),
				__pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
				initrd_reserve);
		}
	}
#endif
	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(&_end) - 1;
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}
void free_initmem(void)
{
	unsigned long addr;

#if 0
	printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
			(&__init_end - &__init_begin) >> 10);
	return;
#endif

	printk(KERN_INFO "Freeing unused kernel memory: ");

	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(&__init_begin, 0x00,
		(unsigned long)&__init_end - (unsigned long)&__init_begin);

	flush_data_cache();
	asm volatile("sync" : : );
	flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
	asm volatile("sync" : : );

	local_irq_enable();

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new led state on systems shipped LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n",
		(unsigned long)(&__init_end - &__init_begin) >> 10);
}
/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */
/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
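/* SET_MAP_OFFSET() rounds an address up past the next 32K boundary:
 * adding VM_MAP_OFFSET and then masking with ~(VM_MAP_OFFSET-1) yields,
 * e.g., 0x10010000 for 0x10009234 and 0x10010000 for an already-aligned
 * 0x10008000, so there is always a nonzero gap below the result. */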
void *vmalloc_start;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start;
#endif
void __init mem_init(void)
{
	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	mem_map = zone_table[ZONE_DMA]->zone_mem_map;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	printk(KERN_INFO "Memory: %luk available\n",
		num_physpages << (PAGE_SHIFT-10));
#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start +
				PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
}
int do_check_pgt_cache(int low, int high)
{
	return 0;
}

unsigned long *empty_zero_page;
void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:     %6ldkB\n",
		nr_swap_pages << (PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;

			p = node_mem_map(i) + j - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);
#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j, k;

		for (i = 0; i < npmem_ranges; i++) {
			for (j = 0; j < MAX_NR_ZONES; j++) {
				zl = NODE_DATA(i)->node_zonelists + j;

				printk("Zone list for zone %d on node %d: ", j, i);
				for (k = 0; zl->zones[k] != NULL; k++)
					printk("[%d/%s] ",
						zl->zones[k]->zone_pgdat->node_id,
						zl->zones[k]->name);
				printk("\n");
			}
		}
	}
#endif
}
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
			     unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)&_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);
#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
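	/* start_pmd/start_pte are the table slots of the first page to map:
	 * the PMD index comes from the address bits above PMD_SHIFT, the
	 * PTE index from the bits between PAGE_SHIFT and PMD_SHIFT. */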
	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),
					PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0),
							PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;
	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}
static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}
#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif /* CONFIG_HPUX */

extern void flush_tlb_all_local(void);
void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };

		/* We have an IOMMU, so all memory can go into a single
		   ZONE_DMA zone. */
		zones_size[ZONE_DMA] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can
		   initialize the zone */
		{
			int j;

			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, NODE_DATA(i), zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}
#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else
/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */
#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
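/* space_id[] and dirty_space_id[] below are bitmaps with one bit per
 * space id, packing NR_SPACE_IDS bits into an array of longs. */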
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		if (free_space_ids == 0)
			BUG();
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	if (*dirty_space_offset & (1L << index))
		BUG(); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}
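/* A space id is thus in one of three states: free (bit clear in
 * space_id), live (bit set in space_id only), or dirty (bit set in
 * both bitmaps): released, but possibly still cached in some TLB.
 * Recycling XORs the dirty mask into space_id, which clears exactly
 * the dirty bits and returns those ids to the free pool. */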
#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}
}
static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++)
			space_id[i] ^= dirty_array[i];

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}
#else /* !CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif
/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse = 0;
void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		if (recycle_inuse) {
			BUG();  /* FIXME: Use a semaphore/wait queue here */
		}
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local();
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
#if 0
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
			(end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
#endif
}
#endif