/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 */
#include <linux/config.h>

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern char _text;	/* start of kernel code, defined by linker */
extern int  data_start;
extern char _end;	/* end of BSS, defined by linker */
extern char __init_begin, __init_end;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_PHYSMEM_RANGES];
bootmem_data_t bmem_data[MAX_PHYSMEM_RANGES];
unsigned char *chunkmap;
unsigned int maxchunkmap;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES];

static unsigned long max_pfn;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
int npmem_ranges;

#ifdef __LP64__
#define MAX_MEM		(~0UL)
#else /* !__LP64__ */
#define MAX_MEM		(3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit = MAX_MEM;
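/* 3584 MB is 3.5 GB: on 32-bit kernels the amount of RAM that can be mapped
 * is capped so that some kernel address space remains available for other
 * uses (see the "preserve enough kernel address space" comment inside
 * setup_bootmem() below). */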

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;
	extern char saved_command_line[];

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
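/* memparse() accepts an optional K/M/G suffix, so booting with e.g.
 * "mem=512M" caps usable RAM at 512 MB; the resulting mem_limit is applied
 * range by range in setup_bootmem() below. */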

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
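/* 0x40000000 bytes is 1 GB, so MAX_GAP is 1 GB expressed in page frames:
 * setup_bootmem() discards any ranges that start more than that beyond the
 * end of the previous range. */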

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

#ifdef CONFIG_DISCONTIGMEM
	/*
	 * The below is still true as of 2.4.2. If this is ever fixed,
	 * we can remove this warning!
	 */

	printk(KERN_WARNING "\n\n");
	printk(KERN_WARNING "CONFIG_DISCONTIGMEM is enabled, which is probably a mistake. This\n");
	printk(KERN_WARNING "option can lead to heavy swapping, even when there are gigabytes\n");
	printk(KERN_WARNING "of free memory.\n\n");
#endif

#ifdef __LP64__

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP). If CONFIG_DISCONTIGMEM wasn't implemented so
	 * poorly, we would recommend enabling that option, but,
	 * until it is fixed, this is the best way to go.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld Mb\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

#endif /* __LP64__ */

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld Mb\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld Mb\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM

	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
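	/* The bootmem bitmaps (one bit per page frame) start on the first page
	 * boundary after the kernel image (_end); bootmap_pfn below advances
	 * past each node's bitmap as init_bootmem_node() is called. */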

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < npmem_ranges; i++)
		node_data[i].pg_data.bdata = &bmem_data[i];
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				(start_pfn << PAGE_SHIFT),
				(npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
			(unsigned long)(&_end - &_text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
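	/* The first reservation above covers low memory still owned by firmware:
	 * everything below PAGE0->mem_free plus a further 32 KB assumed to be in
	 * use by the PDC console IODC code. */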

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(&_end) - 1;
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
	/* FIXME: */
#if 0
	printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
			(&__init_end - &__init_begin) >> 10);
	return;
#else
	unsigned long addr;

	printk(KERN_INFO "Freeing unused kernel memory: ");

	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	{
		memset(&__init_begin, 0x00,
			(unsigned long)&__init_end - (unsigned long)&__init_begin);

		asm volatile("sync" : : );
		flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
		asm volatile("sync" : : );
	}

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
#endif
}

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
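/* SET_MAP_OFFSET() rounds an address up to the next 32 KB boundary and always
 * leaves a gap: e.g. 0x11000 becomes 0x18000, and an already aligned 0x10000
 * still becomes 0x18000. */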

EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start;
#endif

void __init mem_init(void)
{
	int i;

	high_memory = __va((max_pfn << PAGE_SHIFT));
	max_mapnr = (virt_to_page(high_memory - 1) - mem_map) + 1;

	num_physpages = 0;
	mem_map = zone_table[0]->zone_mem_map;
	for (i = 0; i < npmem_ranges; i++)
		num_physpages += free_all_bootmem_node(NODE_DATA(i));
	totalram_pages = num_physpages;

	printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
}
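/* Resulting layout above MAP_START when the PCXL DMA ops are in use:
 *   MAP_START -> (up to 32K hole) -> PCXL DMA map -> (hole) -> vmalloc area;
 * otherwise the vmalloc area starts right after the first hole. */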

int do_check_pgt_cache(int low, int high)
{
	return 0;
}

unsigned long *empty_zero_page;

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
		nr_swap_pages << (PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!atomic_read(&mem_map[i].count))
			free++;
		else
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);
}
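/*
 * map_pages() below builds kernel page table entries covering the physical
 * range [start_paddr, start_paddr + size) at virtual address start_vaddr
 * with protection "pgprot", allocating pmd/pte pages from bootmem as needed.
 * Kernel text (between _text and data_start) is mapped read-only, except for
 * the fault vector and the gateway page.
 */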

static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)&_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
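	/* start_pmd/start_pte are the pmd and pte slots that correspond to
	 * start_vaddr, so the loops below may begin mid-table for the very
	 * first pmd/pte page of the mapping. */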

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_val(*pmd) = _PAGE_TABLE |
					(unsigned long) pg_table;

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

#if !defined(CONFIG_STI_CONSOLE)
#warning STI console should explicitly allocate executable pages but does not
#endif
				/* Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) pg_table;

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

extern void flush_tlb_all_local(void);

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0, };

		zones_size[ZONE_DMA] = pmem_ranges[i].pages;
		free_area_init_node(i, NODE_DATA(i), NULL, zones_size,
			(pmem_ranges[i].start_pfn << PAGE_SHIFT), 0);
	}

#ifdef CONFIG_DISCONTIGMEM
	/*
	 * Initialize support for virt_to_page() macro.
	 *
	 * Note that MAX_ADDRESS is the largest virtual address that
	 * we can map. However, since we map all physical memory into
	 * the kernel address space, it also has an effect on the maximum
	 * physical address we can map (MAX_ADDRESS - PAGE_OFFSET).
	 */

	maxchunkmap = MAX_ADDRESS >> CHUNKSHIFT;
	chunkmap = (unsigned char *)alloc_bootmem(maxchunkmap);

	for (i = 0; i < maxchunkmap; i++)
		chunkmap[i] = BADCHUNK;

	for (i = 0; i < npmem_ranges; i++) {

		ADJ_NODE_MEM_MAP(i) = NODE_MEM_MAP(i) - pmem_ranges[i].start_pfn;
		{
			unsigned long chunk_paddr;
			unsigned long end_paddr;
			int chunknum;

			chunk_paddr = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			end_paddr = chunk_paddr + (pmem_ranges[i].pages << PAGE_SHIFT);
			chunk_paddr &= CHUNKMASK;

			chunknum = (int)CHUNKNUM(chunk_paddr);
			while (chunk_paddr < end_paddr) {
				if (chunknum >= maxchunkmap)
					goto badchunkmap1;
				if (chunkmap[chunknum] != BADCHUNK)
					goto badchunkmap2;
				chunkmap[chunknum] = (unsigned char)i;
				chunk_paddr += CHUNKSZ;
				chunknum++;
			}
		}
	}

	return;

badchunkmap1:
	panic("paging_init: Physical address exceeds maximum address space!\n");
badchunkmap2:
	panic("paging_init: Collision in chunk map array. CHUNKSZ needs to be smaller\n");
#endif
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static spinlock_t sid_lock = SPIN_LOCK_UNLOCKED;
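/* space_id is a bitmap of space ids currently in use (bit 0 is pre-set so
 * that space 0 is never handed out); dirty_space_id marks ids that were
 * freed but may still have stale TLB entries. SID_ARRAY_SIZE is simply
 * NR_SPACE_IDS bits packed into longs. */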

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		if (free_space_ids == 0)
			BUG();
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	if (*dirty_space_offset & (1L << index))
		BUG(); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}
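/* free_sid() above only marks an id dirty; recycle_sids() below flips the
 * dirty bits back out of space_id with XOR (each dirty bit is guaranteed to
 * still be set there), returning those ids to the free pool once the TLB
 * has been flushed. */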

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif /* CONFIG_SMP */

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */
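/* On SMP the recycling is rate-limited: dirty ids are only harvested once
 * more than RECYCLE_THRESHOLD (half of NR_SPACE_IDS) have accumulated, and
 * recycle_inuse guards the single static scratch array used to carry them
 * across the unlocked flush. */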

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse = 0;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		if (recycle_inuse) {
			BUG();  /* FIXME: Use a semaphore/wait queue here */
		}
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}

#else

void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local();
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);