/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/config.h>
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>

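/*
 * When CONFIG_VIRTUAL_MEM_MAP is enabled, count_dma_pages() below tallies
 * into num_dma_physpages the pages that actually exist under
 * MAX_DMA_ADDRESS, so that paging_init() can size the hole in ZONE_DMA.
 */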
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long num_dma_physpages;
#endif
/**
 * show_mem - display a memory statistics summary
 *
 * Just walks the pages in the system and describes where they're allocated.
 */
void
show_mem (void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk("Mem-info:\n");
        show_free_areas();

        printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                if (!pfn_valid(i))
                        continue;
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (page_count(mem_map + i))
                        shared += page_count(mem_map + i) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
        printk("%ld pages in page table cache\n", pgtable_cache_size);
}
/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_max_pfn - adjust the maximum page number callback
 * @start: start of range
 * @end: end of range
 * @arg: address of pointer to global max_pfn variable
 *
 * Passed as a callback function to efi_memmap_walk() to determine the highest
 * available page frame number in the system.
 */
int
find_max_pfn (unsigned long start, unsigned long end, void *arg)
{
        unsigned long *max_pfnp = arg, pfn;

        pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
        if (pfn > *max_pfnp)
                *max_pfnp = pfn;
        return 0;
}
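/*
 * Note: the memmap walker hands this callback identity-mapped virtual
 * addresses, which is why the end address is rounded up to a page boundary
 * and rebased against PAGE_OFFSET before being shifted down to a frame
 * number.  find_memory() below registers it via efi_memmap_walk().
 */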
/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: unused callback data
 *
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start.  This address must be page-aligned.
 */
int
find_bootmap_location (unsigned long start, unsigned long end, void *arg)
{
        unsigned long needed = *(unsigned long *)arg;
        unsigned long range_start, range_end, free_start;
        int i;

#if IGNORE_PFN0
        /* honor IGNORE_PFN0: never hand out the first page of memory */
        if (start == PAGE_OFFSET) {
                start += PAGE_SIZE;
                if (start >= end)
                        return 0;
        }
#endif

        free_start = PAGE_OFFSET;

        for (i = 0; i < num_rsvd_regions; i++) {
                range_start = max(start, free_start);
                range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

                if (range_end <= range_start)
                        continue;       /* skip over empty range */

                if (range_end - range_start >= needed) {
                        bootmap_start = __pa(range_start);
                        return 1;       /* done */
                }

                /* nothing more available in this segment */
                if (range_end == end)
                        return 0;

                free_start = PAGE_ALIGN(rsvd_region[i].end);
        }
        return 0;
}
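/*
 * find_bootmap_location() scans the free gaps between reserved regions
 * inside each memory range handed to it and takes the first gap large
 * enough to hold the bootmap.  The scan only works because rsvd_region[]
 * is sorted by ascending start address by the time find_memory() starts
 * the walk below.
 */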
/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void
find_memory (void)
{
        unsigned long bootmap_size;

        reserve_memory();

        /* first find highest page frame number */
        max_pfn = 0;
        efi_memmap_walk(find_max_pfn, &max_pfn);

        /* how many bytes to cover all the pages */
        bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

        /* look for a location to hold the bootmap */
        bootmap_start = ~0UL;
        efi_memmap_walk(find_bootmap_location, &bootmap_size);
        if (bootmap_start == ~0UL)
                panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

        bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);

        /* Free all available memory, then mark bootmem-map as being in use. */
        efi_memmap_walk(filter_rsvd_memory, free_bootmem);
        reserve_bootmem(bootmap_start, bootmap_size);

        find_initrd();
}
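/*
 * The ordering above matters: all usable memory is released to the bootmem
 * allocator first, and only then is the bootmap itself marked reserved, so
 * the map can never be handed back out as free pages.
 */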
#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *
per_cpu_init (void)
{
        void *cpu_data;
        int cpu;

        /*
         * get_free_pages() cannot be used before cpu_init() done.  BSP
         * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
         * get_zeroed_page().
         */
        if (smp_processor_id() == 0) {
                cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
                                           PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
                        __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
                        cpu_data += PERCPU_PAGE_SIZE;
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
                }
        }
        return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */
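/*
 * Layout produced by per_cpu_init(): one PERCPU_PAGE_SIZE chunk per
 * possible CPU, each seeded with a copy of the per-cpu template from the
 * kernel image.  __per_cpu_offset[cpu] is the delta that relocates a
 * canonical per-cpu symbol address into that CPU's private copy.
 */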
static int
count_pages (u64 start, u64 end, void *arg)
{
        unsigned long *count = arg;

        *count += (end - start) >> PAGE_SHIFT;
        return 0;
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
static int
count_dma_pages (u64 start, u64 end, void *arg)
{
        unsigned long *count = arg;

        /* a range that straddles MAX_DMA_ADDRESS is not counted at all */
        if (end <= MAX_DMA_ADDRESS)
                *count += (end - start) >> PAGE_SHIFT;
        return 0;
}
#endif
/*
 * Set up the page tables.
 */
void
paging_init (void)
{
        unsigned long max_dma;
        unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_VIRTUAL_MEM_MAP
        unsigned long zholes_size[MAX_NR_ZONES];
        unsigned long max_gap;
#endif
        /* initialize mem_map[] */

        memset(zones_size, 0, sizeof(zones_size));

        num_physpages = 0;
        efi_memmap_walk(count_pages, &num_physpages);

        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
#ifdef CONFIG_VIRTUAL_MEM_MAP
        memset(zholes_size, 0, sizeof(zholes_size));

        num_dma_physpages = 0;
        efi_memmap_walk(count_dma_pages, &num_dma_physpages);

        if (max_low_pfn < max_dma) {
                zones_size[ZONE_DMA] = max_low_pfn;
                zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
        } else {
                zones_size[ZONE_DMA] = max_dma;
                zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
                if (num_physpages > num_dma_physpages) {
                        zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
                        zholes_size[ZONE_NORMAL] =
                                ((max_low_pfn - max_dma) -
                                 (num_physpages - num_dma_physpages));
                }
        }
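        /*
         * zholes_size[] is simply "zone span minus pages that really
         * exist": every pfn inside a zone's range that EFI did not report
         * as usable memory counts as a hole.
         */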
        max_gap = 0;
        efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
        if (max_gap < LARGE_GAP) {
                vmem_map = (struct page *) 0;
                free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
                                    zholes_size);
                mem_map = contig_page_data.node_mem_map;
        } else {
                unsigned long map_size;

                /* allocate virtual_mem_map */

                map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
                vmalloc_end -= map_size;
                vmem_map = (struct page *) vmalloc_end;
                efi_memmap_walk(create_mem_map_page_table, 0);

                free_area_init_node(0, &contig_page_data, vmem_map, zones_size,
                                    0, zholes_size);

                mem_map = contig_page_data.node_mem_map;
                printk("Virtual mem_map starts at 0x%p\n", mem_map);
        }
#else /* !CONFIG_VIRTUAL_MEM_MAP */
        if (max_low_pfn < max_dma)
                zones_size[ZONE_DMA] = max_low_pfn;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
        }
        free_area_init(zones_size);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
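
/*
 * zero_page_memmap_ptr caches the struct page of empty_zero_page so that
 * ZERO_PAGE() can return it directly instead of redoing the translation
 * on every use.
 */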