#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
+#include <asm/mca.h>
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long num_dma_physpages;
printk("%d reserved pages\n", reserved);
printk("%d pages shared\n", shared);
printk("%d pages swap cached\n", cached);
- printk("%ld pages in page table cache\n", pgtable_cache_size);
+ printk("%ld pages in page table cache\n",
+ pgtable_quicklist_total_size());
}
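
These printk lines are contig.c's show_mem(): the patch swaps the single global pgtable_cache_size counter for pgtable_quicklist_total_size(), which must sum per-CPU free lists instead of reading one word. A minimal sketch of that summation, with the per-CPU storage faked as a plain array (names and layout here are illustrative, not the kernel's real per-CPU machinery):

	#define NR_CPUS	4

	/* Hypothetical per-CPU counters: each CPU tracks how many page
	 * table pages currently sit on its quicklist. */
	static unsigned long pgtable_quicklist_size[NR_CPUS];

	static unsigned long pgtable_quicklist_total_size(void)
	{
		unsigned long total = 0;
		int cpu;

		/* One pass over all CPUs yields the system-wide figure
		 * that show_mem() prints. */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			total += pgtable_quicklist_size[cpu];
		return total;
	}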
/* physical address where the bootmem map is located */
* Find a place to put the bootmap and return its starting address in
* bootmap_start. This address must be page-aligned.
*/
-int
+static int __init
find_bootmap_location (unsigned long start, unsigned long end, void *arg)
{
unsigned long needed = *(unsigned long *)arg;
range_start = max(start, free_start);
range_end = min(end, rsvd_region[i].start & PAGE_MASK);
+ free_start = PAGE_ALIGN(rsvd_region[i].end);
+
if (range_end <= range_start)
continue; /* skip over empty range */
if (range_end - range_start >= needed) {
bootmap_start = __pa(range_start);
- return 1; /* done */
+ return -1; /* done */
}
/* nothing more available in this segment */
if (range_end == end)
return 0;
-
- free_start = PAGE_ALIGN(rsvd_region[i].end);
}
return 0;
}
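
Two fixes land in find_bootmap_location(). First, the free_start update is hoisted above the size check so that it still happens when the loop hits `continue` on an empty range; before, skipping an empty range left free_start stale. Second, the success return flips from 1 to -1: as I read ia64's efi_memmap_walk(), only a negative callback return stops the walk early, so returning 1 let the walk continue and overwrite bootmap_start with a later, higher range. A sketch of that callback contract:

	typedef int (*freemem_callback_t)(unsigned long start,
					  unsigned long end, void *arg);

	/* Sketch of the walk contract assumed above: ranges are visited
	 * in order, and only a negative return stops the walk early.
	 * With `return 1`, every later range would still be offered. */
	static void walk_ranges(freemem_callback_t cb, void *arg,
				const unsigned long (*ranges)[2], int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (cb(ranges[i][0], ranges[i][1], arg) < 0)
				return;	/* callback signalled "done" */
	}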
* Walk the EFI memory map and find usable memory for the system, taking
* into account reserved areas.
*/
-void
+void __init
find_memory (void)
{
unsigned long bootmap_size;
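
The signature changes sprinkled through the patch are one idea applied four times: find_bootmap_location(), find_memory() and (below) paging_init() become __init, and per_cpu_init() becomes __cpuinit, so boot-only text can be discarded once it is unreachable. Roughly, the annotations are section attributes along these lines (simplified from the real include/linux/init.h definitions, which also keep __cpuinit text alive when CPU hotplug is configured):

	#define __init	__attribute__((__section__(".init.text")))

	static int __init one_time_setup(void)
	{
		/* Runs during boot only; the .init.text section holding
		 * it is freed by free_initmem() afterwards. */
		return 0;
	}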
*
* Allocate and set up per-cpu data areas.
*/
-void *
+void * __cpuinit
per_cpu_init (void)
{
void *cpu_data;
int cpu;
+ static int first_time = 1;
/*
* get_free_pages() cannot be used before cpu_init() is done. The
* BSP therefore allocates NR_CPUS per-CPU pages up front so that
* the APs never have to call get_zeroed_page() themselves.
*/
- if (smp_processor_id() == 0) {
+ if (first_time) {
+ first_time = 0;
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
for (cpu = 0; cpu < NR_CPUS; cpu++) {
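
per_cpu_init() trades the `smp_processor_id() == 0` guard for a static first_time latch. The bootmem allocation must happen exactly once, and keying it to CPU 0's identity breaks when the function runs on the boot CPU more than once (e.g. across CPU hotplug); the flag expresses "first call only" directly. The idiom in isolation, with the allocator stubbed out:

	extern void *early_alloc(void);	/* hypothetical stand-in for __alloc_bootmem() */

	static void *boot_area;		/* set once, reused on later calls */

	static void *per_cpu_area(void)
	{
		static int first_time = 1;

		if (first_time) {
			first_time = 0;
			/* Early boot is effectively single-threaded, so
			 * a plain flag needs no locking. */
			boot_area = early_alloc();
		}
		return boot_area;
	}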
{
unsigned long *count = arg;
- if (end <= MAX_DMA_ADDRESS)
- *count += (end - start) >> PAGE_SHIFT;
+ if (start < MAX_DMA_ADDRESS)
+ *count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
return 0;
}
#endif
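
The count_dma_pages() change fixes a clamping bug: the old `end <= MAX_DMA_ADDRESS` test discarded every page of a region that merely straddles the DMA boundary, undercounting num_dma_physpages. The new test counts the part below the boundary. The arithmetic in isolation, as a standalone program with illustrative constants (the boundary value is made up; ia64's real MAX_DMA_ADDRESS is not this literal):

	#include <stdio.h>

	#define PAGE_SHIFT	14		/* 16KB pages, as on ia64 */
	#define DMA_LIMIT	0x100000UL	/* illustrative boundary */

	/* Count only the pages of [start, end) that lie below the limit. */
	static unsigned long dma_pages(unsigned long start, unsigned long end)
	{
		if (start >= DMA_LIMIT)
			return 0;		/* entirely above the boundary */
		if (end > DMA_LIMIT)
			end = DMA_LIMIT;	/* clamp a straddling region */
		return (end - start) >> PAGE_SHIFT;
	}

	int main(void)
	{
		/* A straddling region: the old test counted 0 pages here;
		 * clamping counts the 64 pages of the low half. */
		printf("%lu pages\n", dma_pages(0x0, 0x200000UL));
		return 0;
	}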
* Set up the page tables.
*/
-void
+void __init
paging_init (void)
{
unsigned long max_dma;
efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
if (max_gap < LARGE_GAP) {
vmem_map = (struct page *) 0;
- free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
+ free_area_init_node(0, NODE_DATA(0), zones_size, 0,
zholes_size);
- mem_map = contig_page_data.node_mem_map;
} else {
unsigned long map_size;
map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
vmalloc_end -= map_size;
vmem_map = (struct page *) vmalloc_end;
- efi_memmap_walk(create_mem_map_page_table, 0);
+ efi_memmap_walk(create_mem_map_page_table, NULL);
- free_area_init_node(0, &contig_page_data, vmem_map, zones_size,
+ NODE_DATA(0)->node_mem_map = vmem_map;
+ free_area_init_node(0, NODE_DATA(0), zones_size,
0, zholes_size);
- mem_map = contig_page_data.node_mem_map;
printk("Virtual mem_map starts at 0x%p\n", mem_map);
}
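
The paging_init() hunks track a core-MM interface change: free_area_init_node() no longer takes the mem_map pointer as an argument. A caller with a preallocated array (the virtually mapped vmem_map here) now stores it in the pgdat beforehand; a caller without one leaves node_mem_map NULL and, as I understand the companion core change, the core allocates the map itself and maintains the global mem_map for node 0, which is why the explicit `mem_map = contig_page_data.node_mem_map` copy-backs can go while the printk still has a valid pointer. The NODE_DATA(0) spelling is equivalent on flat-memory kernels of this era:

	/* From include/linux/mmzone.h when CONFIG_DISCONTIGMEM is off:
	 * NODE_DATA() names the single static node, so NODE_DATA(0)
	 * and &contig_page_data are the same object. */
	extern struct pglist_data contig_page_data;
	#define NODE_DATA(nid)	(&contig_page_data)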
#else /* !CONFIG_VIRTUAL_MEM_MAP */