/*
 * Track per-node information needed to setup the boot memory allocator,
 * the per-node areas, and the real VM.
 */
struct early_node_data {
struct ia64_node_data *node_data;
+ pg_data_t *pgdat;
unsigned long pernode_addr;
unsigned long pernode_size;
struct bootmem_data bootmem_data;
static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;
-static pg_data_t *pgdat_list[MAX_NUMNODES];
-
/*
* To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */

/*
 * early_nr_cpus_node - return number of cpus on a given node
 *
 * We can't use nr_cpus_node() yet because acpi_boot_init() (which builds
 * the node_to_cpu_mask array) hasn't been called yet. Note that node 0
 * will also count all non-existent cpus.
*/
-static int __meminit early_nr_cpus_node(int node)
+static int __init early_nr_cpus_node(int node)
{
int cpu, n = 0;
* compute_pernodesize - compute size of pernode data
* @node: the node id.
*/
-static unsigned long __meminit compute_pernodesize(int node)
+static unsigned long __init compute_pernodesize(int node)
{
unsigned long pernodesize = 0, cpus;
pernode += PERCPU_PAGE_SIZE * cpus;
pernode += node * L1_CACHE_BYTES;
- pgdat_list[node] = __va(pernode);
+ mem_data[node].pgdat = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
mem_data[node].node_data = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
- pgdat_list[node]->bdata = bdp;
+ mem_data[node].pgdat->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
cpu_data = per_cpu_node_setup(cpu_data, node);
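fill_pernode() above is a simple bump allocator: per-cpu space first, then a pad strided by node number so different nodes' structures do not alias in the cache, then the cache-aligned pg_data_t and node_data. A minimal userspace sketch of the same layout; every constant and type below is an illustrative stand-in, not the kernel's:

#include <stdio.h>
#include <stdint.h>

#define L1_CACHE_BYTES      128
#define L1_CACHE_ALIGN(x)   (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))
#define PERCPU_PAGE_SIZE    (64 * 1024)

struct fake_pg_data   { char pad[2048]; };
struct fake_node_data { char pad[256]; };

/* Lay out one node's pernode area the way fill_pernode() does:
 * per-cpu areas, node-strided pad, then the aligned structures. */
static uintptr_t layout_pernode(uintptr_t pernode, int node, int cpus)
{
    pernode += (uintptr_t)PERCPU_PAGE_SIZE * cpus;
    pernode += (uintptr_t)node * L1_CACHE_BYTES;    /* stride by node */

    printf("node %d: pg_data_t at %#lx\n", node, (unsigned long)pernode);
    pernode += L1_CACHE_ALIGN(sizeof(struct fake_pg_data));

    printf("node %d: node_data at %#lx\n", node, (unsigned long)pernode);
    pernode += L1_CACHE_ALIGN(sizeof(struct fake_node_data));

    return pernode;    /* first free byte after the pernode area */
}

int main(void)
{
    layout_pernode(0x1000000, 0, 4);
    layout_pernode(0x2000000, 1, 4);
    return 0;
}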
static int __init free_node_bootmem(unsigned long start, unsigned long len,
int node)
{
- free_bootmem_node(pgdat_list[node], start, len);
+ free_bootmem_node(mem_data[node].pgdat, start, len);
return 0;
}
int node;
for_each_online_node(node) {
- pg_data_t *pdp = pgdat_list[node];
+ pg_data_t *pdp = mem_data[node].pgdat;
if (node_isset(node, memory_less_mask))
continue;
}
}
-static void __meminit scatter_node_data(void)
-{
- pg_data_t **dst;
- int node;
-
- /*
- * for_each_online_node() can't be used at here.
- * node_online_map is not set for hot-added nodes at this time,
- * because we are halfway through initialization of the new node's
- * structures. If for_each_online_node() is used, a new node's
- * pg_data_ptrs will be not initialized. Insted of using it,
- * pgdat_list[] is checked.
- */
- for_each_node(node) {
- if (pgdat_list[node]) {
- dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
- memcpy(dst, pgdat_list, sizeof(pgdat_list));
- }
- }
-}
-
/**
* initialize_pernode_data - fixup per-cpu & per-node pointers
*
*/
static void __init initialize_pernode_data(void)
{
+ pg_data_t *pgdat_list[MAX_NUMNODES];
int cpu, node;
- scatter_node_data();
+ for_each_online_node(node)
+ pgdat_list[node] = mem_data[node].pgdat;
+ /* Copy the pg_data_t list to each node's node_data */
+ for_each_online_node(node) {
+ memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
+ sizeof(pgdat_list));
+ }
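The restored loop copies the complete pgdat_list array into every node's node_data, so later lookups of any node's pg_data_t resolve from node-local memory rather than a single shared global. A sketch of that replication pattern; the names here are illustrative, not the kernel's:

#include <string.h>

#define MAX_NODES 8

struct pgdat;                               /* opaque per-node descriptor */

struct node_local_data {
    struct pgdat *pg_data_ptrs[MAX_NODES];  /* node-local replica */
};

/* Give every node a private copy of the global pointer table so that
 * NODE_DATA()-style lookups never have to touch a remote node. */
void replicate_pgdat_table(struct node_local_data *local[],
                           struct pgdat *global[], int nr_nodes)
{
    for (int node = 0; node < nr_nodes; node++)
        memcpy(local[node]->pg_data_ptrs, global,
               nr_nodes * sizeof(global[0]));
}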
#ifdef CONFIG_SMP
/* Set the node_data pointer for each per-cpu struct */
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (bestnode == -1)
bestnode = anynode;
- ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
+ ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
return ptr;
}
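memory_less_node_alloc(), partially shown above, places a memory-less node's pernode area on the closest node that actually has memory, falling back to any node with memory. A sketch of that selection step, assuming a caller-supplied distance callback and memory mask in place of the kernel's node_distance() and memory_less_mask:

#include <limits.h>

/* Prefer the closest node with memory; remember any other memory node
 * as a fallback, mirroring the bestnode/anynode logic above. */
int pick_memory_node(int nid, const int *has_memory, int nr_nodes,
                     int (*distance)(int from, int to))
{
    int best = INT_MAX, bestnode = -1, anynode = -1;

    for (int node = 0; node < nr_nodes; node++) {
        if (!has_memory[node])
            continue;                 /* skip memory-less nodes */
        if (distance(nid, node) < best) {
            best = distance(nid, node);
            bestnode = node;
        } else {
            anynode = node;           /* some node with memory */
        }
    }

    if (bestnode == -1)
        bestnode = anynode;           /* fall back to any memory node */

    return bestnode;
}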
+/**
+ * pgdat_insert - insert the pgdat into global pgdat_list
+ * @pgdat: the pgdat for a node.
+ */
+static void __init pgdat_insert(pg_data_t *pgdat)
+{
+ pg_data_t *prev = NULL, *next;
+
+ for_each_pgdat(next)
+ if (pgdat->node_id < next->node_id)
+ break;
+ else
+ prev = next;
+
+ if (prev) {
+ prev->pgdat_next = pgdat;
+ pgdat->pgdat_next = next;
+ } else {
+ pgdat->pgdat_next = pgdat_list;
+ pgdat_list = pgdat;
+ }
+}
+
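pgdat_insert() keeps the global pgdat_list sorted by node id with one forward scan: remember the predecessor, stop at the first entry with a larger id, and splice the new pgdat in between. The same pattern on a self-contained list:

#include <stdio.h>
#include <stdlib.h>

struct node {
    int id;
    struct node *next;
};

static struct node *list_head;

/* Scan for the first element with a larger id, tracking the
 * predecessor, then splice n in (mirrors pgdat_insert above). */
static void sorted_insert(struct node *n)
{
    struct node *prev = NULL, *next;

    for (next = list_head; next; next = next->next) {
        if (n->id < next->id)
            break;
        prev = next;
    }

    if (prev) {
        prev->next = n;
        n->next = next;
    } else {
        n->next = list_head;    /* new head: empty list or smallest id */
        list_head = n;
    }
}

int main(void)
{
    int ids[] = { 2, 0, 3, 1 };

    for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
        struct node *n = malloc(sizeof(*n));
        n->id = ids[i];
        n->next = NULL;
        sorted_insert(n);
    }

    for (struct node *n = list_head; n; n = n->next)
        printf("%d ", n->id);    /* prints: 0 1 2 3 */
    printf("\n");
    return 0;
}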
/**
* memory_less_nodes - allocate and initialize CPU only nodes pernode
* information.
pernodesize = mem_data[node].pernode_size;
map = pernode + pernodesize;
- init_bootmem_node(pgdat_list[node],
+ init_bootmem_node(mem_data[node].pgdat,
map>>PAGE_SHIFT,
bdp->node_boot_start>>PAGE_SHIFT,
bdp->node_low_pfn);
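The restored init_bootmem_node() call places the node's bootmem bitmap immediately after the pernode area (map = pernode + pernodesize). A sketch of the arithmetic, assuming classic bootmem's one bit per page; PAGE_SHIFT and the pfn range here are made-up:

#include <stdio.h>

#define PAGE_SHIFT 14    /* 16K pages, illustrative */

/* Compute where the bitmap lives and how many bytes it needs for
 * pages in [start_pfn, end_pfn): one bit per page, rounded up. */
unsigned long bootmem_bitmap_bytes(unsigned long pernode,
                                   unsigned long pernodesize,
                                   unsigned long start_pfn,
                                   unsigned long end_pfn)
{
    unsigned long map = pernode + pernodesize;    /* bitmap base address */
    unsigned long bytes = (end_pfn - start_pfn + 7) / 8;

    printf("bitmap at pfn %lu, %lu bytes\n", map >> PAGE_SHIFT, bytes);
    return bytes;
}

int main(void)
{
    bootmem_bitmap_bytes(0x1000000, 0x50000, 0x400, 0x40000);
    return 0;
}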
* find_pernode_space() does most of this already, we just need to set
* local_per_cpu_offset
*/
-void __cpuinit *per_cpu_init(void)
+void *per_cpu_init(void)
{
int cpu;
- static int first_time = 1;
-
if (smp_processor_id() != 0)
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
- if (first_time) {
- first_time = 0;
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- }
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
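per_cpu_init() addresses a CPU's per-cpu area as the linker template base (__per_cpu_start) plus that CPU's slot in __per_cpu_offset[]; the revert simply repopulates local_per_cpu_offset on every boot-CPU call instead of gating it with a first_time flag. A userspace sketch of the offset scheme, with plain arrays standing in for the linker-provided region:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS     4
#define PERCPU_SIZE 4096

static char percpu_template[PERCPU_SIZE];      /* like __per_cpu_start */
static char percpu_area[NR_CPUS][PERCPU_SIZE]; /* one private copy per cpu */
static intptr_t per_cpu_offset[NR_CPUS];       /* like __per_cpu_offset */

/* Resolve a cpu's private copy: template base plus the cpu's offset. */
static void *per_cpu_base(int cpu)
{
    return (void *)((intptr_t)percpu_template + per_cpu_offset[cpu]);
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        per_cpu_offset[cpu] =
            (intptr_t)percpu_area[cpu] - (intptr_t)percpu_template;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu %d per-cpu area at %p\n", cpu, per_cpu_base(cpu));
    return 0;
}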
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_online_pgdat(pgdat) {
+ for_each_pgdat(pgdat) {
unsigned long present;
unsigned long flags;
int shared = 0, cached = 0, reserved = 0;
struct page *page;
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
- else {
- i = vmemmap_find_next_valid_pfn(pgdat->node_id,
- i) - 1;
+ else
continue;
- }
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
- sizeof(struct page));
+ vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
pfn_offset, zholes_size);
}
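The vmalloc_end change above reserves PAGE_ALIGN(max_low_pfn * sizeof(struct page)) bytes at the top of the vmalloc area for the virtual mem_map, dropping the MAX_ORDER_NR_PAGES rounding. A worked example with hypothetical numbers (16K pages, a 64-byte struct page, 2^20 low pfns):

#include <stdio.h>

#define PAGE_SIZE     16384ULL    /* 16K pages, illustrative */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long long max_low_pfn = 1ULL << 20;   /* hypothetical pfn limit */
    unsigned long long sizeof_page = 64;           /* illustrative sizeof(struct page) */
    unsigned long long vmalloc_end = 0xa000000000000000ULL;  /* made-up top */

    /* carve the virtual mem_map off the top of the vmalloc area */
    vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof_page);
    printf("vmem_map at %#llx, costing %llu MB of virtual space\n",
           vmalloc_end, (max_low_pfn * sizeof_page) >> 20);
    return 0;
}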
- zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
-}
-
-pg_data_t *arch_alloc_nodedata(int nid)
-{
- unsigned long size = compute_pernodesize(nid);
-
- return kzalloc(size, GFP_KERNEL);
-}
-
-void arch_free_nodedata(pg_data_t *pgdat)
-{
- kfree(pgdat);
-}
-
-void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
-{
- pgdat_list[update_node] = update_pgdat;
- scatter_node_data();
-}
+ /*
+ * Make memory-less nodes members of the known node list.
+ */
+ for_each_node_mask(node, memory_less_mask)
+ pgdat_insert(mem_data[node].pgdat);
+
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
+}