X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fmips%2Fsgi-ip27%2Fip27-memory.c;h=0a44a98d7adc78efd05e1d0e89765872164f32c0;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=e3cdb958265f0e08875567c62513ac4967710b8f;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index e3cdb9582..0a44a98d7 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -3,17 +3,20 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2000 by Ralf Baechle + * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2000 by Silicon Graphics, Inc. * Copyright (C) 2004 by Christoph Hellwig * * On SGI IP27 the ARC memory configuration data is completly bogus but * alternate easier to use mechanisms are available. */ +#include #include #include #include #include +#include +#include #include #include #include @@ -36,8 +39,226 @@ static short __initdata slot_lastfilled_cache[MAX_COMPACT_NODES]; static unsigned short __initdata slot_psize_cache[MAX_COMPACT_NODES][MAX_MEM_SLOTS]; static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES]; -struct pglist_data *node_data[MAX_COMPACT_NODES]; -struct hub_data *hub_data[MAX_COMPACT_NODES]; +struct node_data *__node_data[MAX_COMPACT_NODES]; + +EXPORT_SYMBOL(__node_data); + +static int fine_mode; + +static int is_fine_dirmode(void) +{ + return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) + >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE); +} + +static hubreg_t get_region(cnodeid_t cnode) +{ + if (fine_mode) + return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT; + else + return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT; +} + +static hubreg_t region_mask; + +static void gen_region_mask(hubreg_t *region_mask) +{ + cnodeid_t cnode; + + (*region_mask) = 0; + for_each_online_node(cnode) { + (*region_mask) |= 1ULL << get_region(cnode); + } +} + +#define rou_rflag rou_flags + +static int router_distance; + +static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth) +{ + klrou_t *router; + lboard_t *brd; + int port; + + if (router_a->rou_rflag == 1) + return; + + if (depth >= router_distance) + return; + + router_a->rou_rflag = 1; + + for (port = 1; port <= MAX_ROUTER_PORTS; port++) { + if (router_a->rou_port[port].port_nasid == INVALID_NASID) + continue; + + brd = (lboard_t *)NODE_OFFSET_TO_K0( + router_a->rou_port[port].port_nasid, + router_a->rou_port[port].port_offset); + + if (brd->brd_type == KLTYPE_ROUTER) { + router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]); + if (router == router_b) { + if (depth < router_distance) + router_distance = depth; + } + else + router_recurse(router, router_b, depth + 1); + } + } + + router_a->rou_rflag = 0; +} + +unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; + +static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) +{ + klrou_t *router, *router_a = NULL, *router_b = NULL; + lboard_t *brd, *dest_brd; + cnodeid_t cnode; + nasid_t nasid; + int port; + + /* Figure out which routers nodes in question are connected to */ + for_each_online_node(cnode) { + nasid = COMPACT_TO_NASID_NODEID(cnode); + + if (nasid == -1) continue; + + brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid), + KLTYPE_ROUTER); + + if (!brd) + continue; + + do { + if (brd->brd_flags & DUPLICATE_BOARD) + continue; + + router = (klrou_t 
*)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]); + router->rou_rflag = 0; + + for (port = 1; port <= MAX_ROUTER_PORTS; port++) { + if (router->rou_port[port].port_nasid == INVALID_NASID) + continue; + + dest_brd = (lboard_t *)NODE_OFFSET_TO_K0( + router->rou_port[port].port_nasid, + router->rou_port[port].port_offset); + + if (dest_brd->brd_type == KLTYPE_IP27) { + if (dest_brd->brd_nasid == nasid_a) + router_a = router; + if (dest_brd->brd_nasid == nasid_b) + router_b = router; + } + } + + } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER))); + } + + if (router_a == NULL) { + printk("node_distance: router_a NULL\n"); + return -1; + } + if (router_b == NULL) { + printk("node_distance: router_b NULL\n"); + return -1; + } + + if (nasid_a == nasid_b) + return 0; + + if (router_a == router_b) + return 1; + + router_distance = 100; + router_recurse(router_a, router_b, 2); + + return router_distance; +} + +static void __init init_topology_matrix(void) +{ + nasid_t nasid, nasid2; + cnodeid_t row, col; + + for (row = 0; row < MAX_COMPACT_NODES; row++) + for (col = 0; col < MAX_COMPACT_NODES; col++) + __node_distances[row][col] = -1; + + for_each_online_node(row) { + nasid = COMPACT_TO_NASID_NODEID(row); + for_each_online_node(col) { + nasid2 = COMPACT_TO_NASID_NODEID(col); + __node_distances[row][col] = + compute_node_distance(nasid, nasid2); + } + } +} + +static void __init dump_topology(void) +{ + nasid_t nasid; + cnodeid_t cnode; + lboard_t *brd, *dest_brd; + int port; + int router_num = 0; + klrou_t *router; + cnodeid_t row, col; + + printk("************** Topology ********************\n"); + + printk(" "); + for_each_online_node(col) + printk("%02d ", col); + printk("\n"); + for_each_online_node(row) { + printk("%02d ", row); + for_each_online_node(col) + printk("%2d ", node_distance(row, col)); + printk("\n"); + } + + for_each_online_node(cnode) { + nasid = COMPACT_TO_NASID_NODEID(cnode); + + if (nasid == -1) continue; + + brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid), + KLTYPE_ROUTER); + + if (!brd) + continue; + + do { + if (brd->brd_flags & DUPLICATE_BOARD) + continue; + printk("Router %d:", router_num); + router_num++; + + router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]); + + for (port = 1; port <= MAX_ROUTER_PORTS; port++) { + if (router->rou_port[port].port_nasid == INVALID_NASID) + continue; + + dest_brd = (lboard_t *)NODE_OFFSET_TO_K0( + router->rou_port[port].port_nasid, + router->rou_port[port].port_offset); + + if (dest_brd->brd_type == KLTYPE_IP27) + printk(" %d", dest_brd->brd_nasid); + if (dest_brd->brd_type == KLTYPE_ROUTER) + printk(" r"); + } + printk("\n"); + + } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) ); + } +} static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot) { @@ -126,6 +347,60 @@ static pfn_t __init slot_psize_compute(cnodeid_t node, int slot) } } +static void __init mlreset(void) +{ + int i; + + master_nasid = get_nasid(); + fine_mode = is_fine_dirmode(); + + /* + * Probe for all CPUs - this creates the cpumask and sets up the + * mapping tables. We need to do this as early as possible. 
+ */ +#ifdef CONFIG_SMP + cpu_node_probe(); +#endif + + init_topology_matrix(); + dump_topology(); + + gen_region_mask(®ion_mask); + + setup_replication_mask(); + + /* + * Set all nodes' calias sizes to 8k + */ + for_each_online_node(i) { + nasid_t nasid; + + nasid = COMPACT_TO_NASID_NODEID(i); + + /* + * Always have node 0 in the region mask, otherwise + * CALIAS accesses get exceptions since the hub + * thinks it is a node 0 address. + */ + REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1)); +#ifdef CONFIG_REPLICATE_EXHANDLERS + REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K); +#else + REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0); +#endif + +#ifdef LATER + /* + * Set up all hubs to have a big window pointing at + * widget 0. Memory mode, widget 0, offset 0 + */ + REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN), + ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) | + (0 << IIO_ITTE_WIDGET_SHIFT))); +#endif + } +} + static void __init szmem(void) { pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */ @@ -134,7 +409,7 @@ static void __init szmem(void) num_physpages = 0; - for (node = 0; node < numnodes; node++) { + for_each_online_node(node) { ignore = nodebytes = 0; for (slot = 0; slot < MAX_MEM_SLOTS; slot++) { slot_psize = slot_psize_compute(node, slot); @@ -164,6 +439,45 @@ static void __init szmem(void) } } +static void __init node_mem_init(cnodeid_t node) +{ + pfn_t slot_firstpfn = slot_getbasepfn(node, 0); + pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0); + pfn_t slot_freepfn = node_getfirstfree(node); + struct pglist_data *pd; + unsigned long bootmap_size; + + /* + * Allocate the node data structures on the node first. + */ + __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); + + pd = NODE_DATA(node); + pd->bdata = &plat_node_bdata[node]; + + cpus_clear(hub_data(node)->h_cpus); + + slot_freepfn += PFN_UP(sizeof(struct pglist_data) + + sizeof(struct hub_data)); + + bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn, + slot_firstpfn, slot_lastpfn); + free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, + (slot_lastpfn - slot_firstpfn) << PAGE_SHIFT); + reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, + ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size); +} + +/* + * A node with nothing. We use it to avoid any special casing in + * node_to_cpumask + */ +static struct node_data null_node = { + .hub = { + .h_cpus = CPU_MASK_NONE + } +}; + /* * Currently, the intranode memory hole support assumes that each slot * contains at least 32 MBytes of memory. We assume all bootmem data @@ -176,31 +490,12 @@ void __init prom_meminit(void) mlreset(); szmem(); - for (node = 0; node < numnodes; node++) { - pfn_t slot_firstpfn = slot_getbasepfn(node, 0); - pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0); - pfn_t slot_freepfn = node_getfirstfree(node); - unsigned long bootmap_size; - - /* - * Allocate the node data structures on the node first. 
- */ - node_data[node] = __va(slot_freepfn << PAGE_SHIFT); - node_data[node]->bdata = &plat_node_bdata[node]; - - hub_data[node] = (struct hub_data *)(node_data[node] + 1); - - cpus_clear(hub_data[node]->h_cpus); - - slot_freepfn += PFN_UP(sizeof(struct pglist_data) + - sizeof(struct hub_data)); - - bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn, - slot_firstpfn, slot_lastpfn); - free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, - (slot_lastpfn - slot_firstpfn) << PAGE_SHIFT); - reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, - ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size); + for (node = 0; node < MAX_COMPACT_NODES; node++) { + if (node_online(node)) { + node_mem_init(node); + continue; + } + __node_data[node] = &null_node; } } @@ -220,7 +515,7 @@ void __init paging_init(void) pagetable_init(); - for (node = 0; node < numnodes; node++) { + for_each_online_node(node) { pfn_t start_pfn = slot_getbasepfn(node, 0); pfn_t end_pfn = node_getmaxclick(node) + 1; @@ -240,7 +535,7 @@ void __init mem_init(void) high_memory = (void *) __va(num_physpages << PAGE_SHIFT); - for (node = 0; node < numnodes; node++) { + for_each_online_node(node) { unsigned slot, numslots; struct page *end, *p;
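
The router_recurse()/compute_node_distance() pair added above computes the minimum number of router hops between two nodes by a depth-bounded, backtracking search over the router boards, seeded at depth 2 because two nodes hanging off different routers are already two hops apart. Below is a minimal standalone sketch of the same search over a made-up adjacency matrix; the router graph, the array names and the node pair are hypothetical, and on real hardware the edges come from walking the klconfig router boards.

/*
 * Standalone sketch of the hop-count search done by router_recurse().
 * The adjacency matrix is invented: routers 0-1-2-3-4 form a ring.
 */
#include <stdio.h>

#define NR_ROUTERS 5

static const int adj[NR_ROUTERS][NR_ROUTERS] = {
	{ 0, 1, 0, 0, 1 },
	{ 1, 0, 1, 0, 0 },
	{ 0, 1, 0, 1, 0 },
	{ 0, 0, 1, 0, 1 },
	{ 1, 0, 0, 1, 0 },
};

static int visited[NR_ROUTERS];	/* plays the role of rou_rflag */
static int best_distance;	/* plays the role of router_distance */

static void recurse(int a, int b, int depth)
{
	int port;

	/* same pruning as router_recurse(): skip visited routers and
	 * abandon paths already no shorter than the best one found */
	if (visited[a] || depth >= best_distance)
		return;

	visited[a] = 1;
	for (port = 0; port < NR_ROUTERS; port++) {
		if (!adj[a][port])
			continue;
		if (port == b) {
			if (depth < best_distance)
				best_distance = depth;
		} else {
			recurse(port, b, depth + 1);
		}
	}
	visited[a] = 0;
}

int main(void)
{
	/* different routers start at distance 2, exactly as
	 * compute_node_distance() seeds router_recurse(a, b, 2) */
	best_distance = 100;
	recurse(0, 3, 2);
	printf("distance = %d\n", best_distance);	/* prints 3: path 0 -> 4 -> 3 */
	return 0;
}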
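
Several loops in szmem(), prom_meminit(), paging_init() and mem_init() switch from counting up to numnodes to for_each_online_node(), which visits only the nodes present in the online map. The toy macro below mimics that behaviour over a plain bitmap; it is a simplified stand-in for illustration, not the kernel's nodemask implementation.

/* Toy online-node iterator over a hypothetical 8-entry bitmap. */
#include <stdio.h>

#define MAX_NODES 8

static unsigned long node_online_map = 0x0b;	/* nodes 0, 1 and 3 online */

#define node_online(n)		((node_online_map >> (n)) & 1)
#define for_each_online_node(n)	\
	for ((n) = 0; (n) < MAX_NODES; (n)++) \
		if (node_online(n))

int main(void)
{
	int node;

	for_each_online_node(node)
		printf("initialising node %d\n", node);	/* 0, 1, 3 */
	return 0;
}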
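
node_mem_init() places the node's own pglist_data and hub_data in the first free page of slot 0, bumps slot_freepfn past them with PFN_UP(), and then reserves everything from the slot base up to that point plus the bootmem bitmap that init_bootmem_node() just laid down. The toy program below only reproduces that arithmetic; the page size, pfn values and structure sizes are invented for illustration and are not the real IP27 numbers.

/* Worked example of the node_mem_init() reservation arithmetic. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long slot_firstpfn = 0x20000;	/* like slot_getbasepfn(node, 0) */
	unsigned long slot_freepfn  = 0x20010;	/* like node_getfirstfree(node) */
	unsigned long node_data_bytes = 2048 + 128;	/* pglist_data + hub_data, illustrative */
	unsigned long bootmap_size = 8192;	/* as returned by init_bootmem_node(), illustrative */

	/* __node_data[node] sits at slot_freepfn; advance past it */
	slot_freepfn += PFN_UP(node_data_bytes);

	printf("node data occupies %lu page(s), first free pfn now %#lx\n",
	       PFN_UP(node_data_bytes), slot_freepfn);

	/* everything below slot_freepfn, plus the bootmem bitmap placed
	 * there, ends up reserved by reserve_bootmem_node() */
	printf("reserved at slot base: %lu bytes\n",
	       ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size);
	return 0;
}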
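
prom_meminit() now points every offline node at the static null_node, whose CPU mask is empty, so code such as node_to_cpumask() never has to test for a NULL per-node pointer. Below is a minimal sketch of that shared-empty-object idea, with made-up types and masks rather than the kernel's node_data and cpumask machinery.

/* Sketch of the null_node trick: offline slots share one empty descriptor. */
#include <stdio.h>

#define MAX_NODES 8

struct node_desc {
	unsigned long cpu_mask;		/* stand-in for the h_cpus cpumask */
};

static struct node_desc online_nodes[2] = {
	{ .cpu_mask = 0x3 },		/* node 0: CPUs 0-1 */
	{ .cpu_mask = 0xc },		/* node 1: CPUs 2-3 */
};

/* the one shared "node with nothing" */
static struct node_desc null_node = { .cpu_mask = 0 };

static struct node_desc *node_data[MAX_NODES];

static unsigned long node_to_cpumask(int node)
{
	/* no NULL check needed: offline nodes simply yield an empty mask */
	return node_data[node]->cpu_mask;
}

int main(void)
{
	int node;

	for (node = 0; node < MAX_NODES; node++)
		node_data[node] = (node < 2) ? &online_nodes[node] : &null_node;

	for (node = 0; node < MAX_NODES; node++)
		printf("node %d cpumask %#lx\n", node, node_to_cpumask(node));
	return 0;
}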