X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fia64%2Fkernel%2Ftopology.c;fp=arch%2Fia64%2Fkernel%2Ftopology.c;h=6e5eea19fa672bb0199f997e0888fc91d98ad77c;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=5629b45e89c6bc50892c4a8e69b66d0f0be67d86;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 5629b45e8..6e5eea19f 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -9,10 +9,9 @@
  *		2002/08/07 Erich Focht <efocht@ess.nec.de>
  * Populate cpu entries in sysfs for non-numa systems as well
  *	Intel Corporation - Ashok Raj
- * 02/27/2006 Zhang, Yanmin
- *	Populate cpu cache entries in sysfs for cpu cache info
  */

+#include <linux/config.h>
 #include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -20,34 +19,47 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/nodemask.h>
-#include <linux/notifier.h>
 #include <asm/mmzone.h>
 #include <asm/numa.h>
 #include <asm/cpu.h>

+#ifdef CONFIG_NUMA
+static struct node *sysfs_nodes;
+#endif
 static struct ia64_cpu *sysfs_cpus;

 int arch_register_cpu(int num)
 {
-#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
+	struct node *parent = NULL;
+
+#ifdef CONFIG_NUMA
+	parent = &sysfs_nodes[cpu_to_node(num)];
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_ACPI
 	/*
 	 * If CPEI cannot be re-targetted, and this is
 	 * CPEI target, then dont create the control file
 	 */
 	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
 		sysfs_cpus[num].cpu.no_control = 1;
-	map_cpu_to_node(num, node_cpuid[num].nid);
 #endif

-	return register_cpu(&sysfs_cpus[num].cpu, num);
+	return register_cpu(&sysfs_cpus[num].cpu, num, parent);
 }

 #ifdef CONFIG_HOTPLUG_CPU
 void arch_unregister_cpu(int num)
 {
-	unregister_cpu(&sysfs_cpus[num].cpu);
-	unmap_cpu_from_node(num, cpu_to_node(num));
+	struct node *parent = NULL;
+
+#ifdef CONFIG_NUMA
+	int node = cpu_to_node(num);
+	parent = &sysfs_nodes[node];
+#endif /* CONFIG_NUMA */
+
+	return unregister_cpu(&sysfs_cpus[num].cpu, parent);
 }
 EXPORT_SYMBOL(arch_register_cpu);
 EXPORT_SYMBOL(arch_unregister_cpu);
@@ -59,18 +71,26 @@ static int __init topology_init(void)
 	int i, err = 0;

 #ifdef CONFIG_NUMA
+	sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
+	if (!sysfs_nodes) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	/*
 	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
 	 */
 	for_each_online_node(i) {
-		if ((err = register_one_node(i)))
+		if ((err = register_node(&sysfs_nodes[i], i, 0)))
 			goto out;
 	}
 #endif

 	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
-	if (!sysfs_cpus)
-		panic("kzalloc in topology_init failed - NR_CPUS too big?");
+	if (!sysfs_cpus) {
+		err = -ENOMEM;
+		goto out;
+	}

 	for_each_present_cpu(i) {
 		if((err = arch_register_cpu(i)))
@@ -81,364 +101,3 @@ out:
 }

 subsys_initcall(topology_init);
-
-
-/*
- * Export cpu cache information through sysfs
- */
-
-/*
- * A bunch of string array to get pretty printing
- */
-static const char *cache_types[] = {
-	"",			/* not used */
-	"Instruction",
-	"Data",
-	"Unified"		/* unified */
-};
-
-static const char *cache_mattrib[]={
-	"WriteThrough",
-	"WriteBack",
-	"",			/* reserved */
-	""			/* reserved */
-};
-
-struct cache_info {
-	pal_cache_config_info_t	cci;
-	cpumask_t shared_cpu_map;
-	int level;
-	int type;
-	struct kobject kobj;
-};
-
-struct cpu_cache_info {
-	struct cache_info *cache_leaves;
-	int num_cache_leaves;
-	struct kobject kobj;
-};
-
-static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
-#define LEAF_KOBJECT_PTR(x,y)	(&all_cpu_cache_info[x].cache_leaves[y])
-
-#ifdef CONFIG_SMP
-static void cache_shared_cpu_map_setup( unsigned int cpu,
-		struct cache_info * this_leaf)
-{
-	pal_cache_shared_info_t	csi;
-	int num_shared, i = 0;
-	unsigned int j;
-
-	if (cpu_data(cpu)->threads_per_core <= 1 &&
-		cpu_data(cpu)->cores_per_socket <= 1) {
-		cpu_set(cpu, this_leaf->shared_cpu_map);
-		return;
-	}
-
-	if (ia64_pal_cache_shared_info(this_leaf->level,
-					this_leaf->type,
-					0,
-					&csi) != PAL_STATUS_SUCCESS)
-		return;
-
-	num_shared = (int) csi.num_shared;
-	do {
-		for_each_possible_cpu(j)
-			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
-				&& cpu_data(j)->core_id == csi.log1_cid
-				&& cpu_data(j)->thread_id == csi.log1_tid)
-				cpu_set(j, this_leaf->shared_cpu_map);
-
-		i++;
-	} while (i < num_shared &&
-		ia64_pal_cache_shared_info(this_leaf->level,
-					this_leaf->type,
-					i,
-					&csi) == PAL_STATUS_SUCCESS);
-}
-#else
-static void cache_shared_cpu_map_setup(unsigned int cpu,
-		struct cache_info * this_leaf)
-{
-	cpu_set(cpu, this_leaf->shared_cpu_map);
-	return;
-}
-#endif
-
-static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
-					char *buf)
-{
-	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
-}
-
-static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
-					char *buf)
-{
-	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
-}
-
-static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
-{
-	return sprintf(buf,
-			"%s\n",
-			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
-}
-
-static ssize_t show_size(struct cache_info *this_leaf, char *buf)
-{
-	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
-}
-
-static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
-{
-	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
-	number_of_sets /= this_leaf->cci.pcci_assoc;
-	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
-
-	return sprintf(buf, "%u\n", number_of_sets);
-}
-
-static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
-{
-	ssize_t	len;
-	cpumask_t shared_cpu_map;
-
-	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
-	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
-	len += sprintf(buf+len, "\n");
-	return len;
-}
-
-static ssize_t show_type(struct cache_info *this_leaf, char *buf)
-{
-	int type = this_leaf->type + this_leaf->cci.pcci_unified;
-	return sprintf(buf, "%s\n", cache_types[type]);
-}
-
-static ssize_t show_level(struct cache_info *this_leaf, char *buf)
-{
-	return sprintf(buf, "%u\n", this_leaf->level);
-}
-
-struct cache_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct cache_info *, char *);
-	ssize_t (*store)(struct cache_info *, const char *, size_t count);
-};
-
-#ifdef define_one_ro
-	#undef define_one_ro
-#endif
-#define define_one_ro(_name) \
-	static struct cache_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(level);
-define_one_ro(type);
-define_one_ro(coherency_line_size);
-define_one_ro(ways_of_associativity);
-define_one_ro(size);
-define_one_ro(number_of_sets);
-define_one_ro(shared_cpu_map);
-define_one_ro(attributes);
-
-static struct attribute * cache_default_attrs[] = {
-	&type.attr,
-	&level.attr,
-	&coherency_line_size.attr,
-	&ways_of_associativity.attr,
-	&attributes.attr,
-	&size.attr,
-	&number_of_sets.attr,
-	&shared_cpu_map.attr,
-	NULL
-};
-
-#define to_object(k) container_of(k, struct cache_info, kobj)
-#define to_attr(a) container_of(a, struct cache_attr, attr)
-
-static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
-{
-	struct cache_attr *fattr = to_attr(attr);
-	struct cache_info *this_leaf = to_object(kobj);
-	ssize_t ret;
-
-	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
-	return ret;
-}
-
-static struct sysfs_ops cache_sysfs_ops = {
-	.show   = cache_show
-};
-
-static struct kobj_type cache_ktype = {
-	.sysfs_ops	= &cache_sysfs_ops,
-	.default_attrs	= cache_default_attrs,
-};
-
-static struct kobj_type cache_ktype_percpu_entry = {
-	.sysfs_ops	= &cache_sysfs_ops,
-};
-
-static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
-{
-	kfree(all_cpu_cache_info[cpu].cache_leaves);
-	all_cpu_cache_info[cpu].cache_leaves = NULL;
-	all_cpu_cache_info[cpu].num_cache_leaves = 0;
-	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
-	return;
-}
-
-static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
-{
-	u64 i, levels, unique_caches;
-	pal_cache_config_info_t cci;
-	int j;
-	s64 status;
-	struct cache_info *this_cache;
-	int num_cache_leaves = 0;
-
-	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
-		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
-		return -1;
-	}
-
-	this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
-			GFP_KERNEL);
-	if (this_cache == NULL)
-		return -ENOMEM;
-
-	for (i=0; i < levels; i++) {
-		for (j=2; j >0 ; j--) {
-			if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
-					PAL_STATUS_SUCCESS)
-				continue;
-
-			this_cache[num_cache_leaves].cci = cci;
-			this_cache[num_cache_leaves].level = i + 1;
-			this_cache[num_cache_leaves].type = j;
-
-			cache_shared_cpu_map_setup(cpu,
-					&this_cache[num_cache_leaves]);
-			num_cache_leaves ++;
-		}
-	}
-
-	all_cpu_cache_info[cpu].cache_leaves = this_cache;
-	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
-
-	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
-
-	return 0;
-}
-
-/* Add cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
-{
-	unsigned int cpu = sys_dev->id;
-	unsigned long i, j;
-	struct cache_info *this_object;
-	int retval = 0;
-	cpumask_t oldmask;
-
-	if (all_cpu_cache_info[cpu].kobj.parent)
-		return 0;
-
-	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	if (unlikely(retval))
-		return retval;
-
-	retval = cpu_cache_sysfs_init(cpu);
-	set_cpus_allowed(current, oldmask);
-	if (unlikely(retval < 0))
-		return retval;
-
-	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
-	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
-	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
-	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
-
-	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
-		this_object = LEAF_KOBJECT_PTR(cpu,i);
-		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
-		kobject_set_name(&(this_object->kobj), "index%1lu", i);
-		this_object->kobj.ktype = &cache_ktype;
-		retval = kobject_register(&(this_object->kobj));
-		if (unlikely(retval)) {
-			for (j = 0; j < i; j++) {
-				kobject_unregister(
-					&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
-			}
-			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
-			cpu_cache_sysfs_exit(cpu);
-			break;
-		}
-	}
-	return retval;
-}
-
-/* Remove cache interface for CPU device */
-static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
-{
-	unsigned int cpu = sys_dev->id;
-	unsigned long i;
-
-	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
-		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
-
-	if (all_cpu_cache_info[cpu].kobj.parent) {
-		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
-		memset(&all_cpu_cache_info[cpu].kobj,
-			0,
-			sizeof(struct kobject));
-	}
-
-	cpu_cache_sysfs_exit(cpu);
-
-	return 0;
-}
-
-/*
- * When a cpu is hot-plugged, do a check and initiate
- * cache kobject if necessary
- */
-static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
-		unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
-
-	sys_dev = get_cpu_sysdev(cpu);
-	switch (action) {
-	case CPU_ONLINE:
-		cache_add_dev(sys_dev);
-		break;
-	case CPU_DEAD:
-		cache_remove_dev(sys_dev);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata cache_cpu_notifier =
-{
-	.notifier_call = cache_cpu_callback
-};
-
-static int __cpuinit cache_sysfs_init(void)
-{
-	int i;
-
-	for_each_online_cpu(i) {
-		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
-				(void *)(long)i);
-	}
-
-	register_hotcpu_notifier(&cache_cpu_notifier);
-
-	return 0;
-}
-
-device_initcall(cache_sysfs_init);
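
---

Note: the coherency_line_size and number_of_sets attributes removed above are
pure arithmetic on the PAL cache-config fields: the line size is
2^pcci_line_size bytes, and the set count is size / (associativity * line
size). The following standalone userspace sketch reproduces that arithmetic;
the pcci_* values here are hypothetical stand-ins for what
ia64_pal_cache_config_info() would report (a 256K, 8-way cache with 64-byte
lines), not values read from hardware.

#include <stdio.h>

int main(void)
{
	/* Hypothetical PAL cache-config values, not read from hardware. */
	unsigned pcci_cache_size = 256 * 1024;	/* total size in bytes */
	unsigned pcci_assoc = 8;		/* ways of associativity */
	unsigned pcci_line_size = 6;		/* log2 of the line size */

	/* Same arithmetic as the removed show_* handlers. */
	unsigned line_size = 1u << pcci_line_size;
	unsigned sets = pcci_cache_size / pcci_assoc / line_size;

	printf("coherency_line_size: %u\n", line_size);	/* prints 64 */
	printf("number_of_sets: %u\n", sets);		/* prints 512 */
	return 0;
}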
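
The kobject hierarchy built by cache_add_dev() appeared in sysfs as one
"cache" directory per CPU device with an "indexN" subdirectory per cache
leaf, each exposing the read-only attributes listed in cache_default_attrs.
Below is a small userspace sketch that walks that layout; the paths assume
the interface the removed code created, so on a kernel without it the
directory simply does not exist.

#include <stdio.h>

static void show(const char *dir, const char *attr)
{
	char path[256], buf[128];
	FILE *f;

	/* Path layout assumes the cache kobjects registered above. */
	snprintf(path, sizeof(path), "%s/%s", dir, attr);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%-24s%s", attr, buf);	/* attribute value ends in '\n' */
	fclose(f);
}

int main(void)
{
	const char *dir = "/sys/devices/system/cpu/cpu0/cache/index0";
	const char *attrs[] = {
		"level", "type", "size", "coherency_line_size",
		"number_of_sets", "ways_of_associativity",
		"attributes", "shared_cpu_map",
	};
	unsigned i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		show(dir, attrs[i]);
	return 0;
}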