arch/ia64/sn/kernel/sn2/sn_hwperf.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
 *
 * SGI Altix topology and hardware performance monitoring API.
 * Mark Goodwin <markgw@sgi.com>.
 *
 * Creates /proc/sgi_sn/sn_topology (read-only) to export
 * info about Altix nodes, routers, CPUs and NumaLink
 * interconnection/topology.
 *
 * Also creates a dynamic misc device named "sn_hwperf"
 * that supports an ioctl interface to call down into SAL
 * to discover hw objects, topology and to read/write
 * memory-mapped registers, e.g. for performance monitoring.
 * The "sn_hwperf" device is registered only after the procfs
 * file is first opened, i.e. only if/when it's needed.
 *
 * This API is used by SGI Performance Co-Pilot and other
 * tools, see http://oss.sgi.com/projects/pcp
 */
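
/*
 * Example (illustrative only, not part of the driver): the topology file
 * is plain text and can be read like any other procfs file. A minimal
 * user-space sketch using only the standard C library:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/sgi_sn/sn_topology", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */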

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/cpumask.h>
#include <linux/smp_lock.h>
#include <asm/processor.h>
#include <asm/topology.h>
#include <asm/smp.h>
#include <asm/semaphore.h>
#include <asm/segment.h>
#include <asm/uaccess.h>
#include <asm/sal.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm/sn/sn2/sn_hwperf.h>

static void *sn_hwperf_salheap = NULL;
static int sn_hwperf_obj_cnt = 0;
static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
static int sn_hwperf_init(void);
static DECLARE_MUTEX(sn_hwperf_init_mutex);

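/*
 * Enumerate all hardware objects known to SAL. On success (return value 0)
 * *ret points to a vmalloc()'d array of *nobj entries which the caller is
 * responsible for vfree()'ing; on failure *ret is NULL.
 */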
static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
{
	int e;
	u64 sz;
	struct sn_hwperf_object_info *objbuf = NULL;

	if ((e = sn_hwperf_init()) < 0) {
		printk(KERN_ERR "sn_hwperf_init failed: err %d\n", e);
		goto out;
	}

	sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
	if ((objbuf = (struct sn_hwperf_object_info *) vmalloc(sz)) == NULL) {
		printk(KERN_ERR "sn_hwperf_enum_objects: vmalloc(%d) failed\n",
			(int)sz);
		e = -ENOMEM;
		goto out;
	}

	e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS,
		0, sz, (u64) objbuf, 0, 0, NULL);
	if (e != SN_HWPERF_OP_OK) {
		e = -EINVAL;
		vfree(objbuf);
		objbuf = NULL;	/* don't hand a freed buffer back to the caller */
	}

out:
	*nobj = sn_hwperf_obj_cnt;
	*ret = objbuf;
	return e;
}

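/*
 * Map a geographic location string as reported by SAL (for example
 * "001c14#0", which the sscanf() format below parses as rack 1, brick
 * type 'c', slot 14, slab 0) to the matching compact node id, or -1 if
 * no matching node is found.
 */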
static int sn_hwperf_geoid_to_cnode(char *location)
{
	int cnode;
	geoid_t geoid;
	moduleid_t module_id;
	char type;
	int rack, slot, slab;
	int this_rack, this_slot, this_slab;

	if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
		return -1;

	for (cnode = 0; cnode < numionodes; cnode++) {
		geoid = cnodeid_get_geoid(cnode);
		module_id = geo_module(geoid);
		this_rack = MODULE_GET_RACK(module_id);
		this_slot = MODULE_GET_BPOS(module_id);
		this_slab = geo_slab(geoid);
		if (rack == this_rack && slot == this_slot && slab == this_slab)
			break;
	}

	return cnode < numionodes ? cnode : -1;
}

static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info *obj)
{
	if (!obj->sn_hwp_this_part)
		return -1;
	return sn_hwperf_geoid_to_cnode(obj->location);
}

static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj,
				struct sn_hwperf_object_info *objs)
{
	int ordinal;
	struct sn_hwperf_object_info *p;

	for (ordinal=0, p=objs; p != obj; p++) {
		if (SN_HWPERF_FOREIGN(p))
			continue;
		if (SN_HWPERF_SAME_OBJTYPE(p, obj))
			ordinal++;
	}

	return ordinal;
}

static const char *slabname_node =	"node"; /* SHub asic */
static const char *slabname_ionode =	"ionode"; /* TIO asic */
static const char *slabname_router =	"router"; /* NL3R or NL4R */
static const char *slabname_other =	"other"; /* unknown asic */

static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
			struct sn_hwperf_object_info *objs, int *ordinal)
{
	int isnode;
	const char *slabname = slabname_other;

	if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) {
		slabname = isnode ? slabname_node : slabname_ionode;
		*ordinal = sn_hwperf_obj_to_cnode(obj);
	} else {
		*ordinal = sn_hwperf_generic_ordinal(obj, objs);
		if (SN_HWPERF_IS_ROUTER(obj))
			slabname = slabname_router;
	}

	return slabname;
}

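/*
 * seq_file ->show op: emit one line per hardware object, plus per-cpu and
 * per-numalink-port detail lines for node and router objects. Illustrative
 * output shape only (values differ per system):
 *
 *	# sn_topology version 1
 *	# objtype ordinal location partition [attribute value [, ...]]
 *	node 0 001c14#0 local asic SHub_1.1, nasid 0x0, dist 10:21
 *	cpu 0 001c14#0a local freq 1500MHz, arch ia64, dist 10:21
 *	numalink 0 001c14#0-1 local endpoint 002r17#0-4, protocol LLP4
 */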
static int sn_topology_show(struct seq_file *s, void *d)
{
	int sz;
	int pt;
	int e;
	int i;
	int j;
	const char *slabname;
	int ordinal;
	cpumask_t cpumask;
	char slice;
	struct cpuinfo_ia64 *c;
	struct sn_hwperf_port_info *ptdata;
	struct sn_hwperf_object_info *p;
	struct sn_hwperf_object_info *obj = d;	/* this object */
	struct sn_hwperf_object_info *objs = s->private; /* all objects */

	if (obj == objs) {
		seq_printf(s, "# sn_topology version 1\n");
		seq_printf(s, "# objtype ordinal location partition"
			" [attribute value [, ...]]\n");
	}

	if (SN_HWPERF_FOREIGN(obj)) {
		/* private in another partition: not interesting */
		return 0;
	}

	for (i = 0; obj->name[i]; i++) {
		if (obj->name[i] == ' ')
			obj->name[i] = '_';
	}

	slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
	seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
		obj->sn_hwp_this_part ? "local" : "shared", obj->name);

	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
		seq_putc(s, '\n');
	else {
		seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
		for (i=0; i < numionodes; i++) {
			seq_printf(s, i ? ":%d" : ", dist %d",
				node_distance(ordinal, i));
		}
		seq_putc(s, '\n');

		/*
		 * CPUs on this node, if any
		 */
		cpumask = node_to_cpumask(ordinal);
		for_each_online_cpu(i) {
			if (cpu_isset(i, cpumask)) {
				slice = 'a' + cpuid_to_slice(i);
				c = cpu_data(i);
				seq_printf(s, "cpu %d %s%c local"
					" freq %luMHz, arch ia64",
					i, obj->location, slice,
					c->proc_freq / 1000000);
				for_each_online_cpu(j) {
					seq_printf(s, j ? ":%d" : ", dist %d",
						node_distance(
						    cpuid_to_cnodeid(i),
						    cpuid_to_cnodeid(j)));
				}
				seq_putc(s, '\n');
			}
		}
	}

	if (obj->ports) {
		/*
		 * numalink ports
		 */
		sz = obj->ports * sizeof(struct sn_hwperf_port_info);
		if ((ptdata = vmalloc(sz)) == NULL)
			return -ENOMEM;
		e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
				      SN_HWPERF_ENUM_PORTS, obj->id, sz,
				      (u64) ptdata, 0, 0, NULL);
		if (e != SN_HWPERF_OP_OK) {
			vfree(ptdata);	/* don't leak the port buffer on error */
			return -EINVAL;
		}
		for (ordinal=0, p=objs; p != obj; p++) {
			if (!SN_HWPERF_FOREIGN(p))
				ordinal += p->ports;
		}
		for (pt = 0; pt < obj->ports; pt++) {
			for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
				if (ptdata[pt].conn_id == p->id) {
					break;
				}
			}
			seq_printf(s, "numalink %d %s-%d",
			    ordinal+pt, obj->location, ptdata[pt].port);

			if (i >= sn_hwperf_obj_cnt) {
				/* no connection */
				seq_puts(s, " local endpoint disconnected"
					    ", protocol unknown\n");
				continue;
			}

			if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
				/* both ends local to this partition */
				seq_puts(s, " local");
			else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
				/* both ends of the link in a foreign partition */
				seq_puts(s, " foreign");
			else
				/* link straddles a partition */
				seq_puts(s, " shared");

			/*
			 * Unlikely, but strictly should query the LLP config
			 * registers because an NL4R can be configured to run
			 * NL3 protocol, even when not talking to an NL3 router.
			 * Ditto for node-node.
			 */
			seq_printf(s, " endpoint %s-%d, protocol %s\n",
				p->location, ptdata[pt].conn_port,
				(SN_HWPERF_IS_NL3ROUTER(obj) ||
				SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4");
		}
		vfree(ptdata);
	}

	return 0;
}

static void *sn_topology_start(struct seq_file *s, loff_t *pos)
{
	struct sn_hwperf_object_info *objs = s->private;

	if (*pos < sn_hwperf_obj_cnt)
		return (void *)(objs + *pos);

	return NULL;
}

static void *sn_topology_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return sn_topology_start(s, pos);
}

static void sn_topology_stop(struct seq_file *m, void *v)
{
	return;
}

/*
 * /proc/sgi_sn/sn_topology, read-only using seq_file
 */
static struct seq_operations sn_topology_seq_ops = {
	.start = sn_topology_start,
	.next = sn_topology_next,
	.stop = sn_topology_stop,
	.show = sn_topology_show
};
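
/*
 * The iterator state is the object array itself: sn_topology_open() below
 * stores the array returned by sn_hwperf_enum_objects() in seq->private,
 * ->start/->next step a pointer through it, and ->show emits one object per
 * call. sn_topology_release() vfree()s the array when the file is closed.
 */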

struct sn_hwperf_op_info {
	u64 op;
	struct sn_hwperf_ioctl_args *a;
	void *p;
	int *v0;
	int ret;
};

static void sn_hwperf_call_sal(void *info)
{
	struct sn_hwperf_op_info *op_info = info;
	int r;

	r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op,
		      op_info->a->arg, op_info->a->sz,
		      (u64) op_info->p, 0, 0, op_info->v0);
	op_info->ret = r;
}

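/*
 * Run the SAL call described by op_info on the cpu encoded in the ioctl
 * argument. As used below, the 64-bit a->arg packs three things (the exact
 * mask definitions live in <asm/sn/sn2/sn_hwperf.h>): a target cpu in the
 * bits covered by SN_HWPERF_ARG_CPU_MASK (shifted down by 32), or
 * SN_HWPERF_ARG_ANY_CPU to run on the current cpu; SN_HWPERF_ARG_USE_IPI_MASK
 * to choose an IPI rather than migrating the calling task; and the object id
 * in the low bits under SN_HWPERF_ARG_OBJID_MASK.
 */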
static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
{
	u32 cpu;
	u32 use_ipi;
	int r = 0;
	cpumask_t save_allowed;

	cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
	use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
	op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;

	if (cpu != SN_HWPERF_ARG_ANY_CPU) {
		/* bound-check before testing the online map */
		if (cpu >= NR_CPUS || !cpu_online(cpu)) {
			r = -EINVAL;
			goto out;
		}
	}

	if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) {
		/* don't care, or already on correct cpu */
		sn_hwperf_call_sal(op_info);
	} else {
		if (use_ipi) {
			/* use an interprocessor interrupt to call SAL */
			smp_call_function_single(cpu, sn_hwperf_call_sal,
				op_info, 1, 1);
		} else {
			/* migrate the task before calling SAL */
			save_allowed = current->cpus_allowed;
			set_cpus_allowed(current, cpumask_of_cpu(cpu));
			sn_hwperf_call_sal(op_info);
			set_cpus_allowed(current, save_allowed);
		}
	}
	r = op_info->ret;

out:
	return r;
}

/*
 * ioctl for "sn_hwperf" misc device
 */
static int
sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
{
	struct sn_hwperf_ioctl_args a;
	struct cpuinfo_ia64 *cdata;
	struct sn_hwperf_object_info *objs;
	struct sn_hwperf_object_info *cpuobj;
	struct sn_hwperf_op_info op_info;
	void *p = NULL;
	int nobj;
	char slice;
	int node;
	int r;
	int v0;
	int i;
	int j;

	unlock_kernel();

	/* only user requests are allowed here */
	if ((op & SN_HWPERF_OP_MASK) < 10) {
		r = -EINVAL;
		goto error;
	}
	r = copy_from_user(&a, (const void __user *)arg,
		sizeof(struct sn_hwperf_ioctl_args));
	if (r != 0) {
		r = -EFAULT;
		goto error;
	}

	/*
	 * Allocate memory to hold a kernel copy of the user buffer. The
	 * buffer contents are either copied in or out (or both) of user
	 * space depending on the flags encoded in the requested operation.
	 */
	if (a.ptr) {
		p = vmalloc(a.sz);
		if (!p) {
			r = -ENOMEM;
			goto error;
		}
	}

	if (op & SN_HWPERF_OP_MEM_COPYIN) {
		r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
		if (r != 0) {
			r = -EFAULT;
			goto error;
		}
	}

	switch (op) {
	case SN_HWPERF_GET_CPU_INFO:
		if (a.sz == sizeof(u64)) {
			/* special case to get size needed */
			*(u64 *) p = (u64) num_online_cpus() *
				sizeof(struct sn_hwperf_object_info);
		} else if (a.sz < num_online_cpus() *
				sizeof(struct sn_hwperf_object_info)) {
			r = -ENOMEM;
			goto error;
		} else if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
			memset(p, 0, a.sz);
			for (i = 0; i < nobj; i++) {
				node = sn_hwperf_obj_to_cnode(objs + i);
				for_each_online_cpu(j) {
					if (node != cpu_to_node(j))
						continue;
					cpuobj = (struct sn_hwperf_object_info *) p + j;
					slice = 'a' + cpuid_to_slice(j);
					cdata = cpu_data(j);
					cpuobj->id = j;
					snprintf(cpuobj->name,
						 sizeof(cpuobj->name),
						 "CPU %luMHz %s",
						 cdata->proc_freq / 1000000,
						 cdata->vendor);
					snprintf(cpuobj->location,
						 sizeof(cpuobj->location),
						 "%s%c", objs[i].location,
						 slice);
				}
			}

			vfree(objs);
		}
		break;

	case SN_HWPERF_GET_NODE_NASID:
		if (a.sz != sizeof(u64) ||
		   (node = a.arg) < 0 || node >= numionodes) {
			r = -EINVAL;
			goto error;
		}
		*(u64 *)p = (u64)cnodeid_to_nasid(node);
		break;

	case SN_HWPERF_GET_OBJ_NODE:
		if (a.sz != sizeof(u64) || a.arg < 0) {
			r = -EINVAL;
			goto error;
		}
		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
			if (a.arg >= nobj) {
				r = -EINVAL;
				vfree(objs);
				goto error;
			}
			if (objs[(i = a.arg)].id != a.arg) {
				for (i = 0; i < nobj; i++) {
					if (objs[i].id == a.arg)
						break;
				}
			}
			if (i == nobj) {
				r = -EINVAL;
				vfree(objs);
				goto error;
			}
			*(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
			vfree(objs);
		}
		break;

	case SN_HWPERF_GET_MMRS:
	case SN_HWPERF_SET_MMRS:
	case SN_HWPERF_OBJECT_DISTANCE:
		op_info.p = p;
		op_info.a = &a;
		op_info.v0 = &v0;
		op_info.op = op;
		r = sn_hwperf_op_cpu(&op_info);
		break;

	default:
		/* all other ops are a direct SAL call */
		r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
			      a.arg, a.sz, (u64) p, 0, 0, &v0);
		a.v0 = v0;
		break;
	}

	if (op & SN_HWPERF_OP_MEM_COPYOUT) {
		r = copy_to_user((void __user *)a.ptr, p, a.sz);
		if (r != 0) {
			r = -EFAULT;
			goto error;
		}
	}

error:
	if (p)
		vfree(p);

	lock_kernel();
	return r;
}
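
/*
 * Example (illustrative only, not part of the driver): issuing a request
 * through the misc device from user space. A minimal sketch, assuming the
 * SN_HWPERF_* op codes and struct sn_hwperf_ioctl_args are visible to the
 * program via the kernel header (the include path below is an assumption),
 * that /dev/sn_hwperf exists (on systems without hotplug support, create
 * it with mknod using the minor number shown in /proc/misc), and that
 * SN_HWPERF_GET_NODE_NASID encodes SN_HWPERF_OP_MEM_COPYOUT so the result
 * is copied back to the caller's buffer:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/sn/sn2/sn_hwperf.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nasid = 0;
 *		struct sn_hwperf_ioctl_args a;
 *		int fd = open("/dev/sn_hwperf", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		a.arg = 0;
 *		a.sz = sizeof(nasid);
 *		a.ptr = &nasid;
 *		if (ioctl(fd, SN_HWPERF_GET_NODE_NASID, &a) < 0)
 *			return 1;
 *		printf("node 0 has nasid 0x%lx\n", nasid);
 *		return 0;
 *	}
 */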

static struct file_operations sn_hwperf_fops = {
	.ioctl = sn_hwperf_ioctl,
};

static struct miscdevice sn_hwperf_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sn_hwperf",
	.fops = &sn_hwperf_fops,
};

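/*
 * One-time initialization, serialized by sn_hwperf_init_mutex and triggered
 * on first use (see sn_hwperf_enum_objects): pick the console nasid as the
 * reference node for SAL calls, ask SAL how big a scratch heap it needs
 * (SN_HWPERF_GET_HEAPSIZE), install a vmalloc()'d heap of that size
 * (SN_HWPERF_INSTALL_HEAP), cache the hardware object count
 * (SN_HWPERF_OBJECT_COUNT), and finally register the "sn_hwperf" misc device.
 */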
static int sn_hwperf_init(void)
{
	u64 v;
	int salr;
	int e = 0;

	/* single threaded, once-only initialization */
	down(&sn_hwperf_init_mutex);
	if (sn_hwperf_salheap) {
		up(&sn_hwperf_init_mutex);
		return e;
	}

	/*
	 * The PROM code needs a fixed reference node. For convenience the
	 * same node as the console I/O is used.
	 */
	sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid();

	/*
	 * Request the needed size and install the PROM scratch area.
	 * The PROM keeps various tracking bits in this memory area.
	 */
	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
				 (u64) SN_HWPERF_GET_HEAPSIZE, 0,
				 (u64) sizeof(u64), (u64) &v, 0, 0, NULL);
	if (salr != SN_HWPERF_OP_OK) {
		e = -EINVAL;
		goto out;
	}

	if ((sn_hwperf_salheap = vmalloc(v)) == NULL) {
		e = -ENOMEM;
		goto out;
	}
	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
				 SN_HWPERF_INSTALL_HEAP, 0, v,
				 (u64) sn_hwperf_salheap, 0, 0, NULL);
	if (salr != SN_HWPERF_OP_OK) {
		e = -EINVAL;
		goto out;
	}

	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
				 SN_HWPERF_OBJECT_COUNT, 0,
				 sizeof(u64), (u64) &v, 0, 0, NULL);
	if (salr != SN_HWPERF_OP_OK) {
		e = -EINVAL;
		goto out;
	}
	sn_hwperf_obj_cnt = (int)v;

out:
	if (e < 0 && sn_hwperf_salheap) {
		vfree(sn_hwperf_salheap);
		sn_hwperf_salheap = NULL;
		sn_hwperf_obj_cnt = 0;
	}

	if (!e) {
		/*
		 * Register a dynamic misc device for ioctl. Platforms
		 * supporting hotplug will create /dev/sn_hwperf, else
		 * the user can look up the minor number in /proc/misc.
		 */
		if ((e = misc_register(&sn_hwperf_dev)) != 0) {
			printk(KERN_ERR "sn_hwperf_init: misc register "
			       "for \"sn_hwperf\" failed, err %d\n", e);
		}
	}

	up(&sn_hwperf_init_mutex);
	return e;
}

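/*
 * open/release handlers for /proc/sgi_sn/sn_topology. They are not static:
 * the /proc/sgi_sn setup code elsewhere in this directory (presumably
 * sn/kernel/proc.c) wires them into the proc file's file_operations.
 */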
int sn_topology_open(struct inode *inode, struct file *file)
{
	int e;
	struct seq_file *seq;
	struct sn_hwperf_object_info *objbuf;
	int nobj;

	if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
		if ((e = seq_open(file, &sn_topology_seq_ops)) != 0) {
			vfree(objbuf);	/* seq_open failed: free the enumeration */
			return e;
		}
		seq = file->private_data;
		seq->private = objbuf;
	}

	return e;
}

int sn_topology_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	if (seq->private)
		vfree(seq->private);
	return seq_release(inode, file);
}