4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING
7 * @author John Levon <levon@movementarian.org>
9 * Modified by Aravind Menon and Jose Renato Santos for Xen
10 * These modifications are:
11 * Copyright (C) 2005 Hewlett-Packard Co.
14 #include <linux/init.h>
15 #include <linux/notifier.h>
16 #include <linux/smp.h>
17 #include <linux/oprofile.h>
18 #include <linux/sysdev.h>
19 #include <linux/slab.h>
20 #include <linux/interrupt.h>
21 #include <linux/vmalloc.h>
25 #include <asm/pgtable.h>
26 #include <xen/evtchn.h>
27 #include "op_counter.h"
29 #include <xen/interface/xen.h>
30 #include <xen/interface/xenoprof.h>
31 #include <../../../drivers/oprofile/cpu_buffer.h>
32 #include <../../../drivers/oprofile/event_buffer.h>
/* Forward declarations for the oprofile_operations start/stop hooks below. */
34 static int xenoprof_start(void);
35 static void xenoprof_stop(void);
/*
 * Maps a range of Xen machine pages (machine address maddr, vm_size bytes)
 * into kernel virtual address space with the given protection.
 * Defined elsewhere — presumably in arch mm code; TODO confirm location.
 */
37 void * vm_map_xen_pages(unsigned long maddr, int vm_size, pgprot_t prot);
/* Non-zero while profiling is active (set in xenoprof_setup, cleared in
 * xenoprof_shutdown); checked by the PM suspend/resume hooks. */
39 static int xenoprof_enabled = 0;
/* Number of hardware events/counters reported by Xen at XENOPROF_init
 * (clamped to OP_MAX_COUNTER in oprofile_arch_init). */
40 static unsigned int num_events = 0;
/* Non-zero when Xen reports this domain as the primary profiler; only the
 * primary processes passive-domain sample buffers. */
41 static int is_primary = 0;
/* Non-zero once an active-domain list has been programmed into Xen
 * (see xenoprof_set_active / xenoprof_setup). */
42 static int active_defined;
44 /* sample buffers shared with Xen */
45 xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
46 /* Shared buffer area */
48 /* Number of buffers in shared area (one per VCPU) */
50 /* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
52 /* cpu model type string - copied from Xen memory space on XENOPROF_init command */
53 char cpu_type[XENOPROF_CPU_TYPE_SIZE];
55 /* Passive sample buffers shared with Xen */
56 xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
57 /* Passive shared buffer area */
58 char *p_shared_buffer[MAX_OPROF_DOMAINS];
/*
 * Power-management suspend hook: if profiling is currently enabled,
 * stop it so the hardware counters are quiesced across suspend.
 * NOTE(review): body elided in this view — presumably calls
 * xenoprof_stop() and returns 0; confirm against full source.
 */
62 static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
64 	if (xenoprof_enabled == 1)
/*
 * Power-management resume hook: if profiling was enabled before suspend,
 * restart it.  NOTE(review): body elided in this view — presumably calls
 * xenoprof_start() and returns 0; confirm against full source.
 */
70 static int xenoprof_resume(struct sys_device * dev)
72 	if (xenoprof_enabled == 1)
/* sysdev class "oprofile": wires the suspend/resume hooks above into the
 * legacy sysdev power-management path (pre-dev_pm_ops kernels). */
78 static struct sysdev_class oprofile_sysclass = {
79 	set_kset_name("oprofile"),
80 	.resume = xenoprof_resume,
81 	.suspend = xenoprof_suspend
/* The single sysdev instance registered under the class above. */
85 static struct sys_device device_oprofile = {
87 	.cls = &oprofile_sysclass,
/*
 * Register the sysdev class and device so suspend/resume callbacks fire.
 * Returns 0 on success or the first failing registration's error code.
 * Note the deliberate assignment-inside-condition idiom on the first call.
 */
91 static int __init init_driverfs(void)
94 	if (!(error = sysdev_class_register(&oprofile_sysclass)))
95 		error = sysdev_register(&device_oprofile);
/* Tear down in reverse order of init_driverfs: device first, then class. */
100 static void __exit exit_driverfs(void)
102 	sysdev_unregister(&device_oprofile);
103 	sysdev_class_unregister(&oprofile_sysclass);
/* Without CONFIG_PM the driverfs hooks compile away to safe no-ops. */
107 #define init_driverfs() do { } while (0)
108 #define exit_driverfs() do { } while (0)
109 #endif /* CONFIG_PM */
/* Running totals of samples drained from the shared buffers: one counter
 * for this (active) domain, one for passive domains. */
111 unsigned long long oprofile_samples = 0;
112 unsigned long long p_oprofile_samples = 0;
/* Number of passive domains configured, and their per-domain Xen state. */
114 unsigned int pdomains;
115 struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
/*
 * Drain one Xen-shared sample ring buffer into the oprofile core.
 * The ring is a circular array of event_size entries; Xen advances
 * event_head as the producer, we advance event_tail as the consumer.
 * The wrapped case (head < tail) is handled by first draining from
 * tail to the end of the ring, then from index 0 up to head.
 * is_passive selects which global sample counter is bumped.
 * NOTE(review): the tail increments, the head<tail guard, and the
 * active-domain counter bump are elided in this view — confirm against
 * full source.
 */
117 static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
119 	int head, tail, size;
121 	head = buf->event_head;
122 	tail = buf->event_tail;
123 	size = buf->event_size;
126 	while (tail < size) {
127 		oprofile_add_pc(buf->event_log[tail].eip,
128 				buf->event_log[tail].mode,
129 				buf->event_log[tail].event);
133 			p_oprofile_samples++;
138 	while (tail < head) {
139 		oprofile_add_pc(buf->event_log[tail].eip,
140 				buf->event_log[tail].mode,
141 				buf->event_log[tail].event);
145 			p_oprofile_samples++;
/* Publish the new consumer position back to the shared buffer so Xen
 * can reuse the drained slots. */
149 	buf->event_tail = tail;
/*
 * Drain the sample buffers of all configured passive domains.
 * For each passive domain with pending samples, emit a domain-switch
 * escape record (so the samples are attributed to that domain), then
 * drain each per-VCPU buffer via xenoprof_add_pc(buf, 1).  If any
 * domain switch was emitted, switch back to COORDINATOR_DOMAIN at the
 * end so subsequent samples are attributed to this domain again.
 */
152 static void xenoprof_handle_passive(void)
155 	int flag_domain, flag_switch = 0;
157 	for (i = 0; i < pdomains; i++) {
159 		for (j = 0; j < passive_domains[i].nbuf; j++) {
160 			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
/* Skip empty buffers: head == tail means no pending samples. */
161 			if (buf->event_head == buf->event_tail)
164 				if (!oprofile_add_domain_switch(passive_domains[i].domain_id))
168 			xenoprof_add_pc(buf, 1);
174 		oprofile_add_domain_switch(COORDINATOR_DOMAIN);
/*
 * VIRQ_XENOPROF handler: a counter overflowed and Xen queued samples.
 * Drains this CPU's own shared buffer, then — on the primary profiler
 * only — drains passive-domain buffers.  The static `flag` bit with
 * test_and_set_bit ensures only one CPU at a time walks the passive
 * buffers (presumably cleared after xenoprof_handle_passive returns;
 * elided in this view — confirm).
 */
179 xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
181 	struct xenoprof_buf * buf;
183 	static unsigned long flag;
185 	cpu = smp_processor_id();
186 	buf = xenoprof_buf[cpu];
188 	xenoprof_add_pc(buf, 0);
190 	if (is_primary && !test_and_set_bit(0, &flag)) {
191 		xenoprof_handle_passive();
/*
 * Release every per-CPU VIRQ_XENOPROF binding previously created by
 * bind_virq().  ovf_irq[i] >= 0 marks a live binding; presumably the
 * slot is reset to -1 afterwards (elided in this view).
 */
199 static void unbind_virq(void)
204 		if (ovf_irq[i] >= 0) {
205 			unbind_from_irqhandler(ovf_irq[i], NULL);
/*
 * Bind VIRQ_XENOPROF to xenoprof_ovf_interrupt on each CPU, recording
 * the resulting irq numbers in ovf_irq[].  On failure, presumably
 * unwinds earlier bindings via unbind_virq() and returns the error
 * (elided in this view — confirm against full source).
 */
212 static int bind_virq(void)
217 		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
219 						 xenoprof_ovf_interrupt,
/*
 * oprofile_operations.setup hook: program Xen with the counter
 * configuration chosen through oprofilefs and arm the overflow VIRQ.
 * Sequence: ensure dom0 is on the active list (if userspace never set
 * one), reserve the hardware counters, push each counter's config,
 * commit with XENOPROF_setup_events, then enable the per-VCPU VIRQ.
 * Returns 0 on success, or a negative error from a failed hypercall
 * (error unwinding is elided in this view).
 */
236 static int xenoprof_setup(void)
246 	struct xenoprof_counter counter;
248 	/* Define dom0 as an active domain if not done yet */
249 	if (!active_defined) {
251 		ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
255 		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
261 	ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
/* Copy the oprofilefs-visible config into the hypercall structure,
 * one counter at a time. */
264 	for (i=0; i<num_events; i++) {
266 		counter.count = (uint64_t)counter_config[i].count;
267 		counter.enabled = (uint32_t)counter_config[i].enabled;
268 		counter.event = (uint32_t)counter_config[i].event;
269 		counter.kernel = (uint32_t)counter_config[i].kernel;
270 		counter.user = (uint32_t)counter_config[i].user;
271 		counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
272 		HYPERVISOR_xenoprof_op(XENOPROF_counter,
275 	ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
281 	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
285 	xenoprof_enabled = 1;
/*
 * oprofile_operations.shutdown hook: the inverse of xenoprof_setup.
 * Marks profiling disabled, then tells Xen to stop delivering the
 * overflow VIRQ and to release the reserved hardware counters.
 */
293 static void xenoprof_shutdown(void)
295 	xenoprof_enabled = 0;
297 	HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
300 	HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
/*
 * oprofile_operations.start hook: asks Xen to start the counters.
 * Returns the hypercall result (0 on success).
 */
309 static int xenoprof_start(void)
314 	ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
/* oprofile_operations.stop hook: asks Xen to stop the counters. */
320 static void xenoprof_stop(void)
323 	HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
/*
 * Program Xen's list of actively-profiled domains.
 * Resets the current list, then registers each requested domain id.
 * The `domid != active_domains[i]` check detects truncation when the
 * caller's int is narrowed into the (narrower) domid variable — such
 * ids are rejected.  dom0 is always added even if absent from the list.
 * On any hypercall failure the active list is reset again; on success
 * active_defined is set so xenoprof_setup won't re-default to dom0.
 */
327 static int xenoprof_set_active(int * active_domains,
328 			       unsigned int adomains)
338 	if (adomains > MAX_OPROF_DOMAINS)
341 	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
345 	for (i=0; i<adomains; i++) {
346 		domid = active_domains[i];
347 		if (domid != active_domains[i]) {
351 		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
/* Remember whether dom0 appeared in the caller's list. */
354 		if (active_domains[i] == 0)
357 	/* dom0 must always be active but may not be in the list */
360 		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
365 		HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
366 	active_defined = !ret;
/*
 * Program Xen's list of passively-profiled domains and map each
 * domain's sample buffers into our address space.
 * For each domain: register it with XENOPROF_set_passive (Xen fills
 * in bufsize/nbuf/buf_maddr), map the shared area with
 * vm_map_xen_pages, then record per-VCPU buffer pointers indexed by
 * the vcpu_id stored in each buffer header.
 * On failure partway through, previously mapped areas are unmapped
 * (the vunmap loop at the end is the error-unwind path).
 */
370 static int xenoprof_set_passive(int * p_domains,
377 	struct xenoprof_buf *buf;
378 	pgprot_t prot = __pgprot(_KERNPG_TABLE);
383 	if (pdoms > MAX_OPROF_DOMAINS)
386 	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
390 	for (i = 0; i < pdoms; i++) {
391 		passive_domains[i].domain_id = p_domains[i];
/* NOTE(review): 2048 looks like a fixed per-domain sample-buffer
 * request — confirm units against the xenoprof interface. */
392 		passive_domains[i].max_samples = 2048;
393 		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, &passive_domains[i]);
/* Round the shared area (nbuf buffers of bufsize bytes) up to whole
 * pages before mapping. */
397 		npages = (passive_domains[i].bufsize * passive_domains[i].nbuf - 1) / PAGE_SIZE + 1;
398 		vm_size = npages * PAGE_SIZE;
400 		p_shared_buffer[i] = (char *)vm_map_xen_pages(passive_domains[i].buf_maddr,
402 		if (!p_shared_buffer[i]) {
407 		for (j = 0; j < passive_domains[i].nbuf; j++) {
408 			buf = (struct xenoprof_buf *)
409 				&p_shared_buffer[i][j * passive_domains[i].bufsize];
/* A vcpu_id out of range would corrupt p_xenoprof_buf — trust but
 * verify what Xen wrote into the shared header. */
410 			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
411 			p_xenoprof_buf[i][buf->vcpu_id] = buf;
/* Error unwind: unmap every domain area mapped so far. */
420 	for (j = 0; j < i; j++) {
421 		vunmap(p_shared_buffer[j]);
422 		p_shared_buffer[j] = NULL;
/* Per-counter configuration written by userspace through the oprofilefs
 * files created in xenoprof_create_files and consumed by xenoprof_setup. */
428 struct op_counter_config counter_config[OP_MAX_COUNTER];
/*
 * oprofile_operations.create_files hook: build one oprofilefs directory
 * per hardware event ("0", "1", ...) with the standard control files
 * (enabled/event/count/unit_mask/kernel/user), each backed by the
 * corresponding counter_config[] field.
 * NOTE(review): snprintf with size 2 only fits a single digit plus NUL,
 * so directory names are wrong/truncated if num_events > 10 — confirm
 * buf's declared size and whether >10 counters is possible here.
 */
430 static int xenoprof_create_files(struct super_block * sb, struct dentry * root)
434 	for (i = 0; i < num_events; ++i) {
438 		snprintf(buf, 2, "%d", i);
439 		dir = oprofilefs_mkdir(sb, root, buf);
440 		oprofilefs_create_ulong(sb, dir, "enabled",
441 					&counter_config[i].enabled);
442 		oprofilefs_create_ulong(sb, dir, "event",
443 					&counter_config[i].event);
444 		oprofilefs_create_ulong(sb, dir, "count",
445 					&counter_config[i].count);
446 		oprofilefs_create_ulong(sb, dir, "unit_mask",
447 					&counter_config[i].unit_mask);
448 		oprofilefs_create_ulong(sb, dir, "kernel",
449 					&counter_config[i].kernel);
450 		oprofilefs_create_ulong(sb, dir, "user",
451 					&counter_config[i].user);
/* The operations table handed to the oprofile core in oprofile_arch_init;
 * every hook is implemented above in this file. */
458 struct oprofile_operations xenoprof_ops = {
459 	.create_files 	= xenoprof_create_files,
460 	.set_active	= xenoprof_set_active,
461 	.set_passive    = xenoprof_set_passive,
462 	.setup 		= xenoprof_setup,
463 	.shutdown	= xenoprof_shutdown,
464 	.start		= xenoprof_start,
465 	.stop		= xenoprof_stop
469 /* in order to get driverfs right */
/* Set when XENOPROF_init succeeds; presumably consulted by
 * oprofile_arch_exit to decide whether to tear driverfs down. */
470 static int using_xenoprof;
/*
 * Architecture init entry point called by the oprofile core.
 * Issues XENOPROF_init; on success: records num_events/is_primary,
 * maps this domain's shared sample area (one buffer per VCPU, indexed
 * by the vcpu_id each buffer header carries), copies the Xen-detected
 * cpu_type string, initializes the per-CPU ovf_irq slots, and installs
 * xenoprof_ops into *ops.  Returns the XENOPROF_init hypercall result.
 * NOTE(review): the failure path when vm_map_xen_pages returns NULL is
 * elided in this view — confirm the error code returned there.
 */
472 int __init oprofile_arch_init(struct oprofile_operations * ops)
474 	struct xenoprof_init init;
475 	struct xenoprof_buf * buf;
481 	init.max_samples = 16;
482 	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
485 		pgprot_t prot = __pgprot(_KERNPG_TABLE);
487 		num_events = init.num_events;
488 		is_primary = init.is_primary;
491 		/* just in case - make sure we do not overflow event list
492 		   (i.e. counter_config list) */
493 		if (num_events > OP_MAX_COUNTER)
494 			num_events = OP_MAX_COUNTER;
/* Round the shared area (nbuf buffers of bufsize bytes) up to whole
 * pages before mapping. */
496 		npages = (init.bufsize * nbuf - 1) / PAGE_SIZE + 1;
497 		vm_size = npages * PAGE_SIZE;
499 		shared_buffer = (char *)vm_map_xen_pages(init.buf_maddr,
501 		if (!shared_buffer) {
506 		for (i=0; i< nbuf; i++) {
507 			buf = (struct xenoprof_buf*)
508 				&shared_buffer[i * init.bufsize];
/* Guard against a bogus vcpu_id written into the shared header. */
509 			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
510 			xenoprof_buf[buf->vcpu_id] = buf;
513 		/*  cpu_type is detected by Xen */
514 		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
/* Terminator was written above, so this strncpy cannot leave cpu_type
 * unterminated. */
515 		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
516 		xenoprof_ops.cpu_type = cpu_type;
/* Presumably initializes every ovf_irq[] slot to -1 ("unbound") —
 * the loop body is elided in this view. */
522 		for (i=0; i<NR_CPUS; i++)
528 	printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
529 	       "is_primary %d\n", ret, num_events, is_primary);
/*
 * Architecture exit entry point: unmap this domain's shared sample
 * area, unmap any passive-domain areas still mapped, and tell Xen to
 * shut xenoprof down.  Pointers are NULLed after vunmap to guard
 * against double-unmap on repeated teardown.
 */
534 void __exit oprofile_arch_exit(void)
542 		vunmap(shared_buffer);
543 		shared_buffer = NULL;
546 		for (i = 0; i < pdomains; i++)
547 			if (p_shared_buffer[i]) {
548 				vunmap(p_shared_buffer[i]);
549 				p_shared_buffer[i] = NULL;
551 		HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);