/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Modified by Aravind Menon and Jose Renato Santos for Xen
 * These modifications are:
 * Copyright (C) 2005 Hewlett-Packard Co.
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <xen/evtchn.h>
#include "op_counter.h"

#include <xen/driver_util.h>
#include <xen/interface/xen.h>
#include <xen/interface/xenoprof.h>
#include <../../../drivers/oprofile/cpu_buffer.h>
#include <../../../drivers/oprofile/event_buffer.h>
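
/*
 * Sample count passed to Xen in the XENOPROF_get_buffer request (see
 * map_xenoprof_buffer() below); Xen sizes the shared per-VCPU sample
 * buffers from this value.
 */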
#define MAX_XENOPROF_SAMPLES 16

static int xenoprof_start(void);
static void xenoprof_stop(void);

static int xenoprof_enabled = 0;
static unsigned int num_events = 0;
static int is_primary = 0;
static int active_defined;

/* sample buffers shared with Xen */
xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
/* Shared buffer area */
char *shared_buffer = NULL;
/* Number of buffers in shared area (one per VCPU) */
int nbuf;
/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
int ovf_irq[NR_CPUS];
/* cpu model type string - copied from Xen memory space on XENOPROF_init command */
char cpu_type[XENOPROF_CPU_TYPE_SIZE];

/* Passive sample buffers shared with Xen */
xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
/* Passive shared buffer area */
char *p_shared_buffer[MAX_OPROF_DOMAINS];

#ifdef CONFIG_PM

static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
{
        if (xenoprof_enabled == 1)
                xenoprof_stop();
        return 0;
}

static int xenoprof_resume(struct sys_device * dev)
{
        if (xenoprof_enabled == 1)
                xenoprof_start();
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        set_kset_name("oprofile"),
        .resume = xenoprof_resume,
        .suspend = xenoprof_suspend
};

static struct sys_device device_oprofile = {
        .id = 0,
        .cls = &oprofile_sysclass,
};

static int __init init_driverfs(void)
{
        int error;

        if (!(error = sysdev_class_register(&oprofile_sysclass)))
                error = sysdev_register(&device_oprofile);
        return error;
}

static void __exit exit_driverfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_driverfs() do { } while (0)
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM */

unsigned long long oprofile_samples = 0;
unsigned long long p_oprofile_samples = 0;

unsigned int pdomains;
struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
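
/*
 * Drain one shared sample buffer: forward every sample between
 * event_tail and event_head to the oprofile core (handling wrap-around
 * of the ring) and advance event_tail so Xen can reuse the slots.
 */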
static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
{
        int head, tail, size;

        head = buf->event_head;
        tail = buf->event_tail;
        size = buf->event_size;

        if (tail > head) {
                while (tail < size) {
                        oprofile_add_pc(buf->event_log[tail].eip,
                                        buf->event_log[tail].mode,
                                        buf->event_log[tail].event);
                        if (!is_passive)
                                oprofile_samples++;
                        else
                                p_oprofile_samples++;
                        tail++;
                }
                tail = 0;
        }
        while (tail < head) {
                oprofile_add_pc(buf->event_log[tail].eip,
                                buf->event_log[tail].mode,
                                buf->event_log[tail].event);
                if (!is_passive)
                        oprofile_samples++;
                else
                        p_oprofile_samples++;
                tail++;
        }

        buf->event_tail = tail;
}
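
/*
 * Run only by the primary (dom0) instance: walk the sample buffers of
 * all configured passive domains and feed their samples to oprofile,
 * bracketed by domain-switch records so user space can attribute them.
 */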
static void xenoprof_handle_passive(void)
{
        int i, j;
        int flag_domain, flag_switch = 0;

        for (i = 0; i < pdomains; i++) {
                flag_domain = 0;
                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
                        if (buf->event_head == buf->event_tail)
                                continue;
                        if (!flag_domain) {
                                if (!oprofile_add_domain_switch(passive_domains[i].
                                                                domain_id))
                                        goto done;
                                flag_domain = 1;
                        }
                        xenoprof_add_pc(buf, 1);
                        flag_switch = 1;
                }
        }
done:
        if (flag_switch)
                oprofile_add_domain_switch(COORDINATOR_DOMAIN);
}
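
/*
 * VIRQ_XENOPROF handler: Xen raises this virtual IRQ on a VCPU when its
 * shared sample buffer has filled.  The static 'flag' bit acts as a
 * try-lock so only one CPU at a time drains the passive-domain buffers.
 */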
static irqreturn_t
xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
        struct xenoprof_buf * buf;
        int cpu;
        static unsigned long flag;

        cpu = smp_processor_id();
        buf = xenoprof_buf[cpu];

        xenoprof_add_pc(buf, 0);

        if (is_primary && !test_and_set_bit(0, &flag)) {
                xenoprof_handle_passive();
                smp_mb__before_clear_bit();
                clear_bit(0, &flag);
        }

        return IRQ_HANDLED;
}
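
/*
 * Bind/unbind the per-CPU VIRQ_XENOPROF interrupt to the overflow
 * handler above; ovf_irq[] records the resulting irq numbers
 * (-1 when unbound).
 */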
static void unbind_virq(void)
{
        int i;

        for_each_possible_cpu(i) {
                if (ovf_irq[i] >= 0) {
                        unbind_from_irqhandler(ovf_irq[i], NULL);
                        ovf_irq[i] = -1;
                }
        }
}

static int bind_virq(void)
{
        int i, result;

        for_each_possible_cpu(i) {
                result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
                                                 i,
                                                 xenoprof_ovf_interrupt,
                                                 SA_INTERRUPT,
                                                 "xenoprof",
                                                 NULL);
                if (result < 0) {
                        unbind_virq();
                        return result;
                }
                ovf_irq[i] = result;
        }

        return 0;
}
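
/*
 * Ask Xen for the shared sample buffers and map their machine frames
 * into this kernel's address space.  The mapped area holds one
 * xenoprof_buf per VCPU, laid out back to back at bufsize intervals.
 */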
static int map_xenoprof_buffer(int max_samples)
{
        struct xenoprof_get_buffer get_buffer;
        struct xenoprof_buf *buf;
        int npages, ret, i;
        struct vm_struct *area;

        if (shared_buffer)
                return 0;

        get_buffer.max_samples = max_samples;

        if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, &get_buffer)) )
                return ret;

        nbuf = get_buffer.nbuf;
        npages = (get_buffer.bufsize * nbuf - 1) / PAGE_SIZE + 1;

        area = alloc_vm_area(npages * PAGE_SIZE);
        if (area == NULL)
                return -ENOMEM;

        if ( (ret = direct_kernel_remap_pfn_range(
                      (unsigned long)area->addr,
                      get_buffer.buf_maddr >> PAGE_SHIFT,
                      npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE), DOMID_SELF)) ) {
                vunmap(area->addr);
                return ret;
        }

        shared_buffer = area->addr;
        for (i = 0; i < nbuf; i++) {
                buf = (struct xenoprof_buf *)
                        &shared_buffer[i * get_buffer.bufsize];
                BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
                xenoprof_buf[buf->vcpu_id] = buf;
        }

        return 0;
}
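
/*
 * oprofile 'setup' hook: map the sample buffers, bind the VIRQs and, on
 * the primary domain, program the event counters in Xen from
 * counter_config[] before enabling sample delivery.
 */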
static int xenoprof_setup(void)
{
        int ret;
        int i;

        if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
                return ret;

        if ( (ret = bind_virq()) )
                return ret;

        if (is_primary) {
                struct xenoprof_counter counter;

                /* Define dom0 as an active domain if not done yet */
                if (!active_defined) {
                        domid_t domid;
                        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
                        if (ret)
                                goto err;
                        domid = 0;
                        ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
                        if (ret)
                                goto err;
                        active_defined = 1;
                }

                ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
                if (ret)
                        goto err;
                for (i = 0; i < num_events; i++) {
                        counter.ind = i;
                        counter.count = (uint64_t)counter_config[i].count;
                        counter.enabled = (uint32_t)counter_config[i].enabled;
                        counter.event = (uint32_t)counter_config[i].event;
                        counter.kernel = (uint32_t)counter_config[i].kernel;
                        counter.user = (uint32_t)counter_config[i].user;
                        counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
                        HYPERVISOR_xenoprof_op(XENOPROF_counter,
                                               &counter);
                }
                ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
                if (ret)
                        goto err;
        }

        ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
        if (ret)
                goto err;

        xenoprof_enabled = 1;
        return 0;
err:
        unbind_virq();
        return ret;
}

static void xenoprof_shutdown(void)
{
        xenoprof_enabled = 0;

        HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);

        if (is_primary) {
                HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
                active_defined = 0;
        }

        unbind_virq();
}

static int xenoprof_start(void)
{
        int ret = 0;

        if (is_primary)
                ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);

        return ret;
}

static void xenoprof_stop(void)
{
        if (is_primary)
                HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
}
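
/*
 * Tell Xen which domains are profiled as "active" (domains that run
 * their own xenoprof/oprofile instance and receive their own samples).
 * dom0 is always kept in the active list even if user space did not
 * request it.
 */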
static int xenoprof_set_active(int * active_domains,
                               unsigned int adomains)
{
        int ret = 0;
        int i;
        int set_dom0 = 0;
        domid_t domid;

        if (!is_primary)
                return 0;

        if (adomains > MAX_OPROF_DOMAINS)
                return -E2BIG;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
        if (ret)
                return ret;

        for (i = 0; i < adomains; i++) {
                domid = active_domains[i];
                if (domid != active_domains[i]) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
                if (ret)
                        goto out;
                if (active_domains[i] == 0)
                        set_dom0 = 1;
        }
        /* dom0 must always be active but may not be in the list */
        if (!set_dom0) {
                domid = 0;
                ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
        }

out:
        if (ret)
                HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
        active_defined = !ret;
        return ret;
}
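
/*
 * Tell Xen which domains are profiled passively: Xen collects their
 * samples itself and the primary domain drains them (see
 * xenoprof_handle_passive()), so each passive domain's buffers are
 * mapped into this kernel here.
 */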
static int xenoprof_set_passive(int * p_domains,
                                unsigned int pdoms)
{
        int ret;
        int i, j;
        int npages;
        struct xenoprof_buf *buf;
        struct vm_struct *area;
        pgprot_t prot = __pgprot(_KERNPG_TABLE);

        if (!is_primary)
                return 0;

        if (pdoms > MAX_OPROF_DOMAINS)
                return -E2BIG;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
        if (ret)
                return ret;

        for (i = 0; i < pdoms; i++) {
                passive_domains[i].domain_id = p_domains[i];
                passive_domains[i].max_samples = 2048;
                ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive,
                                             &passive_domains[i]);
                if (ret)
                        goto out;

                npages = (passive_domains[i].bufsize * passive_domains[i].nbuf - 1) / PAGE_SIZE + 1;

                area = alloc_vm_area(npages * PAGE_SIZE);
                if (area == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                ret = direct_kernel_remap_pfn_range(
                        (unsigned long)area->addr,
                        passive_domains[i].buf_maddr >> PAGE_SHIFT,
                        npages * PAGE_SIZE, prot, DOMID_SELF);
                if (ret) {
                        vunmap(area->addr);
                        goto out;
                }

                p_shared_buffer[i] = area->addr;

                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        buf = (struct xenoprof_buf *)
                                &p_shared_buffer[i][j * passive_domains[i].bufsize];
                        BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
                        p_xenoprof_buf[i][buf->vcpu_id] = buf;
                }
        }

        pdomains = pdoms;
        return 0;

out:
        for (j = 0; j < i; j++) {
                vunmap(p_shared_buffer[j]);
                p_shared_buffer[j] = NULL;
        }
        return ret;
}

struct op_counter_config counter_config[OP_MAX_COUNTER];
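
/*
 * Populate oprofilefs (normally mounted at /dev/oprofile) with one
 * directory per event counter - e.g. 0/enabled, 0/event, 0/count,
 * 0/unit_mask, 0/kernel, 0/user - each file backed by the
 * corresponding counter_config[] field.
 */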
static int xenoprof_create_files(struct super_block * sb, struct dentry * root)
{
        unsigned int i;

        for (i = 0; i < num_events; ++i) {
                struct dentry * dir;
                char buf[2];

                snprintf(buf, 2, "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled",
                                        &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event",
                                        &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count",
                                        &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask",
                                        &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel",
                                        &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user",
                                        &counter_config[i].user);
        }

        return 0;
}

struct oprofile_operations xenoprof_ops = {
        .create_files = xenoprof_create_files,
        .set_active   = xenoprof_set_active,
        .set_passive  = xenoprof_set_passive,
        .setup        = xenoprof_setup,
        .shutdown     = xenoprof_shutdown,
        .start        = xenoprof_start,
        .stop         = xenoprof_stop
};
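
/*
 * These operations are handed to the generic oprofile driver by
 * oprofile_arch_init() below (via *ops = xenoprof_ops).
 */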

/* in order to get driverfs right */
static int using_xenoprof;

int __init oprofile_arch_init(struct oprofile_operations * ops)
{
        struct xenoprof_init init;
        int ret, i;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);

        if (!ret) {
                num_events = init.num_events;
                is_primary = init.is_primary;

                /* just in case - make sure we do not overflow event list
                   (i.e. counter_config list) */
                if (num_events > OP_MAX_COUNTER)
                        num_events = OP_MAX_COUNTER;

                /* cpu_type is detected by Xen */
                cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
                strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
                xenoprof_ops.cpu_type = cpu_type;

                init_driverfs();
                using_xenoprof = 1;
                *ops = xenoprof_ops;

                for (i = 0; i < NR_CPUS; i++)
                        ovf_irq[i] = -1;

                active_defined = 0;
        }
        printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
               "is_primary %d\n", ret, num_events, is_primary);
        return ret;
}

void __exit oprofile_arch_exit(void)
{
        int i;

        if (using_xenoprof)
                exit_driverfs();

        if (shared_buffer) {
                vunmap(shared_buffer);
                shared_buffer = NULL;
        }

        if (is_primary) {
                for (i = 0; i < pdomains; i++)
                        if (p_shared_buffer[i]) {
                                vunmap(p_shared_buffer[i]);
                                p_shared_buffer[i] = NULL;
                        }
                HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
        }
}