*
* @author John Levon <levon@movementarian.org>
*
+ * Modified by Aravind Menon for Xen
+ * These modifications are:
+ * Copyright (C) 2005 Hewlett-Packard Co.
+ *
* Each CPU has a local buffer that stores PC value/event
* pairs. We also log context switches when we notice them.
* Eventually each CPU's buffer is processed into the global
struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
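For reference, the per-CPU buffer type these hunks touch ends up roughly as below once the matching cpu_buffer.h change is applied. This is a sketch based on the vanilla 2.6 header plus the two renamed fields, not the authoritative definition:

    struct oprofile_cpu_buffer {
            volatile unsigned long head_pos;    /* next slot the writer (interrupt) fills */
            volatile unsigned long tail_pos;    /* next slot the reader (worker) drains */
            unsigned long buffer_size;
            struct task_struct *last_task;      /* task of the last logged sample */
            int last_cpu_mode;                  /* replaces last_is_kernel */
            int tracing;
            struct op_sample *buffer;
            unsigned long sample_received;
            unsigned long sample_lost_overflow;
            int cpu;
            struct delayed_work work;           /* replaces struct work_struct work */
    } ____cacheline_aligned;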
-static void wq_sync_buffer(void *);
+static void wq_sync_buffer(struct work_struct *work);
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
+static int32_t current_domain = COORDINATOR_DOMAIN;
+
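current_domain records whose samples the local buffers are logging at the moment; COORDINATOR_DOMAIN stands for the domain running this driver (dom0). The constants this file depends on would sit in cpu_buffer.h along the following lines; treat the exact values as an assumption of this sketch:

    /* cpu_mode values, logged whenever the sampled mode changes */
    #define CPU_MODE_USER           0
    #define CPU_MODE_KERNEL         1
    #define CPU_MODE_XEN            2
    /* escape code: the samples that follow belong to the given domain */
    #define CPU_DOMAIN_SWITCH       4
    /* the domain coordinating profiling, i.e. the one running this code */
    #define COORDINATOR_DOMAIN      -1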
void free_cpu_buffers(void)
{
int i;
- for_each_online_cpu(i) {
+ for_each_online_cpu(i)
vfree(cpu_buffer[i].buffer);
- }
}
int alloc_cpu_buffers(void)
goto fail;
b->last_task = NULL;
- b->last_is_kernel = -1;
+ b->last_cpu_mode = -1;
b->tracing = 0;
b->buffer_size = buffer_size;
b->tail_pos = 0;
b->sample_received = 0;
b->sample_lost_overflow = 0;
b->cpu = i;
- INIT_WORK(&b->work, wq_sync_buffer, b);
+ INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
}
return 0;
* collected will populate the buffer with proper
* values to initialize the buffer
*/
- cpu_buf->last_is_kernel = -1;
+ cpu_buf->last_cpu_mode = -1;
cpu_buf->last_task = NULL;
}
* because of the head/tail separation of the writer and reader
* of the CPU buffer.
*
- * is_kernel is needed because on some architectures you cannot
+ * cpu_mode is needed because on some architectures you cannot
* tell if you are in kernel or user space simply by looking at
- * pc. We tag this in the buffer by generating kernel enter/exit
- * events whenever is_kernel changes
+ * pc. We tag this in the buffer by generating user/kernel/xen
+ * mode-switch events whenever cpu_mode changes
*/
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
- int is_kernel, unsigned long event)
+ int cpu_mode, unsigned long event)
{
struct task_struct * task;
return 0;
}
- is_kernel = !!is_kernel;
+ WARN_ON(cpu_mode > CPU_MODE_XEN);
task = current;
/* notice a switch from user->kernel or vice versa */
- if (cpu_buf->last_is_kernel != is_kernel) {
- cpu_buf->last_is_kernel = is_kernel;
- add_code(cpu_buf, is_kernel);
+ if (cpu_buf->last_cpu_mode != cpu_mode) {
+ cpu_buf->last_cpu_mode = cpu_mode;
+ add_code(cpu_buf, cpu_mode);
}
/* notice a task switch */
- if (cpu_buf->last_task != task) {
+ /* unless we are currently logging another domain's samples */
+ if ((cpu_buf->last_task != task) &&
+ (current_domain == COORDINATOR_DOMAIN)) {
cpu_buf->last_task = task;
add_code(cpu_buf, (unsigned long)task);
}
cpu_buf->tracing = 0;
}
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
+void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+ unsigned long event, int is_kernel)
{
struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
- unsigned long pc = profile_pc(regs);
- int is_kernel = !user_mode(regs);
if (!backtrace_depth) {
log_sample(cpu_buf, pc, is_kernel, event);
oprofile_end_trace(cpu_buf);
}
+void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
+{
+ int is_kernel = !user_mode(regs);
+ unsigned long pc = profile_pc(regs);
+
+ oprofile_add_ext_sample(pc, regs, event, is_kernel);
+}
+
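Splitting oprofile_add_ext_sample() out of oprofile_add_sample() lets a caller that already knows the sampled mode, such as a Xen interrupt path delivering another domain's samples, pass it in directly rather than deriving it from the registers. A hypothetical caller, for illustration only:

    /* hypothetical consumer, not part of this patch: the hypervisor
     * already reports the mode, so pass it straight through */
    static void xenoprof_log_sample(unsigned long eip, int cpu_mode,
                                    unsigned long event, struct pt_regs *regs)
    {
            oprofile_add_ext_sample(eip, regs, event, cpu_mode);
    }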
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
add_sample(cpu_buf, pc, 0);
}
+int oprofile_add_domain_switch(int32_t domain_id)
+{
+ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+
+ /* should have space for switching into and out of the domain
+  * (2 slots each), plus one sample and one cpu mode switch */
+ if (((nr_available_slots(cpu_buf) < 6) &&
+ (domain_id != COORDINATOR_DOMAIN)) ||
+ (nr_available_slots(cpu_buf) < 2))
+ return 0;
+
+ add_code(cpu_buf, CPU_DOMAIN_SWITCH);
+ add_sample(cpu_buf, domain_id, 0);
+
+ current_domain = domain_id;
+
+ return 1;
+}
+
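The 6-slot check reserves the escape/id pair written here, room for at least one sample plus a possible cpu-mode escape pair, and the escape/id pair needed to switch back; returning to the coordinator itself only needs 2 slots. A hypothetical call pattern (helper name invented for illustration):

    /* sketch: how a caller would bracket another domain's samples */
    if (oprofile_add_domain_switch(passive_id)) {
            log_passive_domain_samples(passive_id);         /* assumed helper */
            oprofile_add_domain_switch(COORDINATOR_DOMAIN); /* switch back */
    }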
/*
* This serves to avoid cpu buffer overflow, and makes sure
* the task mortuary progresses
* By using schedule_delayed_work_on and then schedule_delayed_work
* we guarantee this will stay on the correct cpu
*/
-static void wq_sync_buffer(void * data)
+static void wq_sync_buffer(struct work_struct *work)
{
- struct oprofile_cpu_buffer * b = data;
+ struct oprofile_cpu_buffer * b =
+ container_of(work, struct oprofile_cpu_buffer, work.work);
if (b->cpu != smp_processor_id()) {
printk("WQ on CPU%d, prefer CPU%d\n",
smp_processor_id(), b->cpu);
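Under the 2.6.20 workqueue API, INIT_DELAYED_WORK() embeds a work_struct inside a struct delayed_work, which is why the handler above recovers its buffer through work.work. The CPU pinning described in the comment comes from queueing with schedule_delayed_work_on() at start and re-arming with plain schedule_delayed_work(), which keeps the work on the CPU it is already running on. A sketch of that surrounding code, following the shape of the vanilla file:

    /* start_cpu_work(): queue each buffer's work on its own CPU,
     * staggered by one jiffy so the buffers do not all drain at once */
    for_each_online_cpu(i) {
            struct oprofile_cpu_buffer *b = &cpu_buffer[i];
            schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
    }

    /* tail of wq_sync_buffer(): drain, then re-arm on the current CPU */
    sync_buffer(b->cpu);
    if (work_enabled)
            schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);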