*
* @author John Levon <levon@movementarian.org>
*
+ * Modified by Aravind Menon for Xen
+ * These modifications are:
+ * Copyright (C) 2005 Hewlett-Packard Co.
+ *
* This is the core of the buffer management. Each
* CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * for timely delivery of samples to userspace.
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
-cpumask_t marked_cpus = CPU_MASK_NONE;
-static spinlock_t task_mortuary = SPIN_LOCK_UNLOCKED;
-void process_task_mortuary(void);
+static cpumask_t marked_cpus = CPU_MASK_NONE;
+static DEFINE_SPINLOCK(task_mortuary);
+static void process_task_mortuary(void);
/* Take ownership of the task struct and place it on the
* list for processing. Only after two full buffer syncs
* does the task eventually get freed, because by then
* we are sure we will not reference it again.
+ * Can be invoked from softirq context via the RCU callback, since
+ * the task struct is freed with call_rcu(); hence the _irqsave.
*/
static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
- struct task_struct * task = (struct task_struct *)data;
- spin_lock(&task_mortuary);
+ unsigned long flags;
+ struct task_struct * task = data;
+ spin_lock_irqsave(&task_mortuary, flags);
list_add(&task->tasks, &dying_tasks);
- spin_unlock(&task_mortuary);
+ spin_unlock_irqrestore(&task_mortuary, flags);
return NOTIFY_OK;
}
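/* Illustration, not part of the patch: why the _irqsave variant is
 * needed above. task_mortuary can now be taken from softirq context
 * (the RCU callback that frees the task struct), so a process-context
 * holder must disable interrupts, or the softirq could preempt it on
 * the same CPU and spin on the lock forever. A minimal sketch of the
 * two contexts; my_lock, my_rcu_cb and my_work are hypothetical names:
 */
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

static DEFINE_SPINLOCK(my_lock);

static void my_rcu_cb(struct rcu_head * head)	/* softirq context */
{
	spin_lock(&my_lock);		/* plain lock is fine in softirq */
	/* ... touch state shared with process context ... */
	spin_unlock(&my_lock);
}

static void my_work(void)		/* process context */
{
	unsigned long flags;

	/* a plain spin_lock() here could deadlock against my_rcu_cb() */
	spin_lock_irqsave(&my_lock, flags);
	/* ... touch the shared state ... */
	spin_unlock_irqrestore(&my_lock, flags);
}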
/* To avoid latency problems, we only process the current CPU,
* hoping that most samples for the task are on this CPU
*/
- sync_buffer(smp_processor_id());
+ sync_buffer(raw_smp_processor_id());
return 0;
}
/* To avoid latency problems, we only process the current CPU,
* hoping that most samples for the task are on this CPU
*/
- sync_buffer(smp_processor_id());
+ sync_buffer(raw_smp_processor_id());
return 0;
}
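/* Illustration, not part of the patch: smp_processor_id() emits a
 * debug warning when called from preemptible context, since the task
 * may migrate right after the read. raw_smp_processor_id() skips the
 * check; a stale CPU number is tolerable here because syncing any
 * CPU's buffer is correct, the choice is only a latency heuristic:
 */
static void example_sync(void)		/* hypothetical caller */
{
	/* the CPU may change once we are preempted; that is acceptable */
	sync_buffer(raw_smp_processor_id());
}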
return 0;
/* FIXME: should we process all CPU buffers ? */
- down(&buffer_sem);
+ mutex_lock(&buffer_mutex);
add_event_entry(ESCAPE_CODE);
add_event_entry(MODULE_LOADED_CODE);
- up(&buffer_sem);
+ mutex_unlock(&buffer_mutex);
#endif
return 0;
}
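/* Illustration, not part of this hunk: the conversion assumes a
 * matching declaration change elsewhere (presumably event_buffer.c),
 * following the usual semaphore-to-mutex pattern:
 */
-DECLARE_MUTEX(buffer_sem);	/* old API: a semaphore used as a mutex */
+DEFINE_MUTEX(buffer_mutex);	/* a real struct mutex */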
*/
static unsigned long get_exec_dcookie(struct mm_struct * mm)
{
- unsigned long cookie = 0;
+ unsigned long cookie = NO_COOKIE;
struct vm_area_struct * vma;
	if (!mm)
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
- cookie = fast_get_dcookie(vma->vm_file->f_dentry,
- vma->vm_file->f_vfsmnt);
+ cookie = fast_get_dcookie(vma->vm_file->f_path.dentry,
+ vma->vm_file->f_path.mnt);
break;
}
*/
static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
{
- unsigned long cookie = 0;
+ unsigned long cookie = NO_COOKIE;
struct vm_area_struct * vma;
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
- if (!vma->vm_file)
- continue;
-
if (addr < vma->vm_start || addr >= vma->vm_end)
continue;
- cookie = fast_get_dcookie(vma->vm_file->f_dentry,
- vma->vm_file->f_vfsmnt);
- *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start;
+ if (vma->vm_file) {
+ cookie = fast_get_dcookie(vma->vm_file->f_path.dentry,
+ vma->vm_file->f_path.mnt);
+ *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
+ vma->vm_start;
+ } else {
+ /* must be an anonymous map */
+ *offset = addr;
+ }
+
break;
}
+ if (!vma)
+ cookie = INVALID_COOKIE;
+
return cookie;
}
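/* Illustration, not part of the patch: the two sentinels this hunk
 * distinguishes, as presumably defined in event_buffer.h. NO_COOKIE
 * means the VMA has no backing file (an anonymous map, reported with
 * the raw address as its offset); INVALID_COOKIE means no VMA covers
 * the address at all, which is counted as a lost sample:
 */
#define NO_COOKIE	0UL
#define INVALID_COOKIE	~0UL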
-static unsigned long last_cookie = ~0UL;
+static unsigned long last_cookie = INVALID_COOKIE;
static void add_cpu_switch(int i)
{
add_event_entry(ESCAPE_CODE);
add_event_entry(CPU_SWITCH_CODE);
add_event_entry(i);
- last_cookie = ~0UL;
+ last_cookie = INVALID_COOKIE;
}
-static void add_kernel_ctx_switch(unsigned int in_kernel)
+static void add_cpu_mode_switch(unsigned int cpu_mode)
{
add_event_entry(ESCAPE_CODE);
- if (in_kernel)
- add_event_entry(KERNEL_ENTER_SWITCH_CODE);
- else
- add_event_entry(KERNEL_EXIT_SWITCH_CODE);
+ switch (cpu_mode) {
+ case CPU_MODE_USER:
+ add_event_entry(USER_ENTER_SWITCH_CODE);
+ break;
+ case CPU_MODE_KERNEL:
+ add_event_entry(KERNEL_ENTER_SWITCH_CODE);
+ break;
+ case CPU_MODE_XEN:
+ add_event_entry(XEN_ENTER_SWITCH_CODE);
+ break;
+ default:
+ break;
+ }
}
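/* Illustration, not part of the patch: the CPU_MODE_* values assumed
 * by this switch and by the s->event <= CPU_MODE_XEN and
 * cpu_mode >= CPU_MODE_KERNEL comparisons below; the numeric ordering
 * user < kernel < xen is load-bearing in those tests:
 */
#define CPU_MODE_USER	0
#define CPU_MODE_KERNEL	1
#define CPU_MODE_XEN	2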
-
+
+static void add_domain_switch(unsigned long domain_id)
+{
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(DOMAIN_SWITCH_CODE);
+ add_event_entry(domain_id);
+}
+
static void
add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
{
}
+static void add_trace_begin(void)
+{
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(TRACE_BEGIN_CODE);
+}
+
+
static void add_sample_entry(unsigned long offset, unsigned long event)
{
add_event_entry(offset);
}
-static void add_us_sample(struct mm_struct * mm, struct op_sample * s)
+static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
{
unsigned long cookie;
off_t offset;
cookie = lookup_dcookie(mm, s->eip, &offset);
- if (!cookie) {
+ if (cookie == INVALID_COOKIE) {
atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- return;
+ return 0;
}
if (cookie != last_cookie) {
}
add_sample_entry(offset, s->event);
+
+ return 1;
}
* sample is converted into a persistent dentry/offset pair
* for later lookup from userspace.
*/
-static void add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
+static int
+add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
{
- if (in_kernel) {
+ if (cpu_mode >= CPU_MODE_KERNEL) {
add_sample_entry(s->eip, s->event);
+ return 1;
} else if (mm) {
- add_us_sample(mm, s);
+ return add_us_sample(mm, s);
} else {
atomic_inc(&oprofile_stats.sample_lost_no_mm);
}
+ return 0;
}
}
-static inline int is_ctx_switch(unsigned long val)
+static inline int is_code(unsigned long val)
{
- return val == ~0UL;
+ return val == ESCAPE_CODE;
}
rmb();
- if (new_tail < (b->buffer_size))
+ if (new_tail < b->buffer_size)
b->tail_pos = new_tail;
else
b->tail_pos = 0;
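/* Illustration, not part of the patch: the surrounding helper as it
 * presumably reads in full. The rmb() keeps the reads of the sample
 * slot ordered before the consumer moves on, pairing with the barrier
 * on the per-CPU producer side:
 */
static void increment_tail(struct oprofile_cpu_buffer * b)
{
	unsigned long new_tail = b->tail_pos + 1;

	rmb();	/* finish reading the slot before advancing tail_pos */

	if (new_tail < b->buffer_size)
		b->tail_pos = new_tail;
	else
		b->tail_pos = 0;
}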
* and to have reached the list, it must have gone through
* one full sync already.
*/
-void process_task_mortuary(void)
+static void process_task_mortuary(void)
{
- struct list_head * pos;
- struct list_head * pos2;
+ unsigned long flags;
+ LIST_HEAD(local_dead_tasks);
struct task_struct * task;
+ struct task_struct * ttask;
- spin_lock(&task_mortuary);
+ spin_lock_irqsave(&task_mortuary, flags);
- list_for_each_safe(pos, pos2, &dead_tasks) {
- task = list_entry(pos, struct task_struct, tasks);
- list_del(&task->tasks);
- free_task(task);
- }
+ list_splice_init(&dead_tasks, &local_dead_tasks);
+ list_splice_init(&dying_tasks, &dead_tasks);
- list_for_each_safe(pos, pos2, &dying_tasks) {
- task = list_entry(pos, struct task_struct, tasks);
+ spin_unlock_irqrestore(&task_mortuary, flags);
+
+ list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
list_del(&task->tasks);
- list_add_tail(&task->tasks, &dead_tasks);
+ free_task(task);
}
-
- spin_unlock(&task_mortuary);
}
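/* Illustration, not part of the patch: the rewrite above uses the
 * splice-then-free pattern so the lock is held only for O(1) pointer
 * swaps and free_task() runs with interrupts enabled. A generic
 * sketch; pending, pending_lock and drain_pending are hypothetical:
 */
static void drain_pending(void)
{
	unsigned long flags;
	struct task_struct * task;
	struct task_struct * tmp;
	LIST_HEAD(graveyard);

	spin_lock_irqsave(&pending_lock, flags);
	list_splice_init(&pending, &graveyard);	/* O(1) under the lock */
	spin_unlock_irqrestore(&pending_lock, flags);

	/* the slow per-item work happens with the lock dropped */
	list_for_each_entry_safe(task, tmp, &graveyard, tasks)
		free_task(task);
}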
}
+/* FIXME: this is not sufficient if we implement syscall barrier
+ * backtrace traversal: the code switches to sb_sample_start at the
+ * first kernel enter/exit switch, so we would need a fifth state and
+ * some special handling in sync_buffer().
+ */
+typedef enum {
+ sb_bt_ignore = -2,
+ sb_buffer_start,
+ sb_bt_start,
+ sb_sample_start,
+} sync_buffer_state;
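/* Illustration, not part of the patch: how sync_buffer() below drives
 * these states. Samples are only emitted while state >= sb_bt_start:
 *
 *   sb_buffer_start -> sb_sample_start  on the first cpu-mode switch
 *                                       (earlier samples cannot be
 *                                       attributed and are skipped)
 *   any state       -> sb_bt_start      on CPU_TRACE_BEGIN (backtrace
 *                                       entries follow)
 *   sb_bt_start     -> sb_bt_ignore     when a backtrace entry cannot
 *                                       be mapped; the rest of that
 *                                       backtrace is dropped and
 *                                       bt_lost_no_mapping incremented
 */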
+
/* Sync one of the CPU's buffers into the global event buffer.
* Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * a lookup in task->mm->mmap to convert the EIP into a dcookie/offset
 * pair.
 */
struct mm_struct *mm = NULL;
struct task_struct * new;
unsigned long cookie = 0;
- int in_kernel = 1;
+	int cpu_mode = CPU_MODE_KERNEL;
unsigned int i;
+ sync_buffer_state state = sb_buffer_start;
unsigned long available;
+ int domain_switch = 0;
- down(&buffer_sem);
+ mutex_lock(&buffer_mutex);
add_cpu_switch(cpu);
available = get_slots(cpu_buf);
- for (i=0; i < available; ++i) {
+ for (i = 0; i < available; ++i) {
struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
- if (is_ctx_switch(s->eip)) {
- if (s->event <= 1) {
- /* kernel/userspace switch */
- in_kernel = s->event;
- add_kernel_ctx_switch(s->event);
+ if (is_code(s->eip) && !domain_switch) {
+ if (s->event <= CPU_MODE_XEN) {
+ /* xen/kernel/userspace switch */
+ cpu_mode = s->event;
+ if (state == sb_buffer_start)
+ state = sb_sample_start;
+ add_cpu_mode_switch(s->event);
+ } else if (s->event == CPU_TRACE_BEGIN) {
+ state = sb_bt_start;
+ add_trace_begin();
+ } else if (s->event == CPU_DOMAIN_SWITCH) {
+ domain_switch = 1;
} else {
struct mm_struct * oldmm = mm;
add_user_ctx_switch(new, cookie);
}
} else {
- add_sample(mm, s, in_kernel);
+ if (domain_switch) {
+ add_domain_switch(s->eip);
+ domain_switch = 0;
+ } else {
+ if (state >= sb_bt_start &&
+ !add_sample(mm, s, cpu_mode)) {
+ if (state == sb_bt_start) {
+ state = sb_bt_ignore;
+ atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+ }
+ }
+ }
}
increment_tail(cpu_buf);
mark_done(cpu);
- up(&buffer_sem);
+ mutex_unlock(&buffer_mutex);
}
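/* Illustration, not part of the patch: the resulting global event
 * buffer is a flat stream of unsigned longs in which ESCAPE_CODE
 * introduces control records, e.g. (with the Xen additions):
 *
 *   ESCAPE_CODE, CPU_SWITCH_CODE,          <cpu number>,
 *   ESCAPE_CODE, DOMAIN_SWITCH_CODE,       <domain id>,
 *   ESCAPE_CODE, KERNEL_ENTER_SWITCH_CODE,
 *   <offset>, <event>, <offset>, <event>, ...
 *
 * Anything not preceded by ESCAPE_CODE is sample data; the userspace
 * daemon demultiplexes the stream using these markers.
 */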