#include "cpu_buffer.h"
#include "buffer_sync.h"
-#define DEFAULT_EXPIRE (HZ / 4)
-
-static void wq_sync_buffers(void *);
-static DECLARE_WORK(sync_wq, wq_sync_buffers, NULL);
-
-static struct timer_list sync_timer;
-static void timer_ping(unsigned long data);
-static void sync_cpu_buffers(void);
-
-
-/* We must make sure to process every entry in the CPU buffers
- * before a task got the PF_EXITING flag, otherwise we will hold
- * references to a possibly freed task_struct. We are safe with
- * samples past the PF_EXITING point in do_exit(), because we
- * explicitly check for that in cpu_buffer.c
+static LIST_HEAD(dying_tasks);
+static LIST_HEAD(dead_tasks);
+static cpumask_t marked_cpus = CPU_MASK_NONE;
+static DEFINE_SPINLOCK(task_mortuary);
+static void process_task_mortuary(void);
+
+
+/* Take ownership of the task struct and place it on the
+ * list for processing. Only after two full buffer syncs
+ * does the task eventually get freed, because by then
+ * we are sure we will not reference it again.
+ * Can be invoked from softirq context, because the task struct
+ * is released via a call_rcu() callback, hence the _irqsave.
*/
-static int exit_task_notify(struct notifier_block * self, unsigned long val, void * data)
+static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
- sync_cpu_buffers();
- return 0;
+ unsigned long flags;
+ struct task_struct * task = data;
+ spin_lock_irqsave(&task_mortuary, flags);
+ list_add(&task->tasks, &dying_tasks);
+ spin_unlock_irqrestore(&task_mortuary, flags);
+ return NOTIFY_OK;
}
-
-/* There are two cases of tasks modifying task->mm->mmap list we
- * must concern ourselves with. First, when a task is about to
- * exit (exit_mmap()), we should process the buffer to deal with
- * any samples in the CPU buffer, before we lose the ->mmap information
- * we need. It is vital to get this case correct, otherwise we can
- * end up trying to access a freed task_struct.
+
+
+/* The task is on its way out. A sync of the buffer means we can catch
+ * any remaining samples for this task.
*/
-static int mm_notify(struct notifier_block * self, unsigned long val, void * data)
+static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
{
- sync_cpu_buffers();
- return 0;
+ /* To avoid latency problems, we only process the current CPU,
+ * hoping that most samples for the task are on this CPU
+ */
+ sync_buffer(raw_smp_processor_id());
+ return 0;
}
-/* Second, a task may unmap (part of) an executable mmap,
- * so we want to process samples before that happens too. This is merely
- * a QOI issue not a correctness one.
+/* The task is about to try a do_munmap(). We peek at what it's going to
+ * do, and if it's an executable region, process the samples first, so
+ * we don't lose any. This does not have to be exact; it is a QoI
+ * issue only.
*/
static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
{
- /* Note that we cannot sync the buffers directly, because we might end up
- * taking the the mmap_sem that we hold now inside of event_buffer_read()
- * on a page fault, whilst holding buffer_sem - deadlock.
- *
- * This would mean a threaded reader of the event buffer, but we should
- * prevent it anyway.
- *
- * Delaying the work in a context that doesn't hold the mmap_sem means
- * that we won't lose samples from other mappings that current() may
- * have. Note that either way, we lose any pending samples for what is
- * being unmapped.
- */
- schedule_work(&sync_wq);
+ unsigned long addr = (unsigned long)data;
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * mpnt;
+
+ down_read(&mm->mmap_sem);
+
+ mpnt = find_vma(mm, addr);
+ if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
+ up_read(&mm->mmap_sem);
+ /* To avoid latency problems, we only process the current CPU,
+ * hoping that most samples for the task are on this CPU
+ */
+ sync_buffer(raw_smp_processor_id());
+ return 0;
+ }
+
+ up_read(&mm->mmap_sem);
return 0;
}
if (val != MODULE_STATE_COMING)
return 0;
- sync_cpu_buffers();
+ /* FIXME: should we process all CPU buffers ? */
down(&buffer_sem);
add_event_entry(ESCAPE_CODE);
add_event_entry(MODULE_LOADED_CODE);
}
-static struct notifier_block exit_task_nb = {
- .notifier_call = exit_task_notify,
+static struct notifier_block task_free_nb = {
+ .notifier_call = task_free_notify,
};
-static struct notifier_block exec_unmap_nb = {
- .notifier_call = munmap_notify,
+static struct notifier_block task_exit_nb = {
+ .notifier_call = task_exit_notify,
};
-static struct notifier_block exit_mmap_nb = {
- .notifier_call = mm_notify,
+static struct notifier_block munmap_nb = {
+ .notifier_call = munmap_notify,
};
static struct notifier_block module_load_nb = {
};
-static void end_sync_timer(void)
+static void end_sync(void)
{
- del_timer_sync(&sync_timer);
- /* timer might have queued work, make sure it's completed. */
- flush_scheduled_work();
+ end_cpu_work();
+ /* make sure we don't leak task structs: two passes are needed
+ * to drain both dying_tasks and dead_tasks
+ */
+ process_task_mortuary();
+ process_task_mortuary();
}
{
int err;
- init_timer(&sync_timer);
- sync_timer.function = timer_ping;
- sync_timer.expires = jiffies + DEFAULT_EXPIRE;
- add_timer(&sync_timer);
+ start_cpu_work();
- err = profile_event_register(EXIT_TASK, &exit_task_nb);
+ err = task_handoff_register(&task_free_nb);
if (err)
goto out1;
- err = profile_event_register(EXIT_MMAP, &exit_mmap_nb);
+ err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
if (err)
goto out2;
- err = profile_event_register(EXEC_UNMAP, &exec_unmap_nb);
+ err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
if (err)
goto out3;
err = register_module_notifier(&module_load_nb);
out:
return err;
out4:
- profile_event_unregister(EXEC_UNMAP, &exec_unmap_nb);
+ profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
- profile_event_unregister(EXIT_MMAP, &exit_mmap_nb);
+ profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
- profile_event_unregister(EXIT_TASK, &exit_task_nb);
+ task_handoff_unregister(&task_free_nb);
out1:
- end_sync_timer();
+ end_sync();
goto out;
}
void sync_stop(void)
{
unregister_module_notifier(&module_load_nb);
- profile_event_unregister(EXIT_TASK, &exit_task_nb);
- profile_event_unregister(EXIT_MMAP, &exit_mmap_nb);
- profile_event_unregister(EXEC_UNMAP, &exec_unmap_nb);
- end_sync_timer();
+ profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
+ profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
+ task_handoff_unregister(&task_free_nb);
+ end_sync();
}
*/
static unsigned long get_exec_dcookie(struct mm_struct * mm)
{
- unsigned long cookie = 0;
+ unsigned long cookie = NO_COOKIE;
struct vm_area_struct * vma;
if (!mm)
*/
static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
{
- unsigned long cookie = 0;
+ unsigned long cookie = NO_COOKIE;
struct vm_area_struct * vma;
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
- if (!vma->vm_file)
- continue;
-
if (addr < vma->vm_start || addr >= vma->vm_end)
continue;
- cookie = fast_get_dcookie(vma->vm_file->f_dentry,
- vma->vm_file->f_vfsmnt);
- *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start;
+ if (vma->vm_file) {
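+ /* file-backed mapping: turn addr into a dcookie and file offset */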
+ cookie = fast_get_dcookie(vma->vm_file->f_dentry,
+ vma->vm_file->f_vfsmnt);
+ *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
+ vma->vm_start;
+ } else {
+ /* must be an anonymous map */
+ *offset = addr;
+ }
+
break;
}
+ if (!vma)
+ cookie = INVALID_COOKIE;
+
return cookie;
}
-static unsigned long last_cookie = ~0UL;
+static unsigned long last_cookie = INVALID_COOKIE;
static void add_cpu_switch(int i)
{
add_event_entry(ESCAPE_CODE);
add_event_entry(CPU_SWITCH_CODE);
add_event_entry(i);
- last_cookie = ~0UL;
+ last_cookie = INVALID_COOKIE;
}
static void add_kernel_ctx_switch(unsigned int in_kernel)
}
+static void add_trace_begin(void)
+{
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(TRACE_BEGIN_CODE);
+}
+
+
static void add_sample_entry(unsigned long offset, unsigned long event)
{
add_event_entry(offset);
}
-static void add_us_sample(struct mm_struct * mm, struct op_sample * s)
+static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
{
unsigned long cookie;
off_t offset;
cookie = lookup_dcookie(mm, s->eip, &offset);
- if (!cookie) {
+ if (cookie == INVALID_COOKIE) {
atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- return;
+ return 0;
}
if (cookie != last_cookie) {
}
add_sample_entry(offset, s->event);
+
+ return 1;
}
* sample is converted into a persistent dentry/offset pair
* for later lookup from userspace.
*/
-static void add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
+static int
+add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
{
if (in_kernel) {
add_sample_entry(s->eip, s->event);
+ return 1;
} else if (mm) {
- add_us_sample(mm, s);
+ return add_us_sample(mm, s);
} else {
atomic_inc(&oprofile_stats.sample_lost_no_mm);
}
+ return 0;
}
-
+
static void release_mm(struct mm_struct * mm)
{
- if (mm)
- up_read(&mm->mmap_sem);
+ if (!mm)
+ return;
+ up_read(&mm->mmap_sem);
+ mmput(mm);
}
-/* Take the task's mmap_sem to protect ourselves from
- * races when we do lookup_dcookie().
- */
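+/* Grab a reference to the task's mm, if any, and take its mmap_sem
+ * so the VMA list is stable while we do lookup_dcookie().
+ */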
static struct mm_struct * take_tasks_mm(struct task_struct * task)
{
- struct mm_struct * mm;
-
- /* Subtle. We don't need to keep a reference to this task's mm,
- * because, for the mm to be freed on another CPU, that would have
- * to go through the task exit notifier, which ends up sleeping
- * on the buffer_sem we hold, so we end up with mutual exclusion
- * anyway.
- */
- task_lock(task);
- mm = task->mm;
- task_unlock(task);
-
- if (mm) {
- /* needed to walk the task's VMAs */
+ struct mm_struct * mm = get_task_mm(task);
+ if (mm)
down_read(&mm->mmap_sem);
- }
-
return mm;
}
-
-
-static inline int is_ctx_switch(unsigned long val)
+
+
+static inline int is_code(unsigned long val)
{
- return val == ~0UL;
+ return val == ESCAPE_CODE;
}
rmb();
- if (new_tail < (b->buffer_size))
+ if (new_tail < b->buffer_size)
b->tail_pos = new_tail;
else
b->tail_pos = 0;
}
+/* Move tasks along towards death. Any tasks on dead_tasks
+ * will definitely have no remaining references in any
+ * CPU buffers at this point, because we use two lists and
+ * a task must already have gone through one full sync to
+ * reach dead_tasks.
+ */
+static void process_task_mortuary(void)
+{
+ unsigned long flags;
+ LIST_HEAD(local_dead_tasks);
+ struct task_struct * task;
+ struct task_struct * ttask;
+
+ spin_lock_irqsave(&task_mortuary, flags);
+
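+ /* dead_tasks can be freed now; dying_tasks move up to take their place */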
+ list_splice_init(&dead_tasks, &local_dead_tasks);
+ list_splice_init(&dying_tasks, &dead_tasks);
+
+ spin_unlock_irqrestore(&task_mortuary, flags);
+
+ list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
+ list_del(&task->tasks);
+ free_task(task);
+ }
+}
+
+
+static void mark_done(int cpu)
+{
+ int i;
+
+ cpu_set(cpu, marked_cpus);
+
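+ /* do nothing until every online CPU has been synced at least once */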
+ for_each_online_cpu(i) {
+ if (!cpu_isset(i, marked_cpus))
+ return;
+ }
+
+ /* All CPUs have been processed at least once;
+ * we can now process the mortuary once.
+ */
+ process_task_mortuary();
+
+ cpus_clear(marked_cpus);
+}
+
+
+/* FIXME: this is not sufficient if we implement syscall barrier backtrace
+ * traversal: the code switches to sb_sample_start at the first kernel
+ * enter/exit switch, so we would need a fifth state and some special
+ * handling in sync_buffer()
+ */
+typedef enum {
+ sb_bt_ignore = -2,
+ sb_buffer_start,
+ sb_bt_start,
+ sb_sample_start,
+} sync_buffer_state;
+
/* Sync one of the CPU's buffers into the global event buffer.
* Here we need to go through each batch of samples punctuated
* by context switch notes, taking the task's mmap_sem and doing
* lookup in task->mm->mmap to convert EIP into dcookie/offset
* value.
*/
-static void sync_buffer(struct oprofile_cpu_buffer * cpu_buf)
+void sync_buffer(int cpu)
{
+ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
struct mm_struct *mm = NULL;
struct task_struct * new;
unsigned long cookie = 0;
int in_kernel = 1;
unsigned int i;
+ sync_buffer_state state = sb_buffer_start;
+ unsigned long available;
+
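+ /* buffer_sem protects the shared event buffer we add entries to */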
+ down(&buffer_sem);
+ add_cpu_switch(cpu);
+
/* Remember, only we can modify tail_pos */
- unsigned long const available = get_slots(cpu_buf);
-
- for (i=0; i < available; ++i) {
+ available = get_slots(cpu_buf);
+
+ for (i = 0; i < available; ++i) {
struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
- if (is_ctx_switch(s->eip)) {
- if (s->event <= 1) {
+ if (is_code(s->eip)) {
+ if (s->event <= CPU_IS_KERNEL) {
/* kernel/userspace switch */
in_kernel = s->event;
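+				/* the first kernel/user switch note marks the start of usable samples */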
+ if (state == sb_buffer_start)
+ state = sb_sample_start;
add_kernel_ctx_switch(s->event);
+ } else if (s->event == CPU_TRACE_BEGIN) {
+ state = sb_bt_start;
+ add_trace_begin();
} else {
struct mm_struct * oldmm = mm;
add_user_ctx_switch(new, cookie);
}
} else {
- add_sample(mm, s, in_kernel);
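+ /* if the first sample of a backtrace fails to map,
+ * drop the rest of that trace as well */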
+ if (state >= sb_bt_start &&
+ !add_sample(mm, s, in_kernel)) {
+ if (state == sb_bt_start) {
+ state = sb_bt_ignore;
+ atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+ }
+ }
}
increment_tail(cpu_buf);
}
release_mm(mm);
-}
-
-
-/* Process each CPU's local buffer into the global
- * event buffer.
- */
-static void sync_cpu_buffers(void)
-{
- int i;
- down(&buffer_sem);
-
- for (i = 0; i < NR_CPUS; ++i) {
- struct oprofile_cpu_buffer * cpu_buf;
-
- if (!cpu_possible(i))
- continue;
-
- cpu_buf = &cpu_buffer[i];
-
- add_cpu_switch(i);
- sync_buffer(cpu_buf);
- }
+ mark_done(cpu);
up(&buffer_sem);
-
- mod_timer(&sync_timer, jiffies + DEFAULT_EXPIRE);
-}
-
-
-static void wq_sync_buffers(void * data)
-{
- sync_cpu_buffers();
-}
-
-
-/* It is possible that we could have no munmap() or
- * other events for a period of time. This will lead
- * the CPU buffers to overflow and lose samples and
- * context switches. We try to reduce the problem
- * by timing out when nothing happens for a while.
- */
-static void timer_ping(unsigned long data)
-{
- schedule_work(&sync_wq);
- /* timer is re-added by the scheduled task */
}