static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
cpumask_t marked_cpus = CPU_MASK_NONE;
-static spinlock_t task_mortuary = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(task_mortuary);
void process_task_mortuary(void);
*/
static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
- struct task_struct * task = (struct task_struct *)data;
+ struct task_struct * task = data;
spin_lock(&task_mortuary);
list_add(&task->tasks, &dying_tasks);
spin_unlock(&task_mortuary);
/* To avoid latency problems, we only process the current CPU,
* hoping that most samples for the task are on this CPU
*/
- sync_buffer(smp_processor_id());
+ sync_buffer(_smp_processor_id());
return 0;
}
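The hunk above only declares process_task_mortuary(). Because the notifier syncs just the current CPU's buffer, other CPUs may still hold samples that reference the dying task, which is why two lists are used: a task sits on dying_tasks until a full sync pass has gone by and only then becomes safe to free. A sketch of that two-stage scheme, using the standard <linux/list.h> helpers; the body below is illustrative, not quoted from the file:

/* Sketch only: free tasks that have survived one full sync (dead_tasks)
 * and promote the newly dying ones (dying_tasks) for the next pass. */
void process_task_mortuary(void)
{
	struct list_head *pos, *pos2;
	struct task_struct *task;

	spin_lock(&task_mortuary);

	/* Nothing can reference these tasks any more: free them. */
	list_for_each_safe(pos, pos2, &dead_tasks) {
		task = list_entry(pos, struct task_struct, tasks);
		list_del(&task->tasks);
		free_task(task);
	}

	/* Tasks that died since the last sync move one step closer. */
	list_for_each_safe(pos, pos2, &dying_tasks) {
		task = list_entry(pos, struct task_struct, tasks);
		list_del(&task->tasks);
		list_add_tail(&task->tasks, &dead_tasks);
	}

	spin_unlock(&task_mortuary);
}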
}
+static void add_trace_begin(void)
+{
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(TRACE_BEGIN_CODE);
+}
+
+
static void add_sample_entry(unsigned long offset, unsigned long event)
{
add_event_entry(offset);
add_event_entry(event);
}
-static void add_us_sample(struct mm_struct * mm, struct op_sample * s)
+static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
{
unsigned long cookie;
off_t offset;
if (!cookie) {
atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- return;
+ return 0;
}
if (cookie != last_cookie) {
}
add_sample_entry(offset, s->event);
+
+ return 1;
}
* sample is converted into a persistent dentry/offset pair
* for later lookup from userspace.
*/
-static void add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
+static int
+add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
{
if (in_kernel) {
add_sample_entry(s->eip, s->event);
+ return 1;
} else if (mm) {
- add_us_sample(mm, s);
+ return add_us_sample(mm, s);
} else {
atomic_inc(&oprofile_stats.sample_lost_no_mm);
}
+ return 0;
}
}
-static inline int is_ctx_switch(unsigned long val)
+static inline int is_code(unsigned long val)
{
- return val == ~0UL;
+ return val == ESCAPE_CODE;
}
rmb();
- if (new_tail < (b->buffer_size))
+ if (new_tail < b->buffer_size)
b->tail_pos = new_tail;
else
b->tail_pos = 0;
}
+/* FIXME: this is not sufficient if we implement syscall barrier backtrace
+ * traversal: the code switches to sb_sample_start at the first kernel
+ * enter/exit switch, so we would need a fifth state and some special
+ * handling in sync_buffer().
+ */
+typedef enum {
+ sb_bt_ignore = -2,
+ sb_buffer_start,
+ sb_bt_start,
+ sb_sample_start,
+} sync_buffer_state;
+
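To spell out what the new states are for, here is the transition logic of the sync_buffer() hunk below pulled into a standalone helper. The helper and its name (sb_next_state) are illustrative only, not part of the patch; CPU_IS_KERNEL and CPU_TRACE_BEGIN are the CPU-buffer note codes used further down:

/* Illustrative helper: one step of the state machine.  is_code_note says
 * whether the entry was an ESCAPE_CODE note; sample_emitted is
 * add_sample()'s return value for ordinary entries. */
static sync_buffer_state
sb_next_state(sync_buffer_state state, unsigned long event,
	      int is_code_note, int sample_emitted)
{
	if (is_code_note) {
		if (event == CPU_TRACE_BEGIN)
			return sb_bt_start;	/* backtrace entries follow */
		if (event <= CPU_IS_KERNEL && state == sb_buffer_start)
			return sb_sample_start;	/* first kernel/user switch seen */
		return state;
	}
	/* A backtrace whose leading sample could not be mapped is dropped. */
	if (state == sb_bt_start && !sample_emitted)
		return sb_bt_ignore;
	return state;
}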
/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * the lookup in task->mm->mmap to convert the EIP into a
 * dcookie/offset pair.
 */
unsigned long cookie = 0;
int in_kernel = 1;
unsigned int i;
+ sync_buffer_state state = sb_buffer_start;
unsigned long available;
down(&buffer_sem);
available = get_slots(cpu_buf);
- for (i=0; i < available; ++i) {
+ for (i = 0; i < available; ++i) {
struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
- if (is_ctx_switch(s->eip)) {
- if (s->event <= 1) {
+ if (is_code(s->eip)) {
+ if (s->event <= CPU_IS_KERNEL) {
/* kernel/userspace switch */
in_kernel = s->event;
+ if (state == sb_buffer_start)
+ state = sb_sample_start;
add_kernel_ctx_switch(s->event);
+ } else if (s->event == CPU_TRACE_BEGIN) {
+ state = sb_bt_start;
+ add_trace_begin();
} else {
struct mm_struct * oldmm = mm;
add_user_ctx_switch(new, cookie);
}
} else {
- add_sample(mm, s, in_kernel);
+ if (state >= sb_bt_start &&
+ !add_sample(mm, s, in_kernel)) {
+ if (state == sb_bt_start) {
+ state = sb_bt_ignore;
+ atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+ }
+ }
}
increment_tail(cpu_buf);
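The loop is sized by get_slots() and advances with increment_tail(), whose wrap logic appears in the earlier hunk. A plausible shape for get_slots(), assuming the CPU buffer struct is named oprofile_cpu_buffer and also carries a producer index head_pos (both names are assumptions; only tail_pos and buffer_size appear in this fragment):

/* Sketch: number of entries the consumer may read, i.e. the distance
 * from tail_pos to head_pos in a ring of buffer_size slots. */
static unsigned long get_slots(struct oprofile_cpu_buffer *b)
{
	unsigned long head = b->head_pos;	/* assumed producer index */
	unsigned long tail = b->tail_pos;

	if (head >= tail)
		return head - tail;

	return head + (b->buffer_size - tail);
}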