/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Modified by Aravind Menon for Xen
 * These modifications are:
 * Copyright (C) 2005 Hewlett-Packard Co.
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
	unsigned long flags;
	struct task_struct * task = data;

	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);

	return NOTIFY_OK;
}

/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}

/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct * mm = current->mm;
	struct vm_area_struct * mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}

/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int module_load_notify(struct notifier_block * self, unsigned long val, void * data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}

static struct notifier_block task_free_nb = {
	.notifier_call	= task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call	= task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call	= munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

static void end_sync(void)
{
	end_cpu_work();
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}

int sync_start(void)
{
	int err;

	start_cpu_work();

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
out1:
	end_sync();
	goto out;
}

void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	end_sync();
}

/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct dentry * dentry,
	struct vfsmount * vfsmnt)
{
	unsigned long cookie;

	if (dentry->d_cookie)
		return (unsigned long)dentry;
	get_dcookie(dentry, vfsmnt, &cookie);
	return cookie;
}

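/* Note: the cookie handed out here is effectively the kernel address of the
 * dentry; get_dcookie() pins the dentry and vfsmount so the value stays valid
 * while a dcookie user (the event buffer reader) remains registered, and
 * userspace later resolves it to a path via the lookup_dcookie() syscall.
 */
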
/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct * mm)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct * vma;

	if (!mm)
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
		cookie = fast_get_dcookie(vma->vm_file->f_dentry,
			vma->vm_file->f_vfsmnt);
		break;
	}

out:
	return cookie;
}

/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct * vma;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(vma->vm_file->f_dentry,
				vma->vm_file->f_vfsmnt);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;

	return cookie;
}

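/* Worked example: a sample at addr 0x08049123 inside a file-backed VMA with
 * vm_start 0x08048000 and vm_pgoff 0 yields that file's cookie and offset
 * 0x1123; for an anonymous VMA the cookie stays NO_COOKIE and the raw
 * address is reported as the offset.
 */
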
static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_cpu_mode_switch(unsigned int cpu_mode)
{
	add_event_entry(ESCAPE_CODE);
	switch (cpu_mode) {
	case CPU_MODE_USER:
		add_event_entry(USER_ENTER_SWITCH_CODE);
		break;
	case CPU_MODE_KERNEL:
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
		break;
	case CPU_MODE_XEN:
		add_event_entry(XEN_ENTER_SWITCH_CODE);
		break;
	default:
		break;
	}
}

static void add_domain_switch(unsigned long domain_id)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(DOMAIN_SWITCH_CODE);
	add_event_entry(domain_id);
}

static void
add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}

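/* The resulting event-buffer records for one user context switch are:
 *   ESCAPE_CODE, CTX_SWITCH_CODE, pid, app cookie,
 *   ESCAPE_CODE, CTX_TGID_CODE, tgid
 * i.e. two back-to-back escape sequences, kept separate for daemon
 * back-compatibility as noted above.
 */
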
static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}

static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

static void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}

static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
{
	unsigned long cookie;
	off_t offset;

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}

/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
{
	if (cpu_mode >= CPU_MODE_KERNEL) {
		add_sample_entry(s->eip, s->event);
		return 1;
	} else if (mm) {
		return add_us_sample(mm, s);
	} else {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
	}
	return 0;
}

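/* Kernel- and Xen-mode samples keep their raw EIP; user-mode samples need a
 * valid mm for the dcookie/offset conversion, otherwise they are dropped and
 * accounted in sample_lost_no_mm.
 */
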
static void release_mm(struct mm_struct * mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct mm_struct * take_tasks_mm(struct task_struct * task)
{
	struct mm_struct * mm = get_task_mm(task);
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}

409 /* "acquire" as many cpu buffer slots as we can */
410 static unsigned long get_slots(struct oprofile_cpu_buffer * b)
412 unsigned long head = b->head_pos;
413 unsigned long tail = b->tail_pos;
416 * Subtle. This resets the persistent last_task
417 * and in_kernel values used for switching notes.
418 * BUT, there is a small window between reading
419 * head_pos, and this call, that means samples
420 * can appear at the new head position, but not
421 * be prefixed with the notes for switching
422 * kernel mode or a task switch. This small hole
423 * can lead to mis-attribution or samples where
424 * we don't know if it's in the kernel or not,
425 * at the start of an event buffer.
432 return head + (b->buffer_size - tail);
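/* Example: with buffer_size 8, head_pos 2 and tail_pos 6 the buffer has
 * wrapped, so we may consume 2 + (8 - 6) = 4 slots.
 */
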
static void increment_tail(struct oprofile_cpu_buffer * b)
{
	unsigned long new_tail = b->tail_pos + 1;

	rmb();

	if (new_tail < b->buffer_size)
		b->tail_pos = new_tail;
	else
		b->tail_pos = 0;
}

/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct * task;
	struct task_struct * ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}

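/* A task is therefore freed only after two passes: task_free_notify() puts it
 * on dying_tasks, one call here moves it to dead_tasks, and the next call
 * frees it, by which time no CPU buffer can still reference it.
 */
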
static void mark_done(int cpu)
{
	int i;

	cpu_set(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpu_isset(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpus_clear(marked_cpus);
}

/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal: the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we need a fifth state and some special handling
 * in sync_buffer().
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;

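/* State meanings, as used in sync_buffer():
 *   sb_bt_ignore    - drop samples; a backtrace began with an unmappable EIP
 *   sb_buffer_start - no cpu mode note seen yet, samples not yet attributable
 *   sb_bt_start     - currently inside a backtrace
 *   sb_sample_start - normal sample processing
 */
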
/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
	struct mm_struct *mm = NULL;
	struct task_struct * new;
	unsigned long cookie = 0;
	int cpu_mode = CPU_MODE_KERNEL;
	unsigned int i;
	sync_buffer_state state = sb_buffer_start;
	unsigned long available;
	int domain_switch = 0;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	/* Remember, only we can modify tail_pos */

	available = get_slots(cpu_buf);

	for (i = 0; i < available; ++i) {
		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];

		if (is_code(s->eip) && !domain_switch) {
			if (s->event <= CPU_MODE_XEN) {
				/* xen/kernel/userspace switch */
				cpu_mode = s->event;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_cpu_mode_switch(s->event);
			} else if (s->event == CPU_TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			} else if (s->event == CPU_DOMAIN_SWITCH) {
				domain_switch = 1;
			} else {
				struct mm_struct * oldmm = mm;

				/* userspace context switch */
				new = (struct task_struct *)s->event;

				release_mm(oldmm);
				mm = take_tasks_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
		} else {
			if (domain_switch) {
				add_domain_switch(s->eip);
				domain_switch = 0;
			} else {
				if (state >= sb_bt_start &&
				    !add_sample(mm, s, cpu_mode)) {
					if (state == sb_bt_start) {
						state = sb_bt_ignore;
						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
					}
				}
			}
		}

		increment_tail(cpu_buf);
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}