/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Modified by Aravind Menon for Xen
 * These modifications are:
 * Copyright (C) 2005 Hewlett-Packard Co.
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;

static void wq_sync_buffer(void *);

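/* Each CPU buffer is drained roughly every DEFAULT_TIMER_EXPIRE jiffies;
 * work_enabled tells wq_sync_buffer() whether to re-arm itself and is
 * cleared while profiling is being shut down. */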
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

static int32_t current_domain = COORDINATOR_DOMAIN;

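/* Free the per-CPU sample buffers allocated by alloc_cpu_buffers(). */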
void free_cpu_buffers(void)
{
        int i;

        for_each_online_cpu(i)
                vfree(cpu_buffer[i].buffer);
}

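/* Allocate and initialise a sample buffer for each online CPU; on failure
 * any buffers already allocated are freed again. */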
int alloc_cpu_buffers(void)
{
        int i;
        unsigned long buffer_size = fs_cpu_buffer_size;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &cpu_buffer[i];

                b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
                                         cpu_to_node(i));
                if (!b->buffer)
                        goto fail;

                b->last_task = NULL;
                b->last_cpu_mode = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->tail_pos = 0;
                b->head_pos = 0;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->cpu = i;
                INIT_WORK(&b->work, wq_sync_buffer, b);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}

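/* Arm the per-CPU delayed work that periodically syncs each CPU buffer
 * into the global event buffer. */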
void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &cpu_buffer[i];

                /* Spread the work by 1 jiffy per cpu so they don't all
                 * fire at once. */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}

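/* Stop the periodic sync work and wait for any work still in flight. */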
void end_cpu_work(void)
{
        int i;

        work_enabled = 0;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &cpu_buffer[i];

                cancel_delayed_work(&b->work);
        }

        flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
{
        /* reset these to invalid values; the next sample
         * collected will populate the buffer with proper
         * values to initialize the buffer */
        cpu_buf->last_cpu_mode = -1;
        cpu_buf->last_task = NULL;
}

/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        if (tail > head)
                return (tail - head) - 1;

        return tail + (b->buffer_size - head) - 1;
}

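/* Example: with buffer_size == 8, tail == 2 and head == 6, slots 2..5 are
 * occupied and 2 + (8 - 6) - 1 == 3 slots are reported free; one slot is
 * always kept empty so a full buffer is never mistaken for an empty one. */

/* Advance the write position, wrapping back to slot 0 at the end of the
 * buffer. */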
static void increment_head(struct oprofile_cpu_buffer *b)
{
        unsigned long new_head = b->head_pos + 1;

        /* Ensure anything written to the slot before we
         * increment is visible */
        wmb();

        if (new_head < b->buffer_size)
                b->head_pos = new_head;
        else
                b->head_pos = 0;
}

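/* Store one PC/event pair at the current head position and advance the
 * head. */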
static inline void
add_sample(struct oprofile_cpu_buffer *cpu_buf,
           unsigned long pc, unsigned long event)
{
        struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];

        entry->eip = pc;
        entry->event = event;
        increment_head(cpu_buf);
}

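/* A "code" entry is a sample whose pc field is ESCAPE_CODE and whose event
 * field carries the code value (cpu mode, task pointer, trace or domain
 * marker); the reader interprets it when draining the buffer. */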
static inline void
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
        add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the cpu buffer.
 *
 * cpu_mode is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel/user (and xen)
 * enter events whenever cpu_mode changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
                      int cpu_mode, unsigned long event)
{
        struct task_struct *task;

        cpu_buf->sample_received++;

        if (nr_available_slots(cpu_buf) < 3) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        WARN_ON(cpu_mode > CPU_MODE_XEN);

        task = current;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_cpu_mode != cpu_mode) {
                cpu_buf->last_cpu_mode = cpu_mode;
                add_code(cpu_buf, cpu_mode);
        }

        /* notice a task switch, but only when not processing
         * samples from another domain */
        if ((cpu_buf->last_task != task) &&
            (current_domain == COORDINATOR_DOMAIN)) {
                cpu_buf->last_task = task;
                add_code(cpu_buf, (unsigned long)task);
        }

        add_sample(cpu_buf, pc, event);
        return 1;
}

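/* Reserve space and mark the start of a backtrace; subsequent
 * oprofile_add_trace() calls append the callchain entries. */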
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        if (nr_available_slots(cpu_buf) < 4) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        add_code(cpu_buf, CPU_TRACE_BEGIN);
        cpu_buf->tracing = 1;
        return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        cpu_buf->tracing = 0;
}

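/* Log a sample and, if a backtrace depth has been configured, follow it
 * with a callchain captured via oprofile_ops.backtrace(). */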
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                             unsigned long event, int is_kernel)
{
        struct oprofile_cpu_buffer *cpu_buf = &cpu_buffer[smp_processor_id()];

        if (!backtrace_depth) {
                log_sample(cpu_buf, pc, is_kernel, event);
                return;
        }

        if (!oprofile_begin_trace(cpu_buf))
                return;

        /* if log_sample() fails we can't backtrace since we lost the
         * source of this event */
        if (log_sample(cpu_buf, pc, is_kernel, event))
                oprofile_ops.backtrace(regs, backtrace_depth);
        oprofile_end_trace(cpu_buf);
}

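/* Main sample entry point: derive the pc and kernel/user mode from the
 * interrupted register state. */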
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);

        oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

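/* Log a sample for callers that already know the pc and cpu mode and have
 * no register state available. */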
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer *cpu_buf = &cpu_buffer[smp_processor_id()];

        log_sample(cpu_buf, pc, is_kernel, event);
}

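/* Append one callchain entry to a backtrace started by
 * oprofile_begin_trace(). */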
void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer *cpu_buf = &cpu_buffer[smp_processor_id()];

        if (!cpu_buf->tracing)
                return;

        if (nr_available_slots(cpu_buf) < 1) {
                cpu_buf->tracing = 0;
                cpu_buf->sample_lost_overflow++;
                return;
        }

        /* a broken frame can give an eip with the same value as an
         * escape code, abort the trace if we get it */
        if (pc == ESCAPE_CODE) {
                cpu_buf->tracing = 0;
                cpu_buf->backtrace_aborted++;
                return;
        }

        add_sample(cpu_buf, pc, 0);
}

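/* Record a switch to sampling on behalf of a different Xen domain; returns
 * 1 if the switch marker was written, 0 if the buffer was too full. */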
int oprofile_add_domain_switch(int32_t domain_id)
{
        struct oprofile_cpu_buffer *cpu_buf = &cpu_buffer[smp_processor_id()];

        /* should have space for switching into and out of the domain
         * (2 slots each) plus one sample and one cpu mode switch */
        if (((nr_available_slots(cpu_buf) < 6) &&
             (domain_id != COORDINATOR_DOMAIN)) ||
            (nr_available_slots(cpu_buf) < 2))
                return 0;

        add_code(cpu_buf, CPU_DOMAIN_SWITCH);
        add_sample(cpu_buf, domain_id, 0);

        current_domain = domain_id;

        return 1;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(void *data)
{
        struct oprofile_cpu_buffer *b = data;

        if (b->cpu != smp_processor_id()) {
                printk("WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);
        }
        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}