1 diff -Nurb linux-2.6.22-580/arch/i386/Kconfig linux-2.6.22-590/arch/i386/Kconfig
2 --- linux-2.6.22-580/arch/i386/Kconfig 2008-02-27 14:59:40.000000000 -0500
3 +++ linux-2.6.22-590/arch/i386/Kconfig 2008-02-28 07:55:57.000000000 -0500
6 source "arch/i386/oprofile/Kconfig"
9 + bool "Chopstix (PlanetLab)"
10 + depends on MODULES && OPROFILE
12 + Chopstix allows you to monitor various events by summarizing them
13 + in lossy data structures and transferring these data structures
14 + into user space. If in doubt, say "N".
17 bool "Kprobes (EXPERIMENTAL)"
18 depends on KALLSYMS && EXPERIMENTAL && MODULES
19 diff -Nurb linux-2.6.22-580/drivers/oprofile/cpu_buffer.c linux-2.6.22-590/drivers/oprofile/cpu_buffer.c
20 --- linux-2.6.22-580/drivers/oprofile/cpu_buffer.c 2007-07-08 19:32:17.000000000 -0400
21 +++ linux-2.6.22-590/drivers/oprofile/cpu_buffer.c 2008-02-28 07:40:16.000000000 -0500
23 #include <linux/oprofile.h>
24 #include <linux/vmalloc.h>
25 #include <linux/errno.h>
26 +#include <linux/arrays.h>
28 #include "event_buffer.h"
29 #include "cpu_buffer.h"
34 +#ifdef CONFIG_CHOPSTIX
38 + unsigned long dcookie;
42 +extern void (*rec_event)(void *,unsigned int);
46 add_sample(struct oprofile_cpu_buffer * cpu_buf,
47 unsigned long pc, unsigned long event)
51 increment_head(cpu_buf);
57 oprofile_end_trace(cpu_buf);
60 +#ifdef CONFIG_CHOPSTIX
62 +static int proc_pid_cmdline(struct task_struct *task, char * buffer)
66 + struct mm_struct *mm = get_task_mm(task);
70 + goto out_mm; /* Shh! No looking before we're done */
72 + len = mm->arg_end - mm->arg_start;
74 + if (len > PAGE_SIZE)
77 + res = access_process_vm(task, mm->arg_start, buffer, len, 0);
79 + // If the nul at the end of args has been overwritten, then
80 + // assume application is using setproctitle(3).
81 + if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
82 + len = strnlen(buffer, res);
86 + len = mm->env_end - mm->env_start;
87 + if (len > PAGE_SIZE - res)
88 + len = PAGE_SIZE - res;
89 + res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
90 + res = strnlen(buffer, res);
102 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
104 int is_kernel = !user_mode(regs);
105 unsigned long pc = profile_pc(regs);
108 +#ifdef CONFIG_CHOPSTIX
111 + struct event_spec espec;
112 + /*res = proc_pid_cmdline(current, espec->appname);*/
113 + esig.task = current;
116 + esig.event_data=&espec;
117 + esig.event_type=event; /* index in the event array currently set up */
118 + /* make sure the counters are loaded in the order we want them to show up*/
119 + (*rec_event)(&esig, 1);
122 oprofile_add_ext_sample(pc, regs, event, is_kernel);
125 + oprofile_add_ext_sample(pc, regs, event, is_kernel);
131 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
132 diff -Nurb linux-2.6.22-580/fs/exec.c linux-2.6.22-590/fs/exec.c
133 --- linux-2.6.22-580/fs/exec.c 2008-02-27 14:59:40.000000000 -0500
134 +++ linux-2.6.22-590/fs/exec.c 2008-02-28 07:40:48.000000000 -0500
136 #include <linux/audit.h>
137 #include <linux/signalfd.h>
138 #include <linux/vs_memory.h>
139 +#include <linux/dcookies.h>
141 #include <asm/uaccess.h>
142 #include <asm/mmu_context.h>
146 struct inode *inode = nd.dentry->d_inode;
147 +#ifdef CONFIG_CHOPSTIX
148 + unsigned long cookie;
149 + if (!nd.dentry->d_cookie)
150 + get_dcookie(nd.dentry, nd.mnt, &cookie);
153 file = ERR_PTR(-EACCES);
154 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
155 S_ISREG(inode->i_mode)) {
156 diff -Nurb linux-2.6.22-580/include/linux/arrays.h linux-2.6.22-590/include/linux/arrays.h
157 --- linux-2.6.22-580/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500
158 +++ linux-2.6.22-590/include/linux/arrays.h 2008-02-27 14:59:52.000000000 -0500
160 +#ifndef __ARRAYS_H__
161 +#define __ARRAYS_H__
162 +#include <linux/list.h>
164 +#define SAMPLING_METHOD_DEFAULT 0
165 +#define SAMPLING_METHOD_LOG 1
167 +/* Every probe has an array handler */
169 +/* XXX - Optimize this structure */
171 +struct array_handler {
172 + struct list_head link;
173 + unsigned int (*hash_func)(void *);
174 + unsigned int (*sampling_func)(void *,int,void *);
175 + unsigned short size;
176 + unsigned int threshold;
177 + unsigned char **expcount;
178 + unsigned int sampling_method;
179 + unsigned int **arrays;
180 + unsigned int arraysize;
181 + unsigned int num_samples[2];
182 + void **epoch_samples; /* size-sized lists of samples */
183 + unsigned int (*serialize)(void *, void *);
184 + unsigned char code[5];
188 + struct list_head link;
190 + unsigned int count;
191 + unsigned int event_type;
192 + struct task_struct *task;
195 diff -Nurb linux-2.6.22-580/include/linux/sched.h linux-2.6.22-590/include/linux/sched.h
196 --- linux-2.6.22-580/include/linux/sched.h 2008-02-27 14:59:41.000000000 -0500
197 +++ linux-2.6.22-590/include/linux/sched.h 2008-02-28 07:41:28.000000000 -0500
199 unsigned int btrace_seq;
201 unsigned long sleep_avg;
202 +#ifdef CONFIG_CHOPSTIX
203 + unsigned long long timestamp, last_ran, last_interrupted, last_ran_j;
205 unsigned long long timestamp, last_ran;
207 unsigned long long sched_time; /* sched_clock time spent running */
208 enum sleep_type sleep_type;
210 diff -Nurb linux-2.6.22-580/kernel/fork.c linux-2.6.22-590/kernel/fork.c
211 --- linux-2.6.22-580/kernel/fork.c 2008-02-27 14:59:41.000000000 -0500
212 +++ linux-2.6.22-590/kernel/fork.c 2008-02-28 07:42:09.000000000 -0500
216 tsk->splice_pipe = NULL;
217 + //tsk->cmdline[0]='\0';
218 +#ifdef CONFIG_CHOPSTIX
219 + tsk->last_interrupted = 0;
220 + tsk->last_ran_j = 0;
225 diff -Nurb linux-2.6.22-580/kernel/sched.c linux-2.6.22-590/kernel/sched.c
226 --- linux-2.6.22-580/kernel/sched.c 2008-02-27 14:59:41.000000000 -0500
227 +++ linux-2.6.22-590/kernel/sched.c 2008-02-28 07:45:45.000000000 -0500
229 * 1998-11-19 Implemented schedule_timeout() and related stuff
230 * by Andrea Arcangeli
231 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
232 - * hybrid priority-list and round-robin design with
233 + * hybrid priority-list and round-robin design with
234 * an array-switch method of distributing timeslices
235 * and per-CPU runqueues. Cleanups and useful suggestions
236 * by Davide Libenzi, preemptible kernel bits by Robert Love.
240 #include <asm/unistd.h>
241 +#include <linux/arrays.h>
242 #include <linux/vs_sched.h>
243 #include <linux/vs_cvirt.h>
245 @@ -3608,6 +3609,7 @@
250 static inline int interactive_sleep(enum sleep_type sleep_type)
252 return (sleep_type == SLEEP_INTERACTIVE ||
253 @@ -3617,16 +3619,54 @@
255 * schedule() is the main scheduler function.
258 +#ifdef CONFIG_CHOPSTIX
259 +extern void (*rec_event)(void *,unsigned int);
262 + unsigned long dcookie;
264 + unsigned char reason;
267 +#define top_esp (THREAD_SIZE - sizeof(unsigned long))
268 +#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
270 +static inline unsigned long my_get_wchan(struct task_struct *p)
272 + unsigned long ebp, esp, eip;
273 + unsigned long stack_page;
275 + stack_page = (unsigned long)task_stack_page(p);
276 + esp = p->thread.esp;
277 + if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
279 + /* include/asm-i386/system.h:switch_to() pushes ebp last. */
280 + ebp = *(unsigned long *) esp;
282 + if (ebp < stack_page || ebp > top_ebp+stack_page)
284 + eip = *(unsigned long *) (ebp+4);
285 + if (!in_sched_functions(eip))
287 + ebp = *(unsigned long *) ebp;
288 + } while (count++ < 16);
294 asmlinkage void __sched schedule(void)
296 struct task_struct *prev, *next;
297 struct prio_array *array;
298 struct list_head *queue;
299 unsigned long long now;
300 - unsigned long run_time;
301 + unsigned long run_time, diff;
302 int cpu, idx, new_prio;
305 + int sampling_reason;
308 * Test if we are atomic. Since do_exit() needs to call into
309 @@ -3680,6 +3720,7 @@
310 switch_count = &prev->nivcsw;
311 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
312 switch_count = &prev->nvcsw;
314 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
315 unlikely(signal_pending(prev))))
316 prev->state = TASK_RUNNING;
317 @@ -3689,6 +3730,11 @@
318 vx_uninterruptible_inc(prev);
320 deactivate_task(prev, rq);
321 +#ifdef CONFIG_CHOPSTIX
322 + if (prev->state & TASK_INTERRUPTIBLE) {
323 + prev->last_interrupted=jiffies;
329 @@ -3763,8 +3809,45 @@
330 prev->sleep_avg -= run_time;
331 if ((long)prev->sleep_avg <= 0)
334 prev->timestamp = prev->last_ran = now;
335 +#ifdef CONFIG_CHOPSTIX
339 + prev->last_ran_j = jiffies;
340 + if (next->last_interrupted) {
341 + diff = (jiffies-next->last_interrupted);
342 + next->last_interrupted = 0;
343 + sampling_reason = 0;
346 + diff = jiffies-next->last_ran_j;
347 + sampling_reason = 1;
350 + if (rec_event && (diff>HZ/5)) {
351 + struct event event;
352 + struct event_spec espec;
354 + unsigned int state = next->state;
356 + espec.reason = sampling_reason;
359 + eip = next->thread.esp;
360 + next->state = state;
362 + next->last_interrupted = 0;
363 + event.event_data=&espec;
366 + event.event_type=2;
367 + /* index in the event array currently set up */
368 + /* make sure the counters are loaded in the order we want them to show up*/
369 + (*rec_event)(&event, diff);
372 sched_info_switch(prev, next);
373 if (likely(prev != next)) {
374 next->timestamp = next->last_ran = now;
375 @@ -7275,3 +7358,9 @@
380 +#ifdef CONFIG_CHOPSTIX
381 +void (*rec_event)(void *,unsigned int);
382 +EXPORT_SYMBOL(rec_event);
383 +EXPORT_SYMBOL(in_sched_functions);