1 diff -Nurb linux-2.6.22-580/drivers/oprofile/cpu_buffer.c linux-2.6.22-590/drivers/oprofile/cpu_buffer.c
2 --- linux-2.6.22-580/drivers/oprofile/cpu_buffer.c 2007-07-08 19:32:17.000000000 -0400
3 +++ linux-2.6.22-590/drivers/oprofile/cpu_buffer.c 2008-02-27 13:53:47.000000000 -0500
5 #include <linux/oprofile.h>
6 #include <linux/vmalloc.h>
7 #include <linux/errno.h>
8 +#include <linux/arrays.h>
10 #include "event_buffer.h"
11 #include "cpu_buffer.h"
18 + unsigned long dcookie;
22 +extern void (*rec_event)(void *,unsigned int);
25 add_sample(struct oprofile_cpu_buffer * cpu_buf,
26 unsigned long pc, unsigned long event)
30 increment_head(cpu_buf);
36 oprofile_end_trace(cpu_buf);
39 +static int proc_pid_cmdline(struct task_struct *task, char * buffer)
43 + struct mm_struct *mm = get_task_mm(task);
47 + goto out_mm; /* Shh! No looking before we're done */
49 + len = mm->arg_end - mm->arg_start;
51 + if (len > PAGE_SIZE)
54 + res = access_process_vm(task, mm->arg_start, buffer, len, 0);
56 + // If the nul at the end of args has been overwritten, then
57 + // assume application is using setproctitle(3).
58 + if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
59 + len = strnlen(buffer, res);
63 + len = mm->env_end - mm->env_start;
64 + if (len > PAGE_SIZE - res)
65 + len = PAGE_SIZE - res;
66 + res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
67 + res = strnlen(buffer, res);
78 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
80 int is_kernel = !user_mode(regs);
81 unsigned long pc = profile_pc(regs);
86 + struct event_spec espec;
87 + /*res = proc_pid_cmdline(current, espec->appname);*/
88 + esig.task = current;
91 + esig.event_data=&espec;
92 + esig.event_type=event; /* index in the event array currently set up */
93 + /* make sure the counters are loaded in the order we want them to show up */
94 + (*rec_event)(&esig, 1);
97 oprofile_add_ext_sample(pc, regs, event, is_kernel);
101 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
102 diff -Nurb linux-2.6.22-580/fs/exec.c linux-2.6.22-590/fs/exec.c
103 --- linux-2.6.22-580/fs/exec.c 2008-02-27 13:46:38.000000000 -0500
104 +++ linux-2.6.22-590/fs/exec.c 2008-02-27 13:49:58.000000000 -0500
106 #include <linux/audit.h>
107 #include <linux/signalfd.h>
108 #include <linux/vs_memory.h>
109 +#include <linux/dcookies.h>
111 #include <asm/uaccess.h>
112 #include <asm/mmu_context.h>
116 struct inode *inode = nd.dentry->d_inode;
117 + unsigned long cookie;
118 + if (!nd.dentry->d_cookie)
119 + get_dcookie(nd.dentry, nd.mnt, &cookie);
121 file = ERR_PTR(-EACCES);
122 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
123 S_ISREG(inode->i_mode)) {
124 diff -Nurb linux-2.6.22-580/include/linux/arrays.h linux-2.6.22-590/include/linux/arrays.h
125 --- linux-2.6.22-580/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500
126 +++ linux-2.6.22-590/include/linux/arrays.h 2008-02-27 13:48:29.000000000 -0500
128 +#ifndef __ARRAYS_H__
129 +#define __ARRAYS_H__
130 +#include <linux/list.h>
132 +#define SAMPLING_METHOD_DEFAULT 0
133 +#define SAMPLING_METHOD_LOG 1
135 +/* Every probe has an array handler */
137 +/* XXX - Optimize this structure */
139 +struct array_handler {
140 + struct list_head link;
141 + unsigned int (*hash_func)(void *);
142 + unsigned int (*sampling_func)(void *,int,void *);
143 + unsigned short size;
144 + unsigned int threshold;
145 + unsigned char **expcount;
146 + unsigned int sampling_method;
147 + unsigned int **arrays;
148 + unsigned int arraysize;
149 + unsigned int num_samples[2];
150 + void **epoch_samples; /* size-sized lists of samples */
151 + unsigned int (*serialize)(void *, void *);
152 + unsigned char code[5];
156 + struct list_head link;
158 + unsigned int count;
159 + unsigned int event_type;
160 + struct task_struct *task;
163 diff -Nurb linux-2.6.22-580/include/linux/sched.h linux-2.6.22-590/include/linux/sched.h
164 --- linux-2.6.22-580/include/linux/sched.h 2008-02-27 13:46:40.000000000 -0500
165 +++ linux-2.6.22-590/include/linux/sched.h 2008-02-27 13:48:29.000000000 -0500
167 unsigned int btrace_seq;
169 unsigned long sleep_avg;
170 - unsigned long long timestamp, last_ran;
171 + unsigned long long timestamp, last_ran, last_interrupted, last_ran_j;
172 unsigned long long sched_time; /* sched_clock time spent running */
173 enum sleep_type sleep_type;
175 diff -Nurb linux-2.6.22-580/kernel/fork.c linux-2.6.22-590/kernel/fork.c
176 --- linux-2.6.22-580/kernel/fork.c 2008-02-27 13:46:40.000000000 -0500
177 +++ linux-2.6.22-590/kernel/fork.c 2008-02-27 13:48:29.000000000 -0500
181 tsk->splice_pipe = NULL;
182 + //tsk->cmdline[0]='\0';
183 + tsk->last_interrupted = 0;
187 diff -Nurb linux-2.6.22-580/kernel/sched.c linux-2.6.22-590/kernel/sched.c
188 --- linux-2.6.22-580/kernel/sched.c 2008-02-27 13:46:40.000000000 -0500
189 +++ linux-2.6.22-590/kernel/sched.c 2008-02-27 14:08:26.000000000 -0500
191 * 1998-11-19 Implemented schedule_timeout() and related stuff
192 * by Andrea Arcangeli
193 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
194 - * hybrid priority-list and round-robin design with
195 + * hybrid priority-list and round-robin design with
196 * an array-switch method of distributing timeslices
197 * and per-CPU runqueues. Cleanups and useful suggestions
198 * by Davide Libenzi, preemptible kernel bits by Robert Love.
202 #include <asm/unistd.h>
203 +#include <linux/arrays.h>
204 #include <linux/vs_sched.h>
205 #include <linux/vs_cvirt.h>
207 @@ -3608,6 +3609,8 @@
211 +extern void (*rec_event)(void *,unsigned int);
213 static inline int interactive_sleep(enum sleep_type sleep_type)
215 return (sleep_type == SLEEP_INTERACTIVE ||
216 @@ -3617,16 +3620,51 @@
218 * schedule() is the main scheduler function.
223 + unsigned long dcookie;
225 + unsigned char reason;
228 +#define top_esp (THREAD_SIZE - sizeof(unsigned long))
229 +#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
231 +static inline unsigned long my_get_wchan(struct task_struct *p)
233 + unsigned long ebp, esp, eip;
234 + unsigned long stack_page;
236 + stack_page = (unsigned long)task_stack_page(p);
237 + esp = p->thread.esp;
238 + if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
240 + /* include/asm-i386/system.h:switch_to() pushes ebp last. */
241 + ebp = *(unsigned long *) esp;
243 + if (ebp < stack_page || ebp > top_ebp+stack_page)
245 + eip = *(unsigned long *) (ebp+4);
246 + if (!in_sched_functions(eip))
248 + ebp = *(unsigned long *) ebp;
249 + } while (count++ < 16);
254 asmlinkage void __sched schedule(void)
256 struct task_struct *prev, *next;
257 struct prio_array *array;
258 struct list_head *queue;
259 unsigned long long now;
260 - unsigned long run_time;
261 + unsigned long run_time, diff;
262 int cpu, idx, new_prio;
265 + int sampling_reason;
268 * Test if we are atomic. Since do_exit() needs to call into
269 @@ -3680,6 +3718,7 @@
270 switch_count = &prev->nivcsw;
271 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
272 switch_count = &prev->nvcsw;
274 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
275 unlikely(signal_pending(prev))))
276 prev->state = TASK_RUNNING;
277 @@ -3689,6 +3728,9 @@
278 vx_uninterruptible_inc(prev);
280 deactivate_task(prev, rq);
281 + if (prev->state & TASK_INTERRUPTIBLE) {
282 + prev->last_interrupted=jiffies;
287 @@ -3763,8 +3805,44 @@
288 prev->sleep_avg -= run_time;
289 if ((long)prev->sleep_avg <= 0)
292 prev->timestamp = prev->last_ran = now;
296 + prev->last_ran_j = jiffies;
297 + if (next->last_interrupted) {
298 + diff = (jiffies-next->last_interrupted);
299 + next->last_interrupted = 0;
300 + sampling_reason = 0;
303 + diff = jiffies-next->last_ran_j;
304 + sampling_reason = 1;
307 + if (rec_event && (diff>HZ/5)) {
308 + struct event event;
309 + struct event_spec espec;
311 + unsigned int state = next->state;
313 + espec.reason = sampling_reason;
316 + eip = next->thread.esp;
317 + next->state = state;
319 + next->last_interrupted = 0;
320 + event.event_data=&espec;
323 + event.event_type=2;
324 + /* index in the event array currently set up */
325 + /* make sure the counters are loaded in the order we want them to show up */
326 + (*rec_event)(&event, diff);
329 sched_info_switch(prev, next);
330 if (likely(prev != next)) {
331 next->timestamp = next->last_ran = now;
332 @@ -7275,3 +7353,7 @@
337 +void (*rec_event)(void *,unsigned int);
338 +EXPORT_SYMBOL(rec_event);
339 +EXPORT_SYMBOL(in_sched_functions);
340 diff -Nurb linux-2.6.22-580/kernel/sched.c.rej linux-2.6.22-590/kernel/sched.c.rej
341 --- linux-2.6.22-580/kernel/sched.c.rej 1969-12-31 19:00:00.000000000 -0500
342 +++ linux-2.6.22-590/kernel/sched.c.rej 2008-02-27 13:48:29.000000000 -0500
347 + #include <asm/tlb.h>
348 + #include <asm/unistd.h>
351 + * Scheduler clock - returns current time in nanosec units.
354 + #include <asm/tlb.h>
355 + #include <asm/unistd.h>
356 ++ #include <linux/arrays.h>
361 + * Scheduler clock - returns current time in nanosec units.