remove duplicated definitions
[linux-2.6.git] / linux-2.6-591-chopstix-intern.patch
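
This patch threads the Chopstix (PlanetLab) event probes through a
2.6.27 kernel. Every probe funnels into a single hook,

    void (*rec_event)(void *, unsigned int);

defined in kernel/sched.c and declared in the new
include/linux/arrays.h. The pointer stays NULL until a collector
module arms it, and every probe site tests it first, so the probes are
cheap while unarmed. Event types wired up here: 4 (slab free),
5 (allocation, from the slab allocator and handle_mm_fault),
6 (syscall entry, via rec_event_asm from entry_32.S); type 3 (bio
completion) is compiled out under "#if 0", and the type 2
scheduler-delay probe only survives in the included kernel/sched.c.rej,
whose hunks were written against the older O(1) scheduler.

For illustration, a minimal sketch of a collector module (hypothetical,
not part of this patch; the real Chopstix collector is shipped
separately, and a production handler would also need to quiesce the
probes before unloading):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/arrays.h>       /* struct event, rec_event hook */

    /* Called from probe sites (syscall entry, fault and slab paths),
     * so it must be atomic-safe: no sleeping, no blocking allocation. */
    static void demo_handler(void *data, unsigned int count)
    {
            struct event *e = data;

            if (printk_ratelimit())
                    printk(KERN_DEBUG "chopstix: type=%u count=%u\n",
                           e->event_type, count);
    }

    static int __init demo_init(void)
    {
            rec_event = demo_handler;       /* arm all probes */
            return 0;
    }

    static void __exit demo_exit(void)
    {
            rec_event = NULL;               /* disarm */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
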
Index: linux-2.6.27.y/arch/Kconfig
===================================================================
--- linux-2.6.27.y.orig/arch/Kconfig
+++ linux-2.6.27.y/arch/Kconfig
@@ -13,9 +13,17 @@ config OPROFILE
 
          If unsure, say N.
 
+config CHOPSTIX
+       bool "Chopstix (PlanetLab)"
+       depends on MODULES && OPROFILE
+       help
+         Chopstix allows you to monitor various events by summarizing them
+         in lossy data structures and transferring these data structures
+         into user space. If in doubt, say "N".
+
 config HAVE_OPROFILE
        def_bool n
 
 config KPROBES
        bool "Kprobes"
        depends on KALLSYMS && MODULES
Index: linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c
===================================================================
--- linux-2.6.27.y.orig/arch/x86/kernel/asm-offsets_32.c
+++ linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
+#include <linux/arrays.h>
 #include <linux/kbuild.h>
 #include <asm/ucontext.h>
 #include "sigframe.h"
@@ -24,9 +25,20 @@
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
+
+#define STACKOFFSET(sym, str, mem) \
+       DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
+
 /* workaround for a warning with -Wmissing-prototypes */
 void foo(void);
 
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned int number;
+};
+
 void foo(void)
 {
        OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
@@ -50,6 +62,16 @@ void foo(void)
        OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
        BLANK();
 
+    STACKOFFSET(TASK_thread, task_struct, thread);
+    STACKOFFSET(THREAD_esp, thread_struct, sp);
+    STACKOFFSET(EVENT_event_data, event, event_data);
+    STACKOFFSET(EVENT_task, event, task);
+    STACKOFFSET(EVENT_event_type, event, event_type);
+    STACKOFFSET(SPEC_number, event_spec, number);
+    DEFINE(EVENT_SIZE, sizeof(struct event));
+    DEFINE(SPEC_SIZE, sizeof(struct event_spec));
+    DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
+
        OFFSET(TI_task, thread_info, task);
        OFFSET(TI_exec_domain, thread_info, exec_domain);
        OFFSET(TI_flags, thread_info, flags);
Index: linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c.rej
===================================================================
--- /dev/null
+++ linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c.rej
@@ -0,0 +1,17 @@
+***************
+*** 63,69 ****
+      BLANK();
+ 
+     STACKOFFSET(TASK_thread, task_struct, thread);
+-    STACKOFFSET(THREAD_esp, thread_struct, esp);
+     STACKOFFSET(EVENT_event_data, event, event_data);
+     STACKOFFSET(EVENT_task, event, task);
+     STACKOFFSET(EVENT_event_type, event, event_type);
+--- 63,69 ----
+      BLANK();
+ 
+     STACKOFFSET(TASK_thread, task_struct, thread);
++    STACKOFFSET(THREAD_esp, thread_struct, sp);
+     STACKOFFSET(EVENT_event_data, event, event_data);
+     STACKOFFSET(EVENT_task, event, task);
+     STACKOFFSET(EVENT_event_type, event, event_type);
Index: linux-2.6.27.y/arch/x86/kernel/entry_32.S
===================================================================
--- linux-2.6.27.y.orig/arch/x86/kernel/entry_32.S
+++ linux-2.6.27.y/arch/x86/kernel/entry_32.S
@@ -426,6 +426,35 @@ ENTRY(system_call)
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
syscall_call:
+#ifdef CONFIG_CHOPSTIX
+    /* Chopstix syscall probe: build a struct event + event_spec pair
+     * on the stack and pass them to the registered handler.
+     * Saves and restores: eax, ecx, ebp. */
+    pushl   %eax
+    pushl   %ecx
+    pushl   %ebp
+    movl    %esp, %ebp
+    subl    $SPEC_EVENT_SIZE, %esp
+    movl    rec_event, %ecx
+    testl   %ecx, %ecx
+    jz      carry_on
+    # struct event sits just below %ebp; struct event_spec below that
+    movl    %eax, (SPEC_number-EVENT_SIZE)(%ebp)
+    leal    -SPEC_EVENT_SIZE(%ebp), %eax
+    movl    %eax, EVENT_event_data(%ebp)
+    movl    $6, EVENT_event_type(%ebp)
+    movl    $1, 4(%esp)
+    leal    -EVENT_SIZE(%ebp), %eax
+    movl    %eax, (%esp)
+    call    rec_event_asm
+carry_on:
+    addl    $SPEC_EVENT_SIZE, %esp
+    popl    %ebp
+    popl    %ecx
+    popl    %eax
+    /* End Chopstix syscall probe */
+#endif
+
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)          # store the return value
syscall_exit:
Index: linux-2.6.27.y/arch/x86/mm/fault.c
===================================================================
--- linux-2.6.27.y.orig/arch/x86/mm/fault.c
+++ linux-2.6.27.y/arch/x86/mm/fault.c
@@ -79,6 +79,15 @@ static inline int notify_page_fault(stru
 #endif
 }
 
+
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
 /*
  * X86_32
  * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
Index: linux-2.6.27.y/drivers/oprofile/cpu_buffer.c
===================================================================
--- linux-2.6.27.y.orig/drivers/oprofile/cpu_buffer.c
+++ linux-2.6.27.y/drivers/oprofile/cpu_buffer.c
@@ -21,6 +21,7 @@
 #include <linux/oprofile.h>
 #include <linux/vmalloc.h>
 #include <linux/errno.h>
+#include <linux/arrays.h>
 
 #include "event_buffer.h"
 #include "cpu_buffer.h"
@@ -147,6 +148,17 @@ static void increment_head(struct oprofi
                b->head_pos = 0;
 }
 
+#ifdef CONFIG_CHOPSTIX
+
+struct event_spec {
+       unsigned int pc;
+       unsigned long dcookie;
+       unsigned count;
+};
+
+extern void (*rec_event)(void *,unsigned int);
+#endif
+
 static inline void
 add_sample(struct oprofile_cpu_buffer * cpu_buf,
            unsigned long pc, unsigned long event)
@@ -250,8 +262,26 @@ void oprofile_add_sample(struct pt_regs 
 {
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event esig;
+               struct event_spec espec;
+
+               esig.task = current;
+               espec.pc = pc;
+               espec.count = 1;
+               esig.event_data = &espec;
+               /* event indexes the event array currently set up; make sure
+                * the counters are loaded in the order they should show up. */
+               esig.event_type = event;
+               (*rec_event)(&esig, 1);
+       } else {
        oprofile_add_ext_sample(pc, regs, event, is_kernel);
+       }
+#else
+       oprofile_add_ext_sample(pc, regs, event, is_kernel);
+#endif
 }
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
Index: linux-2.6.27.y/fs/bio.c
===================================================================
--- linux-2.6.27.y.orig/fs/bio.c
+++ linux-2.6.27.y/fs/bio.c
@@ -27,6 +27,7 @@
 #include <linux/workqueue.h>
 #include <linux/blktrace_api.h>
 #include <scsi/sg.h>           /* for struct sg_iovec */
+#include <linux/arrays.h>
 
 static struct kmem_cache *bio_slab __read_mostly;
 
@@ -1171,6 +1172,14 @@ void bio_check_pages_dirty(struct bio *b
        }
 }
 
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
+extern void (*rec_event)(void *,unsigned int);
 /**
  * bio_endio - end I/O on a bio
  * @bio:       bio
@@ -1192,6 +1201,24 @@ void bio_endio(struct bio *bio, int erro
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;
 
+#if 0
+               if (rec_event) {
+                       struct event event;
+                       struct event_spec espec;
+                       unsigned long eip;
+
+                       espec.reason = 1; /* response */
+
+                       eip = bio->bi_end_io;
+                       event.event_data=&espec;
+                       espec.pc=eip;
+                       event.event_type=3;
+                       /* index in the event array currently set up */
+                       /* make sure the counters are loaded in the order we want them to show up*/
+                       (*rec_event)(&event, bytes_done);
+               }
+#endif
+
        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
 }
Index: linux-2.6.27.y/fs/exec.c
===================================================================
--- linux-2.6.27.y.orig/fs/exec.c
+++ linux-2.6.27.y/fs/exec.c
@@ -27,6 +27,7 @@
 #include <linux/fdtable.h>
 #include <linux/mm.h>
 #include <linux/stat.h>
+#include <linux/dcookies.h>
 #include <linux/fcntl.h>
 #include <linux/smp_lock.h>
 #include <linux/swap.h>
@@ -698,6 +699,13 @@ struct file *open_exec(const char *name)
                goto out;
        }
 
+#ifdef CONFIG_CHOPSTIX
+       unsigned long cookie;
+       extern void (*rec_event)(void *, unsigned int);
+       if (rec_event && !nd.path.dentry->d_cookie)
+               get_dcookie(&nd.path, &cookie);
+#endif
+
        return file;
 
 out_path_put:
Index: linux-2.6.27.y/fs/exec.c.rej
===================================================================
--- /dev/null
+++ linux-2.6.27.y/fs/exec.c.rej
@@ -0,0 +1,36 @@
+***************
+*** 40,46 ****
+  #include <linux/personality.h>
+  #include <linux/binfmts.h>
+  #include <linux/utsname.h>
+- /*#include <linux/pid_namespace.h>*/
+  #include <linux/module.h>
+  #include <linux/namei.h>
+  #include <linux/proc_fs.h>
+--- 40,46 ----
+  #include <linux/personality.h>
+  #include <linux/binfmts.h>
+  #include <linux/utsname.h>
++ #include <linux/pid_namespace.h>
+  #include <linux/module.h>
+  #include <linux/namei.h>
+  #include <linux/proc_fs.h>
+***************
+*** 702,709 ****
+   #ifdef CONFIG_CHOPSTIX
+      unsigned long cookie;
+      extern void (*rec_event)(void *, unsigned int);
+-     if (rec_event && !nd.dentry->d_cookie)
+-         get_dcookie(nd.dentry, nd.mnt, &cookie);
+   #endif
+  
+       return file;
+--- 702,709 ----
+   #ifdef CONFIG_CHOPSTIX
+      unsigned long cookie;
+      extern void (*rec_event)(void *, unsigned int);
++     if (rec_event && !nd.path.dentry->d_cookie)
++         get_dcookie(&nd.path, &cookie);
+   #endif
+  
+       return file;
Index: linux-2.6.27.y/include/linux/arrays.h
===================================================================
--- /dev/null
+++ linux-2.6.27.y/include/linux/arrays.h
@@ -0,0 +1,36 @@
+#ifndef __ARRAYS_H__
+#define __ARRAYS_H__
+#include <linux/list.h>
+
+#define SAMPLING_METHOD_DEFAULT 0
+#define SAMPLING_METHOD_LOG 1
+
+/* Every probe has an array handler */
+
+/* XXX - Optimize this structure */
+
+extern void (*rec_event)(void *,unsigned int);
+struct array_handler {
+       struct list_head link;
+       unsigned int (*hash_func)(void *);
+       unsigned int (*sampling_func)(void *,int,void *);
+       unsigned short size;
+       unsigned int threshold;
+       unsigned char **expcount;
+       unsigned int sampling_method;
+       unsigned int **arrays;
+       unsigned int arraysize;
+       unsigned int num_samples[2];
+       void **epoch_samples; /* size-sized lists of samples */
+       unsigned int (*serialize)(void *, void *);
+       unsigned char code[5];
+};
+
+struct event {
+       struct list_head link;
+       void *event_data;
+       unsigned int count;
+       unsigned int event_type;
+       struct task_struct *task;
+};
+#endif
Index: linux-2.6.27.y/include/linux/sched.h.rej
===================================================================
--- /dev/null
+++ linux-2.6.27.y/include/linux/sched.h.rej
@@ -0,0 +1,19 @@
+***************
+*** 850,855 ****
+  #endif
+       unsigned long sleep_avg;
+       unsigned long long timestamp, last_ran;
+       unsigned long long sched_time; /* sched_clock time spent running */
+       enum sleep_type sleep_type;
+  
+--- 850,859 ----
+  #endif
+       unsigned long sleep_avg;
+       unsigned long long timestamp, last_ran;
++ #ifdef CONFIG_CHOPSTIX
++      unsigned long last_interrupted, last_ran_j;
++ #endif
++ 
+       unsigned long long sched_time; /* sched_clock time spent running */
+       enum sleep_type sleep_type;
+  
Index: linux-2.6.27.y/kernel/sched.c
===================================================================
--- linux-2.6.27.y.orig/kernel/sched.c
+++ linux-2.6.27.y/kernel/sched.c
@@ -73,12 +73,16 @@
 #include <linux/ftrace.h>
 #include <linux/vs_sched.h>
 #include <linux/vs_cvirt.h>
+#include <linux/arrays.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
 
+#define INTERRUPTIBLE   -1
+#define RUNNING         0
+
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -4436,6 +4440,32 @@ pick_next_task(struct rq *rq, struct tas
        }
 }
 
+void (*rec_event)(void *,unsigned int) = NULL;
+EXPORT_SYMBOL(rec_event);
+#ifdef CONFIG_CHOPSTIX
+
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned int count;
+       unsigned int reason;
+};
+
+/* Fill in task/pc from the current context, then forward to the
+ * registered handler; kept out of line so entry_32.S can call it. */
+asmlinkage void rec_event_asm(struct event *event_signature_in, unsigned int count)
+{
+       struct pt_regs *regs;
+       struct event_spec *es = event_signature_in->event_data;
+
+       regs = task_pt_regs(current);
+       event_signature_in->task = current;
+       es->pc = regs->ip;
+       event_signature_in->count = 1;
+       (*rec_event)(event_signature_in, count);
+}
+#endif
+
 /*
  * schedule() is the main scheduler function.
  */
Index: linux-2.6.27.y/kernel/sched.c.rej
===================================================================
--- /dev/null
+++ linux-2.6.27.y/kernel/sched.c.rej
@@ -0,0 +1,258 @@
+***************
+*** 23,28 ****
+  #include <linux/nmi.h>
+  #include <linux/init.h>
+  #include <asm/uaccess.h>
+  #include <linux/highmem.h>
+  #include <linux/smp_lock.h>
+  #include <asm/mmu_context.h>
+--- 23,29 ----
+  #include <linux/nmi.h>
+  #include <linux/init.h>
+  #include <asm/uaccess.h>
++ #include <linux/arrays.h>
+  #include <linux/highmem.h>
+  #include <linux/smp_lock.h>
+  #include <asm/mmu_context.h>
+***************
+*** 451,456 ****
+  
+  repeat_lock_task:
+       rq = task_rq(p);
+       spin_lock(&rq->lock);
+       if (unlikely(rq != task_rq(p))) {
+               spin_unlock(&rq->lock);
+--- 455,461 ----
+  
+  repeat_lock_task:
+       rq = task_rq(p);
++ 
+       spin_lock(&rq->lock);
+       if (unlikely(rq != task_rq(p))) {
+               spin_unlock(&rq->lock);
+***************
+*** 1761,1766 ****
+        * event cannot wake it up and insert it on the runqueue either.
+        */
+       p->state = TASK_RUNNING;
+  
+       /*
+        * Make sure we do not leak PI boosting priority to the child:
+--- 1766,1786 ----
+        * event cannot wake it up and insert it on the runqueue either.
+        */
+       p->state = TASK_RUNNING;
++ #ifdef CONFIG_CHOPSTIX
++     /* The jiffy of last interruption */
++     if (p->state & TASK_UNINTERRUPTIBLE) {
++                              p->last_interrupted=jiffies;
++      }
++     else
++     if (p->state & TASK_INTERRUPTIBLE) {
++                              p->last_interrupted=INTERRUPTIBLE;
++      }
++     else
++          p->last_interrupted=RUNNING;
++ 
++     /* The jiffy of last execution */
++      p->last_ran_j=jiffies;
++ #endif
+  
+       /*
+        * Make sure we do not leak PI boosting priority to the child:
+***************
+*** 3628,3633 ****
+  
+  #endif
+  
+  static inline int interactive_sleep(enum sleep_type sleep_type)
+  {
+       return (sleep_type == SLEEP_INTERACTIVE ||
+--- 3648,3654 ----
+  
+  #endif
+  
++ 
+  static inline int interactive_sleep(enum sleep_type sleep_type)
+  {
+       return (sleep_type == SLEEP_INTERACTIVE ||
+***************
+*** 3637,3652 ****
+  /*
+   * schedule() is the main scheduler function.
+   */
+  asmlinkage void __sched schedule(void)
+  {
+       struct task_struct *prev, *next;
+       struct prio_array *array;
+       struct list_head *queue;
+       unsigned long long now;
+-      unsigned long run_time;
+       int cpu, idx, new_prio;
+       long *switch_count;
+       struct rq *rq;
+  
+       /*
+        * Test if we are atomic.  Since do_exit() needs to call into
+--- 3658,3685 ----
+  /*
+   * schedule() is the main scheduler function.
+   */
++ 
++ #ifdef CONFIG_CHOPSTIX
++ extern void (*rec_event)(void *,unsigned int);
++ struct event_spec {
++      unsigned long pc;
++      unsigned long dcookie;
++      unsigned int count;
++      unsigned int reason;
++ };
++ #endif
++ 
+  asmlinkage void __sched schedule(void)
+  {
+       struct task_struct *prev, *next;
+       struct prio_array *array;
+       struct list_head *queue;
+       unsigned long long now;
++      unsigned long run_time, diff;
+       int cpu, idx, new_prio;
+       long *switch_count;
+       struct rq *rq;
++      int sampling_reason;
+  
+       /*
+        * Test if we are atomic.  Since do_exit() needs to call into
+***************
+*** 3700,3705 ****
+       switch_count = &prev->nivcsw;
+       if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+               switch_count = &prev->nvcsw;
+               if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+                               unlikely(signal_pending(prev))))
+                       prev->state = TASK_RUNNING;
+--- 3733,3739 ----
+       switch_count = &prev->nivcsw;
+       if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+               switch_count = &prev->nvcsw;
++ 
+               if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+                               unlikely(signal_pending(prev))))
+                       prev->state = TASK_RUNNING;
+***************
+*** 3709,3714 ****
+                               vx_uninterruptible_inc(prev);
+                       }
+                       deactivate_task(prev, rq);
+               }
+       }
+  
+--- 3743,3759 ----
+                               vx_uninterruptible_inc(prev);
+                       }
+                       deactivate_task(prev, rq);
++ #ifdef CONFIG_CHOPSTIX
++             /* An uninterruptible process just yielded. Record the current jiffie */
++                      if (prev->state & TASK_UNINTERRUPTIBLE) {
++                              prev->last_interrupted=jiffies;
++                      }
++             /* An interruptible process just yielded, or it got preempted.
++              * Mark it as interruptible */
++                      else if (prev->state & TASK_INTERRUPTIBLE) {
++                              prev->last_interrupted=INTERRUPTIBLE;
++                      }
++ #endif
+               }
+       }
+  
+***************
+*** 3785,3790 ****
+               prev->sleep_avg = 0;
+       prev->timestamp = prev->last_ran = now;
+  
+       sched_info_switch(prev, next);
+       if (likely(prev != next)) {
+               next->timestamp = next->last_ran = now;
+--- 3830,3869 ----
+               prev->sleep_avg = 0;
+       prev->timestamp = prev->last_ran = now;
+  
++ #ifdef CONFIG_CHOPSTIX
++      /* Run only if the Chopstix module so decrees it */
++      if (rec_event) {
++              prev->last_ran_j = jiffies;
++              if (next->last_interrupted!=INTERRUPTIBLE) {
++                      if (next->last_interrupted!=RUNNING) {
++                              diff = (jiffies-next->last_interrupted);
++                              sampling_reason = 0;/* BLOCKING */
++                      }
++                      else {
++                              diff = jiffies-next->last_ran_j;
++                              sampling_reason = 1;/* PREEMPTION */
++                      }
++ 
++                      if (diff >= HZ/10) {
++                              struct event event;
++                              struct event_spec espec;
++                 struct pt_regs *regs;
++                 regs = task_pt_regs(current);
++ 
++                              espec.reason = sampling_reason;
++                              event.event_data=&espec;
++                              event.task=next;
++                              espec.pc=regs->eip;
++                              event.event_type=2;
++                              /* index in the event array currently set up */
++                              /* make sure the counters are loaded in the order we want them to show up*/
++                              (*rec_event)(&event, diff);
++                      }
++              }
++         /* next has been elected to run */
++              next->last_interrupted=0;
++      }
++ #endif
+       sched_info_switch(prev, next);
+       if (likely(prev != next)) {
+               next->timestamp = next->last_ran = now;
+***************
+*** 5737,5742 ****
+       jiffies_to_timespec(p->policy == SCHED_FIFO ?
+                               0 : task_timeslice(p), &t);
+       read_unlock(&tasklist_lock);
+       retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+  out_nounlock:
+       return retval;
+--- 5817,5823 ----
+       jiffies_to_timespec(p->policy == SCHED_FIFO ?
+                               0 : task_timeslice(p), &t);
+       read_unlock(&tasklist_lock);
++ 
+       retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+  out_nounlock:
+       return retval;
+***************
+*** 7980,7982 ****
+  }
+  
+  #endif
+--- 8061,8080 ----
+  }
+  
+  #endif
++ 
++ #ifdef CONFIG_CHOPSTIX
++ void (*rec_event)(void *,unsigned int) = NULL;
++ 
++ /* To support safe calling from asm */
++ asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
++     struct pt_regs *regs;
++     struct event_spec *es = event_signature_in->event_data;
++     regs = task_pt_regs(current);
++      event_signature_in->task=current;
++      es->pc=regs->eip;
++     event_signature_in->count=1;
++     (*rec_event)(event_signature_in, count);
++ }
++ EXPORT_SYMBOL(rec_event);
++ EXPORT_SYMBOL(in_sched_functions);
++ #endif
Index: linux-2.6.27.y/mm/memory.c
===================================================================
--- linux-2.6.27.y.orig/mm/memory.c
+++ linux-2.6.27.y/mm/memory.c
@@ -61,6 +61,7 @@
 
 #include <linux/swapops.h>
 #include <linux/elf.h>
+#include <linux/arrays.h>
 
 #include "internal.h"
 
@@ -2753,6 +2754,15 @@ out:
        return ret;
 }
 
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
+
 /*
 * By the time we get here, we already hold the mm semaphore
 */
@@ -2782,6 +2792,24 @@ int handle_mm_fault(struct mm_struct *mm
        if (!pte)
                return VM_FAULT_OOM;
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event event;
+               struct event_spec espec;
+               struct pt_regs *regs;
+               unsigned int pc;
+
+               regs = task_pt_regs(current);
+               pc = regs->ip & (unsigned int) ~4095;
+               espec.reason = 0; /* alloc */
+               event.event_data = &espec;
+               event.task = current;
+               espec.pc = pc;
+               event.event_type = 5;
+               (*rec_event)(&event, 1);
+       }
+#endif
+
        return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
 }
 
Index: linux-2.6.27.y/mm/slab.c
===================================================================
--- linux-2.6.27.y.orig/mm/slab.c
+++ linux-2.6.27.y/mm/slab.c
@@ -110,6 +110,7 @@
 #include       <linux/fault-inject.h>
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
+#include       <linux/arrays.h>
 #include       <linux/debugobjects.h>
 
 #include       <asm/cacheflush.h>
@@ -248,6 +249,14 @@ struct slab_rcu {
        void *addr;
 };
 
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
 /*
  * struct array_cache
 *
@@ -3469,6 +3478,19 @@ __cache_alloc(struct kmem_cache *cachep,
        local_irq_restore(save_flags);
        objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
        prefetchw(objp);
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event && objp) {
+               struct event event;
+               struct event_spec espec;
+
+               espec.reason = 0; /* alloc */
+               event.event_data = &espec;
+               event.task = current;
+               espec.pc = (unsigned long)caller;
+               event.event_type = 5;
+               (*rec_event)(&event, cachep->buffer_size);
+       }
+#endif
 
        if (unlikely((flags & __GFP_ZERO) && objp))
                memset(objp, 0, obj_size(cachep));
@@ -3578,12 +3600,26 @@ free_done:
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
 {
        struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
-       objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+       objp = cache_free_debugcheck(cachep, objp, caller);
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event && objp) {
+               struct event event;
+               struct event_spec espec;
+
+               espec.reason = 1; /* free */
+               event.event_data = &espec;
+               event.task = current;
+               espec.pc = (unsigned long)caller;
+               event.event_type = 4;
+               (*rec_event)(&event, cachep->buffer_size);
+       }
+#endif
+
        vx_slab_free(cachep);
 
        /*
@@ -3741,10 +3777,17 @@ void *__kmalloc_track_caller(size_t size
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
+#ifdef CONFIG_CHOPSTIX
+void *__kmalloc(size_t size, gfp_t flags)
+{
+       return __do_kmalloc(size, flags, __builtin_return_address(0));
+}
+#else
 void *__kmalloc(size_t size, gfp_t flags)
 {
        return __do_kmalloc(size, flags, NULL);
 }
+#endif
 EXPORT_SYMBOL(__kmalloc);
 #endif
 
@@ -3764,7 +3807,7 @@ void kmem_cache_free(struct kmem_cache *
        debug_check_no_locks_freed(objp, obj_size(cachep));
        if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(objp, obj_size(cachep));
-       __cache_free(cachep, objp);
+       __cache_free(cachep, objp, __builtin_return_address(0));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3790,7 +3833,7 @@ void kfree(const void *objp)
        c = virt_to_cache(objp);
        debug_check_no_locks_freed(objp, obj_size(c));
        debug_check_no_obj_freed(objp, obj_size(c));
-       __cache_free(c, (void *)objp);
+       __cache_free(c, (void *)objp, __builtin_return_address(0));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);