add linux-2.6-591-chopstix-intern.patch
1 diff --git a/arch/Kconfig b/arch/Kconfig
2 index 4e312ff..ef6a721 100644
3 --- a/arch/Kconfig
4 +++ b/arch/Kconfig
5 @@ -43,6 +43,14 @@ config OPROFILE_EVENT_MULTIPLEX
6  
7           If unsure, say N.
8  
9 +config CHOPSTIX
10 +       bool "Chopstix (PlanetLab)"
11 +       depends on MODULES && OPROFILE
12 +       help
13 +         Chopstix allows you to monitor various events by summarizing them
14 +         in lossy data structures and transferring these data structures
15 +         into user space. If in doubt, say "N".
16 +
17  config HAVE_OPROFILE
18         bool
19  
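The Kconfig entry only builds the hooks below; the collector itself is expected to be a loadable module (hence the MODULES dependency) that arms them by assigning the rec_event function pointer exported from kernel/sched.c later in this patch. A minimal sketch of such a collector, assuming nothing beyond what the patch itself exports (the module is hypothetical, not part of the patch):

/*
 * Hypothetical minimal Chopstix collector.  Every hook added by this patch
 * only fires while rec_event is non-NULL, so the module arms the probes by
 * assigning the pointer and disarms them by clearing it again.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/arrays.h>               /* struct event, rec_event */

static void demo_record(void *data, unsigned int count)
{
        struct event *ev = data;

        /* ev->event_type selects the probe, ev->event_data is probe-specific */
        pr_debug("chopstix: type=%u count=%u pid=%d\n",
                 ev->event_type, count, ev->task ? ev->task->pid : -1);
}

static int __init demo_init(void)
{
        rec_event = demo_record;        /* arm every probe site */
        return 0;
}

static void __exit demo_exit(void)
{
        rec_event = NULL;               /* disarm; a real collector would also
                                           wait for in-flight calls to finish */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

A real collector would additionally hash the events into the array_handler structures declared in include/linux/arrays.h and export the summaries to user space, which is what the help text above alludes to.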
20 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
21 index dfdbf64..29c79b8 100644
22 --- a/arch/x86/kernel/asm-offsets_32.c
23 +++ b/arch/x86/kernel/asm-offsets_32.c
24 @@ -9,6 +9,7 @@
25  #include <linux/signal.h>
26  #include <linux/personality.h>
27  #include <linux/suspend.h>
28 +#include <linux/arrays.h>
29  #include <linux/kbuild.h>
30  #include <asm/ucontext.h>
31  #include <asm/sigframe.h>
32 @@ -25,6 +26,18 @@
33  #include <linux/lguest.h>
34  #include "../../../drivers/lguest/lg.h"
35  
36 +#ifdef CONFIG_CHOPSTIX
37 +#define STACKOFFSET(sym, str, mem) \
38 +       DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
39 +
40 +struct event_spec {
41 +       unsigned long pc;
42 +       unsigned long dcookie;
43 +       unsigned count;
44 +       unsigned int number;
45 +};
46 +#endif
47 +
48  /* workaround for a warning with -Wmissing-prototypes */
49  void foo(void);
50  
51 @@ -51,6 +64,18 @@ void foo(void)
52         OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
53         BLANK();
54  
55 +#ifdef CONFIG_CHOPSTIX
56 +       STACKOFFSET(TASK_thread, task_struct, thread);
57 +       STACKOFFSET(THREAD_esp, thread_struct, sp);
58 +       STACKOFFSET(EVENT_event_data, event, event_data);
59 +       STACKOFFSET(EVENT_task, event, task);
60 +       STACKOFFSET(EVENT_event_type, event, event_type);
61 +       STACKOFFSET(SPEC_number, event_spec, number);
62 +       DEFINE(EVENT_SIZE, sizeof(struct event));
63 +       DEFINE(SPEC_SIZE, sizeof(struct event_spec));
64 +       DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
65 +#endif
66 +
67         OFFSET(TI_task, thread_info, task);
68         OFFSET(TI_exec_domain, thread_info, exec_domain);
69         OFFSET(TI_flags, thread_info, flags);
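The STACKOFFSET macro is the one non-obvious piece here: instead of a plain field offset it emits offsetof(member) - sizeof(struct), i.e. the displacement of a member from a pointer that points just past the end of the structure, which is how entry_32.S addresses the struct event it carves out of the stack below %ebp. A small userspace illustration of the arithmetic (not part of the patch):

#include <stdio.h>
#include <stddef.h>

struct demo {
        long a;
        long b;
};

int main(void)
{
        struct demo d;
        char *end = (char *)(&d + 1);   /* plays the role of %ebp */
        long off_b = (long)offsetof(struct demo, b) - (long)sizeof(struct demo);

        /* end + off_b lands exactly on d.b, just as EVENT_*(%ebp) lands on
         * the fields of the on-stack struct event in entry_32.S. */
        printf("offset of b from 'end': %ld, points at d.b: %d\n",
               off_b, (end + off_b) == (char *)&d.b);
        return 0;
}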
70 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
71 index c097e7d..8eff053 100644
72 --- a/arch/x86/kernel/entry_32.S
73 +++ b/arch/x86/kernel/entry_32.S
74 @@ -526,6 +526,34 @@ ENTRY(system_call)
75         cmpl $(nr_syscalls), %eax
76         jae syscall_badsys
77  syscall_call:
78 +#ifdef CONFIG_CHOPSTIX
79 +       /* Move Chopstix syscall probe here */
80 +       /* Save and clobber: eax, ecx, ebp  */
81 +       pushl   %eax
82 +       pushl   %ecx
83 +       pushl   %ebp
84 +       movl    %esp, %ebp
85 +       subl    $SPEC_EVENT_SIZE, %esp 
86 +       movl    rec_event, %ecx
87 +       testl   %ecx, %ecx
88 +       jz  carry_on
89 +       # struct event is first, just below %ebp
90 +       movl    %eax, (SPEC_number-EVENT_SIZE)(%ebp)
91 +       leal    -SPEC_EVENT_SIZE(%ebp), %eax
92 +       movl    %eax, EVENT_event_data(%ebp)
93 +       movl    $7, EVENT_event_type(%ebp)
94 +       movl    rec_event, %edx
95 +       movl    $1, 4(%esp)
96 +       leal    -EVENT_SIZE(%ebp), %eax
97 +       movl    %eax, (%esp)
98 +       call    rec_event_asm 
99 +carry_on: 
100 +       addl $SPEC_EVENT_SIZE, %esp
101 +       popl %ebp
102 +       popl %ecx
103 +       popl %eax
104 +       /* End chopstix */
105 +#endif
106         call *sys_call_table(,%eax,4)
107         movl %eax,PT_EAX(%esp)          # store the return value
108  syscall_exit:
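Rendered as C, the assembly probe above does roughly the following (illustration only; the real probe stays in assembly because it runs before the normal C calling environment is set up, and syscall_nr stands in for the value still held in %eax):

static void chopstix_syscall_probe(unsigned int syscall_nr)
{
        struct event ev;
        struct event_spec spec;

        if (!rec_event)
                return;

        spec.number   = syscall_nr;     /* the system call number from %eax   */
        ev.event_data = &spec;          /* the spec sits just below the event */
        ev.event_type = 7;              /* syscall probe                      */
        rec_event_asm(&ev, 1);          /* fills in ev.task and spec.pc, then
                                           forwards to the collector          */
}

rec_event_asm() is the asmlinkage helper added to kernel/sched.c further down; it fills in the current task and program counter before handing the event to the collector.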
109 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
110 index 4302583..85bf9f2 100644
111 --- a/arch/x86/mm/fault.c
112 +++ b/arch/x86/mm/fault.c
113 @@ -62,6 +62,16 @@ static inline int notify_page_fault(struct pt_regs *regs)
114         return ret;
115  }
116  
117 +#ifdef CONFIG_CHOPSTIX
118 +extern void (*rec_event)(void *,unsigned int);
119 +struct event_spec {
120 +       unsigned long pc;
121 +       unsigned long dcookie; 
122 +       unsigned count;
123 +       unsigned char reason;
124 +};
125 +#endif
126 +
127  /*
128   * Prefetch quirks:
129   *
130 diff --git a/block/blk-core.c b/block/blk-core.c
131 index 71da511..1cefcaa 100644
132 --- a/block/blk-core.c
133 +++ b/block/blk-core.c
134 @@ -27,12 +27,23 @@
135  #include <linux/writeback.h>
136  #include <linux/task_io_accounting_ops.h>
137  #include <linux/fault-inject.h>
138 +#include <linux/arrays.h>
139  
140  #define CREATE_TRACE_POINTS
141  #include <trace/events/block.h>
142  
143  #include "blk.h"
144  
145 +#ifdef CONFIG_CHOPSTIX
146 +extern void (*rec_event)(void *,unsigned int);
147 +struct event_spec {
148 +       unsigned long pc;
149 +       unsigned long dcookie;
150 +       unsigned count;
151 +       unsigned char reason;
152 +};
153 +#endif
154 +
155  EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
156  EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
157  EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
158 @@ -1478,6 +1489,24 @@ static inline void __generic_make_request(struct bio *bio)
159  
160                 trace_block_bio_queue(q, bio);
161  
162 +#ifdef CONFIG_CHOPSTIX
163 +               if (rec_event) {
164 +                       struct event event;
165 +                       struct event_spec espec;
166 +                       unsigned long eip;
167 +                       
168 +                       espec.reason = 0;/*request */
169 +
170 +                       eip = bio->bi_end_io;
171 +                       event.event_data=&espec;
172 +                       espec.pc=eip;
173 +                       event.event_type=3; 
174 +                       /* index in the event array currently set up */
175 +                       /* make sure the counters are loaded in the order we want them to show up*/ 
176 +                       (*rec_event)(&event, bio->bi_size);
177 +               }
178 +#endif
179 +
180                 ret = q->make_request_fn(q, bio);
181         } while (ret);
182  
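This hook and its mirror in bio_endio() (fs/bio.c, further down) emit the same type-3 event: espec.pc is the bio's completion callback, the count is bio->bi_size in bytes, and espec.reason distinguishes submission (0) from completion (1), so a collector can pair the two per callback. A hypothetical collector-side decoder, assuming it mirrors the hook-side struct event_spec layout (account_io() is an invented helper):

static void handle_block_event(struct event *ev, unsigned int count)
{
        struct event_spec *spec = ev->event_data;

        /* spec->pc    : bio->bi_end_io, identifies the submitter
         * count       : bio->bi_size in bytes
         * spec->reason: 0 = submitted (__generic_make_request),
         *               1 = completed (bio_endio) */
        if (ev->event_type == 3)
                account_io(spec->pc, spec->reason, count);
}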
183 diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
184 index a7aae24..9817d91 100644
185 --- a/drivers/oprofile/cpu_buffer.c
186 +++ b/drivers/oprofile/cpu_buffer.c
187 @@ -22,6 +22,7 @@
188  #include <linux/sched.h>
189  #include <linux/oprofile.h>
190  #include <linux/errno.h>
191 +#include <linux/arrays.h>
192  
193  #include "event_buffer.h"
194  #include "cpu_buffer.h"
195 @@ -326,6 +327,17 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
196         cpu_buf->tracing = 0;
197  }
198  
199 +#ifdef CONFIG_CHOPSTIX
200 +
201 +struct event_spec {
202 +       unsigned int pc;
203 +       unsigned long dcookie;
204 +       unsigned count;
205 +};
206 +
207 +extern void (*rec_event)(void *,unsigned int);
208 +#endif
209 +
210  static inline void
211  __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
212                           unsigned long event, int is_kernel)
213 @@ -360,7 +372,25 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
214         int is_kernel = !user_mode(regs);
215         unsigned long pc = profile_pc(regs);
216  
217 +#ifdef CONFIG_CHOPSTIX
218 +       if (rec_event) {
219 +               struct event esig;
220 +               struct event_spec espec;
221 +               esig.task = current;
222 +               espec.pc = pc;
223 +               espec.count = 1;
224 +               esig.event_data = &espec;
225 +               esig.event_type = event; /* index in the event array currently set up */
226 +                                       /* make sure the counters are loaded in the order we want them to show up*/ 
227 +               (*rec_event)(&esig, 1);
228 +       }
229 +       else {
230 +               __oprofile_add_ext_sample(pc, regs, event, is_kernel);
231 +       }
232 +#else
233         __oprofile_add_ext_sample(pc, regs, event, is_kernel);
234 +#endif
235 +
236  }
237  
238  /*
239 diff --git a/fs/bio.c b/fs/bio.c
240 index e0c9e71..796767d 100644
241 --- a/fs/bio.c
242 +++ b/fs/bio.c
243 @@ -26,6 +26,7 @@
244  #include <linux/mempool.h>
245  #include <linux/workqueue.h>
246  #include <scsi/sg.h>           /* for struct sg_iovec */
247 +#include <linux/arrays.h>
248  
249  #include <trace/events/block.h>
250  
251 @@ -48,6 +49,7 @@ struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
252  };
253  #undef BV
254  
255 +
256  /*
257   * fs_bio_set is the bio_set containing bio and iovec memory pools used by
258   * IO code that does not need private memory pools.
259 @@ -1398,6 +1400,17 @@ void bio_check_pages_dirty(struct bio *bio)
260         }
261  }
262  
263 +#ifdef CONFIG_CHOPSTIX
264 +struct event_spec {
265 +       unsigned long pc;
266 +       unsigned long dcookie;
267 +       unsigned count;
268 +       unsigned char reason;
269 +};
270 +
271 +extern void (*rec_event)(void *,unsigned int);
272 +#endif
273 +
274  /**
275   * bio_endio - end I/O on a bio
276   * @bio:       bio
277 @@ -1419,6 +1432,24 @@ void bio_endio(struct bio *bio, int error)
278         else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
279                 error = -EIO;
280  
281 +#ifdef CONFIG_CHOPSTIX
282 +               if (rec_event) {
283 +                       struct event event;
284 +                       struct event_spec espec;
285 +                       unsigned long eip;
286 +                       
287 +                       espec.reason = 1;/*response */
288 +
289 +                       eip = bio->bi_end_io;
290 +                       event.event_data=&espec;
291 +                       espec.pc=eip;
292 +                       event.event_type=3; 
293 +                       /* index in the event array currently set up */
294 +                       /* make sure the counters are loaded in the order we want them to show up*/ 
295 +                       (*rec_event)(&event, bio->bi_size);
296 +               }
297 +#endif
298 +
299         if (bio->bi_end_io)
300                 bio->bi_end_io(bio, error);
301  }
302 diff --git a/fs/exec.c b/fs/exec.c
303 index 0a049b8..6c6bcc5 100644
304 --- a/fs/exec.c
305 +++ b/fs/exec.c
306 @@ -27,6 +27,7 @@
307  #include <linux/fdtable.h>
308  #include <linux/mm.h>
309  #include <linux/stat.h>
310 +#include <linux/dcookies.h>
311  #include <linux/fcntl.h>
312  #include <linux/smp_lock.h>
313  #include <linux/swap.h>
314 @@ -673,6 +674,13 @@ struct file *open_exec(const char *name)
315         if (err)
316                 goto exit;
317  
318 +#ifdef CONFIG_CHOPSTIX
319 +       unsigned long cookie;
320 +       extern void (*rec_event)(void *, unsigned int);
321 +       if (rec_event && !nd.path.dentry->d_cookie)
322 +               get_dcookie(&nd.path, &cookie);
323 +#endif
324 +
325  out:
326         return file;
327  
328 diff --git a/include/linux/arrays.h b/include/linux/arrays.h
329 new file mode 100644
330 index 0000000..7641a3c
331 --- /dev/null
332 +++ b/include/linux/arrays.h
333 @@ -0,0 +1,39 @@
334 +#ifndef __ARRAYS_H__
335 +#define __ARRAYS_H__
336 +#include <linux/list.h>
337 +
338 +#define SAMPLING_METHOD_DEFAULT 0
339 +#define SAMPLING_METHOD_LOG 1
340 +
341 +#define DEFAULT_ARRAY_SIZE 2048
342 +
343 +/* Every probe has an array handler */
344 +
345 +/* XXX - Optimize this structure */
346 +
347 +extern void (*rec_event)(void *,unsigned int);
348 +struct array_handler {
349 +       struct list_head link;
350 +       unsigned int (*hash_func)(void *);
351 +       unsigned int (*sampling_func)(void *,int,void *);
352 +       unsigned short size;
353 +       unsigned int threshold;
354 +       unsigned char **expcount;
355 +       unsigned int sampling_method;
356 +       unsigned int **arrays;
357 +       unsigned int arraysize;
358 +       unsigned int num_samples[2];
359 +       void **epoch_samples; /* size-sized lists of samples */
360 +       unsigned int (*serialize)(void *, void *);
361 +       unsigned char code[5];
362 +       unsigned int last_threshold;
363 +};
364 +
365 +struct event {
366 +       struct list_head link;
367 +       void *event_data;
368 +       unsigned int count;
369 +       unsigned int event_type;
370 +       struct task_struct *task;
371 +};
372 +#endif
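include/linux/arrays.h is the only new header: struct event is the envelope every probe fills in, and struct array_handler describes the lossy per-probe summaries ("arrays") a collector maintains before shipping them to user space. The event_type values are bare integers scattered across the hooks; gathered here purely for reference (the names are invented, the patch itself only uses the numbers, and oprofile samples in drivers/oprofile/cpu_buffer.c reuse the oprofile counter number as their event_type instead):

enum chopstix_event_type {
        CHOPSTIX_EV_SCHED      = 2,     /* kernel/sched.c: scheduling delay            */
        CHOPSTIX_EV_BLOCK_IO   = 3,     /* block/blk-core.c, fs/bio.c: submit/complete */
        CHOPSTIX_EV_SLAB       = 4,     /* mm/slab.c: allocation and free              */
        CHOPSTIX_EV_MUTEX      = 5,     /* kernel/mutex.c: contention (compiled out)   */
        CHOPSTIX_EV_PAGE_FAULT = 6,     /* mm/memory.c: handle_mm_fault                */
        CHOPSTIX_EV_SYSCALL    = 7,     /* arch/x86/kernel/entry_32.S: system call     */
};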
373 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
374 index 878cab4..8bac64d 100644
375 --- a/include/linux/mutex.h
376 +++ b/include/linux/mutex.h
377 @@ -57,6 +57,9 @@ struct mutex {
378         const char              *name;
379         void                    *magic;
380  #endif
381 +#ifdef CONFIG_CHOPSTIX
382 +       struct thread_info      *owner;
383 +#endif
384  #ifdef CONFIG_DEBUG_LOCK_ALLOC
385         struct lockdep_map      dep_map;
386  #endif
387 diff --git a/include/linux/sched.h b/include/linux/sched.h
388 index c9d3cae..dd62888 100644
389 --- a/include/linux/sched.h
390 +++ b/include/linux/sched.h
391 @@ -1349,6 +1349,11 @@ struct task_struct {
392         cputime_t utime, stime, utimescaled, stimescaled;
393         cputime_t gtime;
394         cputime_t prev_utime, prev_stime;
395 +
396 +    #ifdef CONFIG_CHOPSTIX
397 +            unsigned long last_interrupted, last_ran_j;
398 +    #endif
399 +
400         unsigned long nvcsw, nivcsw; /* context switch counts */
401         struct timespec start_time;             /* monotonic time */
402         struct timespec real_start_time;        /* boot based time */
403 diff --git a/kernel/mutex.c b/kernel/mutex.c
404 index 947b3ad..ae1dc67 100644
405 --- a/kernel/mutex.c
406 +++ b/kernel/mutex.c
407 @@ -23,6 +23,16 @@
408  #include <linux/spinlock.h>
409  #include <linux/interrupt.h>
410  #include <linux/debug_locks.h>
411 +#include <linux/arrays.h>
412 +
413 +#ifdef CONFIG_CHOPSTIX
414 +struct event_spec {
415 +       unsigned long pc;
416 +       unsigned long dcookie;
417 +       unsigned count;
418 +       unsigned char reason;
419 +};
420 +#endif
421  
422  /*
423   * In the DEBUG case we are using the "NULL fastpath" for mutexes,
424 @@ -49,6 +59,9 @@ void
425  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
426  {
427         atomic_set(&lock->count, 1);
428 +#ifdef CONFIG_CHOPSTIX
429 +       lock->owner = NULL;
430 +#endif
431         spin_lock_init(&lock->wait_lock);
432         INIT_LIST_HEAD(&lock->wait_list);
433         mutex_clear_owner(lock);
434 @@ -247,6 +260,25 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
435                 }
436                 __set_task_state(task, state);
437  
438 +#if 0 && CONFIG_CHOPSTIX
439 +               if (rec_event) {
440 +                       if (lock->owner) {
441 +                               struct event event;
442 +                               struct event_spec espec;
443 +                               struct task_struct *p = lock->owner->task;
444 +
445 +                               espec.reason = 0; /* lock */
446 +                               event.event_data = &espec;
447 +                               event.task = p;
448 +                               espec.pc = lock;
449 +                               event.event_type = 5;
450 +                               (*rec_event)(&event, 1);
451 +                       } else {
452 +                               BUG();
453 +                       }
454 +               }
455 +#endif
456 +
457                 /* didnt get the lock, go to sleep: */
458                 spin_unlock_mutex(&lock->wait_lock, flags);
459                 preempt_enable_no_resched();
460 @@ -261,6 +293,10 @@ done:
461         mutex_remove_waiter(lock, &waiter, current_thread_info());
462         mutex_set_owner(lock);
463  
464 +#ifdef CONFIG_CHOPSTIX
465 +       lock->owner = task_thread_info(task);
466 +#endif
467 +
468         /* set it to 0 if there are no waiters left: */
469         if (likely(list_empty(&lock->wait_list)))
470                 atomic_set(&lock->count, 0);
471 @@ -331,6 +367,25 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
472  
473                 debug_mutex_wake_waiter(lock, waiter);
474  
475 +#if 0 && CONFIG_CHOPSTIX
476 +               if (rec_event) {
477 +                       if (lock->owner) {
478 +                               struct event event;
479 +                               struct event_spec espec;
480 +                               struct task_struct *p = lock->owner->task;
481 +
482 +                               espec.reason = 1; /* unlock */
483 +                               event.event_data = &espec;
484 +                               event.task = p;
485 +                               espec.pc = lock;
486 +                               event.event_type = 5;
487 +                               (*rec_event)(&event, 1);
488 +                       } else {
489 +                               BUG();
490 +                       }
491 +               }
492 +#endif
493 +
494                 wake_up_process(waiter->task);
495         }
496  
497 diff --git a/kernel/sched.c b/kernel/sched.c
498 index 90b63b8..43b728e 100644
499 --- a/kernel/sched.c
500 +++ b/kernel/sched.c
501 @@ -10,7 +10,7 @@
502   *  1998-11-19 Implemented schedule_timeout() and related stuff
503   *             by Andrea Arcangeli
504   *  2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
505 - *             hybrid priority-list and round-robin design with
506 + *             hybrid priority-list and round-robin design with
507   *             an array-switch method of distributing timeslices
508   *             and per-CPU runqueues.  Cleanups and useful suggestions
509   *             by Davide Libenzi, preemptible kernel bits by Robert Love.
510 @@ -73,12 +73,16 @@
511  #include <linux/ftrace.h>
512  #include <linux/vs_sched.h>
513  #include <linux/vs_cvirt.h>
514 +#include <linux/arrays.h>
515  
516  #include <asm/tlb.h>
517  #include <asm/irq_regs.h>
518  
519  #include "sched_cpupri.h"
520  
521 +#define INTERRUPTIBLE   -1
522 +#define RUNNING         0
523 +
524  #define CREATE_TRACE_POINTS
525  #include <trace/events/sched.h>
526  
527 @@ -2742,6 +2746,10 @@ static void __sched_fork(struct task_struct *p)
528         INIT_HLIST_HEAD(&p->preempt_notifiers);
529  #endif
530  
531 +#ifdef CONFIG_CHOPSTIX
532 +    p->last_ran_j = jiffies;
533 +    p->last_interrupted = INTERRUPTIBLE;
534 +#endif
535         /*
536          * We mark the process as running here, but have not actually
537          * inserted it onto the runqueue yet. This guarantees that
538 @@ -5659,6 +5667,30 @@ pick_next_task(struct rq *rq)
539         }
540  }
541  
542 +#ifdef CONFIG_CHOPSTIX
543 +void (*rec_event)(void *,unsigned int) = NULL;
544 +EXPORT_SYMBOL(rec_event);
545 +EXPORT_SYMBOL(in_sched_functions);
546 +
547 +struct event_spec {
548 +    unsigned long pc;
549 +    unsigned long dcookie;
550 +    unsigned int count;
551 +    unsigned int reason;
552 +};
553 +
554 +/* To support safe calling from asm */
555 +asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
556 +    struct pt_regs *regs;
557 +    struct event_spec *es = event_signature_in->event_data;
558 +    regs = task_pt_regs(current);
559 +    event_signature_in->task=current;
560 +    es->pc=regs->ip;
561 +    event_signature_in->count=1;
562 +    (*rec_event)(event_signature_in, count);
563 +}
564 +#endif
565 +
566  /*
567   * schedule() is the main scheduler function.
568   */
569 @@ -5706,6 +5738,54 @@ need_resched_nonpreemptible:
570         next = pick_next_task(rq);
571  
572         if (likely(prev != next)) {
573 +
574 +#ifdef CONFIG_CHOPSTIX
575 +               /* Run only if the Chopstix module so decrees it */
576 +               if (rec_event) {
577 +                       unsigned long diff;
578 +                       int sampling_reason;
579 +                       prev->last_ran_j = jiffies;
580 +                       if (next->last_interrupted!=INTERRUPTIBLE) {
581 +                               if (next->last_interrupted!=RUNNING) {
582 +                                       diff = (jiffies-next->last_interrupted);
583 +                                       sampling_reason = 0;/* BLOCKING */
584 +                               }
585 +                               else {
586 +                                       diff = jiffies-next->last_ran_j; 
587 +                                       sampling_reason = 1;/* PREEMPTION */
588 +                               }
589 +
590 +                               if (diff >= HZ/10) {
591 +                                       struct event event;
592 +                                       struct event_spec espec;
593 +                                       struct pt_regs *regs;
594 +                                       regs = task_pt_regs(current);
595 +       
596 +                                       espec.reason = sampling_reason;
597 +                                       event.event_data=&espec;
598 +                                       event.task=next;
599 +                                       espec.pc=regs->ip;
600 +                                       event.event_type=2; 
601 +                                       /* index in the event array currently set up */
602 +                                       /* make sure the counters are loaded in the order we want them to show up*/ 
603 +                                       (*rec_event)(&event, diff);
604 +                               }
605 +                       }
606 +                       /* next has been elected to run */
607 +                       next->last_interrupted=0;
608 +
609 +                       /* An uninterruptible process just yielded. Record the current jiffy */
610 +                       if (prev->state & TASK_UNINTERRUPTIBLE) {
611 +                               prev->last_interrupted=jiffies;
612 +                       }
613 +                       /* An interruptible process just yielded, or it got preempted. 
614 +                        * Mark it as interruptible */
615 +                       else if (prev->state & TASK_INTERRUPTIBLE) {
616 +                               prev->last_interrupted=INTERRUPTIBLE;
617 +                       }
618 +               }
619 +#endif
620 +
621                 sched_info_switch(prev, next);
622                 perf_event_task_sched_out(prev, next, cpu);
623  
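The scheduler probe is the most involved one: each task carries last_ran_j (the jiffy it last left the CPU) and last_interrupted (INTERRUPTIBLE, RUNNING, or the jiffy it went uninterruptible), and when the incoming task has waited at least HZ/10 jiffies (100 ms regardless of CONFIG_HZ) a type-2 event is emitted whose count is the delay in jiffies, with reason 0 for a blocking wait and 1 for time spent runnable or preempted. A hypothetical collector-side handler, again assuming it mirrors the hook-side struct event_spec:

static void handle_sched_event(struct event *ev, unsigned int delay_jiffies)
{
        struct event_spec *spec = ev->event_data;
        unsigned int delay_ms = jiffies_to_msecs(delay_jiffies);

        /* The hook only fires for delays of at least HZ/10 jiffies, so
         * delay_ms is always >= 100. */
        pr_debug("chopstix: %s delayed %u ms (%s), pc=%#lx\n",
                 ev->task->comm, delay_ms,
                 spec->reason ? "runnable/preempted" : "blocked",
                 spec->pc);
}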
624 diff --git a/mm/memory.c b/mm/memory.c
625 index e828063..6e88fed 100644
626 --- a/mm/memory.c
627 +++ b/mm/memory.c
628 @@ -57,6 +57,7 @@
629  #include <linux/swapops.h>
630  #include <linux/elf.h>
631  // #include <linux/vs_memory.h>
632 +#include <linux/arrays.h>
633  
634  #include <asm/io.h>
635  #include <asm/pgalloc.h>
636 @@ -3070,6 +3071,16 @@ out:
637         return ret;
638  }
639  
640 +#ifdef CONFIG_CHOPSTIX
641 +extern void (*rec_event)(void *,unsigned int);
642 +struct event_spec {
643 +       unsigned long pc;
644 +       unsigned long dcookie; 
645 +       unsigned count;
646 +       unsigned char reason;
647 +};
648 +#endif
649 +
650  /*
651   * By the time we get here, we already hold the mm semaphore
652   */
653 @@ -3115,6 +3126,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
654         if (!pte)
655                 return VM_FAULT_OOM;
656  
657 +#ifdef CONFIG_CHOPSTIX
658 +       if (rec_event) {
659 +               struct event event;
660 +               struct event_spec espec;
661 +               struct pt_regs *regs;
662 +               unsigned int pc;
663 +               regs = task_pt_regs(current);
664 +               pc = regs->ip & (unsigned int) ~4095;
665 +
666 +               espec.reason = 0; /* alloc */
667 +               event.event_data=&espec;
668 +               event.task = current;
669 +               espec.pc=pc;
670 +               event.event_type = 6;
671 +               (*rec_event)(&event, 1);
672 +       }
673 +#endif
674 +
675         return handle_pte_fault(mm, vma, address, pte, pmd, flags);
676  }
677  
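The fault probe fires once per handled fault and records only the 4 KiB code page of the faulting instruction (regs->ip masked with ~4095), enough to attribute faults to a code page without taking a full backtrace. Illustration only:

/* e.g. an instruction pointer of 0xc01234ab is recorded as 0xc0123000 */
static unsigned long chopstix_code_page(unsigned long ip)
{
        return ip & ~4095UL;
}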
678 diff --git a/mm/slab.c b/mm/slab.c
679 index ad2828e..5acdf6c 100644
680 --- a/mm/slab.c
681 +++ b/mm/slab.c
682 @@ -113,6 +113,7 @@
683  #include       <linux/fault-inject.h>
684  #include       <linux/rtmutex.h>
685  #include       <linux/reciprocal_div.h>
686 +#include <linux/arrays.h>
687  #include       <linux/debugobjects.h>
688  #include       <linux/kmemcheck.h>
689  
690 @@ -252,6 +253,16 @@ struct slab_rcu {
691         void *addr;
692  };
693  
694 +#ifdef CONFIG_CHOPSTIX
695 +extern void (*rec_event)(void *,unsigned int);
696 +struct event_spec {
697 +       unsigned long pc;
698 +       unsigned long dcookie; 
699 +       unsigned count;
700 +       unsigned char reason;
701 +};
702 +#endif
703 +
704  /*
705   * struct array_cache
706   *
707 @@ -3400,6 +3411,19 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
708         kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
709                                  flags);
710         prefetchw(objp);
711 +#ifdef CONFIG_CHOPSTIX
712 +       if (rec_event && objp) {
713 +               struct event event;
714 +               struct event_spec espec;
715 +
716 +               espec.reason = 0; /* alloc */
717 +               event.event_data=&espec;
718 +               event.task = current;
719 +               espec.pc=caller;
720 +               event.event_type=4; 
721 +               (*rec_event)(&event, cachep->buffer_size);
722 +       }
723 +#endif
724  
725         if (likely(objp))
726                 kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
727 @@ -3512,13 +3536,28 @@ free_done:
728   * Release an obj back to its cache. If the obj has a constructed state, it must
729   * be in this state _before_ it is released.  Called with disabled ints.
730   */
731 -static inline void __cache_free(struct kmem_cache *cachep, void *objp)
732 +static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
733  {
734         struct array_cache *ac = cpu_cache_get(cachep);
735  
736         check_irq_off();
737         kmemleak_free_recursive(objp, cachep->flags);
738 -       objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
739 +       objp = cache_free_debugcheck(cachep, objp, caller);
740 +
741 +#ifdef CONFIG_CHOPSTIX
742 +       if (rec_event && objp) {
743 +               struct event event;
744 +               struct event_spec espec;
745 +
746 +               espec.reason = 1; /* free */
747 +               event.event_data = &espec;
748 +               event.task = current;
749 +               espec.pc = caller;
750 +               event.event_type = 4; 
751 +               (*rec_event)(&event, cachep->buffer_size);
752 +       }
753 +#endif
754 +
755         vx_slab_free(cachep);
756  
757         kmemcheck_slab_free(cachep, objp, obj_size(cachep));
758 @@ -3720,10 +3759,17 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
759  EXPORT_SYMBOL(__kmalloc_track_caller);
760  
761  #else
762 +#ifdef CONFIG_CHOPSTIX
763 +void *__kmalloc(size_t size, gfp_t flags)
764 +{
765 +       return __do_kmalloc(size, flags, __builtin_return_address(0));
766 +}
767 +#else
768  void *__kmalloc(size_t size, gfp_t flags)
769  {
770         return __do_kmalloc(size, flags, NULL);
771  }
772 +#endif
773  EXPORT_SYMBOL(__kmalloc);
774  #endif
775  
776 @@ -3743,7 +3789,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
777         debug_check_no_locks_freed(objp, obj_size(cachep));
778         if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
779                 debug_check_no_obj_freed(objp, obj_size(cachep));
780 -       __cache_free(cachep, objp);
781 +       __cache_free(cachep, objp,__builtin_return_address(0));
782         local_irq_restore(flags);
783  
784         trace_kmem_cache_free(_RET_IP_, objp);
785 @@ -3773,7 +3819,7 @@ void kfree(const void *objp)
786         c = virt_to_cache(objp);
787         debug_check_no_locks_freed(objp, obj_size(c));
788         debug_check_no_obj_freed(objp, obj_size(c));
789 -       __cache_free(c, (void *)objp);
790 +       __cache_free(c, (void *)objp,__builtin_return_address(0));
791         local_irq_restore(flags);
792  }
793  EXPORT_SYMBOL(kfree);
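With __cache_free() now taking the caller address, both the allocation (reason 0) and free (reason 1) events carry the call site in espec.pc and are weighted by cachep->buffer_size, the object size of the cache. A collector can therefore approximate the live slab footprint per call site by adding the size on allocation and subtracting it on free; a hypothetical handler (account_slab_bytes() is an invented helper, and the event_spec layout mirrors the hook side):

static void handle_slab_event(struct event *ev, unsigned int object_size)
{
        struct event_spec *spec = ev->event_data;
        long delta = spec->reason ? -(long)object_size : (long)object_size;

        /* spec->pc is __builtin_return_address(0) of the kmalloc/kfree or
         * kmem_cache_alloc/kmem_cache_free caller; summing delta per pc
         * approximates the live slab bytes attributable to each call site. */
        account_slab_bytes(spec->pc, delta);
}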