From 6f68de5f723e57e2709b468f55914fd0f963ce90 Mon Sep 17 00:00:00 2001
From: S.Çağlar Onur <caglar@cs.princeton.edu>
Date: Tue, 7 Dec 2010 11:09:43 -0500
Subject: [PATCH] linux-2.6-591-chopstix-intern.patch

block/blk-core.c: In function '__generic_make_request':
block/blk-core.c:1557: warning: assignment makes integer from pointer without a cast
fs/exec.c: In function 'open_exec':
fs/exec.c:698: warning: ISO C90 forbids mixed declarations and code
fs/bio.c: In function 'bio_endio':
fs/bio.c:1440: warning: assignment makes integer from pointer without a cast
mm/slab.c: In function '__cache_alloc':
mm/slab.c:3513: warning: assignment makes integer from pointer without a cast
mm/slab.c: In function '__cache_free':
mm/slab.c:3646: warning: assignment makes integer from pointer without a cast

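All probe sites funnel into a single hook, rec_event, which is defined in
kernel/sched.c, exported to modules, and NULL until the Chopstix module
arms it; every site tests the pointer first, so the probes reduce to a
test-and-branch when the module is not loaded. Each sample is a
struct event (include/linux/arrays.h) tagged with an event_type index:

  2 - scheduler delay: blocking or preemption (kernel/sched.c)
  3 - block I/O request/response (block/blk-core.c, fs/bio.c)
  4 - slab allocation/free (mm/slab.c)
  5 - mutex lock/unlock (kernel/mutex.c, currently compiled out via #if 0)
  6 - page-fault handling (mm/memory.c)
  7 - system-call entry (arch/x86/kernel/entry_32.S)

oprofile_add_sample() forwards the oprofile event number as the type.
The warnings quoted above are expected: the probe sites assign pointers
(bio->bi_end_io, the slab caller address) to the integer pc fields of
struct event_spec.

For reference, a minimal sketch of the module side of the hook; the
handler and function names below are illustrative, not part of this
patch:

	#include <linux/module.h>
	#include <linux/arrays.h>	/* struct event, rec_event hook */

	/* Hypothetical consumer: would summarize samples into the lossy
	 * arrays described by struct array_handler. */
	static void chopstix_record(void *event_sig, unsigned int count)
	{
		struct event *e = event_sig;

		pr_debug("chopstix: type %u count %u\n", e->event_type, count);
	}

	static int __init chopstix_init(void)
	{
		rec_event = chopstix_record;	/* arm every probe site */
		return 0;
	}

	static void __exit chopstix_exit(void)
	{
		rec_event = NULL;		/* disarm the probe sites */
	}

	module_init(chopstix_init);
	module_exit(chopstix_exit);
	MODULE_LICENSE("GPL");

Note that arming is a plain pointer store with no barrier or reference
counting; the probes tolerate a NULL hook but not a module that unloads
without quiescing them first.
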
---
 arch/Kconfig                     |    8 ++++
 arch/x86/kernel/asm-offsets_32.c |   25 +++++++++++
 arch/x86/kernel/entry_32.S       |   28 +++++++++++++
 arch/x86/mm/fault.c              |   10 +++++
 block/blk-core.c                 |   29 +++++++++++++
 drivers/oprofile/cpu_buffer.c    |   30 ++++++++++++++
 fs/bio.c                         |   31 ++++++++++++++
 fs/exec.c                        |    8 ++++
 include/linux/arrays.h           |   39 ++++++++++++++++++
 include/linux/mutex.h            |    2 +-
 include/linux/sched.h            |    5 ++
 kernel/mutex.c                   |   55 +++++++++++++++++++++++++
 kernel/sched.c                   |   82 +++++++++++++++++++++++++++++++++++++-
 mm/memory.c                      |   29 +++++++++++++
 mm/slab.c                        |   54 +++++++++++++++++++++++--
 15 files changed, 429 insertions(+), 6 deletions(-)
 create mode 100644 include/linux/arrays.h

diff --git a/arch/Kconfig b/arch/Kconfig
index cdea504..608c64d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -27,6 +27,14 @@ config OPROFILE_EVENT_MULTIPLEX
 
          If unsure, say N.
 
+config CHOPSTIX
+       bool "Chopstix (PlanetLab)"
+       depends on MODULES && OPROFILE
+       help
+         Chopstix allows you to monitor various events by summarizing them
+         in lossy data structures and transferring these data structures
+         into user space. If in doubt, say "N".
+
 config HAVE_OPROFILE
        bool
 
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index dfdbf64..29c79b8 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
+#include <linux/arrays.h>
 #include <linux/kbuild.h>
 #include <asm/ucontext.h>
 #include <asm/sigframe.h>
@@ -25,6 +26,18 @@
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
+#ifdef CONFIG_CHOPSTIX
+#define STACKOFFSET(sym, str, mem) \
+       DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
+
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned int number;
+};
+#endif
+
 /* workaround for a warning with -Wmissing-prototypes */
 void foo(void);
 
@@ -51,6 +64,18 @@ void foo(void)
        OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
        BLANK();
 
+#ifdef CONFIG_CHOPSTIX
+       STACKOFFSET(TASK_thread, task_struct, thread);
+       STACKOFFSET(THREAD_esp, thread_struct, sp);
+       STACKOFFSET(EVENT_event_data, event, event_data);
+       STACKOFFSET(EVENT_task, event, task);
+       STACKOFFSET(EVENT_event_type, event, event_type);
+       STACKOFFSET(SPEC_number, event_spec, number);
+       DEFINE(EVENT_SIZE, sizeof(struct event));
+       DEFINE(SPEC_SIZE, sizeof(struct event_spec));
+       DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
+#endif
+
        OFFSET(TI_task, thread_info, task);
        OFFSET(TI_exec_domain, thread_info, exec_domain);
        OFFSET(TI_flags, thread_info, flags);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 8b8db35..fc82d32 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -538,6 +538,34 @@ ENTRY(system_call)
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
 syscall_call:
+#ifdef CONFIG_CHOPSTIX
+       /* Move Chopstix syscall probe here */
+       /* Save and clobber: eax, ecx, ebp  */
+       pushl   %eax
+       pushl   %ecx
+       pushl   %ebp
+       movl    %esp, %ebp
+       subl    $SPEC_EVENT_SIZE, %esp
+       movl    rec_event, %ecx
+       testl   %ecx, %ecx
+       jz  carry_on
+       # struct event is first, just below %ebp
+       movl    %eax, (SPEC_number-EVENT_SIZE)(%ebp)
+       leal    -SPEC_EVENT_SIZE(%ebp), %eax
+       movl    %eax, EVENT_event_data(%ebp)
+       movl    $7, EVENT_event_type(%ebp)
+       movl    rec_event, %edx
+       movl    $1, 4(%esp)
+       leal    -EVENT_SIZE(%ebp), %eax
+       movl    %eax, (%esp)
+       call    rec_event_asm
+carry_on:
+       addl $SPEC_EVENT_SIZE, %esp
+       popl %ebp
+       popl %ecx
+       popl %eax
+       /* End chopstix */
+#endif
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b899fb7..c827e81 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -65,6 +65,16 @@ static inline int notify_page_fault(struct pt_regs *regs)
        return ret;
 }
 
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 /*
  * Prefetch quirks:
  *
diff --git a/block/blk-core.c b/block/blk-core.c
index 48dbd8d..94030b1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -27,12 +27,23 @@
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/fault-inject.h>
+#include <linux/arrays.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
 
 #include "blk.h"
 
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -1568,6 +1579,24 @@ static inline void __generic_make_request(struct bio *bio)
 
                trace_block_bio_queue(q, bio);
 
+#ifdef CONFIG_CHOPSTIX
+               if (rec_event) {
+                       struct event event;
+                       struct event_spec espec;
+                       unsigned long eip;
+
+                       espec.reason = 0;/*request */
+
+                       eip = bio->bi_end_io;
+                       event.event_data=&espec;
+                       espec.pc=eip;
+                       event.event_type=3;
+                       /* index in the event array currently set up */
+                       /* make sure the counters are loaded in the order we want them to show up*/
+                       (*rec_event)(&event, bio->bi_size);
+               }
+#endif
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
 
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 9e0ef46..f6217fd 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/oprofile.h>
 #include <linux/errno.h>
+#include <linux/arrays.h>
 
 #include "event_buffer.h"
 #include "cpu_buffer.h"
@@ -286,6 +287,17 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
        cpu_buf->tracing = 0;
 }
 
+#ifdef CONFIG_CHOPSTIX
+
+struct event_spec {
+       unsigned int pc;
+       unsigned long dcookie;
+       unsigned count;
+};
+
+extern void (*rec_event)(void *,unsigned int);
+#endif
+
 static inline void
 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                          unsigned long event, int is_kernel)
@@ -328,7 +340,25 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
                pc = ESCAPE_CODE; /* as this causes an early return. */
        }
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event esig;
+               struct event_spec espec;
+               esig.task = current;
+               espec.pc = pc;
+               espec.count = 1;
+               esig.event_data = &espec;
+               esig.event_type = event; /* index in the event array currently set up */
+                                       /* make sure the counters are loaded in the order we want them to show up*/
+               (*rec_event)(&esig, 1);
+       }
+       else {
+               __oprofile_add_ext_sample(pc, regs, event, is_kernel);
+       }
+#else
        __oprofile_add_ext_sample(pc, regs, event, is_kernel);
+#endif
+
 }
 
 /*
diff --git a/fs/bio.c b/fs/bio.c
index 06f71fc..56fc42d 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -26,6 +26,7 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <scsi/sg.h>           /* for struct sg_iovec */
+#include <linux/arrays.h>
 
 #include <trace/events/block.h>
 
@@ -48,6 +49,7 @@ struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 };
 #undef BV
 
+
 /*
  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  * IO code that does not need private memory pools.
@@ -1408,6 +1410,17 @@ void bio_check_pages_dirty(struct bio *bio)
        }
 }
 
+#ifdef CONFIG_CHOPSTIX
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
+extern void (*rec_event)(void *,unsigned int);
+#endif
+
 /**
  * bio_endio - end I/O on a bio
  * @bio:       bio
@@ -1429,6 +1442,24 @@ void bio_endio(struct bio *bio, int error)
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;
 
+#ifdef CONFIG_CHOPSTIX
+               if (rec_event) {
+                       struct event event;
+                       struct event_spec espec;
+                       unsigned long eip;
+
+                       espec.reason = 1;/*response */
+
+                       eip = bio->bi_end_io;
+                       event.event_data=&espec;
+                       espec.pc=eip;
+                       event.event_type=3;
+                       /* index in the event array currently set up */
+                       /* make sure the counters are loaded in the order we want them to show up*/
+                       (*rec_event)(&event, bio->bi_size);
+               }
+#endif
+
        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
 }
diff --git a/fs/exec.c b/fs/exec.c
index 7fdbf49..6f2d772 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -27,6 +27,7 @@
 #include <linux/fdtable.h>
 #include <linux/mm.h>
 #include <linux/stat.h>
+#include <linux/dcookies.h>
 #include <linux/fcntl.h>
 #include <linux/smp_lock.h>
 #include <linux/swap.h>
@@ -735,6 +736,13 @@ struct file *open_exec(const char *name)
        if (err)
                goto exit;
 
+#ifdef CONFIG_CHOPSTIX
+       unsigned long cookie;
+       extern void (*rec_event)(void *, unsigned int);
+       if (rec_event && !(file->f_path.dentry->d_flags & DCACHE_COOKIE))
+               get_dcookie(&file->f_path, &cookie);
+#endif
+
 out:
        return file;
 
diff --git a/include/linux/arrays.h b/include/linux/arrays.h
new file mode 100644
index 0000000..7641a3c
--- /dev/null
+++ b/include/linux/arrays.h
@@ -0,0 +1,39 @@
+#ifndef __ARRAYS_H__
+#define __ARRAYS_H__
+#include <linux/list.h>
+
+#define SAMPLING_METHOD_DEFAULT 0
+#define SAMPLING_METHOD_LOG 1
+
+#define DEFAULT_ARRAY_SIZE 2048
+
+/* Every probe has an array handler */
+
+/* XXX - Optimize this structure */
+
+extern void (*rec_event)(void *,unsigned int);
+struct array_handler {
+       struct list_head link;
+       unsigned int (*hash_func)(void *);
+       unsigned int (*sampling_func)(void *,int,void *);
+       unsigned short size;
+       unsigned int threshold;
+       unsigned char **expcount;
+       unsigned int sampling_method;
+       unsigned int **arrays;
+       unsigned int arraysize;
+       unsigned int num_samples[2];
+       void **epoch_samples; /* size-sized lists of samples */
+       unsigned int (*serialize)(void *, void *);
+       unsigned char code[5];
+       unsigned int last_threshold;
+};
+
+struct event {
+       struct list_head link;
+       void *event_data;
+       unsigned int count;
+       unsigned int event_type;
+       struct task_struct *task;
+};
+#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4..6c21914 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,7 +50,7 @@ struct mutex {
        atomic_t                count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) || defined(CONFIG_CHOPSTIX)
        struct thread_info      *owner;
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b0cb58b..45f69c3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1398,6 +1398,11 @@ struct task_struct {
        cputime_t utime, stime, utimescaled, stimescaled;
        cputime_t gtime;
        cputime_t prev_utime, prev_stime;
+
+    #ifdef CONFIG_CHOPSTIX
+            unsigned long last_interrupted, last_ran_j;
+    #endif
+
        unsigned long nvcsw, nivcsw; /* context switch counts */
        struct timespec start_time;             /* monotonic time */
        struct timespec real_start_time;        /* boot based time */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index e04aa45..196ac04 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -23,6 +23,16 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
+#include <linux/arrays.h>
+
+#ifdef CONFIG_CHOPSTIX
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -49,6 +59,9 @@ void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
        atomic_set(&lock->count, 1);
+#ifdef CONFIG_CHOPSTIX
+       lock->owner = NULL;
+#endif
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
@@ -254,6 +267,25 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                }
                __set_task_state(task, state);
 
+#if 0 && CONFIG_CHOPSTIX
+               if (rec_event) {
+                       if (lock->owner) {
+                               struct event event;
+                               struct event_spec espec;
+                               struct task_struct *p = lock->owner->task;
+
+                               espec.reason = 0; /* lock */
+                               event.event_data = &espec;
+                               event.task = p;
+                               espec.pc = lock;
+                               event.event_type = 5;
+                               (*rec_event)(&event, 1);
+                       } else {
+                               BUG();
+                       }
+               }
+#endif
+
                /* didnt get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                preempt_enable_no_resched();
@@ -268,6 +300,10 @@ done:
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);
 
+#ifdef CONFIG_CHOPSTIX
+       lock->owner = task_thread_info(task);
+#endif
+
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
@@ -338,6 +374,25 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 
                debug_mutex_wake_waiter(lock, waiter);
 
+#if 0 && CONFIG_CHOPSTIX
+               if (rec_event) {
+                       if (lock->owner) {
+                               struct event event;
+                               struct event_spec espec;
+                               struct task_struct *p = lock->owner->task;
+
+                               espec.reason = 1; /* unlock */
+                               event.event_data = &espec;
+                               event.task = p;
+                               espec.pc = lock;
+                               event.event_type = 5;
+                               (*rec_event)(&event, 1);
+                       } else {
+                               BUG();
+                       }
+               }
+#endif
+
                wake_up_process(waiter->task);
        }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 1e90fc0..aa4d3d7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -10,7 +10,7 @@
  *  1998-11-19 Implemented schedule_timeout() and related stuff
  *             by Andrea Arcangeli
  *  2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
- *             hybrid priority-list and round-robin design with
+ *             hybrid priority-list and round-robin design with
  *             an array-switch method of distributing timeslices
  *             and per-CPU runqueues.  Cleanups and useful suggestions
  *             by Davide Libenzi, preemptible kernel bits by Robert Love.
@@ -73,6 +73,7 @@
 #include <linux/ftrace.h>
 #include <linux/vs_sched.h>
 #include <linux/vs_cvirt.h>
+#include <linux/arrays.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -80,6 +81,9 @@
 #include "sched_cpupri.h"
 #include "sched_autogroup.h"
 
+#define INTERRUPTIBLE   -1
+#define RUNNING         0
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
@@ -2670,6 +2674,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
        rq = cpu_rq(cpu);
        spin_lock(&rq->lock);
 
+#ifdef CONFIG_CHOPSTIX
+    p->last_ran_j = jiffies;
+    p->last_interrupted = INTERRUPTIBLE;
+#endif
        /*
         * We migrated the task without holding either rq->lock, however
         * since the task is not on the task list itself, nobody else
@@ -5988,6 +5996,30 @@ pick_next_task(struct rq *rq)
        }
 }
 
+#ifdef CONFIG_CHOPSTIX
+void (*rec_event)(void *,unsigned int) = NULL;
+EXPORT_SYMBOL(rec_event);
+EXPORT_SYMBOL(in_sched_functions);
+
+struct event_spec {
+    unsigned long pc;
+    unsigned long dcookie;
+    unsigned int count;
+    unsigned int reason;
+};
+
+/* To support safe calling from asm */
+asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
+    struct pt_regs *regs;
+    struct event_spec *es = event_signature_in->event_data;
+    regs = task_pt_regs(current);
+    event_signature_in->task=current;
+    es->pc=regs->ip;
+    event_signature_in->count=1;
+    (*rec_event)(event_signature_in, count);
+}
+#endif
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -6034,6 +6066,54 @@ need_resched_nonpreemptible:
        next = pick_next_task(rq);
 
        if (likely(prev != next)) {
+
+#ifdef CONFIG_CHOPSTIX
+               /* Run only if the Chopstix module so decrees it */
+               if (rec_event) {
+                       unsigned long diff;
+                       int sampling_reason;
+                       prev->last_ran_j = jiffies;
+                       if (next->last_interrupted!=INTERRUPTIBLE) {
+                               if (next->last_interrupted!=RUNNING) {
+                                       diff = (jiffies-next->last_interrupted);
+                                       sampling_reason = 0;/* BLOCKING */
+                               }
+                               else {
+                                       diff = jiffies-next->last_ran_j;
+                                       sampling_reason = 1;/* PREEMPTION */
+                               }
+
+                               if (diff >= HZ/10) {
+                                       struct event event;
+                                       struct event_spec espec;
+                                       struct pt_regs *regs;
+                                       regs = task_pt_regs(current);
+
+                                       espec.reason = sampling_reason;
+                                       event.event_data=&espec;
+                                       event.task=next;
+                                       espec.pc=regs->ip;
+                                       event.event_type=2;
+                                       /* index in the event array currently set up */
+                                       /* make sure the counters are loaded in the order we want them to show up*/
+                                       (*rec_event)(&event, diff);
+                               }
+                       }
+                       /* next has been elected to run */
+                       next->last_interrupted=0;
+
+                       /* An uninterruptible process just yielded. Record the current jiffy */
+                       if (prev->state & TASK_UNINTERRUPTIBLE) {
+                               prev->last_interrupted=jiffies;
+                       }
+                       /* An interruptible process just yielded, or it got preempted.
+                        * Mark it as interruptible */
+                       else if (prev->state & TASK_INTERRUPTIBLE) {
+                               prev->last_interrupted=INTERRUPTIBLE;
+                       }
+               }
+#endif
+
                sched_info_switch(prev, next);
                perf_event_task_sched_out(prev, next);
 
diff --git a/mm/memory.c b/mm/memory.c
index dbd2c19..7f3667d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -57,6 +57,7 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 // #include <linux/vs_memory.h>
+#include <linux/arrays.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -3168,6 +3169,16 @@ out:
        return ret;
 }
 
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
@@ -3213,6 +3224,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte)
                return VM_FAULT_OOM;
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event event;
+               struct event_spec espec;
+               struct pt_regs *regs;
+               unsigned int pc;
+               regs = task_pt_regs(current);
+               pc = regs->ip & (unsigned int) ~4095;
+
+               espec.reason = 0; /* alloc */
+               event.event_data=&espec;
+               event.task = current;
+               espec.pc=pc;
+               event.event_type = 6;
+               (*rec_event)(&event, 1);
+       }
+#endif
+
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
diff --git a/mm/slab.c b/mm/slab.c
index c3ceb66..ad2f1a9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -113,6 +113,7 @@
 #include       <linux/fault-inject.h>
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
+#include <linux/arrays.h>
 #include       <linux/debugobjects.h>
 #include       <linux/kmemcheck.h>
 #include       <linux/memory.h>
@@ -253,6 +254,16 @@ struct slab_rcu {
        void *addr;
 };
 
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 /*
  * struct array_cache
  *
@@ -3497,6 +3508,19 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
                                 flags);
        prefetchw(objp);
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event && objp) {
+               struct event event;
+               struct event_spec espec;
+
+               espec.reason = 0; /* alloc */
+               event.event_data=&espec;
+               event.task = current;
+               espec.pc=caller;
+               event.event_type=4;
+               (*rec_event)(&event, cachep->buffer_size);
+       }
+#endif
 
        if (likely(objp))
                kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
@@ -3609,13 +3633,28 @@ free_done:
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
 {
        struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
        kmemleak_free_recursive(objp, cachep->flags);
-       objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+       objp = cache_free_debugcheck(cachep, objp, caller);
+
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event && objp) {
+               struct event event;
+               struct event_spec espec;
+
+               espec.reason = 1; /* free */
+               event.event_data = &espec;
+               event.task = current;
+               espec.pc = caller;
+               event.event_type = 4;
+               (*rec_event)(&event, cachep->buffer_size);
+       }
+#endif
+
        vx_slab_free(cachep);
 
        kmemcheck_slab_free(cachep, objp, obj_size(cachep));
@@ -3817,10 +3856,17 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
+#ifdef CONFIG_CHOPSTIX
+void *__kmalloc(size_t size, gfp_t flags)
+{
+       return __do_kmalloc(size, flags, __builtin_return_address(0));
+}
+#else
 void *__kmalloc(size_t size, gfp_t flags)
 {
        return __do_kmalloc(size, flags, NULL);
 }
+#endif
 EXPORT_SYMBOL(__kmalloc);
 #endif
 
@@ -3840,7 +3886,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
        debug_check_no_locks_freed(objp, obj_size(cachep));
        if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(objp, obj_size(cachep));
-       __cache_free(cachep, objp);
+       __cache_free(cachep, objp,__builtin_return_address(0));
        local_irq_restore(flags);
 
        trace_kmem_cache_free(_RET_IP_, objp);
@@ -3870,7 +3916,7 @@ void kfree(const void *objp)
        c = virt_to_cache(objp);
        debug_check_no_locks_freed(objp, obj_size(c));
        debug_check_no_obj_freed(objp, obj_size(c));
-       __cache_free(c, (void *)objp);
+       __cache_free(c, (void *)objp,__builtin_return_address(0));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);