fix compile errors
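
This patch instruments several kernel paths (syscall entry, page faults,
block I/O submission and completion, oprofile samples, mutex contention,
scheduler delays, and slab allocation) and funnels them all through a
single module-installable hook, rec_event. Each probe fills in a struct
event whose event_type identifies the probe class: 2 = scheduler delay,
3 = block I/O, 4 = slab alloc/free, 5 = mutex (currently compiled out),
6 = page allocation on fault, 7 = syscall.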
diff --git a/arch/Kconfig b/arch/Kconfig
index 4e312ff..ef6a721 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -43,6 +43,14 @@ config OPROFILE_EVENT_MULTIPLEX
 
          If unsure, say N.
 
+config CHOPSTIX
+       bool "Chopstix (PlanetLab)"
+       depends on MODULES && OPROFILE
+       help
+         Chopstix allows you to monitor various events by summarizing them
+         in lossy data structures and transferring these data structures
+         into user space. If in doubt, say "N".
+
 config HAVE_OPROFILE
        bool
 
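
The option only gates the probes; the consumer is a module that installs
itself into the rec_event hook (defined and exported in kernel/sched.c
below). A minimal sketch of such a consumer follows. It is hypothetical,
not part of this patch, and skips the quiescing a real module would need
when tearing the hook down:

        /* Hypothetical consumer sketch, not part of this patch. */
        #include <linux/module.h>
        #include <linux/arrays.h>       /* struct event, rec_event */

        static void demo_rec_event(void *data, unsigned int count)
        {
                struct event *ev = data;

                /* Summarize (ev->event_type, ev->task, count) into a
                 * lossy structure, e.g. a struct array_handler. */
        }

        static int __init chopstix_demo_init(void)
        {
                rec_event = demo_rec_event;     /* arm every probe */
                return 0;
        }

        static void __exit chopstix_demo_exit(void)
        {
                rec_event = NULL;               /* disarm */
        }

        module_init(chopstix_demo_init);
        module_exit(chopstix_demo_exit);
        MODULE_LICENSE("GPL");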
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index dfdbf64..29c79b8 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
+#include <linux/arrays.h>
 #include <linux/kbuild.h>
 #include <asm/ucontext.h>
 #include <asm/sigframe.h>
@@ -25,6 +26,18 @@
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
+#ifdef CONFIG_CHOPSTIX
+#define STACKOFFSET(sym, str, mem) \
+       DEFINE(sym, offsetof(struct str, mem) - sizeof(struct str))
+
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned int number;
+};
+#endif
+
 /* workaround for a warning with -Wmissing-prototypes */
 void foo(void);
 
@@ -51,6 +64,18 @@ void foo(void)
        OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
        BLANK();
 
+#ifdef CONFIG_CHOPSTIX
+       STACKOFFSET(TASK_thread, task_struct, thread);
+       STACKOFFSET(THREAD_esp, thread_struct, sp);
+       STACKOFFSET(EVENT_event_data, event, event_data);
+       STACKOFFSET(EVENT_task, event, task);
+       STACKOFFSET(EVENT_event_type, event, event_type);
+       STACKOFFSET(SPEC_number, event_spec, number);
+       DEFINE(EVENT_SIZE, sizeof(struct event));
+       DEFINE(SPEC_SIZE, sizeof(struct event_spec));
+       DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec) + sizeof(struct event));
+#endif
+
        OFFSET(TI_task, thread_info, task);
        OFFSET(TI_exec_domain, thread_info, exec_domain);
        OFFSET(TI_flags, thread_info, flags);
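
STACKOFFSET differs from the usual OFFSET/DEFINE helpers in that it
subtracts sizeof(struct ...): the resulting constants are offsets relative
to the END of the structure, which is how entry_32.S below addresses the
structures it builds immediately beneath %ebp. A small userspace
demonstration of the arithmetic (illustrative only, not kernel code):

        /* Demo: end-relative offsets as produced by STACKOFFSET. */
        #include <stdio.h>
        #include <stddef.h>

        struct event {                  /* simplified from linux/arrays.h */
                void *event_data;
                unsigned int count;
                unsigned int event_type;
        };

        int main(void)
        {
                char frame[64];
                char *ebp = frame + sizeof(frame);      /* mock frame pointer */
                struct event *ev =
                        (struct event *)(ebp - sizeof(struct event));
                long EVENT_event_type = offsetof(struct event, event_type)
                                        - sizeof(struct event);

                /* ebp + EVENT_event_type lands exactly on ev->event_type */
                printf("%d\n", (void *)(ebp + EVENT_event_type) ==
                               (void *)&ev->event_type);
                return 0;
        }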
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c097e7d..8eff053 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -526,6 +526,34 @@ ENTRY(system_call)
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
 syscall_call:
+#ifdef CONFIG_CHOPSTIX
+       /* Chopstix syscall probe */
+       /* Save and clobber: eax, ecx, ebp */
+       pushl   %eax
+       pushl   %ecx
+       pushl   %ebp
+       movl    %esp, %ebp
+       subl    $SPEC_EVENT_SIZE, %esp
+       movl    rec_event, %ecx
+       testl   %ecx, %ecx
+       jz      carry_on
+       # struct event is first, just below %ebp
+       movl    %eax, (SPEC_number-EVENT_SIZE)(%ebp)
+       leal    -SPEC_EVENT_SIZE(%ebp), %eax
+       movl    %eax, EVENT_event_data(%ebp)
+       movl    $7, EVENT_event_type(%ebp)
+       movl    rec_event, %edx
+       movl    $1, 4(%esp)
+       leal    -EVENT_SIZE(%ebp), %eax
+       movl    %eax, (%esp)
+       call    rec_event_asm
+carry_on:
+       addl    $SPEC_EVENT_SIZE, %esp
+       popl    %ebp
+       popl    %ecx
+       popl    %eax
+       /* End Chopstix */
+#endif
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
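
For readability, an approximate C rendering of the assembly probe above
(a sketch only: the real code builds both structures on the stack using
the asm-offsets constants and preserves the clobbered registers):

        /* Sketch: C equivalent of the syscall-entry probe. */
        #include <linux/linkage.h>
        #include <linux/arrays.h>

        struct event_spec {
                unsigned long pc;
                unsigned long dcookie;
                unsigned count;
                unsigned int number;
        };

        /* defined in kernel/sched.c below */
        asmlinkage void rec_event_asm(struct event *event_signature_in,
                                      unsigned int count);

        static void chopstix_syscall_probe(unsigned long syscall_nr)
        {
                if (rec_event) {
                        struct event_spec espec;
                        struct event event;

                        espec.number = syscall_nr;  /* the SPEC_number store */
                        event.event_data = &espec;
                        event.event_type = 7;       /* syscall event class */
                        rec_event_asm(&event, 1);   /* fills task and pc */
                }
        }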
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4302583..85bf9f2 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -62,6 +62,16 @@ static inline int notify_page_fault(struct pt_regs *regs)
        return ret;
 }
 
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *, unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 /*
  * Prefetch quirks:
  *
diff --git a/block/blk-core.c b/block/blk-core.c
index 71da511..1cefcaa 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -27,12 +27,23 @@
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/fault-inject.h>
+#include <linux/arrays.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
 
 #include "blk.h"
 
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *, unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -1478,6 +1489,24 @@ static inline void __generic_make_request(struct bio *bio)
 
                trace_block_bio_queue(q, bio);
 
+#ifdef CONFIG_CHOPSTIX
+               if (rec_event) {
+                       struct event event;
+                       struct event_spec espec;
+                       unsigned long eip;
+
+                       espec.reason = 0;       /* request */
+
+                       eip = (unsigned long)bio->bi_end_io;
+                       event.event_data = &espec;
+                       espec.pc = eip;
+                       event.event_type = 3;
+                       /* index in the event array currently set up;
+                        * load the counters in the order they should show up */
+                       (*rec_event)(&event, bio->bi_size);
+               }
+#endif
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
 
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index a7aae24..9817d91 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/oprofile.h>
 #include <linux/errno.h>
+#include <linux/arrays.h>
 
 #include "event_buffer.h"
 #include "cpu_buffer.h"
@@ -326,6 +327,17 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
        cpu_buf->tracing = 0;
 }
 
+#ifdef CONFIG_CHOPSTIX
+
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+};
+
+extern void (*rec_event)(void *, unsigned int);
+#endif
+
 static inline void
 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                          unsigned long event, int is_kernel)
@@ -360,7 +372,25 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event esig;
+               struct event_spec espec;
+               esig.task = current;
+               espec.pc = pc;
+               espec.count = 1;
+               esig.event_data = &espec;
+               esig.event_type = event; /* index in the event array
+                                         * currently set up */
+               (*rec_event)(&esig, 1);
+       }
+       else {
+               __oprofile_add_ext_sample(pc, regs, event, is_kernel);
+       }
+#else
        __oprofile_add_ext_sample(pc, regs, event, is_kernel);
+#endif
+
 }
 
 /*
diff --git a/fs/bio.c b/fs/bio.c
index e0c9e71..796767d 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -26,6 +26,7 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <scsi/sg.h>           /* for struct sg_iovec */
+#include <linux/arrays.h>
 
 #include <trace/events/block.h>
 
@@ -48,6 +49,7 @@ struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 };
 #undef BV
 
+
 /*
  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  * IO code that does not need private memory pools.
@@ -1398,6 +1400,17 @@ void bio_check_pages_dirty(struct bio *bio)
        }
 }
 
+#ifdef CONFIG_CHOPSTIX
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
+extern void (*rec_event)(void *, unsigned int);
+#endif
+
 /**
  * bio_endio - end I/O on a bio
  * @bio:       bio
@@ -1419,6 +1432,24 @@ void bio_endio(struct bio *bio, int error)
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event event;
+               struct event_spec espec;
+               unsigned long eip;
+
+               espec.reason = 1;       /* response */
+
+               eip = (unsigned long)bio->bi_end_io;
+               event.event_data = &espec;
+               espec.pc = eip;
+               event.event_type = 3;
+               /* index in the event array currently set up;
+                * load the counters in the order they should show up */
+               (*rec_event)(&event, bio->bi_size);
+       }
+#endif
+
        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
 }
diff --git a/fs/exec.c b/fs/exec.c
index 0a049b8..c2296b5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -27,6 +27,7 @@
 #include <linux/fdtable.h>
 #include <linux/mm.h>
 #include <linux/stat.h>
+#include <linux/dcookies.h>
 #include <linux/fcntl.h>
 #include <linux/smp_lock.h>
 #include <linux/swap.h>
@@ -673,6 +674,16 @@ struct file *open_exec(const char *name)
        if (err)
                goto exit;
 
+#ifdef CONFIG_CHOPSTIX
+       {
+               unsigned long cookie;
+               extern void (*rec_event)(void *, unsigned int);
+
+               if (rec_event && !(file->f_path.dentry->d_flags & DCACHE_COOKIE))
+                       get_dcookie(&file->f_path, &cookie);
+       }
+#endif
+
 out:
        return file;
 
diff --git a/include/linux/arrays.h b/include/linux/arrays.h
new file mode 100644
index 0000000..7641a3c
--- /dev/null
+++ b/include/linux/arrays.h
@@ -0,0 +1,39 @@
+#ifndef __ARRAYS_H__
+#define __ARRAYS_H__
+#include <linux/list.h>
+
+#define SAMPLING_METHOD_DEFAULT 0
+#define SAMPLING_METHOD_LOG 1
+
+#define DEFAULT_ARRAY_SIZE 2048
+
+/* Every probe has an array handler */
+
+/* XXX - Optimize this structure */
+
+extern void (*rec_event)(void *, unsigned int);
+struct array_handler {
+       struct list_head link;
+       unsigned int (*hash_func)(void *);
+       unsigned int (*sampling_func)(void *, int, void *);
+       unsigned short size;
+       unsigned int threshold;
+       unsigned char **expcount;
+       unsigned int sampling_method;
+       unsigned int **arrays;
+       unsigned int arraysize;
+       unsigned int num_samples[2];
+       void **epoch_samples; /* size-sized lists of samples */
+       unsigned int (*serialize)(void *, void *);
+       unsigned char code[5];
+       unsigned int last_threshold;
+};
+
+struct event {
+       struct list_head link;
+       void *event_data;
+       unsigned int count;
+       unsigned int event_type;
+       struct task_struct *task;
+};
+#endif
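
struct array_handler is the "lossy data structure" the Kconfig help text
refers to. Nothing in this patch populates one yet, so the update step
below is only an illustration of how the fields could fit together
(hypothetical, not the module's actual algorithm):

        /* Illustrative sketch of a lossy-counting update. */
        static void array_handler_record(struct array_handler *h,
                                         void *key, unsigned int weight,
                                         unsigned int epoch)
        {
                unsigned int slot = h->hash_func(key) % h->arraysize;
                unsigned int *counters = h->arrays[epoch & 1]; /* double buffer */

                counters[slot] += weight;
                h->num_samples[epoch & 1]++;

                /* Keep heavy hitters: hand buckets that crossed the
                 * threshold to the sampling callback. */
                if (counters[slot] >= h->threshold && h->sampling_func)
                        h->sampling_func(key, counters[slot], h->epoch_samples);
        }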
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4..6c21914 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,7 +50,7 @@ struct mutex {
        atomic_t                count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) || defined(CONFIG_CHOPSTIX)
        struct thread_info      *owner;
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c9d3cae..dd62888 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1349,6 +1349,11 @@ struct task_struct {
        cputime_t utime, stime, utimescaled, stimescaled;
        cputime_t gtime;
        cputime_t prev_utime, prev_stime;
+
+#ifdef CONFIG_CHOPSTIX
+       unsigned long last_interrupted, last_ran_j;
+#endif
+
        unsigned long nvcsw, nivcsw; /* context switch counts */
        struct timespec start_time;             /* monotonic time */
        struct timespec real_start_time;        /* boot based time */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 947b3ad..ae1dc67 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -23,6 +23,16 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
+#include <linux/arrays.h>
+
+#ifdef CONFIG_CHOPSTIX
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -49,6 +59,9 @@ void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
        atomic_set(&lock->count, 1);
+#ifdef CONFIG_CHOPSTIX
+       lock->owner = NULL;
+#endif
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
@@ -247,6 +260,25 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                }
                __set_task_state(task, state);
 
+#if 0 && CONFIG_CHOPSTIX
+               if (rec_event) {
+                       if (lock->owner) {
+                               struct event event;
+                               struct event_spec espec;
+                               struct task_struct *p = lock->owner->task;
+
+                               espec.reason = 0; /* lock */
+                               event.event_data = &espec;
+                               event.task = p;
+                               espec.pc = (unsigned long)lock;
+                               event.event_type = 5;
+                               (*rec_event)(&event, 1);
+                       } else {
+                               BUG();
+                       }
+               }
+#endif
+
                /* didnt get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                preempt_enable_no_resched();
@@ -261,6 +293,10 @@ done:
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);
 
+#ifdef CONFIG_CHOPSTIX
+       lock->owner = task_thread_info(task);
+#endif
+
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
@@ -331,6 +367,25 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 
                debug_mutex_wake_waiter(lock, waiter);
 
+#if 0 && CONFIG_CHOPSTIX
+               if (rec_event) {
+                       if (lock->owner) {
+                               struct event event;
+                               struct event_spec espec;
+                               struct task_struct *p = lock->owner->task;
+
+                               espec.reason = 1; /* unlock */
+                               event.event_data = &espec;
+                               event.task = p;
+                               espec.pc = (unsigned long)lock;
+                               event.event_type = 5;
+                               (*rec_event)(&event, 1);
+                       } else {
+                               BUG();
+                       }
+               }
+#endif
+
                wake_up_process(waiter->task);
        }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 90b63b8..43b728e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -73,12 +73,16 @@
 #include <linux/ftrace.h>
 #include <linux/vs_sched.h>
 #include <linux/vs_cvirt.h>
+#include <linux/arrays.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
 
+#define INTERRUPTIBLE   -1
+#define RUNNING         0
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
@@ -2742,6 +2746,10 @@ static void __sched_fork(struct task_struct *p)
        INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
 
+#ifdef CONFIG_CHOPSTIX
+       p->last_ran_j = jiffies;
+       p->last_interrupted = INTERRUPTIBLE;
+#endif
        /*
         * We mark the process as running here, but have not actually
         * inserted it onto the runqueue yet. This guarantees that
@@ -5659,6 +5667,30 @@ pick_next_task(struct rq *rq)
        }
 }
 
+#ifdef CONFIG_CHOPSTIX
+void (*rec_event)(void *, unsigned int) = NULL;
+EXPORT_SYMBOL(rec_event);
+EXPORT_SYMBOL(in_sched_functions);
+
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned int count;
+       unsigned int reason;
+};
+
+/* To support safe calling from asm */
+asmlinkage void rec_event_asm(struct event *event_signature_in, unsigned int count) {
+       struct pt_regs *regs;
+       struct event_spec *es = event_signature_in->event_data;
+       regs = task_pt_regs(current);
+       event_signature_in->task = current;
+       es->pc = regs->ip;
+       event_signature_in->count = 1;
+       (*rec_event)(event_signature_in, count);
+}
+#endif
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -5706,6 +5738,54 @@ need_resched_nonpreemptible:
        next = pick_next_task(rq);
 
        if (likely(prev != next)) {
+
+#ifdef CONFIG_CHOPSTIX
+               /* Run only if the Chopstix module so decrees it */
+               if (rec_event) {
+                       unsigned long diff;
+                       int sampling_reason;
+                       prev->last_ran_j = jiffies;
+                       if (next->last_interrupted != INTERRUPTIBLE) {
+                               if (next->last_interrupted != RUNNING) {
+                                       diff = jiffies - next->last_interrupted;
+                                       sampling_reason = 0;    /* BLOCKING */
+                               }
+                               else {
+                                       diff = jiffies - next->last_ran_j;
+                                       sampling_reason = 1;    /* PREEMPTION */
+                               }
+
+                               if (diff >= HZ/10) {
+                                       struct event event;
+                                       struct event_spec espec;
+                                       struct pt_regs *regs;
+                                       regs = task_pt_regs(current);
+
+                                       espec.reason = sampling_reason;
+                                       event.event_data = &espec;
+                                       event.task = next;
+                                       espec.pc = regs->ip;
+                                       event.event_type = 2;
+                                       /* index in the event array currently set up;
+                                        * load the counters in the order they should show up */
+                                       (*rec_event)(&event, diff);
+                               }
+                       }
+                       /* next has been elected to run */
+                       next->last_interrupted = RUNNING;
+
+                       /* An uninterruptible process just yielded. Record the current jiffy */
+                       if (prev->state & TASK_UNINTERRUPTIBLE) {
+                               prev->last_interrupted = jiffies;
+                       }
+                       /* An interruptible process just yielded, or it got preempted.
+                        * Mark it as interruptible */
+                       else if (prev->state & TASK_INTERRUPTIBLE) {
+                               prev->last_interrupted = INTERRUPTIBLE;
+                       }
+               }
+#endif
+
                sched_info_switch(prev, next);
                perf_event_task_sched_out(prev, next, cpu);
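
The hook classifies the delay of the task about to run: if next was
blocked (last_interrupted holds the jiffy at which it went to sleep), the
delay is jiffies - last_interrupted and is reported as BLOCKING (0); if
next was runnable all along (RUNNING), the delay is jiffies - last_ran_j
and is reported as PREEMPTION (1). Only delays of at least HZ/10, i.e.
100 ms, are recorded. For example, with HZ=1000, a task that entered
TASK_UNINTERRUPTIBLE at jiffy 5000 and is picked again at jiffy 5150
reports diff = 150 jiffies (150 ms >= 100 ms), so an event_type 2 record
with reason BLOCKING is emitted.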
diff --git a/mm/memory.c b/mm/memory.c
index e828063..6e88fed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -57,6 +57,7 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 // #include <linux/vs_memory.h>
+#include <linux/arrays.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -3070,6 +3071,16 @@ out:
        return ret;
 }
 
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *, unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
@@ -3115,6 +3126,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte)
                return VM_FAULT_OOM;
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event event;
+               struct event_spec espec;
+               struct pt_regs *regs;
+               unsigned long pc;
+               regs = task_pt_regs(current);
+               pc = regs->ip & ~4095UL;        /* page-align the faulting PC */
+
+               espec.reason = 0;       /* alloc */
+               event.event_data = &espec;
+               event.task = current;
+               espec.pc = pc;
+               event.event_type = 6;
+               (*rec_event)(&event, 1);
+       }
+#endif
+
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
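
Note: masking with ~4095 rounds the faulting program counter down to its
4 KiB page base (4095 = 0xfff), so fault events aggregate per code page
rather than per instruction.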
diff --git a/mm/slab.c b/mm/slab.c
index ad2828e..5acdf6c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -113,6 +113,7 @@
 #include       <linux/fault-inject.h>
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
+#include       <linux/arrays.h>
 #include       <linux/debugobjects.h>
 #include       <linux/kmemcheck.h>
 
@@ -252,6 +253,16 @@ struct slab_rcu {
        void *addr;
 };
 
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *, unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 /*
  * struct array_cache
 *
@@ -3400,6 +3411,19 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
                                 flags);
        prefetchw(objp);
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event && objp) {
+               struct event event;
+               struct event_spec espec;
+
+               espec.reason = 0;       /* alloc */
+               event.event_data = &espec;
+               event.task = current;
+               espec.pc = (unsigned long)caller;
+               event.event_type = 4;
+               (*rec_event)(&event, cachep->buffer_size);
+       }
+#endif
 
        if (likely(objp))
                kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
@@ -3512,13 +3536,28 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released.  Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
 {
        struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
        kmemleak_free_recursive(objp, cachep->flags);
-       objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+       objp = cache_free_debugcheck(cachep, objp, caller);
+
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event && objp) {
+               struct event event;
+               struct event_spec espec;
+
+               espec.reason = 1;       /* free */
+               event.event_data = &espec;
+               event.task = current;
+               espec.pc = (unsigned long)caller;
+               event.event_type = 4;
+               (*rec_event)(&event, cachep->buffer_size);
+       }
+#endif
+
        vx_slab_free(cachep);
 
        kmemcheck_slab_free(cachep, objp, obj_size(cachep));
@@ -3720,10 +3759,17 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
+#ifdef CONFIG_CHOPSTIX
+void *__kmalloc(size_t size, gfp_t flags)
+{
+       return __do_kmalloc(size, flags, __builtin_return_address(0));
+}
+#else
 void *__kmalloc(size_t size, gfp_t flags)
 {
        return __do_kmalloc(size, flags, NULL);
 }
+#endif
 EXPORT_SYMBOL(__kmalloc);
 #endif
 
@@ -3743,7 +3789,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
        debug_check_no_locks_freed(objp, obj_size(cachep));
        if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(objp, obj_size(cachep));
-       __cache_free(cachep, objp);
+       __cache_free(cachep, objp, __builtin_return_address(0));
        local_irq_restore(flags);
 
        trace_kmem_cache_free(_RET_IP_, objp);
@@ -3773,7 +3819,7 @@ void kfree(const void *objp)
        c = virt_to_cache(objp);
        debug_check_no_locks_freed(objp, obj_size(c));
        debug_check_no_obj_freed(objp, obj_size(c));
-       __cache_free(c, (void *)objp);
+       __cache_free(c, (void *)objp, __builtin_return_address(0));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);