Index: linux-2.6.27.y/arch/Kconfig
===================================================================
--- linux-2.6.27.y.orig/arch/Kconfig
+++ linux-2.6.27.y/arch/Kconfig
@@ -13,9 +13,18 @@ config OPROFILE
+ bool "Chopstix (PlanetLab)"
+ depends on MODULES && OPROFILE
+ Chopstix allows you to monitor various events by summarizing them
+ in lossy data structures and transferring these data structures
+ into user space. If in doubt, say "N".
depends on KALLSYMS && MODULES
Index: linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c
===================================================================
--- linux-2.6.27.y.orig/arch/x86/kernel/asm-offsets_32.c
+++ linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/suspend.h>
+#include <linux/arrays.h>
#include <linux/kbuild.h>
#include <asm/ucontext.h>
#include <linux/lguest.h>
#include "../../../drivers/lguest/lg.h"
+#define STACKOFFSET(sym, str, mem) \
+ DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
/* workaround for a warning with -Wmissing-prototypes */
+ unsigned long dcookie;
+ unsigned int number;
OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
@@ -50,6 +62,16 @@ void foo(void)
OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
+ STACKOFFSET(TASK_thread, task_struct, thread);
+ STACKOFFSET(THREAD_esp, thread_struct, sp);
+ STACKOFFSET(EVENT_event_data, event, event_data);
+ STACKOFFSET(EVENT_task, event, task);
+ STACKOFFSET(EVENT_event_type, event, event_type);
+ STACKOFFSET(SPEC_number, event_spec, number);
+ DEFINE(EVENT_SIZE, sizeof(struct event));
+ DEFINE(SPEC_SIZE, sizeof(struct event_spec));
+ DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
OFFSET(TI_task, thread_info, task);
OFFSET(TI_exec_domain, thread_info, exec_domain);
OFFSET(TI_flags, thread_info, flags);
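The STACKOFFSET macro added above differs from the stock OFFSET helper: it emits a member's offset minus the size of its structure, i.e. a negative displacement measured from one-past-the-end of the struct. That is what lets the entry_32.S probe below reach the event and event_spec it carves out of the stack through a base register (%ebp) that points just past them. A minimal user-space illustration of the same arithmetic, using a stand-in structure rather than the real Chopstix definitions:

#include <stdio.h>
#include <stddef.h>

/* Stand-in only; the real struct event_spec is declared in the Chopstix headers. */
struct demo_spec {
	unsigned long pc;
	unsigned long dcookie;
	unsigned int number;
};

/* Same arithmetic as STACKOFFSET: member offset minus struct size. */
#define STACKOFFSET_VAL(str, mem) \
	((long)offsetof(struct str, mem) - (long)sizeof(struct str))

int main(void)
{
	struct demo_spec spec = { .number = 42 };
	char *end = (char *)&spec + sizeof(spec);	/* plays the role of %ebp in the probe */

	/* end plus the negative displacement lands back on spec.number */
	printf("%u\n", *(unsigned int *)(end + STACKOFFSET_VAL(demo_spec, number)));
	return 0;
}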
Index: linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c.rej
===================================================================
+++ linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c.rej
+ STACKOFFSET(TASK_thread, task_struct, thread);
+- STACKOFFSET(THREAD_esp, thread_struct, esp);
+ STACKOFFSET(EVENT_event_data, event, event_data);
+ STACKOFFSET(EVENT_task, event, task);
+ STACKOFFSET(EVENT_event_type, event, event_type);
+ STACKOFFSET(TASK_thread, task_struct, thread);
++ STACKOFFSET(THREAD_esp, thread_struct, sp);
+ STACKOFFSET(EVENT_event_data, event, event_data);
+ STACKOFFSET(EVENT_task, event, task);
+ STACKOFFSET(EVENT_event_type, event, event_type);
Index: linux-2.6.27.y/arch/x86/kernel/entry_32.S
===================================================================
--- linux-2.6.27.y.orig/arch/x86/kernel/entry_32.S
+++ linux-2.6.27.y/arch/x86/kernel/entry_32.S
@@ -426,6 +426,33 @@ ENTRY(system_call)
cmpl $(nr_syscalls), %eax
+ /* Move Chopstix syscall probe here */
+ /* Save and clobber: eax, ecx, ebp */
+ subl $SPEC_EVENT_SIZE, %esp
+ movl rec_event, %ecx
+ # struct event is first, just below %ebp
+ movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
+ leal -SPEC_EVENT_SIZE(%ebp), %eax
+ movl %eax, EVENT_event_data(%ebp)
+ movl $6, EVENT_event_type(%ebp)
+ movl rec_event, %edx
+ leal -EVENT_SIZE(%ebp), %eax
+ addl $SPEC_EVENT_SIZE, %esp
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp) # store the return value
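The assembly is easier to follow as C. When a collector module has installed rec_event, the probe reserves a struct event_spec plus a struct event on the stack, stores the syscall number from %eax, points event_data at the spec, tags the event with type 6 and hands it to the recording hook; the instructions that perform the actual call are not in the hunk shown, but kernel/sched.c below adds rec_event_asm() precisely "to support safe calling from asm", so that is presumably the callee. A sketch of the equivalent logic, with stand-in struct layouts (only fragments of the real ones appear in this patch):

struct task_struct;

/* Stand-ins: the field names follow the fragments elsewhere in this patch. */
struct event_spec {
	unsigned long pc;
	unsigned long dcookie;
	unsigned int count;
	unsigned int number;
};

struct event {
	void *event_data;
	unsigned int count;
	unsigned int event_type;
	struct task_struct *task;
};

extern void (*rec_event)(void *, unsigned int);
extern void rec_event_asm(struct event *event_signature_in, unsigned int count);

static void chopstix_syscall_probe(unsigned int syscall_nr)
{
	struct event_spec espec;
	struct event event;

	if (!rec_event)			/* probes stay disarmed until a module sets rec_event */
		return;

	espec.number = syscall_nr;	/* %eax on entry to system_call */
	event.event_data = &espec;	/* EVENT_event_data(%ebp) in the assembly */
	event.event_type = 6;		/* the syscall slot in the event array */
	rec_event_asm(&event, 1);	/* fills in ->task and ->count, then calls rec_event */
}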
Index: linux-2.6.27.y/arch/x86/mm/fault.c
===================================================================
--- linux-2.6.27.y.orig/arch/x86/mm/fault.c
+++ linux-2.6.27.y/arch/x86/mm/fault.c
@@ -79,6 +79,15 @@ static inline int notify_page_fault(stru
+extern void (*rec_event)(void *,unsigned int);
+ unsigned long dcookie;
+ unsigned char reason;
* Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
Index: linux-2.6.27.y/drivers/oprofile/cpu_buffer.c
===================================================================
--- linux-2.6.27.y.orig/drivers/oprofile/cpu_buffer.c
+++ linux-2.6.27.y/drivers/oprofile/cpu_buffer.c
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
+#include <linux/arrays.h>
#include "event_buffer.h"
#include "cpu_buffer.h"
@@ -147,6 +148,17 @@ static void increment_head(struct oprofi
+#ifdef CONFIG_CHOPSTIX
+ unsigned long dcookie;
+extern void (*rec_event)(void *,unsigned int);
add_sample(struct oprofile_cpu_buffer * cpu_buf,
unsigned long pc, unsigned long event)
@@ -155,6 +167,7 @@ add_sample(struct oprofile_cpu_buffer *
entry->event = event;
increment_head(cpu_buf);
@@ -250,8 +263,28 @@ void oprofile_add_sample(struct pt_regs
int is_kernel = !user_mode(regs);
unsigned long pc = profile_pc(regs);
+#ifdef CONFIG_CHOPSTIX
+ struct event_spec espec;
+ esig.task = current;
+ esig.event_data=&espec;
+ esig.event_type=event; /* index in the event array currently set up */
+ /* make sure the counters are loaded in the order we want them to show up*/
+ (*rec_event)(&esig, 1);
oprofile_add_ext_sample(pc, regs, event, is_kernel);
+ oprofile_add_ext_sample(pc, regs, event, is_kernel);
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
Index: linux-2.6.27.y/fs/bio.c
===================================================================
--- linux-2.6.27.y.orig/fs/bio.c
+++ linux-2.6.27.y/fs/bio.c
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
#include <scsi/sg.h> /* for struct sg_iovec */
+#include <linux/arrays.h>
static struct kmem_cache *bio_slab __read_mostly;
@@ -44,6 +45,7 @@ static struct biovec_slab bvec_slabs[BIO
* fs_bio_set is the bio_set containing bio and iovec memory pools used by
* IO code that does not need private memory pools.
@@ -1171,6 +1173,14 @@ void bio_check_pages_dirty(struct bio *b
+ unsigned long dcookie;
+ unsigned char reason;
+extern void (*rec_event)(void *,unsigned int);
* bio_endio - end I/O on a bio
@@ -1192,6 +1202,24 @@ void bio_endio(struct bio *bio, int erro
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ struct event event;
+ struct event_spec espec;
+ espec.reason = 1;/*response */
+ eip = bio->bi_end_io;
+ event.event_data=&espec;
+ event.event_type=3;
+ /* index in the event array currently set up */
+ /* make sure the counters are loaded in the order we want them to show up*/
+ (*rec_event)(&event, bytes_done);
bio->bi_end_io(bio, error);
Index: linux-2.6.27.y/fs/exec.c
===================================================================
--- linux-2.6.27.y.orig/fs/exec.c
+++ linux-2.6.27.y/fs/exec.c
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
+#include <linux/dcookies.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
@@ -698,6 +699,13 @@ struct file *open_exec(const char *name)
+ #ifdef CONFIG_CHOPSTIX
+ unsigned long cookie;
+ extern void (*rec_event)(void *, unsigned int);
+ if (rec_event && !nd.path.dentry->d_cookie)
+ get_dcookie(&nd.path, &cookie);
Index: linux-2.6.27.y/fs/exec.c.rej
===================================================================
+++ linux-2.6.27.y/fs/exec.c.rej
+ #include <linux/personality.h>
+ #include <linux/binfmts.h>
+ #include <linux/utsname.h>
+- /*#include <linux/pid_namespace.h>*/
+ #include <linux/module.h>
+ #include <linux/namei.h>
+ #include <linux/proc_fs.h>
+ #include <linux/personality.h>
+ #include <linux/binfmts.h>
+ #include <linux/utsname.h>
++ #include <linux/pid_namespace.h>
+ #include <linux/module.h>
+ #include <linux/namei.h>
+ #include <linux/proc_fs.h>
+ #ifdef CONFIG_CHOPSTIX
+ unsigned long cookie;
+ extern void (*rec_event)(void *, unsigned int);
+- if (rec_event && !nd.dentry->d_cookie)
+- get_dcookie(nd.dentry, nd.mnt, &cookie);
+ #ifdef CONFIG_CHOPSTIX
+ unsigned long cookie;
+ extern void (*rec_event)(void *, unsigned int);
++ if (rec_event && !nd.path.dentry->d_cookie)
++ get_dcookie(&nd.path, &cookie);
Index: linux-2.6.27.y/include/linux/arrays.h
===================================================================
+++ linux-2.6.27.y/include/linux/arrays.h
+#ifndef __ARRAYS_H__
+#define __ARRAYS_H__
+#include <linux/list.h>
+#define SAMPLING_METHOD_DEFAULT 0
+#define SAMPLING_METHOD_LOG 1
+/* Every probe has an array handler */
+/* XXX - Optimize this structure */
+extern void (*rec_event)(void *,unsigned int);
+struct array_handler {
+ struct list_head link;
+ unsigned int (*hash_func)(void *);
+ unsigned int (*sampling_func)(void *,int,void *);
+ unsigned short size;
+ unsigned int threshold;
+ unsigned char **expcount;
+ unsigned int sampling_method;
+ unsigned int **arrays;
+ unsigned int arraysize;
+ unsigned int num_samples[2];
+ void **epoch_samples; /* size-sized lists of samples */
+ unsigned int (*serialize)(void *, void *);
+ unsigned char code[5];
+ struct list_head link;
+ unsigned int count;
+ unsigned int event_type;
+ struct task_struct *task;
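rec_event is the single hook every probe in this patch tests before firing: it defaults to NULL and is exported from kernel/sched.c below, so the instrumentation costs almost nothing until a Chopstix collector module points it at a handler. A minimal sketch of such a module, assuming only what this header declares (the real collector also registers array_handler instances, which is not shown here):

#include <linux/module.h>
#include <linux/arrays.h>

extern void (*rec_event)(void *, unsigned int);

/* Runs in the context of whichever probe fired (syscall entry, bio completion,
 * context switch, ...), so it must be fast and must not sleep. */
static void demo_record(void *data, unsigned int count)
{
	struct event *ev = data;

	/* ev->event_type identifies the probe (this patch uses 2 scheduler,
	 * 3 bio, 4 free, 5 alloc/fault, 6 syscall; the OProfile hook passes
	 * its counter index); count is the sample weight. */
	(void)ev;
	(void)count;
}

static int __init demo_init(void)
{
	rec_event = demo_record;	/* arm the Chopstix probes */
	return 0;
}

static void __exit demo_exit(void)
{
	rec_event = NULL;		/* disarm them again */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");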
Index: linux-2.6.27.y/include/linux/sched.h.rej
===================================================================
+++ linux-2.6.27.y/include/linux/sched.h.rej
+ unsigned long sleep_avg;
+ unsigned long long timestamp, last_ran;
+ unsigned long long sched_time; /* sched_clock time spent running */
+ enum sleep_type sleep_type;
+ unsigned long sleep_avg;
+ unsigned long long timestamp, last_ran;
++ #ifdef CONFIG_CHOPSTIX
++ unsigned long last_interrupted, last_ran_j;
+ unsigned long long sched_time; /* sched_clock time spent running */
+ enum sleep_type sleep_type;
Index: linux-2.6.27.y/kernel/sched.c
===================================================================
--- linux-2.6.27.y.orig/kernel/sched.c
+++ linux-2.6.27.y/kernel/sched.c
* 1998-11-19 Implemented schedule_timeout() and related stuff
* by Andrea Arcangeli
* 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
* hybrid priority-list and round-robin design with
* an array-switch method of distributing timeslices
* and per-CPU runqueues. Cleanups and useful suggestions
* by Davide Libenzi, preemptible kernel bits by Robert Love.
#include <linux/ftrace.h>
#include <linux/vs_sched.h>
#include <linux/vs_cvirt.h>
+#include <linux/arrays.h>
#include <asm/irq_regs.h>
#include "sched_cpupri.h"
+#define INTERRUPTIBLE -1
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -4436,6 +4440,29 @@ pick_next_task(struct rq *rq, struct tas
+void (*rec_event)(void *,unsigned int) = NULL;
+EXPORT_SYMBOL(rec_event);
+#ifdef CONFIG_CHOPSTIX
+ unsigned long dcookie;
+ unsigned int count;
+ unsigned int reason;
+/* To support safe calling from asm */
+asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
+ struct pt_regs *regs;
+ struct event_spec *es = event_signature_in->event_data;
+ regs = task_pt_regs(current);
+ event_signature_in->task=current;
+ event_signature_in->count=1;
+ (*rec_event)(event_signature_in, count);
* schedule() is the main scheduler function.
@@ -5382,6 +5409,7 @@ long sched_setaffinity(pid_t pid, const
read_unlock(&tasklist_lock);
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!capable(CAP_SYS_NICE))
Index: linux-2.6.27.y/kernel/sched.c.rej
===================================================================
+++ linux-2.6.27.y/kernel/sched.c.rej
+ #include <linux/nmi.h>
+ #include <linux/init.h>
+ #include <asm/uaccess.h>
+ #include <linux/highmem.h>
+ #include <linux/smp_lock.h>
+ #include <asm/mmu_context.h>
+ #include <linux/nmi.h>
+ #include <linux/init.h>
+ #include <asm/uaccess.h>
++ #include <linux/arrays.h>
+ #include <linux/highmem.h>
+ #include <linux/smp_lock.h>
+ #include <asm/mmu_context.h>
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock(&rq->lock);
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock(&rq->lock);
+ * event cannot wake it up and insert it on the runqueue either.
+ p->state = TASK_RUNNING;
+ * Make sure we do not leak PI boosting priority to the child:
+ * event cannot wake it up and insert it on the runqueue either.
+ p->state = TASK_RUNNING;
++ #ifdef CONFIG_CHOPSTIX
++ /* The jiffy of last interruption */
++ if (p->state & TASK_UNINTERRUPTIBLE) {
++ p->last_interrupted=jiffies;
++ if (p->state & TASK_INTERRUPTIBLE) {
++ p->last_interrupted=INTERRUPTIBLE;
++ p->last_interrupted=RUNNING;
++ /* The jiffy of last execution */
++ p->last_ran_j=jiffies;
+ * Make sure we do not leak PI boosting priority to the child:
+ static inline int interactive_sleep(enum sleep_type sleep_type)
+ return (sleep_type == SLEEP_INTERACTIVE ||
+ static inline int interactive_sleep(enum sleep_type sleep_type)
+ return (sleep_type == SLEEP_INTERACTIVE ||
+ * schedule() is the main scheduler function.
+ asmlinkage void __sched schedule(void)
+ struct task_struct *prev, *next;
+ struct prio_array *array;
+ struct list_head *queue;
+ unsigned long long now;
+- unsigned long run_time;
+ int cpu, idx, new_prio;
+ long *switch_count;
+ * Test if we are atomic. Since do_exit() needs to call into
+ * schedule() is the main scheduler function.
++ #ifdef CONFIG_CHOPSTIX
++ extern void (*rec_event)(void *,unsigned int);
++ struct event_spec {
++ unsigned long dcookie;
++ unsigned int count;
++ unsigned int reason;
+ asmlinkage void __sched schedule(void)
+ struct task_struct *prev, *next;
+ struct prio_array *array;
+ struct list_head *queue;
+ unsigned long long now;
++ unsigned long run_time, diff;
+ int cpu, idx, new_prio;
+ long *switch_count;
++ int sampling_reason;
+ * Test if we are atomic. Since do_exit() needs to call into
+ switch_count = &prev->nivcsw;
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ switch_count = &prev->nvcsw;
+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+ unlikely(signal_pending(prev))))
+ prev->state = TASK_RUNNING;
+ switch_count = &prev->nivcsw;
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ switch_count = &prev->nvcsw;
+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+ unlikely(signal_pending(prev))))
+ prev->state = TASK_RUNNING;
+ vx_uninterruptible_inc(prev);
+ deactivate_task(prev, rq);
+ vx_uninterruptible_inc(prev);
+ deactivate_task(prev, rq);
++ #ifdef CONFIG_CHOPSTIX
++ /* An uninterruptible process just yielded. Record the current jiffie */
++ if (prev->state & TASK_UNINTERRUPTIBLE) {
++ prev->last_interrupted=jiffies;
++ /* An interruptible process just yielded, or it got preempted.
++ * Mark it as interruptible */
++ else if (prev->state & TASK_INTERRUPTIBLE) {
++ prev->last_interrupted=INTERRUPTIBLE;
+ prev->sleep_avg = 0;
+ prev->timestamp = prev->last_ran = now;
+ sched_info_switch(prev, next);
+ if (likely(prev != next)) {
+ next->timestamp = next->last_ran = now;
+ prev->sleep_avg = 0;
+ prev->timestamp = prev->last_ran = now;
++ #ifdef CONFIG_CHOPSTIX
++ /* Run only if the Chopstix module so decrees it */
++ prev->last_ran_j = jiffies;
++ if (next->last_interrupted!=INTERRUPTIBLE) {
++ if (next->last_interrupted!=RUNNING) {
++ diff = (jiffies-next->last_interrupted);
++ sampling_reason = 0;/* BLOCKING */
++ diff = jiffies-next->last_ran_j;
++ sampling_reason = 1;/* PREEMPTION */
++ if (diff >= HZ/10) {
++ struct event event;
++ struct event_spec espec;
++ struct pt_regs *regs;
++ regs = task_pt_regs(current);
++ espec.reason = sampling_reason;
++ event.event_data=&espec;
++ espec.pc=regs->eip;
++ event.event_type=2;
++ /* index in the event array currently set up */
++ /* make sure the counters are loaded in the order we want them to show up*/
++ (*rec_event)(&event, diff);
++ /* next has been elected to run */
++ next->last_interrupted=0;
+ sched_info_switch(prev, next);
+ if (likely(prev != next)) {
+ next->timestamp = next->last_ran = now;
+ jiffies_to_timespec(p->policy == SCHED_FIFO ?
+ 0 : task_timeslice(p), &t);
+ read_unlock(&tasklist_lock);
+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+ jiffies_to_timespec(p->policy == SCHED_FIFO ?
+ 0 : task_timeslice(p), &t);
+ read_unlock(&tasklist_lock);
+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++ #ifdef CONFIG_CHOPSTIX
++ void (*rec_event)(void *,unsigned int) = NULL;
++ /* To support safe calling from asm */
++ asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
++ struct pt_regs *regs;
++ struct event_spec *es = event_signature_in->event_data;
++ regs = task_pt_regs(current);
++ event_signature_in->task=current;
++ event_signature_in->count=1;
++ (*rec_event)(event_signature_in, count);
++ EXPORT_SYMBOL(rec_event);
++ EXPORT_SYMBOL(in_sched_functions);
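The core of the rejected schedule() hunk is a delay sampler that runs when the next task is picked: if the task had been blocked, the delay is counted from the jiffy recorded when it went to sleep (reason 0, blocking); if it had merely been sitting on the runqueue, the delay is counted from the last time it ran (reason 1, preemption); and only delays of at least HZ/10 are reported, as a type-2 event weighted by the delay. Restated as plain C, with a stand-in task type and with the RUNNING sentinel inferred from the fact that last_interrupted is reset to 0 once a task runs:

#define INTERRUPTIBLE	-1	/* from the kernel/sched.c hunk above */
#define RUNNING		0	/* inferred: last_interrupted is cleared to 0 when the task runs */

/* Stand-in for the two task_struct fields the probe relies on. */
struct chopstix_task_view {
	long last_interrupted;		/* jiffy it blocked, or INTERRUPTIBLE/RUNNING */
	unsigned long last_ran_j;	/* jiffy it last started running */
};

/* Returns the delay (in jiffies) worth reporting for the task about to run, or 0. */
static unsigned long chopstix_sched_delay(struct chopstix_task_view *next,
					   unsigned long now, unsigned long hz,
					   int *sampling_reason)
{
	unsigned long diff = 0;

	if (next->last_interrupted != INTERRUPTIBLE) {
		if (next->last_interrupted != RUNNING) {
			diff = now - next->last_interrupted;	/* time spent blocked */
			*sampling_reason = 0;			/* BLOCKING */
		} else {
			diff = now - next->last_ran_j;		/* time spent waiting to run */
			*sampling_reason = 1;			/* PREEMPTION */
		}
	}
	next->last_interrupted = 0;	/* next has been elected to run */

	return diff >= hz / 10 ? diff : 0;	/* only report delays of 100 ms or more */
}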
Index: linux-2.6.27.y/mm/memory.c
===================================================================
--- linux-2.6.27.y.orig/mm/memory.c
+++ linux-2.6.27.y/mm/memory.c
#include <linux/swapops.h>
#include <linux/elf.h>
+#include <linux/arrays.h>
#include "internal.h"
@@ -2753,6 +2754,15 @@ out:
+extern void (*rec_event)(void *,unsigned int);
+ unsigned long dcookie;
+ unsigned char reason;
* By the time we get here, we already hold the mm semaphore
@@ -2782,6 +2792,24 @@ int handle_mm_fault(struct mm_struct *mm
+#ifdef CONFIG_CHOPSTIX
+ struct event event;
+ struct event_spec espec;
+ struct pt_regs *regs;
+ regs = task_pt_regs(current);
+ pc = regs->ip & (unsigned int) ~4095;
+ espec.reason = 0; /* alloc */
+ event.event_data=&espec;
+ event.task = current;
+ event.event_type=5;
+ (*rec_event)(&event, 1);
return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
Index: linux-2.6.27.y/mm/slab.c
===================================================================
--- linux-2.6.27.y.orig/mm/slab.c
+++ linux-2.6.27.y/mm/slab.c
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
+#include <linux/arrays.h>
#include <linux/debugobjects.h>
#include <asm/cacheflush.h>
@@ -248,6 +249,14 @@ struct slab_rcu {
+extern void (*rec_event)(void *,unsigned int);
+ unsigned long dcookie;
+ unsigned char reason;
@@ -3469,6 +3478,19 @@ __cache_alloc(struct kmem_cache *cachep,
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+#ifdef CONFIG_CHOPSTIX
+ if (rec_event && objp) {
+ struct event event;
+ struct event_spec espec;
+ espec.reason = 0; /* alloc */
+ event.event_data=&espec;
+ event.task = current;
+ event.event_type=5;
+ (*rec_event)(&event, cachep->buffer_size);
if (unlikely((flags & __GFP_ZERO) && objp))
memset(objp, 0, obj_size(cachep));
@@ -3578,12 +3600,26 @@ free_done:
* Release an obj back to its cache. If the obj has a constructed state, it must
* be in this state _before_ it is released. Called with disabled ints.
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
struct array_cache *ac = cpu_cache_get(cachep);
- objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+ objp = cache_free_debugcheck(cachep, objp, caller);
+ #ifdef CONFIG_CHOPSTIX
+ if (rec_event && objp) {
+ struct event event;
+ struct event_spec espec;
+ espec.reason = 1; /* free */
+ event.event_data=&espec;
+ event.task = current;
+ event.event_type=4;
+ (*rec_event)(&event, cachep->buffer_size);
vx_slab_free(cachep);
@@ -3714,6 +3750,7 @@ static __always_inline void *__do_kmallo
struct kmem_cache *cachep;
/* If you want to save a few bytes .text space: replace
@@ -3741,10 +3778,17 @@ void *__kmalloc_track_caller(size_t size
EXPORT_SYMBOL(__kmalloc_track_caller);
+#ifdef CONFIG_CHOPSTIX
+void *__kmalloc(size_t size, gfp_t flags)
+ return __do_kmalloc(size, flags, __builtin_return_address(0));
void *__kmalloc(size_t size, gfp_t flags)
return __do_kmalloc(size, flags, NULL);
EXPORT_SYMBOL(__kmalloc);
@@ -3764,7 +3808,7 @@ void kmem_cache_free(struct kmem_cache *
debug_check_no_locks_freed(objp, obj_size(cachep));
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, obj_size(cachep));
- __cache_free(cachep, objp);
+ __cache_free(cachep, objp,__builtin_return_address(0));
local_irq_restore(flags);
EXPORT_SYMBOL(kmem_cache_free);
@@ -3790,7 +3834,7 @@ void kfree(const void *objp)
c = virt_to_cache(objp);
debug_check_no_locks_freed(objp, obj_size(c));
debug_check_no_obj_freed(objp, obj_size(c));
- __cache_free(c, (void *)objp);
+ __cache_free(c, (void *)objp,__builtin_return_address(0));
local_irq_restore(flags);
EXPORT_SYMBOL(kfree);
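For reference, the event_type values used by the probes added in this patch are collected below. The names are invented here for readability (the patch itself only uses the bare numbers), and the OProfile hook in drivers/oprofile/cpu_buffer.c additionally passes its hardware counter index as the event type:

enum chopstix_event_type {
	CHOPSTIX_EVENT_SCHED	= 2,	/* kernel/sched.c: scheduling delay (blocking or preemption), weighted by jiffies */
	CHOPSTIX_EVENT_BIO	= 3,	/* fs/bio.c: bio_endio() completion, weighted by bytes_done */
	CHOPSTIX_EVENT_FREE	= 4,	/* mm/slab.c: __cache_free(), weighted by object size */
	CHOPSTIX_EVENT_ALLOC	= 5,	/* mm/slab.c: __cache_alloc(); mm/memory.c: handle_mm_fault() */
	CHOPSTIX_EVENT_SYSCALL	= 6,	/* arch/x86/kernel/entry_32.S: system_call entry */
};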