1 diff -Nurb linux-2.6.27-590/arch/Kconfig linux-2.6.27-591/arch/Kconfig
2 --- linux-2.6.27-590/arch/Kconfig 2010-01-29 16:29:46.000000000 -0500
3 +++ linux-2.6.27-591/arch/Kconfig 2010-01-29 16:30:22.000000000 -0500
9 + bool "Chopstix (PlanetLab)"
10 + depends on MODULES && OPROFILE
12 + Chopstix allows you to monitor various events by summarizing them
13 + in lossy data structures and transferring these data structures
14 + into user space. If in doubt, say "N".
22 depends on KALLSYMS && MODULES
23 diff -Nurb linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c
24 --- linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c 2008-10-09 18:13:53.000000000 -0400
25 +++ linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c 2010-01-29 16:45:48.000000000 -0500
27 #include <linux/signal.h>
28 #include <linux/personality.h>
29 #include <linux/suspend.h>
30 +#include <linux/arrays.h>
31 #include <linux/kbuild.h>
32 #include <asm/ucontext.h>
35 #include <linux/lguest.h>
36 #include "../../../drivers/lguest/lg.h"
39 +#define STACKOFFSET(sym, str, mem) \
40 + DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
42 /* workaround for a warning with -Wmissing-prototypes */
47 + unsigned long dcookie;
49 + unsigned int number;
54 OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
56 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
59 + STACKOFFSET(TASK_thread, task_struct, thread);
60 + STACKOFFSET(THREAD_esp, thread_struct, sp);
61 + STACKOFFSET(EVENT_event_data, event, event_data);
62 + STACKOFFSET(EVENT_task, event, task);
63 + STACKOFFSET(EVENT_event_type, event, event_type);
64 + STACKOFFSET(SPEC_number, event_spec, number);
65 + DEFINE(EVENT_SIZE, sizeof(struct event));
66 + DEFINE(SPEC_SIZE, sizeof(struct event_spec));
67 + DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
69 OFFSET(TI_task, thread_info, task);
70 OFFSET(TI_exec_domain, thread_info, exec_domain);
71 OFFSET(TI_flags, thread_info, flags);
72 diff -Nurb linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c.rej linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c.rej
73 --- linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c.rej 1969-12-31 19:00:00.000000000 -0500
74 +++ linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c.rej 2010-01-31 22:21:08.000000000 -0500
80 + STACKOFFSET(TASK_thread, task_struct, thread);
81 +- STACKOFFSET(THREAD_esp, thread_struct, esp);
82 + STACKOFFSET(EVENT_event_data, event, event_data);
83 + STACKOFFSET(EVENT_task, event, task);
84 + STACKOFFSET(EVENT_event_type, event, event_type);
88 + STACKOFFSET(TASK_thread, task_struct, thread);
89 ++ STACKOFFSET(THREAD_esp, thread_struct, sp);
90 + STACKOFFSET(EVENT_event_data, event, event_data);
91 + STACKOFFSET(EVENT_task, event, task);
92 + STACKOFFSET(EVENT_event_type, event, event_type);
93 diff -Nurb linux-2.6.27-590/arch/x86/kernel/entry_32.S linux-2.6.27-591/arch/x86/kernel/entry_32.S
94 --- linux-2.6.27-590/arch/x86/kernel/entry_32.S 2008-10-09 18:13:53.000000000 -0400
95 +++ linux-2.6.27-591/arch/x86/kernel/entry_32.S 2010-01-29 16:30:22.000000000 -0500
97 cmpl $(nr_syscalls), %eax
100 + /* Move Chopstix syscall probe here */
101 + /* Save and clobber: eax, ecx, ebp */
106 + subl $SPEC_EVENT_SIZE, %esp
107 + movl rec_event, %ecx
110 + # struct event is first, just below %ebp
111 + movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
112 + leal -SPEC_EVENT_SIZE(%ebp), %eax
113 + movl %eax, EVENT_event_data(%ebp)
114 + movl $6, EVENT_event_type(%ebp)
115 + movl rec_event, %edx
117 + leal -EVENT_SIZE(%ebp), %eax
121 + addl $SPEC_EVENT_SIZE, %esp
127 call *sys_call_table(,%eax,4)
128 movl %eax,PT_EAX(%esp) # store the return value
130 diff -Nurb linux-2.6.27-590/arch/x86/mm/fault.c linux-2.6.27-591/arch/x86/mm/fault.c
131 --- linux-2.6.27-590/arch/x86/mm/fault.c 2010-01-29 16:29:46.000000000 -0500
132 +++ linux-2.6.27-591/arch/x86/mm/fault.c 2010-01-29 16:30:22.000000000 -0500
138 +extern void (*rec_event)(void *,unsigned int);
141 + unsigned long dcookie;
143 + unsigned char reason;
148 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
149 diff -Nurb linux-2.6.27-590/drivers/oprofile/cpu_buffer.c linux-2.6.27-591/drivers/oprofile/cpu_buffer.c
150 --- linux-2.6.27-590/drivers/oprofile/cpu_buffer.c 2008-10-09 18:13:53.000000000 -0400
151 +++ linux-2.6.27-591/drivers/oprofile/cpu_buffer.c 2010-01-29 16:30:22.000000000 -0500
153 #include <linux/oprofile.h>
154 #include <linux/vmalloc.h>
155 #include <linux/errno.h>
156 +#include <linux/arrays.h>
158 #include "event_buffer.h"
159 #include "cpu_buffer.h"
164 +#ifdef CONFIG_CHOPSTIX
168 + unsigned long dcookie;
172 +extern void (*rec_event)(void *,unsigned int);
176 add_sample(struct oprofile_cpu_buffer * cpu_buf,
177 unsigned long pc, unsigned long event)
180 entry->event = event;
181 increment_head(cpu_buf);
188 int is_kernel = !user_mode(regs);
189 unsigned long pc = profile_pc(regs);
192 +#ifdef CONFIG_CHOPSTIX
195 + struct event_spec espec;
196 + esig.task = current;
199 + esig.event_data=&espec;
200 + esig.event_type=event; /* index in the event array currently set up */
201 + /* make sure the counters are loaded in the order we want them to show up*/
202 + (*rec_event)(&esig, 1);
205 oprofile_add_ext_sample(pc, regs, event, is_kernel);
208 + oprofile_add_ext_sample(pc, regs, event, is_kernel);
214 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
215 diff -Nurb linux-2.6.27-590/fs/bio.c linux-2.6.27-591/fs/bio.c
216 --- linux-2.6.27-590/fs/bio.c 2008-10-09 18:13:53.000000000 -0400
217 +++ linux-2.6.27-591/fs/bio.c 2010-01-31 22:21:09.000000000 -0500
219 #include <linux/workqueue.h>
220 #include <linux/blktrace_api.h>
221 #include <scsi/sg.h> /* for struct sg_iovec */
222 +#include <linux/arrays.h>
224 static struct kmem_cache *bio_slab __read_mostly;
232 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
233 * IO code that does not need private memory pools.
234 @@ -1171,6 +1173,14 @@
240 + unsigned long dcookie;
242 + unsigned char reason;
245 +extern void (*rec_event)(void *,unsigned int);
247 * bio_endio - end I/O on a bio
249 @@ -1192,6 +1202,24 @@
250 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
255 + struct event event;
256 + struct event_spec espec;
259 + espec.reason = 1;/*response */
261 + eip = bio->bi_end_io;
262 + event.event_data=&espec;
264 + event.event_type=3;
265 + /* index in the event array currently set up */
266 + /* make sure the counters are loaded in the order we want them to show up*/
267 + (*rec_event)(&event, bytes_done);
272 bio->bi_end_io(bio, error);
274 diff -Nurb linux-2.6.27-590/fs/exec.c linux-2.6.27-591/fs/exec.c
275 --- linux-2.6.27-590/fs/exec.c 2010-01-29 16:29:48.000000000 -0500
276 +++ linux-2.6.27-591/fs/exec.c 2010-01-29 16:45:48.000000000 -0500
278 #include <linux/fdtable.h>
279 #include <linux/mm.h>
280 #include <linux/stat.h>
281 +#include <linux/dcookies.h>
282 #include <linux/fcntl.h>
283 #include <linux/smp_lock.h>
284 #include <linux/swap.h>
289 + #ifdef CONFIG_CHOPSTIX
290 + unsigned long cookie;
291 + extern void (*rec_event)(void *, unsigned int);
292 + if (rec_event && !nd.dentry->d_cookie)
293 + get_dcookie(nd.dentry, nd.mnt, &cookie);
299 diff -Nurb linux-2.6.27-590/fs/exec.c.rej linux-2.6.27-591/fs/exec.c.rej
300 --- linux-2.6.27-590/fs/exec.c.rej 1969-12-31 19:00:00.000000000 -0500
301 +++ linux-2.6.27-591/fs/exec.c.rej 2010-01-31 22:21:18.000000000 -0500
305 + #include <linux/personality.h>
306 + #include <linux/binfmts.h>
307 + #include <linux/utsname.h>
308 +- /*#include <linux/pid_namespace.h>*/
309 + #include <linux/module.h>
310 + #include <linux/namei.h>
311 + #include <linux/proc_fs.h>
313 + #include <linux/personality.h>
314 + #include <linux/binfmts.h>
315 + #include <linux/utsname.h>
316 ++ #include <linux/pid_namespace.h>
317 + #include <linux/module.h>
318 + #include <linux/namei.h>
319 + #include <linux/proc_fs.h>
322 + #ifdef CONFIG_CHOPSTIX
323 + unsigned long cookie;
324 + extern void (*rec_event)(void *, unsigned int);
325 +- if (rec_event && !nd.dentry->d_cookie)
326 +- get_dcookie(nd.dentry, nd.mnt, &cookie);
331 + #ifdef CONFIG_CHOPSTIX
332 + unsigned long cookie;
333 + extern void (*rec_event)(void *, unsigned int);
334 ++ if (rec_event && !nd.path.dentry->d_cookie)
335 ++ get_dcookie(&nd.path, &cookie);
339 diff -Nurb linux-2.6.27-590/include/linux/arrays.h linux-2.6.27-591/include/linux/arrays.h
340 --- linux-2.6.27-590/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500
341 +++ linux-2.6.27-591/include/linux/arrays.h 2010-01-29 16:30:22.000000000 -0500
343 +#ifndef __ARRAYS_H__
344 +#define __ARRAYS_H__
345 +#include <linux/list.h>
347 +#define SAMPLING_METHOD_DEFAULT 0
348 +#define SAMPLING_METHOD_LOG 1
350 +/* Every probe has an array handler */
352 +/* XXX - Optimize this structure */
354 +extern void (*rec_event)(void *,unsigned int);
355 +struct array_handler {
356 + struct list_head link;
357 + unsigned int (*hash_func)(void *);
358 + unsigned int (*sampling_func)(void *,int,void *);
359 + unsigned short size;
360 + unsigned int threshold;
361 + unsigned char **expcount;
362 + unsigned int sampling_method;
363 + unsigned int **arrays;
364 + unsigned int arraysize;
365 + unsigned int num_samples[2];
366 + void **epoch_samples; /* size-sized lists of samples */
367 + unsigned int (*serialize)(void *, void *);
368 + unsigned char code[5];
372 + struct list_head link;
374 + unsigned int count;
375 + unsigned int event_type;
376 + struct task_struct *task;
379 diff -Nurb linux-2.6.27-590/include/linux/sched.h.rej linux-2.6.27-591/include/linux/sched.h.rej
380 --- linux-2.6.27-590/include/linux/sched.h.rej 1969-12-31 19:00:00.000000000 -0500
381 +++ linux-2.6.27-591/include/linux/sched.h.rej 2010-01-29 16:30:22.000000000 -0500
386 + unsigned long sleep_avg;
387 + unsigned long long timestamp, last_ran;
388 + unsigned long long sched_time; /* sched_clock time spent running */
389 + enum sleep_type sleep_type;
393 + unsigned long sleep_avg;
394 + unsigned long long timestamp, last_ran;
395 ++ #ifdef CONFIG_CHOPSTIX
396 ++ unsigned long last_interrupted, last_ran_j;
399 + unsigned long long sched_time; /* sched_clock time spent running */
400 + enum sleep_type sleep_type;
402 diff -Nurb linux-2.6.27-590/kernel/sched.c linux-2.6.27-591/kernel/sched.c
403 --- linux-2.6.27-590/kernel/sched.c 2010-01-29 16:29:48.000000000 -0500
404 +++ linux-2.6.27-591/kernel/sched.c 2010-01-31 22:21:08.000000000 -0500
406 * 1998-11-19 Implemented schedule_timeout() and related stuff
407 * by Andrea Arcangeli
408 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
409 - * hybrid priority-list and round-robin design with
410 + * hybrid priority-list and round-robin design with
411 * an array-switch method of distributing timeslices
412 * and per-CPU runqueues. Cleanups and useful suggestions
413 * by Davide Libenzi, preemptible kernel bits by Robert Love.
415 #include <linux/ftrace.h>
416 #include <linux/vs_sched.h>
417 #include <linux/vs_cvirt.h>
418 +#include <linux/arrays.h>
421 #include <asm/irq_regs.h>
423 #include "sched_cpupri.h"
425 +#define INTERRUPTIBLE -1
429 * Convert user-nice values [ -20 ... 0 ... 19 ]
430 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
431 @@ -4428,6 +4432,29 @@
435 +void (*rec_event)(void *,unsigned int) = NULL;
436 +EXPORT_SYMBOL(rec_event);
437 +#ifdef CONFIG_CHOPSTIX
441 + unsigned long dcookie;
442 + unsigned int count;
443 + unsigned int reason;
446 +/* To support safe calling from asm */
447 +asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
448 + struct pt_regs *regs;
449 + struct event_spec *es = event_signature_in->event_data;
450 + regs = task_pt_regs(current);
451 + event_signature_in->task=current;
453 + event_signature_in->count=1;
454 + (*rec_event)(event_signature_in, count);
459 * schedule() is the main scheduler function.
461 @@ -5369,6 +5396,7 @@
463 read_unlock(&tasklist_lock);
467 if ((current->euid != p->euid) && (current->euid != p->uid) &&
468 !capable(CAP_SYS_NICE))
469 @@ -9296,3 +9324,26 @@
470 .subsys_id = cpuacct_subsys_id,
472 #endif /* CONFIG_CGROUP_CPUACCT */
474 +#ifdef CONFIG_CHOPSTIX
475 +void (*rec_event)();
476 +EXPORT_SYMBOL(rec_event);
480 + unsigned long dcookie;
481 + unsigned int count;
482 + unsigned int reason;
485 +/* To support safe calling from asm */
486 +asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
487 + struct pt_regs *regs;
488 + struct event_spec *es = event_signature_in->event_data;
489 + regs = task_pt_regs(current);
490 + event_signature_in->task=current;
492 + event_signature_in->count=1;
493 + (*rec_event)(event_signature_in, count);
496 diff -Nurb linux-2.6.27-590/kernel/sched.c.rej linux-2.6.27-591/kernel/sched.c.rej
497 --- linux-2.6.27-590/kernel/sched.c.rej 1969-12-31 19:00:00.000000000 -0500
498 +++ linux-2.6.27-591/kernel/sched.c.rej 2010-01-29 16:30:22.000000000 -0500
502 + #include <linux/nmi.h>
503 + #include <linux/init.h>
504 + #include <asm/uaccess.h>
505 + #include <linux/highmem.h>
506 + #include <linux/smp_lock.h>
507 + #include <asm/mmu_context.h>
509 + #include <linux/nmi.h>
510 + #include <linux/init.h>
511 + #include <asm/uaccess.h>
512 ++ #include <linux/arrays.h>
513 + #include <linux/highmem.h>
514 + #include <linux/smp_lock.h>
515 + #include <asm/mmu_context.h>
521 + spin_lock(&rq->lock);
522 + if (unlikely(rq != task_rq(p))) {
523 + spin_unlock(&rq->lock);
529 + spin_lock(&rq->lock);
530 + if (unlikely(rq != task_rq(p))) {
531 + spin_unlock(&rq->lock);
534 + * event cannot wake it up and insert it on the runqueue either.
536 + p->state = TASK_RUNNING;
539 + * Make sure we do not leak PI boosting priority to the child:
541 + * event cannot wake it up and insert it on the runqueue either.
543 + p->state = TASK_RUNNING;
544 ++ #ifdef CONFIG_CHOPSTIX
545 ++ /* The jiffy of last interruption */
546 ++ if (p->state & TASK_UNINTERRUPTIBLE) {
547 ++ p->last_interrupted=jiffies;
550 ++ if (p->state & TASK_INTERRUPTIBLE) {
551 ++ p->last_interrupted=INTERRUPTIBLE;
554 ++ p->last_interrupted=RUNNING;
556 ++ /* The jiffy of last execution */
557 ++ p->last_ran_j=jiffies;
561 + * Make sure we do not leak PI boosting priority to the child:
567 + static inline int interactive_sleep(enum sleep_type sleep_type)
569 + return (sleep_type == SLEEP_INTERACTIVE ||
575 + static inline int interactive_sleep(enum sleep_type sleep_type)
577 + return (sleep_type == SLEEP_INTERACTIVE ||
581 + * schedule() is the main scheduler function.
583 + asmlinkage void __sched schedule(void)
585 + struct task_struct *prev, *next;
586 + struct prio_array *array;
587 + struct list_head *queue;
588 + unsigned long long now;
589 +- unsigned long run_time;
590 + int cpu, idx, new_prio;
591 + long *switch_count;
595 + * Test if we are atomic. Since do_exit() needs to call into
598 + * schedule() is the main scheduler function.
601 ++ #ifdef CONFIG_CHOPSTIX
602 ++ extern void (*rec_event)(void *,unsigned int);
603 ++ struct event_spec {
605 ++ unsigned long dcookie;
606 ++ unsigned int count;
607 ++ unsigned int reason;
611 + asmlinkage void __sched schedule(void)
613 + struct task_struct *prev, *next;
614 + struct prio_array *array;
615 + struct list_head *queue;
616 + unsigned long long now;
617 ++ unsigned long run_time, diff;
618 + int cpu, idx, new_prio;
619 + long *switch_count;
621 ++ int sampling_reason;
624 + * Test if we are atomic. Since do_exit() needs to call into
627 + switch_count = &prev->nivcsw;
628 + if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
629 + switch_count = &prev->nvcsw;
630 + if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
631 + unlikely(signal_pending(prev))))
632 + prev->state = TASK_RUNNING;
634 + switch_count = &prev->nivcsw;
635 + if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
636 + switch_count = &prev->nvcsw;
638 + if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
639 + unlikely(signal_pending(prev))))
640 + prev->state = TASK_RUNNING;
643 + vx_uninterruptible_inc(prev);
645 + deactivate_task(prev, rq);
650 + vx_uninterruptible_inc(prev);
652 + deactivate_task(prev, rq);
653 ++ #ifdef CONFIG_CHOPSTIX
654 ++ /* An uninterruptible process just yielded. Record the current jiffy */
655 ++ if (prev->state & TASK_UNINTERRUPTIBLE) {
656 ++ prev->last_interrupted=jiffies;
658 ++ /* An interruptible process just yielded, or it got preempted.
659 ++ * Mark it as interruptible */
660 ++ else if (prev->state & TASK_INTERRUPTIBLE) {
661 ++ prev->last_interrupted=INTERRUPTIBLE;
669 + prev->sleep_avg = 0;
670 + prev->timestamp = prev->last_ran = now;
672 + sched_info_switch(prev, next);
673 + if (likely(prev != next)) {
674 + next->timestamp = next->last_ran = now;
676 + prev->sleep_avg = 0;
677 + prev->timestamp = prev->last_ran = now;
679 ++ #ifdef CONFIG_CHOPSTIX
680 ++ /* Run only if the Chopstix module so decrees it */
682 ++ prev->last_ran_j = jiffies;
683 ++ if (next->last_interrupted!=INTERRUPTIBLE) {
684 ++ if (next->last_interrupted!=RUNNING) {
685 ++ diff = (jiffies-next->last_interrupted);
686 ++ sampling_reason = 0;/* BLOCKING */
689 ++ diff = jiffies-next->last_ran_j;
690 ++ sampling_reason = 1;/* PREEMPTION */
693 ++ if (diff >= HZ/10) {
694 ++ struct event event;
695 ++ struct event_spec espec;
696 ++ struct pt_regs *regs;
697 ++ regs = task_pt_regs(current);
699 ++ espec.reason = sampling_reason;
700 ++ event.event_data=&espec;
702 ++ espec.pc=regs->eip;
703 ++ event.event_type=2;
704 ++ /* index in the event array currently set up */
705 ++ /* make sure the counters are loaded in the order we want them to show up*/
706 ++ (*rec_event)(&event, diff);
709 ++ /* next has been elected to run */
710 ++ next->last_interrupted=0;
713 + sched_info_switch(prev, next);
714 + if (likely(prev != next)) {
715 + next->timestamp = next->last_ran = now;
718 + jiffies_to_timespec(p->policy == SCHED_FIFO ?
719 + 0 : task_timeslice(p), &t);
720 + read_unlock(&tasklist_lock);
721 + retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
725 + jiffies_to_timespec(p->policy == SCHED_FIFO ?
726 + 0 : task_timeslice(p), &t);
727 + read_unlock(&tasklist_lock);
729 + retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
742 ++ #ifdef CONFIG_CHOPSTIX
743 ++ void (*rec_event)(void *,unsigned int) = NULL;
745 ++ /* To support safe calling from asm */
746 ++ asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
747 ++ struct pt_regs *regs;
748 ++ struct event_spec *es = event_signature_in->event_data;
749 ++ regs = task_pt_regs(current);
750 ++ event_signature_in->task=current;
752 ++ event_signature_in->count=1;
753 ++ (*rec_event)(event_signature_in, count);
755 ++ EXPORT_SYMBOL(rec_event);
756 ++ EXPORT_SYMBOL(in_sched_functions);
758 diff -Nurb linux-2.6.27-590/mm/memory.c linux-2.6.27-591/mm/memory.c
759 --- linux-2.6.27-590/mm/memory.c 2010-01-29 16:29:48.000000000 -0500
760 +++ linux-2.6.27-591/mm/memory.c 2010-01-31 22:21:18.000000000 -0500
763 #include <linux/swapops.h>
764 #include <linux/elf.h>
765 +#include <linux/arrays.h>
767 #include "internal.h"
769 @@ -2690,6 +2691,15 @@
773 +extern void (*rec_event)(void *,unsigned int);
776 + unsigned long dcookie;
778 + unsigned char reason;
783 * By the time we get here, we already hold the mm semaphore
785 @@ -2719,6 +2729,24 @@
789 +#ifdef CONFIG_CHOPSTIX
791 + struct event event;
792 + struct event_spec espec;
793 + struct pt_regs *regs;
795 + regs = task_pt_regs(current);
796 + pc = regs->ip & (unsigned int) ~4095;
798 + espec.reason = 0; /* alloc */
799 + event.event_data=&espec;
800 + event.task = current;
802 + event.event_type=5;
803 + (*rec_event)(&event, 1);
807 return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
810 diff -Nurb linux-2.6.27-590/mm/slab.c linux-2.6.27-591/mm/slab.c
811 --- linux-2.6.27-590/mm/slab.c 2010-01-29 16:29:48.000000000 -0500
812 +++ linux-2.6.27-591/mm/slab.c 2010-01-29 16:30:22.000000000 -0500
814 #include <linux/fault-inject.h>
815 #include <linux/rtmutex.h>
816 #include <linux/reciprocal_div.h>
817 +#include <linux/arrays.h>
818 #include <linux/debugobjects.h>
820 #include <asm/cacheflush.h>
825 +extern void (*rec_event)(void *,unsigned int);
828 + unsigned long dcookie;
830 + unsigned char reason;
836 @@ -3469,6 +3478,19 @@
837 local_irq_restore(save_flags);
838 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
840 +#ifdef CONFIG_CHOPSTIX
841 + if (rec_event && objp) {
842 + struct event event;
843 + struct event_spec espec;
845 + espec.reason = 0; /* alloc */
846 + event.event_data=&espec;
847 + event.task = current;
849 + event.event_type=5;
850 + (*rec_event)(&event, cachep->buffer_size);
854 if (unlikely((flags & __GFP_ZERO) && objp))
855 memset(objp, 0, obj_size(cachep));
856 @@ -3578,12 +3600,26 @@
857 * Release an obj back to its cache. If the obj has a constructed state, it must
858 * be in this state _before_ it is released. Called with disabled ints.
860 -static inline void __cache_free(struct kmem_cache *cachep, void *objp)
861 +static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
863 struct array_cache *ac = cpu_cache_get(cachep);
866 - objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
867 + objp = cache_free_debugcheck(cachep, objp, caller);
868 + #ifdef CONFIG_CHOPSTIX
869 + if (rec_event && objp) {
870 + struct event event;
871 + struct event_spec espec;
873 + espec.reason = 1; /* free */
874 + event.event_data=&espec;
875 + event.task = current;
877 + event.event_type=4;
878 + (*rec_event)(&event, cachep->buffer_size);
882 vx_slab_free(cachep);
885 @@ -3714,6 +3750,7 @@
888 struct kmem_cache *cachep;
891 /* If you want to save a few bytes .text space: replace
893 @@ -3741,10 +3778,17 @@
894 EXPORT_SYMBOL(__kmalloc_track_caller);
897 +#ifdef CONFIG_CHOPSTIX
898 +void *__kmalloc(size_t size, gfp_t flags)
900 + return __do_kmalloc(size, flags, __builtin_return_address(0));
903 void *__kmalloc(size_t size, gfp_t flags)
905 return __do_kmalloc(size, flags, NULL);
908 EXPORT_SYMBOL(__kmalloc);
911 @@ -3764,7 +3808,7 @@
912 debug_check_no_locks_freed(objp, obj_size(cachep));
913 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
914 debug_check_no_obj_freed(objp, obj_size(cachep));
915 - __cache_free(cachep, objp);
916 + __cache_free(cachep, objp,__builtin_return_address(0));
917 local_irq_restore(flags);
919 EXPORT_SYMBOL(kmem_cache_free);
920 @@ -3790,7 +3834,7 @@
921 c = virt_to_cache(objp);
922 debug_check_no_locks_freed(objp, obj_size(c));
923 debug_check_no_obj_freed(objp, obj_size(c));
924 - __cache_free(c, (void *)objp);
925 + __cache_free(c, (void *)objp,__builtin_return_address(0));
926 local_irq_restore(flags);
928 EXPORT_SYMBOL(kfree);