1 diff -Nurb --exclude='*.cmd' --exclude='*.orig' --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' --exclude='*.svn*' linux-2.6.22-590/arch/i386/Kconfig linux-2.6.22-591/arch/i386/Kconfig
2 --- linux-2.6.22-590/arch/i386/Kconfig 2009-03-16 20:49:42.000000000 -0400
3 +++ linux-2.6.22-591/arch/i386/Kconfig 2009-03-16 20:58:59.000000000 -0400
6 source "arch/i386/oprofile/Kconfig"
9 + bool "Chopstix (PlanetLab)"
10 + depends on MODULES && OPROFILE
12 + Chopstix allows you to monitor various events by summarizing them
13 + in lossy data structures and transferring these data structures
14 + into user space. If in doubt, say "N".
17 bool "Kprobes (EXPERIMENTAL)"
18 depends on KALLSYMS && EXPERIMENTAL && MODULES
19 --- linux-2.6.22-590/arch/i386/kernel/asm-offsets.c 2007-07-08 19:32:17.000000000 -0400
20 +++ linux-2.6.22-591/arch/i386/kernel/asm-offsets.c 2009-03-16 20:58:59.000000000 -0400
22 #include <linux/signal.h>
23 #include <linux/personality.h>
24 #include <linux/suspend.h>
25 +#include <linux/arrays.h>
26 #include <asm/ucontext.h>
28 #include <asm/pgtable.h>
30 #define OFFSET(sym, str, mem) \
31 DEFINE(sym, offsetof(struct str, mem));
33 +#define STACKOFFSET(sym, str, mem) \
34 + DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
36 /* workaround for a warning with -Wmissing-prototypes */
41 + unsigned long dcookie;
43 + unsigned int number;
48 OFFSET(SIGCONTEXT_eax, sigcontext, eax);
50 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
53 - OFFSET(TI_task, thread_info, task);
54 + STACKOFFSET(TASK_thread, task_struct, thread);
55 + STACKOFFSET(THREAD_esp, thread_struct, esp);
56 + STACKOFFSET(EVENT_event_data, event, event_data);
57 + STACKOFFSET(EVENT_task, event, task);
58 + STACKOFFSET(EVENT_event_type, event, event_type);
59 + STACKOFFSET(SPEC_number, event_spec, number);
60 + DEFINE(EVENT_SIZE, sizeof(struct event));
61 + DEFINE(SPEC_SIZE, sizeof(struct event_spec));
62 + DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
64 OFFSET(TI_exec_domain, thread_info, exec_domain);
65 OFFSET(TI_flags, thread_info, flags);
66 OFFSET(TI_status, thread_info, status);
67 --- linux-2.6.22-590/arch/i386/kernel/entry.S 2009-03-16 20:49:07.000000000 -0400
68 +++ linux-2.6.22-591/arch/i386/kernel/entry.S 2009-03-16 20:58:59.000000000 -0400
70 cmpl $(nr_syscalls), %eax
73 + /* Move Chopstix syscall probe here */
74 + /* Save and clobber: eax, ecx, ebp */
79 + subl $SPEC_EVENT_SIZE, %esp
80 + movl rec_event, %ecx
83 + # struct event is first, just below %ebp
84 + movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
85 + leal -SPEC_EVENT_SIZE(%ebp), %eax
86 + movl %eax, EVENT_event_data(%ebp)
87 + movl $6, EVENT_event_type(%ebp)
88 + movl rec_event, %edx
90 + leal -EVENT_SIZE(%ebp), %eax
94 + addl $SPEC_EVENT_SIZE, %esp
100 call *sys_call_table(,%eax,4)
101 movl %eax,PT_EAX(%esp) # store the return value
103 --- linux-2.6.22-590/arch/i386/mm/fault.c 2009-03-16 20:49:42.000000000 -0400
104 +++ linux-2.6.22-591/arch/i386/mm/fault.c 2009-03-16 20:58:59.000000000 -0400
106 DIE_PAGE_FAULT, &args);
110 +extern void (*rec_event)(void *,unsigned int);
113 + unsigned long dcookie;
115 + unsigned char reason;
119 * Return EIP plus the CS segment base. The segment limit is also
120 * adjusted, clamped to the kernel/user address space (whichever is
122 * bit 3 == 1 means use of reserved bit detected
123 * bit 4 == 1 means fault was an instruction fetch
127 fastcall void __kprobes do_page_fault(struct pt_regs *regs,
128 unsigned long error_code)
130 --- linux-2.6.22-590/block/ll_rw_blk.c 2009-03-16 20:49:07.000000000 -0400
131 +++ linux-2.6.22-591/block/ll_rw_blk.c 2009-03-16 20:58:59.000000000 -0400
133 #include <linux/cpu.h>
134 #include <linux/blktrace_api.h>
135 #include <linux/fault-inject.h>
136 +#include <linux/arrays.h>
140 @@ -3102,6 +3103,13 @@
142 #endif /* CONFIG_FAIL_MAKE_REQUEST */
144 +extern void (*rec_event)(void *,unsigned int);
147 + unsigned long dcookie;
149 + unsigned char reason;
152 * generic_make_request: hand a buffer to its device driver for I/O
153 * @bio: The bio describing the location in memory and on the device.
154 @@ -3220,7 +3228,23 @@
159 +#ifdef CONFIG_CHOPSTIX
161 + struct event event;
162 + struct event_spec espec;
165 + espec.reason = 0;/*request */
167 + eip = bio->bi_end_io;
168 + event.event_data=&espec;
170 + event.event_type=3;
171 + /* index in the event array currently set up */
172 + /* make sure the counters are loaded in the order we want them to show up*/
173 + (*rec_event)(&event, bio->bi_size);
176 ret = q->make_request_fn(q, bio);
179 --- linux-2.6.22-590/drivers/oprofile/cpu_buffer.c 2007-07-08 19:32:17.000000000 -0400
180 +++ linux-2.6.22-591/drivers/oprofile/cpu_buffer.c 2009-03-16 20:58:59.000000000 -0400
182 #include <linux/oprofile.h>
183 #include <linux/vmalloc.h>
184 #include <linux/errno.h>
185 +#include <linux/arrays.h>
187 #include "event_buffer.h"
188 #include "cpu_buffer.h"
193 +#ifdef CONFIG_CHOPSTIX
197 + unsigned long dcookie;
201 +extern void (*rec_event)(void *,unsigned int);
205 add_sample(struct oprofile_cpu_buffer * cpu_buf,
206 unsigned long pc, unsigned long event)
209 entry->event = event;
210 increment_head(cpu_buf);
217 int is_kernel = !user_mode(regs);
218 unsigned long pc = profile_pc(regs);
221 +#ifdef CONFIG_CHOPSTIX
224 + struct event_spec espec;
225 + esig.task = current;
228 + esig.event_data=&espec;
229 + esig.event_type=event; /* index in the event array currently set up */
230 + /* make sure the counters are loaded in the order we want them to show up*/
231 + (*rec_event)(&esig, 1);
234 oprofile_add_ext_sample(pc, regs, event, is_kernel);
237 + oprofile_add_ext_sample(pc, regs, event, is_kernel);
243 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
244 --- linux-2.6.22-590/fs/bio.c 2007-07-08 19:32:17.000000000 -0400
245 +++ linux-2.6.22-591/fs/bio.c 2009-03-16 20:58:59.000000000 -0400
247 #include <linux/workqueue.h>
248 #include <linux/blktrace_api.h>
249 #include <scsi/sg.h> /* for struct sg_iovec */
250 +#include <linux/arrays.h>
252 #define BIO_POOL_SIZE 2
255 struct kmem_cache *slab;
260 * if you change this list, also change bvec_alloc or things will
261 * break badly! cannot be bigger than what you can fit into an
262 @@ -999,6 +1001,14 @@
268 + unsigned long dcookie;
270 + unsigned char reason;
273 +extern void (*rec_event)(void *,unsigned int);
275 * bio_endio - end I/O on a bio
277 @@ -1028,6 +1038,24 @@
278 bio->bi_size -= bytes_done;
279 bio->bi_sector += (bytes_done >> 9);
281 +#ifdef CONFIG_CHOPSTIX
283 + struct event event;
284 + struct event_spec espec;
287 + espec.reason = 1;/*response */
289 + eip = bio->bi_end_io;
290 + event.event_data=&espec;
292 + event.event_type=3;
293 + /* index in the event array currently set up */
294 + /* make sure the counters are loaded in the order we want them to show up*/
295 + (*rec_event)(&event, bytes_done);
300 bio->bi_end_io(bio, bytes_done, error);
302 --- linux-2.6.22-580/fs/exec.c 2009-04-08 16:36:16.000000000 -0400
303 +++ linux-2.6.22-590/fs/exec.c 2009-04-08 16:40:34.000000000 -0400
305 #include <linux/mman.h>
306 #include <linux/a.out.h>
307 #include <linux/stat.h>
308 +#include <linux/dcookies.h>
309 #include <linux/fcntl.h>
310 #include <linux/smp_lock.h>
311 #include <linux/init.h>
313 #include <linux/binfmts.h>
314 #include <linux/swap.h>
315 #include <linux/utsname.h>
316 -#include <linux/pid_namespace.h>
317 +/*#include <linux/pid_namespace.h>*/
318 #include <linux/module.h>
319 #include <linux/namei.h>
320 #include <linux/proc_fs.h>
324 struct inode *inode = nd.dentry->d_inode;
325 +#ifdef CONFIG_CHOPSTIX
326 + unsigned long cookie;
327 + extern void (*rec_event)(void *, unsigned int);
328 + if (rec_event && !nd.dentry->d_cookie)
329 + get_dcookie(nd.dentry, nd.mnt, &cookie);
332 file = ERR_PTR(-EACCES);
333 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
334 S_ISREG(inode->i_mode)) {
336 * Reparenting needs write_lock on tasklist_lock,
337 * so it is safe to do it under read_lock.
340 if (unlikely(tsk->group_leader == child_reaper(tsk)))
341 tsk->nsproxy->pid_ns->child_reaper = tsk;
344 zap_other_threads(tsk);
345 read_unlock(&tasklist_lock);
346 --- linux-2.6.22-590/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500
347 +++ linux-2.6.22-591/include/linux/arrays.h 2009-03-16 20:58:59.000000000 -0400
349 +#ifndef __ARRAYS_H__
350 +#define __ARRAYS_H__
351 +#include <linux/list.h>
353 +#define SAMPLING_METHOD_DEFAULT 0
354 +#define SAMPLING_METHOD_LOG 1
356 +/* Every probe has an array handler */
358 +/* XXX - Optimize this structure */
360 +extern void (*rec_event)(void *,unsigned int);
361 +struct array_handler {
362 + struct list_head link;
363 + unsigned int (*hash_func)(void *);
364 + unsigned int (*sampling_func)(void *,int,void *);
365 + unsigned short size;
366 + unsigned int threshold;
367 + unsigned char **expcount;
368 + unsigned int sampling_method;
369 + unsigned int **arrays;
370 + unsigned int arraysize;
371 + unsigned int num_samples[2];
372 + void **epoch_samples; /* size-sized lists of samples */
373 + unsigned int (*serialize)(void *, void *);
374 + unsigned char code[5];
378 + struct list_head link;
380 + unsigned int count;
381 + unsigned int event_type;
382 + struct task_struct *task;
385 --- linux-2.6.22-590/include/linux/mutex.h 2007-07-08 19:32:17.000000000 -0400
386 +++ linux-2.6.22-591/include/linux/mutex.h 2009-03-16 20:58:59.000000000 -0400
388 struct thread_info *owner;
392 +#ifdef CONFIG_CHOPSTIX
393 + struct thread_info *owner;
396 #ifdef CONFIG_DEBUG_LOCK_ALLOC
397 struct lockdep_map dep_map;
398 --- linux-2.6.22-590/include/linux/sched.h 2009-03-16 20:49:42.000000000 -0400
399 +++ linux-2.6.22-591/include/linux/sched.h 2009-03-16 20:58:59.000000000 -0400
402 unsigned long sleep_avg;
403 unsigned long long timestamp, last_ran;
404 +#ifdef CONFIG_CHOPSTIX
405 + unsigned long last_interrupted, last_ran_j;
408 unsigned long long sched_time; /* sched_clock time spent running */
409 enum sleep_type sleep_type;
411 --- linux-2.6.22-590/kernel/mutex.c 2007-07-08 19:32:17.000000000 -0400
412 +++ linux-2.6.22-591/kernel/mutex.c 2009-03-16 20:58:59.000000000 -0400
414 #include <linux/spinlock.h>
415 #include <linux/interrupt.h>
416 #include <linux/debug_locks.h>
417 +#include <linux/arrays.h>
419 +#undef CONFIG_CHOPSTIX
420 +#ifdef CONFIG_CHOPSTIX
423 + unsigned long dcookie;
425 + unsigned char reason;
430 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
432 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
434 atomic_set(&lock->count, 1);
435 +#ifdef CONFIG_CHOPSTIX
438 spin_lock_init(&lock->wait_lock);
439 INIT_LIST_HEAD(&lock->wait_list);
442 * The locking fastpath is the 1->0 transition from
443 * 'unlocked' into 'locked' state.
446 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
451 __set_task_state(task, state);
453 +#ifdef CONFIG_CHOPSTIX
456 + struct event event;
457 + struct event_spec espec;
458 + struct task_struct *p = lock->owner->task;
459 + /*spin_lock(&p->alloc_lock);*/
460 + espec.reason = 0; /* lock */
461 + event.event_data=&espec;
464 + event.event_type=5;
465 + (*rec_event)(&event, 1);
466 + /*spin_unlock(&p->alloc_lock);*/
474 /* didnt get the lock, go to sleep: */
475 spin_unlock_mutex(&lock->wait_lock, flags);
478 /* got the lock - rejoice! */
479 mutex_remove_waiter(lock, &waiter, task_thread_info(task));
480 debug_mutex_set_owner(lock, task_thread_info(task));
481 +#ifdef CONFIG_CHOPSTIX
482 + lock->owner = task_thread_info(task);
485 /* set it to 0 if there are no waiters left: */
486 if (likely(list_empty(&lock->wait_list)))
488 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
492 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
496 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
500 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
505 debug_mutex_wake_waiter(lock, waiter);
507 +#ifdef CONFIG_CHOPSTIX
510 + struct event event;
511 + struct event_spec espec;
513 + espec.reason = 1; /* unlock */
514 + event.event_data=&espec;
515 + event.task = lock->owner->task;
517 + event.event_type=5;
518 + (*rec_event)(&event, 1);
524 wake_up_process(waiter->task);
527 --- linux-2.6.22-590/kernel/sched.c 2009-03-16 20:49:42.000000000 -0400
528 +++ linux-2.6.22-591/kernel/sched.c 2009-03-16 20:58:59.000000000 -0400
530 * 1998-11-19 Implemented schedule_timeout() and related stuff
531 * by Andrea Arcangeli
532 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
533 - * hybrid priority-list and round-robin design with
534 + *		hybrid priority-list and round-robin design with
535 * an array-switch method of distributing timeslices
536 * and per-CPU runqueues. Cleanups and useful suggestions
537 * by Davide Libenzi, preemptible kernel bits by Robert Love.
539 #include <linux/nmi.h>
540 #include <linux/init.h>
541 #include <asm/uaccess.h>
542 +#include <linux/arrays.h>
543 #include <linux/highmem.h>
544 #include <linux/smp_lock.h>
545 #include <asm/mmu_context.h>
547 #include <linux/vs_sched.h>
548 #include <linux/vs_cvirt.h>
550 +#define INTERRUPTIBLE -1
554 * Scheduler clock - returns current time in nanosec units.
555 * This is default implementation.
561 spin_lock(&rq->lock);
562 if (unlikely(rq != task_rq(p))) {
563 spin_unlock(&rq->lock);
564 @@ -1741,6 +1746,21 @@
565 * event cannot wake it up and insert it on the runqueue either.
567 p->state = TASK_RUNNING;
568 +#ifdef CONFIG_CHOPSTIX
569 + /* The jiffy of last interruption */
570 + if (p->state & TASK_UNINTERRUPTIBLE) {
571 + p->last_interrupted=jiffies;
574 + if (p->state & TASK_INTERRUPTIBLE) {
575 + p->last_interrupted=INTERRUPTIBLE;
578 + p->last_interrupted=RUNNING;
580 + /* The jiffy of last execution */
581 + p->last_ran_j=jiffies;
585 * Make sure we do not leak PI boosting priority to the child:
586 @@ -3608,6 +3628,7 @@
591 static inline int interactive_sleep(enum sleep_type sleep_type)
593 return (sleep_type == SLEEP_INTERACTIVE ||
594 @@ -3617,16 +3638,28 @@
596 * schedule() is the main scheduler function.
599 +#ifdef CONFIG_CHOPSTIX
600 +extern void (*rec_event)(void *,unsigned int);
603 + unsigned long dcookie;
604 + unsigned int count;
605 + unsigned int reason;
609 asmlinkage void __sched schedule(void)
611 struct task_struct *prev, *next;
612 struct prio_array *array;
613 struct list_head *queue;
614 unsigned long long now;
615 - unsigned long run_time;
616 + unsigned long run_time, diff;
617 int cpu, idx, new_prio;
620 + int sampling_reason;
623 * Test if we are atomic. Since do_exit() needs to call into
624 @@ -3680,6 +3713,7 @@
625 switch_count = &prev->nivcsw;
626 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
627 switch_count = &prev->nvcsw;
629 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
630 unlikely(signal_pending(prev))))
631 prev->state = TASK_RUNNING;
632 @@ -3689,6 +3723,17 @@
633 vx_uninterruptible_inc(prev);
635 deactivate_task(prev, rq);
636 +#ifdef CONFIG_CHOPSTIX
637 +	/* An uninterruptible process just yielded. Record the current jiffy */
638 + if (prev->state & TASK_UNINTERRUPTIBLE) {
639 + prev->last_interrupted=jiffies;
641 + /* An interruptible process just yielded, or it got preempted.
642 + * Mark it as interruptible */
643 + else if (prev->state & TASK_INTERRUPTIBLE) {
644 + prev->last_interrupted=INTERRUPTIBLE;
650 @@ -3765,6 +3810,40 @@
652 prev->timestamp = prev->last_ran = now;
654 +#ifdef CONFIG_CHOPSTIX
655 + /* Run only if the Chopstix module so decrees it */
657 + prev->last_ran_j = jiffies;
658 + if (next->last_interrupted!=INTERRUPTIBLE) {
659 + if (next->last_interrupted!=RUNNING) {
660 + diff = (jiffies-next->last_interrupted);
661 + sampling_reason = 0;/* BLOCKING */
664 + diff = jiffies-next->last_ran_j;
665 + sampling_reason = 1;/* PREEMPTION */
668 + if (diff >= HZ/10) {
669 + struct event event;
670 + struct event_spec espec;
671 + struct pt_regs *regs;
672 + regs = task_pt_regs(current);
674 + espec.reason = sampling_reason;
675 + event.event_data=&espec;
677 + espec.pc=regs->eip;
678 + event.event_type=2;
679 + /* index in the event array currently set up */
680 + /* make sure the counters are loaded in the order we want them to show up*/
681 + (*rec_event)(&event, diff);
684 + /* next has been elected to run */
685 + next->last_interrupted=0;
688 sched_info_switch(prev, next);
689 if (likely(prev != next)) {
690 next->timestamp = next->last_ran = now;
691 @@ -4664,6 +4743,7 @@
693 read_unlock(&tasklist_lock);
697 if ((current->euid != p->euid) && (current->euid != p->uid) &&
698 !capable(CAP_SYS_NICE))
699 @@ -5032,6 +5112,7 @@
700 jiffies_to_timespec(p->policy == SCHED_FIFO ?
701 0 : task_timeslice(p), &t);
702 read_unlock(&tasklist_lock);
704 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
707 @@ -7275,3 +7356,20 @@
712 +#ifdef CONFIG_CHOPSTIX
713 +void (*rec_event)(void *,unsigned int) = NULL;
715 +/* To support safe calling from asm */
716 +asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
717 + struct pt_regs *regs;
718 + struct event_spec *es = event_signature_in->event_data;
719 + regs = task_pt_regs(current);
720 + event_signature_in->task=current;
722 + event_signature_in->count=1;
723 + (*rec_event)(event_signature_in, count);
725 +EXPORT_SYMBOL(rec_event);
726 +EXPORT_SYMBOL(in_sched_functions);
728 --- linux-2.6.22-590/mm/memory.c 2009-03-16 20:49:42.000000000 -0400
729 +++ linux-2.6.22-591/mm/memory.c 2009-03-16 20:58:59.000000000 -0400
732 #include <linux/swapops.h>
733 #include <linux/elf.h>
734 +#include <linux/arrays.h>
736 #ifndef CONFIG_NEED_MULTIPLE_NODES
737 /* use the per-pgdat data instead for discontigmem - mbligh */
738 @@ -2601,6 +2602,15 @@
742 +extern void (*rec_event)(void *,unsigned int);
745 + unsigned long dcookie;
747 + unsigned char reason;
752 * By the time we get here, we already hold the mm semaphore
754 @@ -2630,6 +2640,24 @@
758 +#ifdef CONFIG_CHOPSTIX
760 + struct event event;
761 + struct event_spec espec;
762 + struct pt_regs *regs;
764 + regs = task_pt_regs(current);
765 + pc = regs->eip & (unsigned int) ~4095;
767 + espec.reason = 0; /* alloc */
768 + event.event_data=&espec;
769 + event.task = current;
771 + event.event_type=5;
772 + (*rec_event)(&event, 1);
776 return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
779 --- linux-2.6.22-590/mm/slab.c 2009-03-16 20:49:42.000000000 -0400
780 +++ linux-2.6.22-591/mm/slab.c 2009-03-16 21:00:27.000000000 -0400
781 @@ -110,11 +110,13 @@
782 #include <linux/fault-inject.h>
783 #include <linux/rtmutex.h>
784 #include <linux/reciprocal_div.h>
785 +#include <linux/arrays.h>
787 #include <asm/cacheflush.h>
788 #include <asm/tlbflush.h>
789 #include <asm/page.h>
793 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
794 * 0 for faster, smaller code (especially in the critical paths).
799 +extern void (*rec_event)(void *,unsigned int);
802 + unsigned long dcookie;
804 + unsigned char reason;
810 @@ -3443,6 +3453,19 @@
811 local_irq_restore(save_flags);
812 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
814 +#ifdef CONFIG_CHOPSTIX
815 + if (rec_event && objp) {
816 + struct event event;
817 + struct event_spec espec;
819 + espec.reason = 0; /* alloc */
820 + event.event_data=&espec;
821 + event.task = current;
823 + event.event_type=5;
824 + (*rec_event)(&event, cachep->buffer_size);
830 @@ -3549,12 +3572,26 @@
831 * Release an obj back to its cache. If the obj has a constructed state, it must
832 * be in this state _before_ it is released. Called with disabled ints.
834 -static inline void __cache_free(struct kmem_cache *cachep, void *objp)
835 +static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
837 struct array_cache *ac = cpu_cache_get(cachep);
840 - objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
841 + objp = cache_free_debugcheck(cachep, objp, caller);
842 + #ifdef CONFIG_CHOPSTIX
843 + if (rec_event && objp) {
844 + struct event event;
845 + struct event_spec espec;
847 + espec.reason = 1; /* free */
848 + event.event_data=&espec;
849 + event.task = current;
851 + event.event_type=4;
852 + (*rec_event)(&event, cachep->buffer_size);
856 vx_slab_free(cachep);
858 if (cache_free_alien(cachep, objp))
859 @@ -3651,16 +3688,19 @@
860 __builtin_return_address(0));
862 EXPORT_SYMBOL(kmem_cache_alloc_node);
864 static __always_inline void *
865 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
867 struct kmem_cache *cachep;
871 cachep = kmem_find_general_cachep(size, flags);
872 if (unlikely(cachep == NULL))
874 - return kmem_cache_alloc_node(cachep, flags, node);
875 + ret = kmem_cache_alloc_node(cachep, flags, node);
880 #ifdef CONFIG_DEBUG_SLAB
881 @@ -3696,6 +3736,7 @@
884 struct kmem_cache *cachep;
887 /* If you want to save a few bytes .text space: replace
889 @@ -3705,9 +3746,10 @@
890 cachep = __find_general_cachep(size, flags);
891 if (unlikely(cachep == NULL))
893 - return __cache_alloc(cachep, flags, caller);
895 + ret = __cache_alloc(cachep, flags, caller);
900 #ifdef CONFIG_DEBUG_SLAB
901 void *__kmalloc(size_t size, gfp_t flags)
902 @@ -3723,10 +3765,17 @@
903 EXPORT_SYMBOL(__kmalloc_track_caller);
906 +#ifdef CONFIG_CHOPSTIX
907 +void *__kmalloc(size_t size, gfp_t flags)
909 + return __do_kmalloc(size, flags, __builtin_return_address(0));
912 void *__kmalloc(size_t size, gfp_t flags)
914 return __do_kmalloc(size, flags, NULL);
917 EXPORT_SYMBOL(__kmalloc);
920 @@ -3792,7 +3841,7 @@
922 local_irq_save(flags);
923 debug_check_no_locks_freed(objp, obj_size(cachep));
924 - __cache_free(cachep, objp);
925 + __cache_free(cachep, objp,__builtin_return_address(0));
926 local_irq_restore(flags);
928 EXPORT_SYMBOL(kmem_cache_free);
929 @@ -3817,7 +3866,7 @@
930 kfree_debugcheck(objp);
931 c = virt_to_cache(objp);
932 debug_check_no_locks_freed(objp, obj_size(c));
933 - __cache_free(c, (void *)objp);
934 + __cache_free(c, (void *)objp,__builtin_return_address(0));
935 local_irq_restore(flags);
937 EXPORT_SYMBOL(kfree);