diff -Nurb linux-2.6.27-590/arch/Kconfig linux-2.6.27-591/arch/Kconfig
--- linux-2.6.27-590/arch/Kconfig 2010-01-29 16:29:46.000000000 -0500
+++ linux-2.6.27-591/arch/Kconfig 2010-01-29 16:30:22.000000000 -0500
+config CHOPSTIX
+ bool "Chopstix (PlanetLab)"
+ depends on MODULES && OPROFILE
+ help
+ Chopstix allows you to monitor various events by summarizing them
+ in lossy data structures and transferring these data structures
+ into user space. If in doubt, say "N".
depends on KALLSYMS && MODULES
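The whole patch hangs off a single hook: a function pointer, rec_event, that the patched kernel defines as NULL and exports, and that the out-of-tree Chopstix module fills in at load time. Every probe added below tests the pointer before calling through it, so the instrumentation costs one NULL check while the module is unloaded. A minimal sketch of the pattern (the kernel side appears verbatim in kernel/sched.c below; the module side is an assumption, since the module is not part of this diff):

    /* kernel side: the hook, NULL until a consumer registers */
    void (*rec_event)(void *, unsigned int) = NULL;
    EXPORT_SYMBOL(rec_event);

    /* module side (assumed): install on load, clear on unload */
    static void chopstix_rec_event(void *event, unsigned int count)
    {
            /* hash the event into a lossy array; see include/linux/arrays.h */
    }
    /* on load:   rec_event = chopstix_rec_event; */
    /* on unload: rec_event = NULL;               */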
diff -Nurb linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c
--- linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c 2008-10-09 18:13:53.000000000 -0400
+++ linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c 2010-01-29 16:45:48.000000000 -0500
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/suspend.h>
+#include <linux/arrays.h>
#include <linux/kbuild.h>
#include <asm/ucontext.h>
#include <linux/lguest.h>
#include "../../../drivers/lguest/lg.h"
+#define STACKOFFSET(sym, str, mem) \
+ DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
/* workaround for a warning with -Wmissing-prototypes */
+struct event_spec {
+ unsigned long pc;
+ unsigned long dcookie;
+ unsigned count;
+ unsigned int number;
+};
OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
+ STACKOFFSET(TASK_thread, task_struct, thread);
+ STACKOFFSET(THREAD_esp, thread_struct, sp);
+ STACKOFFSET(EVENT_event_data, event, event_data);
+ STACKOFFSET(EVENT_task, event, task);
+ STACKOFFSET(EVENT_event_type, event, event_type);
+ STACKOFFSET(SPEC_number, event_spec, number);
+ DEFINE(EVENT_SIZE, sizeof(struct event));
+ DEFINE(SPEC_SIZE, sizeof(struct event_spec));
+ DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
OFFSET(TI_task, thread_info, task);
OFFSET(TI_exec_domain, thread_info, exec_domain);
OFFSET(TI_flags, thread_info, flags);
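asm-offsets_32.c exists to turn C structure layouts into assembler constants, and the Chopstix additions follow that convention, with one twist: STACKOFFSET emits offsetof(struct str, mem) - sizeof(struct str), a negative number, so the constant addresses a field relative to the end of the structure rather than its start. That is exactly how entry_32.S lays the records out below the frame pointer. For example, with the struct event defined in include/linux/arrays.h below (24 bytes on i386, task at offset 20), EVENT_task is -4, so EVENT_task(%ebp) hits the task field of an event whose last byte sits just below %ebp.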
diff -Nurb linux-2.6.27-590/arch/x86/kernel/entry_32.S linux-2.6.27-591/arch/x86/kernel/entry_32.S
--- linux-2.6.27-590/arch/x86/kernel/entry_32.S 2008-10-09 18:13:53.000000000 -0400
+++ linux-2.6.27-591/arch/x86/kernel/entry_32.S 2010-01-29 16:30:22.000000000 -0500
cmpl $(nr_syscalls), %eax
+ /* Chopstix syscall probe */
+ /* Saves and clobbers: eax, ecx, ebp */
+ subl $SPEC_EVENT_SIZE, %esp
+ movl rec_event, %ecx
+ # struct event is first, just below %ebp
+ movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
+ leal -SPEC_EVENT_SIZE(%ebp), %eax
+ movl %eax, EVENT_event_data(%ebp)
+ movl $6, EVENT_event_type(%ebp)
+ movl rec_event, %edx
+ leal -EVENT_SIZE(%ebp), %eax
+ addl $SPEC_EVENT_SIZE, %esp
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp) # store the return value
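In C, the surviving assembly amounts to roughly the following (a sketch for orientation only: it uses the event_spec variant from asm-offsets_32.c, whose number field carries the syscall number, written here as syscall_nr for the value in %eax; the real probe must stay in assembly because it runs on the syscall fast path, just before the dispatch through sys_call_table, apparently reaching rec_event via the rec_event_asm shim defined in kernel/sched.c):

    if (rec_event) {
            struct {
                    struct event_spec spec; /* lower on the stack */
                    struct event ev;        /* ends just below %ebp */
            } frame;
            frame.spec.number = syscall_nr;     /* saved from %eax */
            frame.ev.event_data = &frame.spec;
            frame.ev.event_type = 6;            /* syscall event class */
            rec_event_asm(&frame.ev, 1);        /* weight 1 per syscall */
    }

The constant 6 is one of the fixed event-class indices this patch uses; the later hunks show 2 for scheduling delays, 3 for I/O completions, 4 for slab frees, and 5 for allocations.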
diff -Nurb linux-2.6.27-590/arch/x86/mm/fault.c linux-2.6.27-591/arch/x86/mm/fault.c
--- linux-2.6.27-590/arch/x86/mm/fault.c 2010-01-29 16:29:46.000000000 -0500
+++ linux-2.6.27-591/arch/x86/mm/fault.c 2010-01-29 16:30:22.000000000 -0500
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+ unsigned long pc;
+ unsigned long dcookie;
+ unsigned count;
+ unsigned char reason;
+};
* Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
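The same extern hook plus a private copy of struct event_spec reappears in every file this patch instruments; only struct event and struct array_handler are centralized in include/linux/arrays.h. The copies therefore have to be kept layout-compatible by hand, both with each other and with the constants generated in asm-offsets_32.c, since the entry_32.S probe writes spec fields by numeric offset. (kernel/sched.c declares count and reason as unsigned int rather than unsigned char; that stays harmless only as long as those fields never cross the asm boundary.)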
diff -Nurb linux-2.6.27-590/drivers/oprofile/cpu_buffer.c linux-2.6.27-591/drivers/oprofile/cpu_buffer.c
--- linux-2.6.27-590/drivers/oprofile/cpu_buffer.c 2008-10-09 18:13:53.000000000 -0400
+++ linux-2.6.27-591/drivers/oprofile/cpu_buffer.c 2010-01-29 16:30:22.000000000 -0500
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
+#include <linux/arrays.h>
#include "event_buffer.h"
#include "cpu_buffer.h"
+#ifdef CONFIG_CHOPSTIX
+struct event_spec {
+ unsigned long pc;
+ unsigned long dcookie;
+ unsigned count;
+ unsigned char reason;
+};
+extern void (*rec_event)(void *,unsigned int);
add_sample(struct oprofile_cpu_buffer * cpu_buf,
unsigned long pc, unsigned long event)
entry->event = event;
increment_head(cpu_buf);
int is_kernel = !user_mode(regs);
unsigned long pc = profile_pc(regs);
+#ifdef CONFIG_CHOPSTIX
+ if (rec_event) {
+ struct event esig;
+ struct event_spec espec;
+ esig.task = current;
+ espec.pc = pc;
+ esig.event_data=&espec;
+ esig.event_type=event; /* index in the event array currently set up */
+ /* make sure the counters are loaded in the order we want them to show up */
+ (*rec_event)(&esig, 1);
+ }
+ else {
oprofile_add_ext_sample(pc, regs, event, is_kernel);
+ }
+#else
+ oprofile_add_ext_sample(pc, regs, event, is_kernel);
+#endif
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
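When the Chopstix module is loaded, each hardware-counter sample is diverted through rec_event with the counter index as the event type; otherwise, and in kernels built without CONFIG_CHOPSTIX, the sample goes to the stock oprofile buffer as before. Chopstix thus reuses oprofile's counter setup rather than programming the PMU itself, which is why the Kconfig entry above depends on OPROFILE.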
diff -Nurb linux-2.6.27-590/fs/bio.c linux-2.6.27-591/fs/bio.c
--- linux-2.6.27-590/fs/bio.c 2008-10-09 18:13:53.000000000 -0400
+++ linux-2.6.27-591/fs/bio.c 2010-01-29 16:30:22.000000000 -0500
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
#include <scsi/sg.h> /* for struct sg_iovec */
+#include <linux/arrays.h>
static struct kmem_cache *bio_slab __read_mostly;
* fs_bio_set is the bio_set containing bio and iovec memory pools used by
* IO code that does not need private memory pools.
@@ -1171,6 +1173,14 @@
+struct event_spec {
+ unsigned long pc;
+ unsigned long dcookie;
+ unsigned count;
+ unsigned char reason;
+};
+extern void (*rec_event)(void *,unsigned int);
* bio_endio - end I/O on a bio
@@ -1192,6 +1202,24 @@
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+#ifdef CONFIG_CHOPSTIX
+ if (rec_event) {
+ struct event event;
+ struct event_spec espec;
+ unsigned long eip;
+ espec.reason = 1; /* response */
+ eip = bio->bi_end_io;
+ espec.pc = eip;
+ event.event_data=&espec;
+ event.event_type=3;
+ /* index in the event array currently set up */
+ /* make sure the counters are loaded in the order we want them to show up */
+ (*rec_event)(&event, bytes_done);
+ }
+#endif
bio->bi_end_io(bio, error);
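The block-I/O probe fires in bio_endio, i.e. at completion time: reason 1 marks the event as a response, eip takes the address of the bio's bi_end_io handler (the natural point to attribute the I/O to), and the weight passed to rec_event is bytes_done, so the lossy counters accumulate bytes transferred rather than raw completion counts.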
diff -Nurb linux-2.6.27-590/fs/exec.c linux-2.6.27-591/fs/exec.c
--- linux-2.6.27-590/fs/exec.c 2010-01-29 16:29:48.000000000 -0500
+++ linux-2.6.27-591/fs/exec.c 2010-01-29 16:45:48.000000000 -0500
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
+#include <linux/dcookies.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
+ #ifdef CONFIG_CHOPSTIX
+ unsigned long cookie;
+ extern void (*rec_event)(void *, unsigned int);
+ if (rec_event && !nd.path.dentry->d_cookie)
+ get_dcookie(&nd.path, &cookie);
+ #endif
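This is how samples get mapped back to executables. At exec time, if the probe is armed and the binary's dentry does not yet have a dcookie, one is allocated: a dcookie is a kernel-issued opaque handle that user space can later exchange for a pathname (the same mechanism oprofile uses), so events only need to carry an unsigned long instead of a file name.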
diff -Nurb linux-2.6.27-590/include/linux/arrays.h linux-2.6.27-591/include/linux/arrays.h
--- linux-2.6.27-590/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.27-591/include/linux/arrays.h 2010-01-29 16:30:22.000000000 -0500
+#ifndef __ARRAYS_H__
+#define __ARRAYS_H__
+#include <linux/list.h>
+#define SAMPLING_METHOD_DEFAULT 0
+#define SAMPLING_METHOD_LOG 1
+/* Every probe has an array handler */
+/* XXX - Optimize this structure */
+extern void (*rec_event)(void *,unsigned int);
+struct array_handler {
+ struct list_head link;
+ unsigned int (*hash_func)(void *);
+ unsigned int (*sampling_func)(void *,int,void *);
+ unsigned short size;
+ unsigned int threshold;
+ unsigned char **expcount;
+ unsigned int sampling_method;
+ unsigned int **arrays;
+ unsigned int arraysize;
+ unsigned int num_samples[2];
+ void **epoch_samples; /* size-sized lists of samples */
+ unsigned int (*serialize)(void *, void *);
+ unsigned char code[5];
+};
+struct event {
+ struct list_head link;
+ void *event_data;
+ unsigned int count;
+ unsigned int event_type;
+ struct task_struct *task;
+};
+#endif
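These are the "lossy data structures" promised by the Kconfig help. struct event is the record every probe hands to rec_event; struct array_handler describes how the module summarizes one class of events: hash_func buckets an incoming event into fixed-size arrays, and sampling_func, threshold, and sampling_method decide when to subsample, so memory stays bounded no matter how many distinct events occur, and colliding events simply merge. A minimal sketch of the underlying idea (illustrative only; the module code that consumes array_handler is not part of this diff):

    /* bounded, lossy event counting: collisions are tolerated, not resolved */
    #define NBUCKETS 1024
    static unsigned int counts[NBUCKETS];

    static void record(void *ev, unsigned int weight,
                       unsigned int (*hash)(void *))
    {
            counts[hash(ev) % NBUCKETS] += weight;
    }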
diff -Nurb linux-2.6.27-590/include/linux/sched.h.rej linux-2.6.27-591/include/linux/sched.h.rej
--- linux-2.6.27-590/include/linux/sched.h.rej 1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.27-591/include/linux/sched.h.rej 2010-01-29 16:30:22.000000000 -0500
+ unsigned long sleep_avg;
+ unsigned long long timestamp, last_ran;
+ unsigned long long sched_time; /* sched_clock time spent running */
+ enum sleep_type sleep_type;
+ unsigned long sleep_avg;
+ unsigned long long timestamp, last_ran;
++ #ifdef CONFIG_CHOPSTIX
++ unsigned long last_interrupted, last_ran_j;
+ unsigned long long sched_time; /* sched_clock time spent running */
+ enum sleep_type sleep_type;
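A quirk of this patch: it ships .rej files (hunks rejected during an earlier forward-port) as ordinary new files, which is why the markers double up. The inner hunk adds two per-task fields, last_interrupted (the jiffy at which the task last blocked, or a sentinel) and last_ran_j (the jiffy at which it last ran), that the scheduler probe below depends on. The surrounding fields (sleep_avg, sleep_type) belong to the old O(1) scheduler's task_struct and no longer exist in 2.6.27's CFS-era task_struct, which is why these hunks failed to apply and would have to be redone by hand.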
diff -Nurb linux-2.6.27-590/kernel/sched.c linux-2.6.27-591/kernel/sched.c
--- linux-2.6.27-590/kernel/sched.c 2010-01-29 16:29:48.000000000 -0500
+++ linux-2.6.27-591/kernel/sched.c 2010-01-29 17:38:44.000000000 -0500
* 1998-11-19 Implemented schedule_timeout() and related stuff
* by Andrea Arcangeli
* 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
* hybrid priority-list and round-robin design with
* an array-switch method of distributing timeslices
* and per-CPU runqueues. Cleanups and useful suggestions
* by Davide Libenzi, preemptible kernel bits by Robert Love.
#include "sched_cpupri.h"
+#define INTERRUPTIBLE -1
+#define RUNNING 0
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -4428,6 +4431,29 @@
+#ifdef CONFIG_CHOPSTIX
+void (*rec_event)(void *,unsigned int) = NULL;
+EXPORT_SYMBOL(rec_event);
+struct event_spec {
+ unsigned long pc;
+ unsigned long dcookie;
+ unsigned int count;
+ unsigned int reason;
+};
+/* To support safe calling from asm */
+asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
+ struct pt_regs *regs;
+ struct event_spec *es = event_signature_in->event_data;
+ regs = task_pt_regs(current);
+ event_signature_in->task=current;
+ event_signature_in->count=1;
+ (*rec_event)(event_signature_in, count);
+}
+#endif
* schedule() is the main scheduler function.
@@ -5369,6 +5395,7 @@
read_unlock(&tasklist_lock);
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!capable(CAP_SYS_NICE))
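rec_event_asm is the shim the entry_32.S probe calls: asmlinkage forces the arguments onto the stack, which is what hand-written i386 assembly can most easily set up when the rest of the kernel is compiled with register-passing conventions. It completes the record with what the assembly does not fill in, namely the owning task, fetches task_pt_regs(current) for the event descriptor, and pins the struct's own count field to 1 while forwarding the caller-supplied weight in count.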
diff -Nurb linux-2.6.27-590/kernel/sched.c.rej linux-2.6.27-591/kernel/sched.c.rej
--- linux-2.6.27-590/kernel/sched.c.rej 1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.27-591/kernel/sched.c.rej 2010-01-29 16:30:22.000000000 -0500
+ #include <linux/nmi.h>
+ #include <linux/init.h>
+ #include <asm/uaccess.h>
+ #include <linux/highmem.h>
+ #include <linux/smp_lock.h>
+ #include <asm/mmu_context.h>
+ #include <linux/nmi.h>
+ #include <linux/init.h>
+ #include <asm/uaccess.h>
++ #include <linux/arrays.h>
+ #include <linux/highmem.h>
+ #include <linux/smp_lock.h>
+ #include <asm/mmu_context.h>
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock(&rq->lock);
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock(&rq->lock);
+ * event cannot wake it up and insert it on the runqueue either.
+ p->state = TASK_RUNNING;
+ * Make sure we do not leak PI boosting priority to the child:
+ * event cannot wake it up and insert it on the runqueue either.
+ p->state = TASK_RUNNING;
++ #ifdef CONFIG_CHOPSTIX
++ /* The jiffy of last interruption */
++ if (p->state & TASK_UNINTERRUPTIBLE) {
++ p->last_interrupted=jiffies;
++ if (p->state & TASK_INTERRUPTIBLE) {
++ p->last_interrupted=INTERRUPTIBLE;
++ p->last_interrupted=RUNNING;
++ /* The jiffy of last execution */
++ p->last_ran_j=jiffies;
+ * Make sure we do not leak PI boosting priority to the child:
+ static inline int interactive_sleep(enum sleep_type sleep_type)
+ return (sleep_type == SLEEP_INTERACTIVE ||
+ static inline int interactive_sleep(enum sleep_type sleep_type)
+ return (sleep_type == SLEEP_INTERACTIVE ||
+ * schedule() is the main scheduler function.
+ asmlinkage void __sched schedule(void)
+ struct task_struct *prev, *next;
+ struct prio_array *array;
+ struct list_head *queue;
+ unsigned long long now;
+- unsigned long run_time;
+ int cpu, idx, new_prio;
+ long *switch_count;
+ * Test if we are atomic. Since do_exit() needs to call into
+ * schedule() is the main scheduler function.
++ #ifdef CONFIG_CHOPSTIX
++ extern void (*rec_event)(void *,unsigned int);
++ struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned int count;
++ unsigned int reason;
++ };
+ asmlinkage void __sched schedule(void)
+ struct task_struct *prev, *next;
+ struct prio_array *array;
+ struct list_head *queue;
+ unsigned long long now;
++ unsigned long run_time, diff;
+ int cpu, idx, new_prio;
+ long *switch_count;
++ int sampling_reason;
+ * Test if we are atomic. Since do_exit() needs to call into
+ switch_count = &prev->nivcsw;
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ switch_count = &prev->nvcsw;
+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+ unlikely(signal_pending(prev))))
+ prev->state = TASK_RUNNING;
+ switch_count = &prev->nivcsw;
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ switch_count = &prev->nvcsw;
+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+ unlikely(signal_pending(prev))))
+ prev->state = TASK_RUNNING;
+ vx_uninterruptible_inc(prev);
+ deactivate_task(prev, rq);
+ vx_uninterruptible_inc(prev);
+ deactivate_task(prev, rq);
++ #ifdef CONFIG_CHOPSTIX
++ /* An uninterruptible process just yielded. Record the current jiffy */
++ if (prev->state & TASK_UNINTERRUPTIBLE) {
++ prev->last_interrupted=jiffies;
++ /* An interruptible process just yielded, or it got preempted.
++ * Mark it as interruptible */
++ else if (prev->state & TASK_INTERRUPTIBLE) {
++ prev->last_interrupted=INTERRUPTIBLE;
+ prev->sleep_avg = 0;
+ prev->timestamp = prev->last_ran = now;
+ sched_info_switch(prev, next);
+ if (likely(prev != next)) {
+ next->timestamp = next->last_ran = now;
+ prev->sleep_avg = 0;
+ prev->timestamp = prev->last_ran = now;
++ #ifdef CONFIG_CHOPSTIX
++ /* Record the delay only if the Chopstix module is loaded */
++ if (rec_event) {
++ prev->last_ran_j = jiffies;
++ if (next->last_interrupted!=INTERRUPTIBLE) {
++ if (next->last_interrupted!=RUNNING) {
++ diff = (jiffies-next->last_interrupted);
++ sampling_reason = 0; /* BLOCKING */
++ diff = jiffies-next->last_ran_j;
++ sampling_reason = 1; /* PREEMPTION */
++ if (diff >= HZ/10) {
++ struct event event;
++ struct event_spec espec;
++ struct pt_regs *regs;
++ regs = task_pt_regs(current);
++ espec.reason = sampling_reason;
++ event.event_data=&espec;
++ espec.pc=regs->eip;
++ event.event_type=2;
++ /* index in the event array currently set up */
++ /* make sure the counters are loaded in the order we want them to show up */
++ (*rec_event)(&event, diff);
++ /* next has been elected to run */
++ next->last_interrupted=0;
+ sched_info_switch(prev, next);
+ if (likely(prev != next)) {
+ next->timestamp = next->last_ran = now;
+ jiffies_to_timespec(p->policy == SCHED_FIFO ?
+ 0 : task_timeslice(p), &t);
+ read_unlock(&tasklist_lock);
+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+ jiffies_to_timespec(p->policy == SCHED_FIFO ?
+ 0 : task_timeslice(p), &t);
+ read_unlock(&tasklist_lock);
+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++ #ifdef CONFIG_CHOPSTIX
++ void (*rec_event)(void *,unsigned int) = NULL;
++ /* To support safe calling from asm */
++ asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
++ struct pt_regs *regs;
++ struct event_spec *es = event_signature_in->event_data;
++ regs = task_pt_regs(current);
++ event_signature_in->task=current;
++ event_signature_in->count=1;
++ (*rec_event)(event_signature_in, count);
++ }
++ EXPORT_SYMBOL(rec_event);
++ EXPORT_SYMBOL(in_sched_functions);
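This rejected hunk is the heart of the scheduling probe. At context-switch time it looks at the incoming task (next): if last_interrupted holds a jiffy stamp, the task had blocked uninterruptibly and diff is its blocking time (reason 0, BLOCKING); if the task was merely runnable, diff is the time since it last ran (reason 1, PREEMPTION). Only delays of at least HZ/10 jiffies, i.e. 100 ms at any HZ, are reported, and the delay itself becomes the event weight: with HZ=250, a task that blocked at jiffy 1000 and is rescheduled at jiffy 1060 gives diff = 60 jiffies (240 ms), well over the threshold of 25, so an event_type-2 event of weight 60 is recorded against the current program counter.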
diff -Nurb linux-2.6.27-590/mm/memory.c linux-2.6.27-591/mm/memory.c
--- linux-2.6.27-590/mm/memory.c 2010-01-29 16:29:48.000000000 -0500
+++ linux-2.6.27-591/mm/memory.c 2010-01-29 16:30:22.000000000 -0500
#include <linux/swapops.h>
#include <linux/elf.h>
+#include <linux/arrays.h>
#include "internal.h"
@@ -2690,6 +2691,15 @@
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+ unsigned long pc;
+ unsigned long dcookie;
+ unsigned count;
+ unsigned char reason;
+};
* By the time we get here, we already hold the mm semaphore
@@ -2719,6 +2729,24 @@
+#ifdef CONFIG_CHOPSTIX
+ if (rec_event) {
+ struct event event;
+ struct event_spec espec;
+ struct pt_regs *regs;
+ unsigned long pc;
+ regs = task_pt_regs(current);
+ pc = regs->ip & (unsigned int) ~4095;
+ espec.pc = pc;
+ espec.reason = 0; /* alloc */
+ event.event_data=&espec;
+ event.task = current;
+ event.event_type=5;
+ (*rec_event)(&event, 1);
+ }
+#endif
return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
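The handle_mm_fault probe counts page faults as allocation events (event_type 5, reason 0) and masks the faulting program counter down to its 4 KiB page: ANDing with ~4095 clears the low 12 bits, so every fault taken from the same page of code lands in the same hash bucket, which suits the bounded arrays.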
diff -Nurb linux-2.6.27-590/mm/slab.c linux-2.6.27-591/mm/slab.c
--- linux-2.6.27-590/mm/slab.c 2010-01-29 16:29:48.000000000 -0500
+++ linux-2.6.27-591/mm/slab.c 2010-01-29 16:30:22.000000000 -0500
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
+#include <linux/arrays.h>
#include <linux/debugobjects.h>
#include <asm/cacheflush.h>
+extern void (*rec_event)(void *,unsigned int);
+struct event_spec {
+ unsigned long pc;
+ unsigned long dcookie;
+ unsigned count;
+ unsigned char reason;
+};
@@ -3469,6 +3478,19 @@
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+#ifdef CONFIG_CHOPSTIX
+ if (rec_event && objp) {
+ struct event event;
+ struct event_spec espec;
+ espec.reason = 0; /* alloc */
+ event.event_data=&espec;
+ event.task = current;
+ event.event_type=5;
+ (*rec_event)(&event, cachep->buffer_size);
+ }
+#endif
if (unlikely((flags & __GFP_ZERO) && objp))
memset(objp, 0, obj_size(cachep));
@@ -3578,12 +3600,26 @@
* Release an obj back to its cache. If the obj has a constructed state, it must
* be in this state _before_ it is released. Called with disabled ints.
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
struct array_cache *ac = cpu_cache_get(cachep);
- objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+ objp = cache_free_debugcheck(cachep, objp, caller);
+ #ifdef CONFIG_CHOPSTIX
+ if (rec_event && objp) {
+ struct event event;
+ struct event_spec espec;
+ espec.reason = 1; /* free */
+ event.event_data=&espec;
+ event.task = current;
+ event.event_type=4;
+ (*rec_event)(&event, cachep->buffer_size);
+ }
+ #endif
vx_slab_free(cachep);
@@ -3714,6 +3750,7 @@
struct kmem_cache *cachep;
/* If you want to save a few bytes .text space: replace
@@ -3741,10 +3778,17 @@
EXPORT_SYMBOL(__kmalloc_track_caller);
+#ifdef CONFIG_CHOPSTIX
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ return __do_kmalloc(size, flags, __builtin_return_address(0));
+}
+#else
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc(size, flags, NULL);
}
+#endif
EXPORT_SYMBOL(__kmalloc);
@@ -3764,7 +3808,7 @@
debug_check_no_locks_freed(objp, obj_size(cachep));
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, obj_size(cachep));
- __cache_free(cachep, objp);
+ __cache_free(cachep, objp, __builtin_return_address(0));
local_irq_restore(flags);
EXPORT_SYMBOL(kmem_cache_free);
@@ -3790,7 +3834,7 @@
c = virt_to_cache(objp);
debug_check_no_locks_freed(objp, obj_size(c));
debug_check_no_obj_freed(objp, obj_size(c));
- __cache_free(c, (void *)objp);
+ __cache_free(c, (void *)objp, __builtin_return_address(0));
local_irq_restore(flags);
EXPORT_SYMBOL(kfree);
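Net effect of the slab changes: allocations (event_type 5) and frees (event_type 4) are both reported with cachep->buffer_size as the weight, so the arrays accumulate bytes per event class rather than call counts. To make frees attributable, __cache_free grows a caller argument that kmem_cache_free and kfree fill with __builtin_return_address(0), and under CONFIG_CHOPSTIX __kmalloc is compiled with caller tracking as well, while the NULL-caller variant remains for the plain configuration.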