1 From 6f68de5f723e57e2709b468f55914fd0f963ce90 Mon Sep 17 00:00:00 2001
2 From: S.Çağlar Onur <caglar@cs.princeton.edu>
3 Date: Tue, 7 Dec 2010 11:09:43 -0500
4 Subject: [PATCH] linux-2.6-591-chopstix-intern.patch
6 block/blk-core.c: In function '__generic_make_request':
7 block/blk-core.c:1557: warning: assignment makes integer from pointer without a cast
8 fs/exec.c: In function 'open_exec':
9 fs/exec.c:698: warning: ISO C90 forbids mixed declarations and code
10 fs/bio.c: In function 'bio_endio':
11 fs/bio.c:1440: warning: assignment makes integer from pointer without a cast
12 mm/slab.c: In function '__cache_alloc':
13 mm/slab.c:3513: warning: assignment makes integer from pointer without a cast
14 mm/slab.c: In function '__cache_free':
15 mm/slab.c:3646: warning: assignment makes integer from pointer without a cast
19 arch/x86/kernel/asm-offsets_32.c | 25 +++++++++++
20 arch/x86/kernel/entry_32.S | 28 +++++++++++++
21 arch/x86/mm/fault.c | 10 +++++
22 block/blk-core.c | 29 +++++++++++++
23 drivers/oprofile/cpu_buffer.c | 30 ++++++++++++++
24 fs/bio.c | 31 ++++++++++++++
26 include/linux/arrays.h | 39 ++++++++++++++++++
27 include/linux/mutex.h | 2 +-
28 include/linux/sched.h | 5 ++
29 kernel/mutex.c | 55 +++++++++++++++++++++++++
30 kernel/sched.c | 82 +++++++++++++++++++++++++++++++++++++-
31 mm/memory.c | 29 +++++++++++++
32 mm/slab.c | 54 +++++++++++++++++++++++--
33 15 files changed, 429 insertions(+), 6 deletions(-)
34 create mode 100644 include/linux/arrays.h
36 diff --git a/arch/Kconfig b/arch/Kconfig
37 index cdea504..608c64d 100644
40 @@ -27,6 +27,14 @@ config OPROFILE_EVENT_MULTIPLEX
45 + bool "Chopstix (PlanetLab)"
46 + depends on MODULES && OPROFILE
48 + Chopstix allows you to monitor various events by summarizing them
49 + in lossy data structures and transferring these data structures
50 + into user space. If in doubt, say "N".
55 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
56 index dfdbf64..29c79b8 100644
57 --- a/arch/x86/kernel/asm-offsets_32.c
58 +++ b/arch/x86/kernel/asm-offsets_32.c
60 #include <linux/signal.h>
61 #include <linux/personality.h>
62 #include <linux/suspend.h>
63 +#include <linux/arrays.h>
64 #include <linux/kbuild.h>
65 #include <asm/ucontext.h>
66 #include <asm/sigframe.h>
68 #include <linux/lguest.h>
69 #include "../../../drivers/lguest/lg.h"
71 +#ifdef CONFIG_CHOPSTIX
72 +#define STACKOFFSET(sym, str, mem) \
73 + DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
77 + unsigned long dcookie;
79 + unsigned int number;
83 /* workaround for a warning with -Wmissing-prototypes */
86 @@ -51,6 +64,18 @@ void foo(void)
87 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
90 +#ifdef CONFIG_CHOPSTIX
91 + STACKOFFSET(TASK_thread, task_struct, thread);
92 + STACKOFFSET(THREAD_esp, thread_struct, sp);
93 + STACKOFFSET(EVENT_event_data, event, event_data);
94 + STACKOFFSET(EVENT_task, event, task);
95 + STACKOFFSET(EVENT_event_type, event, event_type);
96 + STACKOFFSET(SPEC_number, event_spec, number);
97 + DEFINE(EVENT_SIZE, sizeof(struct event));
98 + DEFINE(SPEC_SIZE, sizeof(struct event_spec));
99 + DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
102 OFFSET(TI_task, thread_info, task);
103 OFFSET(TI_exec_domain, thread_info, exec_domain);
104 OFFSET(TI_flags, thread_info, flags);
105 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
106 index 8b8db35..fc82d32 100644
107 --- a/arch/x86/kernel/entry_32.S
108 +++ b/arch/x86/kernel/entry_32.S
109 @@ -538,6 +538,34 @@ ENTRY(system_call)
110 cmpl $(nr_syscalls), %eax
113 +#ifdef CONFIG_CHOPSTIX
114 + /* Move Chopstix syscall probe here */
115 + /* Save and clobber: eax, ecx, ebp */
120 + subl $SPEC_EVENT_SIZE, %esp
121 + movl rec_event, %ecx
124 + # struct event is first, just below %ebp
125 + movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
126 + leal -SPEC_EVENT_SIZE(%ebp), %eax
127 + movl %eax, EVENT_event_data(%ebp)
128 + movl $7, EVENT_event_type(%ebp)
129 + movl rec_event, %edx
131 + leal -EVENT_SIZE(%ebp), %eax
135 + addl $SPEC_EVENT_SIZE, %esp
141 call *sys_call_table(,%eax,4)
142 movl %eax,PT_EAX(%esp) # store the return value
144 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
145 index b899fb7..c827e81 100644
146 --- a/arch/x86/mm/fault.c
147 +++ b/arch/x86/mm/fault.c
148 @@ -65,6 +65,16 @@ static inline int notify_page_fault(struct pt_regs *regs)
152 +#ifdef CONFIG_CHOPSTIX
153 +extern void (*rec_event)(void *,unsigned int);
156 + unsigned long dcookie;
158 + unsigned char reason;
165 diff --git a/block/blk-core.c b/block/blk-core.c
166 index 48dbd8d..94030b1 100644
167 --- a/block/blk-core.c
168 +++ b/block/blk-core.c
170 #include <linux/writeback.h>
171 #include <linux/task_io_accounting_ops.h>
172 #include <linux/fault-inject.h>
173 +#include <linux/arrays.h>
175 #define CREATE_TRACE_POINTS
176 #include <trace/events/block.h>
180 +#ifdef CONFIG_CHOPSTIX
181 +extern void (*rec_event)(void *,unsigned int);
184 + unsigned long dcookie;
186 + unsigned char reason;
190 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
191 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
192 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
193 @@ -1568,6 +1579,24 @@ static inline void __generic_make_request(struct bio *bio)
195 trace_block_bio_queue(q, bio);
197 +#ifdef CONFIG_CHOPSTIX
199 + struct event event;
200 + struct event_spec espec;
203 + espec.reason = 0;/*request */
205 + eip = bio->bi_end_io;
206 + event.event_data=&espec;
208 + event.event_type=3;
209 + /* index in the event array currently set up */
210 + /* make sure the counters are loaded in the order we want them to show up*/
211 + (*rec_event)(&event, bio->bi_size);
215 ret = q->make_request_fn(q, bio);
218 diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
219 index 9e0ef46..f6217fd 100644
220 --- a/drivers/oprofile/cpu_buffer.c
221 +++ b/drivers/oprofile/cpu_buffer.c
223 #include <linux/sched.h>
224 #include <linux/oprofile.h>
225 #include <linux/errno.h>
226 +#include <linux/arrays.h>
228 #include "event_buffer.h"
229 #include "cpu_buffer.h"
230 @@ -286,6 +287,17 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
231 cpu_buf->tracing = 0;
234 +#ifdef CONFIG_CHOPSTIX
238 + unsigned long dcookie;
242 +extern void (*rec_event)(void *,unsigned int);
246 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
247 unsigned long event, int is_kernel)
248 @@ -328,7 +340,25 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
249 pc = ESCAPE_CODE; /* as this causes an early return. */
252 +#ifdef CONFIG_CHOPSTIX
255 + struct event_spec espec;
256 + esig.task = current;
259 + esig.event_data = &espec;
260 + esig.event_type = event; /* index in the event array currently set up */
261 + /* make sure the counters are loaded in the order we want them to show up*/
262 + (*rec_event)(&esig, 1);
265 + __oprofile_add_ext_sample(pc, regs, event, is_kernel);
268 __oprofile_add_ext_sample(pc, regs, event, is_kernel);
274 diff --git a/fs/bio.c b/fs/bio.c
275 index 06f71fc..56fc42d 100644
279 #include <linux/mempool.h>
280 #include <linux/workqueue.h>
281 #include <scsi/sg.h> /* for struct sg_iovec */
282 +#include <linux/arrays.h>
284 #include <trace/events/block.h>
286 @@ -48,6 +49,7 @@ struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
292 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
293 * IO code that does not need private memory pools.
294 @@ -1408,6 +1410,17 @@ void bio_check_pages_dirty(struct bio *bio)
298 +#ifdef CONFIG_CHOPSTIX
301 + unsigned long dcookie;
303 + unsigned char reason;
306 +extern void (*rec_event)(void *,unsigned int);
310 * bio_endio - end I/O on a bio
312 @@ -1429,6 +1442,24 @@ void bio_endio(struct bio *bio, int error)
313 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
316 +#ifdef CONFIG_CHOPSTIX
318 + struct event event;
319 + struct event_spec espec;
322 + espec.reason = 1;/*response */
324 + eip = bio->bi_end_io;
325 + event.event_data=&espec;
327 + event.event_type=3;
328 + /* index in the event array currently set up */
329 + /* make sure the counters are loaded in the order we want them to show up*/
330 + (*rec_event)(&event, bio->bi_size);
335 bio->bi_end_io(bio, error);
337 diff --git a/fs/exec.c b/fs/exec.c
338 index 7fdbf49..6f2d772 100644
342 #include <linux/fdtable.h>
343 #include <linux/mm.h>
344 #include <linux/stat.h>
345 +#include <linux/dcookies.h>
346 #include <linux/fcntl.h>
347 #include <linux/smp_lock.h>
348 #include <linux/swap.h>
349 @@ -735,6 +736,13 @@ struct file *open_exec(const char *name)
353 +#ifdef CONFIG_CHOPSTIX
354 + unsigned long cookie;
355 + extern void (*rec_event)(void *, unsigned int);
356 + if (rec_event && !(file->f_path.dentry->d_flags & DCACHE_COOKIE))
357 + get_dcookie(&file->f_path, &cookie);
363 diff --git a/include/linux/arrays.h b/include/linux/arrays.h
365 index 0000000..7641a3c
367 +++ b/include/linux/arrays.h
369 +#ifndef __ARRAYS_H__
370 +#define __ARRAYS_H__
371 +#include <linux/list.h>
373 +#define SAMPLING_METHOD_DEFAULT 0
374 +#define SAMPLING_METHOD_LOG 1
376 +#define DEFAULT_ARRAY_SIZE 2048
378 +/* Every probe has an array handler */
380 +/* XXX - Optimize this structure */
382 +extern void (*rec_event)(void *,unsigned int);
383 +struct array_handler {
384 + struct list_head link;
385 + unsigned int (*hash_func)(void *);
386 + unsigned int (*sampling_func)(void *,int,void *);
387 + unsigned short size;
388 + unsigned int threshold;
389 + unsigned char **expcount;
390 + unsigned int sampling_method;
391 + unsigned int **arrays;
392 + unsigned int arraysize;
393 + unsigned int num_samples[2];
394 + void **epoch_samples; /* size-sized lists of samples */
395 + unsigned int (*serialize)(void *, void *);
396 + unsigned char code[5];
397 + unsigned int last_threshold;
401 + struct list_head link;
403 + unsigned int count;
404 + unsigned int event_type;
405 + struct task_struct *task;
408 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
409 index 878cab4..6c21914 100644
410 --- a/include/linux/mutex.h
411 +++ b/include/linux/mutex.h
412 @@ -50,7 +50,7 @@ struct mutex {
414 spinlock_t wait_lock;
415 struct list_head wait_list;
416 -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
417 +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) || defined(CONFIG_CHOPSTIX)
418 struct thread_info *owner;
420 #ifdef CONFIG_DEBUG_MUTEXES
421 diff --git a/include/linux/sched.h b/include/linux/sched.h
422 index b0cb58b..45f69c3 100644
423 --- a/include/linux/sched.h
424 +++ b/include/linux/sched.h
425 @@ -1398,6 +1398,11 @@ struct task_struct {
426 cputime_t utime, stime, utimescaled, stimescaled;
428 cputime_t prev_utime, prev_stime;
430 + #ifdef CONFIG_CHOPSTIX
431 + unsigned long last_interrupted, last_ran_j;
434 unsigned long nvcsw, nivcsw; /* context switch counts */
435 struct timespec start_time; /* monotonic time */
436 struct timespec real_start_time; /* boot based time */
437 diff --git a/kernel/mutex.c b/kernel/mutex.c
438 index e04aa45..196ac04 100644
442 #include <linux/spinlock.h>
443 #include <linux/interrupt.h>
444 #include <linux/debug_locks.h>
445 +#include <linux/arrays.h>
447 +#ifdef CONFIG_CHOPSTIX
450 + unsigned long dcookie;
452 + unsigned char reason;
457 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
458 @@ -49,6 +59,9 @@ void
459 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
461 atomic_set(&lock->count, 1);
462 +#ifdef CONFIG_CHOPSTIX
463 + lock->owner = NULL;
465 spin_lock_init(&lock->wait_lock);
466 INIT_LIST_HEAD(&lock->wait_list);
467 mutex_clear_owner(lock);
468 @@ -254,6 +267,25 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
470 __set_task_state(task, state);
472 +#if 0 && CONFIG_CHOPSTIX
475 + struct event event;
476 + struct event_spec espec;
477 + struct task_struct *p = lock->owner->task;
479 + espec.reason = 0; /* lock */
480 + event.event_data = &espec;
483 + event.event_type = 5;
484 + (*rec_event)(&event, 1);
491 /* didnt get the lock, go to sleep: */
492 spin_unlock_mutex(&lock->wait_lock, flags);
493 preempt_enable_no_resched();
494 @@ -268,6 +300,10 @@ done:
495 mutex_remove_waiter(lock, &waiter, current_thread_info());
496 mutex_set_owner(lock);
498 +#ifdef CONFIG_CHOPSTIX
499 + lock->owner = task_thread_info(task);
502 /* set it to 0 if there are no waiters left: */
503 if (likely(list_empty(&lock->wait_list)))
504 atomic_set(&lock->count, 0);
505 @@ -338,6 +374,25 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
507 debug_mutex_wake_waiter(lock, waiter);
509 +#if 0 && CONFIG_CHOPSTIX
512 + struct event event;
513 + struct event_spec espec;
514 + struct task_struct *p = lock->owner->task;
516 + espec.reason = 1; /* unlock */
517 + event.event_data = &espec;
520 + event.event_type = 5;
521 + (*rec_event)(&event, 1);
528 wake_up_process(waiter->task);
531 diff --git a/kernel/sched.c b/kernel/sched.c
532 index 1e90fc0..aa4d3d7 100644
536 * 1998-11-19 Implemented schedule_timeout() and related stuff
537 * by Andrea Arcangeli
538 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
539 - * hybrid priority-list and round-robin design with
540 + * hybrid priority-list and round-robin design with
541 * an array-switch method of distributing timeslices
542 * and per-CPU runqueues. Cleanups and useful suggestions
543 * by Davide Libenzi, preemptible kernel bits by Robert Love.
545 #include <linux/ftrace.h>
546 #include <linux/vs_sched.h>
547 #include <linux/vs_cvirt.h>
548 +#include <linux/arrays.h>
551 #include <asm/irq_regs.h>
553 #include "sched_cpupri.h"
554 #include "sched_autogroup.h"
556 +#define INTERRUPTIBLE -1
559 #define CREATE_TRACE_POINTS
560 #include <trace/events/sched.h>
562 @@ -2670,6 +2674,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
564 spin_lock(&rq->lock);
566 +#ifdef CONFIG_CHOPSTIX
567 + p->last_ran_j = jiffies;
568 + p->last_interrupted = INTERRUPTIBLE;
571 * We migrated the task without holding either rq->lock, however
572 * since the task is not on the task list itself, nobody else
573 @@ -5988,6 +5996,30 @@ pick_next_task(struct rq *rq)
577 +#ifdef CONFIG_CHOPSTIX
578 +void (*rec_event)(void *,unsigned int) = NULL;
579 +EXPORT_SYMBOL(rec_event);
580 +EXPORT_SYMBOL(in_sched_functions);
584 + unsigned long dcookie;
585 + unsigned int count;
586 + unsigned int reason;
589 +/* To support safe calling from asm */
590 +asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
591 + struct pt_regs *regs;
592 + struct event_spec *es = event_signature_in->event_data;
593 + regs = task_pt_regs(current);
594 + event_signature_in->task=current;
596 + event_signature_in->count=1;
597 + (*rec_event)(event_signature_in, count);
602 * schedule() is the main scheduler function.
604 @@ -6034,6 +6066,54 @@ need_resched_nonpreemptible:
605 next = pick_next_task(rq);
607 if (likely(prev != next)) {
609 +#ifdef CONFIG_CHOPSTIX
610 + /* Run only if the Chopstix module so decrees it */
612 + unsigned long diff;
613 + int sampling_reason;
614 + prev->last_ran_j = jiffies;
615 + if (next->last_interrupted!=INTERRUPTIBLE) {
616 + if (next->last_interrupted!=RUNNING) {
617 + diff = (jiffies-next->last_interrupted);
618 + sampling_reason = 0;/* BLOCKING */
621 + diff = jiffies-next->last_ran_j;
622 + sampling_reason = 1;/* PREEMPTION */
625 + if (diff >= HZ/10) {
626 + struct event event;
627 + struct event_spec espec;
628 + struct pt_regs *regs;
629 + regs = task_pt_regs(current);
631 + espec.reason = sampling_reason;
632 + event.event_data=&espec;
635 + event.event_type=2;
636 + /* index in the event array currently set up */
637 + /* make sure the counters are loaded in the order we want them to show up*/
638 + (*rec_event)(&event, diff);
641 + /* next has been elected to run */
642 + next->last_interrupted=0;
644 + /* An uninterruptible process just yielded. Record the current jiffy */
645 + if (prev->state & TASK_UNINTERRUPTIBLE) {
646 + prev->last_interrupted=jiffies;
648 + /* An interruptible process just yielded, or it got preempted.
649 + * Mark it as interruptible */
650 + else if (prev->state & TASK_INTERRUPTIBLE) {
651 + prev->last_interrupted=INTERRUPTIBLE;
656 sched_info_switch(prev, next);
657 perf_event_task_sched_out(prev, next);
659 diff --git a/mm/memory.c b/mm/memory.c
660 index dbd2c19..7f3667d 100644
664 #include <linux/swapops.h>
665 #include <linux/elf.h>
666 // #include <linux/vs_memory.h>
667 +#include <linux/arrays.h>
670 #include <asm/pgalloc.h>
671 @@ -3168,6 +3169,16 @@ out:
675 +#ifdef CONFIG_CHOPSTIX
676 +extern void (*rec_event)(void *,unsigned int);
679 + unsigned long dcookie;
681 + unsigned char reason;
686 * By the time we get here, we already hold the mm semaphore
688 @@ -3213,6 +3224,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
692 +#ifdef CONFIG_CHOPSTIX
694 + struct event event;
695 + struct event_spec espec;
696 + struct pt_regs *regs;
698 + regs = task_pt_regs(current);
699 + pc = regs->ip & (unsigned int) ~4095;
701 + espec.reason = 0; /* alloc */
702 + event.event_data=&espec;
703 + event.task = current;
705 + event.event_type = 6;
706 + (*rec_event)(&event, 1);
710 return handle_pte_fault(mm, vma, address, pte, pmd, flags);
713 diff --git a/mm/slab.c b/mm/slab.c
714 index c3ceb66..ad2f1a9 100644
718 #include <linux/fault-inject.h>
719 #include <linux/rtmutex.h>
720 #include <linux/reciprocal_div.h>
721 +#include <linux/arrays.h>
722 #include <linux/debugobjects.h>
723 #include <linux/kmemcheck.h>
724 #include <linux/memory.h>
725 @@ -253,6 +254,16 @@ struct slab_rcu {
729 +#ifdef CONFIG_CHOPSTIX
730 +extern void (*rec_event)(void *,unsigned int);
733 + unsigned long dcookie;
735 + unsigned char reason;
742 @@ -3497,6 +3508,19 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
743 kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
746 +#ifdef CONFIG_CHOPSTIX
747 + if (rec_event && objp) {
748 + struct event event;
749 + struct event_spec espec;
751 + espec.reason = 0; /* alloc */
752 + event.event_data=&espec;
753 + event.task = current;
755 + event.event_type=4;
756 + (*rec_event)(&event, cachep->buffer_size);
761 kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
762 @@ -3609,13 +3633,28 @@ free_done:
763 * Release an obj back to its cache. If the obj has a constructed state, it must
764 * be in this state _before_ it is released. Called with disabled ints.
766 -static inline void __cache_free(struct kmem_cache *cachep, void *objp)
767 +static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
769 struct array_cache *ac = cpu_cache_get(cachep);
772 kmemleak_free_recursive(objp, cachep->flags);
773 - objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
774 + objp = cache_free_debugcheck(cachep, objp, caller);
776 +#ifdef CONFIG_CHOPSTIX
777 + if (rec_event && objp) {
778 + struct event event;
779 + struct event_spec espec;
781 + espec.reason = 1; /* free */
782 + event.event_data = &espec;
783 + event.task = current;
785 + event.event_type = 4;
786 + (*rec_event)(&event, cachep->buffer_size);
790 vx_slab_free(cachep);
792 kmemcheck_slab_free(cachep, objp, obj_size(cachep));
793 @@ -3817,10 +3856,17 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
794 EXPORT_SYMBOL(__kmalloc_track_caller);
797 +#ifdef CONFIG_CHOPSTIX
798 +void *__kmalloc(size_t size, gfp_t flags)
800 + return __do_kmalloc(size, flags, __builtin_return_address(0));
803 void *__kmalloc(size_t size, gfp_t flags)
805 return __do_kmalloc(size, flags, NULL);
808 EXPORT_SYMBOL(__kmalloc);
811 @@ -3840,7 +3886,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
812 debug_check_no_locks_freed(objp, obj_size(cachep));
813 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
814 debug_check_no_obj_freed(objp, obj_size(cachep));
815 - __cache_free(cachep, objp);
816 + __cache_free(cachep, objp,__builtin_return_address(0));
817 local_irq_restore(flags);
819 trace_kmem_cache_free(_RET_IP_, objp);
820 @@ -3870,7 +3916,7 @@ void kfree(const void *objp)
821 c = virt_to_cache(objp);
822 debug_check_no_locks_freed(objp, obj_size(c));
823 debug_check_no_obj_freed(objp, obj_size(c));
824 - __cache_free(c, (void *)objp);
825 + __cache_free(c, (void *)objp,__builtin_return_address(0));
826 local_irq_restore(flags);
828 EXPORT_SYMBOL(kfree);