1 From 6f68de5f723e57e2709b468f55914fd0f963ce90 Mon Sep 17 00:00:00 2001
2 From: S.Çağlar Onur <caglar@cs.princeton.edu>
3 Date: Tue, 7 Dec 2010 11:09:43 -0500
4 Subject: [PATCH] linux-2.6-591-chopstix-intern.patch
8 arch/x86/kernel/asm-offsets_32.c | 25 +++++++++++
9 arch/x86/kernel/entry_32.S | 28 +++++++++++++
10 arch/x86/mm/fault.c | 10 +++++
11 block/blk-core.c | 29 +++++++++++++
12 drivers/oprofile/cpu_buffer.c | 30 ++++++++++++++
13 fs/bio.c | 31 ++++++++++++++
15 include/linux/arrays.h | 39 ++++++++++++++++++
16 include/linux/mutex.h | 2 +-
17 include/linux/sched.h | 5 ++
18 kernel/mutex.c | 55 +++++++++++++++++++++++++
19 kernel/sched.c | 82 +++++++++++++++++++++++++++++++++++++-
20 mm/memory.c | 29 +++++++++++++
21 mm/slab.c | 54 +++++++++++++++++++++++--
22 15 files changed, 429 insertions(+), 6 deletions(-)
23 create mode 100644 include/linux/arrays.h
25 diff --git a/arch/Kconfig b/arch/Kconfig
26 index b15fd1c..16a5734 100644
29 @@ -41,6 +41,14 @@ config OPROFILE_EVENT_MULTIPLEX
34 + bool "Chopstix (PlanetLab)"
35 + depends on MODULES && OPROFILE
37 + Chopstix allows you to monitor various events by summarizing them
38 + in lossy data structures and transferring these data structures
39 + into user space. If in doubt, say "N".
44 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
45 index dfdbf64..29c79b8 100644
46 --- a/arch/x86/kernel/asm-offsets_32.c
47 +++ b/arch/x86/kernel/asm-offsets_32.c
49 #include <linux/signal.h>
50 #include <linux/personality.h>
51 #include <linux/suspend.h>
52 +#include <linux/arrays.h>
53 #include <linux/kbuild.h>
54 #include <asm/ucontext.h>
55 #include <asm/sigframe.h>
57 #include <linux/lguest.h>
58 #include "../../../drivers/lguest/lg.h"
60 +#ifdef CONFIG_CHOPSTIX
61 +#define STACKOFFSET(sym, str, mem) \
62 + DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
66 + unsigned long dcookie;
68 + unsigned int number;
72 /* workaround for a warning with -Wmissing-prototypes */
75 @@ -51,6 +64,18 @@ void foo(void)
76 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
79 +#ifdef CONFIG_CHOPSTIX
80 + STACKOFFSET(TASK_thread, task_struct, thread);
81 + STACKOFFSET(THREAD_esp, thread_struct, sp);
82 + STACKOFFSET(EVENT_event_data, event, event_data);
83 + STACKOFFSET(EVENT_task, event, task);
84 + STACKOFFSET(EVENT_event_type, event, event_type);
85 + STACKOFFSET(SPEC_number, event_spec, number);
86 + DEFINE(EVENT_SIZE, sizeof(struct event));
87 + DEFINE(SPEC_SIZE, sizeof(struct event_spec));
88 + DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
91 OFFSET(TI_task, thread_info, task);
92 OFFSET(TI_exec_domain, thread_info, exec_domain);
93 OFFSET(TI_flags, thread_info, flags);
94 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
95 index 21feb03..1109aac 100644
96 --- a/arch/x86/kernel/entry_32.S
97 +++ b/arch/x86/kernel/entry_32.S
98 @@ -526,6 +526,34 @@ ENTRY(system_call)
99 cmpl $(nr_syscalls), %eax
102 +#ifdef CONFIG_CHOPSTIX
103 + /* Move Chopstix syscall probe here */
104 + /* Save and clobber: eax, ecx, ebp */
109 + subl $SPEC_EVENT_SIZE, %esp
110 + movl rec_event, %ecx
113 + # struct event is first, just below %ebp
114 + movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
115 + leal -SPEC_EVENT_SIZE(%ebp), %eax
116 + movl %eax, EVENT_event_data(%ebp)
117 + movl $7, EVENT_event_type(%ebp)
118 + movl rec_event, %edx
120 + leal -EVENT_SIZE(%ebp), %eax
124 + addl $SPEC_EVENT_SIZE, %esp
130 call *sys_call_table(,%eax,4)
131 movl %eax,PT_EAX(%esp) # store the return value
133 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
134 index a948561..76d32a6 100644
135 --- a/arch/x86/mm/fault.c
136 +++ b/arch/x86/mm/fault.c
137 @@ -62,6 +62,16 @@ static inline int notify_page_fault(struct pt_regs *regs)
141 +#ifdef CONFIG_CHOPSTIX
142 +extern void (*rec_event)(void *,unsigned int);
145 + unsigned long dcookie;
147 + unsigned char reason;
154 diff --git a/block/blk-core.c b/block/blk-core.c
155 index 5e1b914..2260822 100644
156 --- a/block/blk-core.c
157 +++ b/block/blk-core.c
159 #include <linux/writeback.h>
160 #include <linux/task_io_accounting_ops.h>
161 #include <linux/fault-inject.h>
162 +#include <linux/arrays.h>
164 #define CREATE_TRACE_POINTS
165 #include <trace/events/block.h>
169 +#ifdef CONFIG_CHOPSTIX
170 +extern void (*rec_event)(void *,unsigned int);
173 + unsigned long dcookie;
175 + unsigned char reason;
179 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
180 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
181 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
182 @@ -1535,6 +1546,24 @@ static inline void __generic_make_request(struct bio *bio)
184 trace_block_bio_queue(q, bio);
186 +#ifdef CONFIG_CHOPSTIX
188 + struct event event;
189 + struct event_spec espec;
192 + espec.reason = 0; /* request */
194 + eip = bio->bi_end_io;
195 + event.event_data=&espec;
197 + event.event_type=3;
198 + /* index in the event array currently set up */
199 + /* make sure the counters are loaded in the order we want them to show up */
200 + (*rec_event)(&event, bio->bi_size);
204 ret = q->make_request_fn(q, bio);
207 diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
208 index de82183..1f1acf6 100644
209 --- a/drivers/oprofile/cpu_buffer.c
210 +++ b/drivers/oprofile/cpu_buffer.c
212 #include <linux/sched.h>
213 #include <linux/oprofile.h>
214 #include <linux/errno.h>
215 +#include <linux/arrays.h>
217 #include "event_buffer.h"
218 #include "cpu_buffer.h"
219 @@ -288,6 +289,17 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
220 cpu_buf->tracing = 0;
223 +#ifdef CONFIG_CHOPSTIX
227 + unsigned long dcookie;
231 +extern void (*rec_event)(void *,unsigned int);
235 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
236 unsigned long event, int is_kernel)
237 @@ -322,7 +334,25 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
238 int is_kernel = !user_mode(regs);
239 unsigned long pc = profile_pc(regs);
241 +#ifdef CONFIG_CHOPSTIX
244 + struct event_spec espec;
245 + esig.task = current;
248 + esig.event_data = &espec;
249 + esig.event_type = event; /* index in the event array currently set up */
250 + /* make sure the counters are loaded in the order we want them to show up */
251 + (*rec_event)(&esig, 1);
254 + __oprofile_add_ext_sample(pc, regs, event, is_kernel);
257 __oprofile_add_ext_sample(pc, regs, event, is_kernel);
263 diff --git a/fs/bio.c b/fs/bio.c
264 index e10d5b1..db37c70 100644
268 #include <linux/mempool.h>
269 #include <linux/workqueue.h>
270 #include <scsi/sg.h> /* for struct sg_iovec */
271 +#include <linux/arrays.h>
273 #include <trace/events/block.h>
275 @@ -48,6 +49,7 @@ struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
281 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
282 * IO code that does not need private memory pools.
283 @@ -1395,6 +1397,17 @@ void bio_check_pages_dirty(struct bio *bio)
287 +#ifdef CONFIG_CHOPSTIX
290 + unsigned long dcookie;
292 + unsigned char reason;
295 +extern void (*rec_event)(void *,unsigned int);
299 * bio_endio - end I/O on a bio
301 @@ -1416,6 +1429,24 @@ void bio_endio(struct bio *bio, int error)
302 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
305 +#ifdef CONFIG_CHOPSTIX
307 + struct event event;
308 + struct event_spec espec;
311 + espec.reason = 1; /* response */
313 + eip = bio->bi_end_io;
314 + event.event_data=&espec;
316 + event.event_type=3;
317 + /* index in the event array currently set up */
318 + /* make sure the counters are loaded in the order we want them to show up */
319 + (*rec_event)(&event, bio->bi_size);
324 bio->bi_end_io(bio, error);
326 diff --git a/fs/exec.c b/fs/exec.c
327 index f42d519..5ac9745 100644
331 #include <linux/fdtable.h>
332 #include <linux/mm.h>
333 #include <linux/stat.h>
334 +#include <linux/dcookies.h>
335 #include <linux/fcntl.h>
336 #include <linux/smp_lock.h>
337 #include <linux/swap.h>
338 @@ -693,6 +694,13 @@ struct file *open_exec(const char *name)
342 +#ifdef CONFIG_CHOPSTIX
343 + unsigned long cookie;
344 + extern void (*rec_event)(void *, unsigned int);
345 + if (rec_event && !(file->f_path.dentry->d_flags & DCACHE_COOKIE))
346 + get_dcookie(&file->f_path, &cookie);
352 diff --git a/include/linux/arrays.h b/include/linux/arrays.h
354 index 0000000..7641a3c
356 +++ b/include/linux/arrays.h
358 +#ifndef __ARRAYS_H__
359 +#define __ARRAYS_H__
360 +#include <linux/list.h>
362 +#define SAMPLING_METHOD_DEFAULT 0
363 +#define SAMPLING_METHOD_LOG 1
365 +#define DEFAULT_ARRAY_SIZE 2048
367 +/* Every probe has an array handler */
369 +/* XXX - Optimize this structure */
371 +extern void (*rec_event)(void *,unsigned int);
372 +struct array_handler {
373 + struct list_head link;
374 + unsigned int (*hash_func)(void *);
375 + unsigned int (*sampling_func)(void *,int,void *);
376 + unsigned short size;
377 + unsigned int threshold;
378 + unsigned char **expcount;
379 + unsigned int sampling_method;
380 + unsigned int **arrays;
381 + unsigned int arraysize;
382 + unsigned int num_samples[2];
383 + void **epoch_samples; /* size-sized lists of samples */
384 + unsigned int (*serialize)(void *, void *);
385 + unsigned char code[5];
386 + unsigned int last_threshold;
390 + struct list_head link;
392 + unsigned int count;
393 + unsigned int event_type;
394 + struct task_struct *task;
397 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
398 index 878cab4..6c21914 100644
399 --- a/include/linux/mutex.h
400 +++ b/include/linux/mutex.h
401 @@ -50,7 +50,7 @@ struct mutex {
403 spinlock_t wait_lock;
404 struct list_head wait_list;
405 -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
406 +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) || defined(CONFIG_CHOPSTIX)
407 struct thread_info *owner;
409 #ifdef CONFIG_DEBUG_MUTEXES
410 diff --git a/include/linux/sched.h b/include/linux/sched.h
411 index caf30e1..6d60b0f 100644
412 --- a/include/linux/sched.h
413 +++ b/include/linux/sched.h
414 @@ -1351,6 +1351,11 @@ struct task_struct {
415 cputime_t utime, stime, utimescaled, stimescaled;
417 cputime_t prev_utime, prev_stime;
419 + #ifdef CONFIG_CHOPSTIX
420 + unsigned long last_interrupted, last_ran_j;
423 unsigned long nvcsw, nivcsw; /* context switch counts */
424 struct timespec start_time; /* monotonic time */
425 struct timespec real_start_time; /* boot based time */
426 diff --git a/kernel/mutex.c b/kernel/mutex.c
427 index e04aa45..196ac04 100644
431 #include <linux/spinlock.h>
432 #include <linux/interrupt.h>
433 #include <linux/debug_locks.h>
434 +#include <linux/arrays.h>
436 +#ifdef CONFIG_CHOPSTIX
439 + unsigned long dcookie;
441 + unsigned char reason;
446 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
447 @@ -49,6 +59,9 @@ void
448 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
450 atomic_set(&lock->count, 1);
451 +#ifdef CONFIG_CHOPSTIX
452 + lock->owner = NULL;
454 spin_lock_init(&lock->wait_lock);
455 INIT_LIST_HEAD(&lock->wait_list);
456 mutex_clear_owner(lock);
457 @@ -254,6 +267,25 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
459 __set_task_state(task, state);
461 +#if 0 && CONFIG_CHOPSTIX
464 + struct event event;
465 + struct event_spec espec;
466 + struct task_struct *p = lock->owner->task;
468 + espec.reason = 0; /* lock */
469 + event.event_data = &espec;
472 + event.event_type = 5;
473 + (*rec_event)(&event, 1);
480 /* didnt get the lock, go to sleep: */
481 spin_unlock_mutex(&lock->wait_lock, flags);
482 preempt_enable_no_resched();
483 @@ -268,6 +300,10 @@ done:
484 mutex_remove_waiter(lock, &waiter, current_thread_info());
485 mutex_set_owner(lock);
487 +#ifdef CONFIG_CHOPSTIX
488 + lock->owner = task_thread_info(task);
491 /* set it to 0 if there are no waiters left: */
492 if (likely(list_empty(&lock->wait_list)))
493 atomic_set(&lock->count, 0);
494 @@ -338,6 +374,25 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
496 debug_mutex_wake_waiter(lock, waiter);
498 +#if 0 && CONFIG_CHOPSTIX
501 + struct event event;
502 + struct event_spec espec;
503 + struct task_struct *p = lock->owner->task;
505 + espec.reason = 1; /* unlock */
506 + event.event_data = &espec;
509 + event.event_type = 5;
510 + (*rec_event)(&event, 1);
517 wake_up_process(waiter->task);
520 diff --git a/kernel/sched.c b/kernel/sched.c
521 index dd8a4df..345645b 100644
525 * 1998-11-19 Implemented schedule_timeout() and related stuff
526 * by Andrea Arcangeli
527 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
528 - * hybrid priority-list and round-robin design with
529 + * hybrid priority-list and round-robin design with
530 * an array-switch method of distributing timeslices
531 * and per-CPU runqueues. Cleanups and useful suggestions
532 * by Davide Libenzi, preemptible kernel bits by Robert Love.
534 #include <linux/ftrace.h>
535 #include <linux/vs_sched.h>
536 #include <linux/vs_cvirt.h>
537 +#include <linux/arrays.h>
540 #include <asm/irq_regs.h>
542 #include "sched_cpupri.h"
544 +#define INTERRUPTIBLE -1
547 #define CREATE_TRACE_POINTS
548 #include <trace/events/sched.h>
550 @@ -2719,6 +2723,10 @@ static void __sched_fork(struct task_struct *p)
551 INIT_HLIST_HEAD(&p->preempt_notifiers);
554 +#ifdef CONFIG_CHOPSTIX
555 + p->last_ran_j = jiffies;
556 + p->last_interrupted = INTERRUPTIBLE;
559 * We mark the process as running here, but have not actually
560 * inserted it onto the runqueue yet. This guarantees that
561 @@ -5764,6 +5772,30 @@ pick_next_task(struct rq *rq)
565 +#ifdef CONFIG_CHOPSTIX
566 +void (*rec_event)(void *,unsigned int) = NULL;
567 +EXPORT_SYMBOL(rec_event);
568 +EXPORT_SYMBOL(in_sched_functions);
572 + unsigned long dcookie;
573 + unsigned int count;
574 + unsigned int reason;
577 +/* To support safe calling from asm */
578 +asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
579 + struct pt_regs *regs;
580 + struct event_spec *es = event_signature_in->event_data;
581 + regs = task_pt_regs(current);
582 + event_signature_in->task=current;
584 + event_signature_in->count=1;
585 + (*rec_event)(event_signature_in, count);
590 * schedule() is the main scheduler function.
592 @@ -5811,6 +5843,54 @@ need_resched_nonpreemptible:
593 next = pick_next_task(rq);
595 if (likely(prev != next)) {
597 +#ifdef CONFIG_CHOPSTIX
598 + /* Run only if the Chopstix module so decrees it */
600 + unsigned long diff;
601 + int sampling_reason;
602 + prev->last_ran_j = jiffies;
603 + if (next->last_interrupted!=INTERRUPTIBLE) {
604 + if (next->last_interrupted!=RUNNING) {
605 + diff = (jiffies-next->last_interrupted);
606 + sampling_reason = 0;/* BLOCKING */
609 + diff = jiffies-next->last_ran_j;
610 + sampling_reason = 1;/* PREEMPTION */
613 + if (diff >= HZ/10) {
614 + struct event event;
615 + struct event_spec espec;
616 + struct pt_regs *regs;
617 + regs = task_pt_regs(current);
619 + espec.reason = sampling_reason;
620 + event.event_data=&espec;
623 + event.event_type=2;
624 + /* index in the event array currently set up */
625 + /* make sure the counters are loaded in the order we want them to show up */
626 + (*rec_event)(&event, diff);
629 + /* next has been elected to run */
630 + next->last_interrupted=0;
632 + /* An uninterruptible process just yielded. Record the current jiffy */
633 + if (prev->state & TASK_UNINTERRUPTIBLE) {
634 + prev->last_interrupted=jiffies;
636 + /* An interruptible process just yielded, or it got preempted.
637 + * Mark it as interruptible */
638 + else if (prev->state & TASK_INTERRUPTIBLE) {
639 + prev->last_interrupted=INTERRUPTIBLE;
644 sched_info_switch(prev, next);
645 perf_event_task_sched_out(prev, next);
647 diff --git a/mm/memory.c b/mm/memory.c
648 index 30858a5..b9a9d9f 100644
652 #include <linux/swapops.h>
653 #include <linux/elf.h>
654 // #include <linux/vs_memory.h>
655 +#include <linux/arrays.h>
658 #include <asm/pgalloc.h>
659 @@ -3152,6 +3153,16 @@ out:
663 +#ifdef CONFIG_CHOPSTIX
664 +extern void (*rec_event)(void *,unsigned int);
667 + unsigned long dcookie;
669 + unsigned char reason;
674 * By the time we get here, we already hold the mm semaphore
676 @@ -3197,6 +3208,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
680 +#ifdef CONFIG_CHOPSTIX
682 + struct event event;
683 + struct event_spec espec;
684 + struct pt_regs *regs;
686 + regs = task_pt_regs(current);
687 + pc = regs->ip & (unsigned int) ~4095;
689 + espec.reason = 0; /* alloc */
690 + event.event_data=&espec;
691 + event.task = current;
693 + event.event_type = 6;
694 + (*rec_event)(&event, 1);
698 return handle_pte_fault(mm, vma, address, pte, pmd, flags);
701 diff --git a/mm/slab.c b/mm/slab.c
702 index f644e70..6a5489c 100644
706 #include <linux/fault-inject.h>
707 #include <linux/rtmutex.h>
708 #include <linux/reciprocal_div.h>
709 +#include <linux/arrays.h>
710 #include <linux/debugobjects.h>
711 #include <linux/kmemcheck.h>
712 #include <linux/memory.h>
713 @@ -253,6 +254,16 @@ struct slab_rcu {
717 +#ifdef CONFIG_CHOPSTIX
718 +extern void (*rec_event)(void *,unsigned int);
721 + unsigned long dcookie;
723 + unsigned char reason;
730 @@ -3491,6 +3502,19 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
731 kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
734 +#ifdef CONFIG_CHOPSTIX
735 + if (rec_event && objp) {
736 + struct event event;
737 + struct event_spec espec;
739 + espec.reason = 0; /* alloc */
740 + event.event_data=&espec;
741 + event.task = current;
743 + event.event_type=4;
744 + (*rec_event)(&event, cachep->buffer_size);
749 kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
750 @@ -3603,13 +3627,28 @@ free_done:
751 * Release an obj back to its cache. If the obj has a constructed state, it must
752 * be in this state _before_ it is released. Called with disabled ints.
754 -static inline void __cache_free(struct kmem_cache *cachep, void *objp)
755 +static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
757 struct array_cache *ac = cpu_cache_get(cachep);
760 kmemleak_free_recursive(objp, cachep->flags);
761 - objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
762 + objp = cache_free_debugcheck(cachep, objp, caller);
764 +#ifdef CONFIG_CHOPSTIX
765 + if (rec_event && objp) {
766 + struct event event;
767 + struct event_spec espec;
769 + espec.reason = 1; /* free */
770 + event.event_data = &espec;
771 + event.task = current;
773 + event.event_type = 4;
774 + (*rec_event)(&event, cachep->buffer_size);
778 vx_slab_free(cachep);
780 kmemcheck_slab_free(cachep, objp, obj_size(cachep));
781 @@ -3811,10 +3850,17 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
782 EXPORT_SYMBOL(__kmalloc_track_caller);
785 +#ifdef CONFIG_CHOPSTIX
786 +void *__kmalloc(size_t size, gfp_t flags)
788 + return __do_kmalloc(size, flags, __builtin_return_address(0));
791 void *__kmalloc(size_t size, gfp_t flags)
793 return __do_kmalloc(size, flags, NULL);
796 EXPORT_SYMBOL(__kmalloc);
799 @@ -3834,7 +3880,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
800 debug_check_no_locks_freed(objp, obj_size(cachep));
801 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
802 debug_check_no_obj_freed(objp, obj_size(cachep));
803 - __cache_free(cachep, objp);
804 + __cache_free(cachep, objp,__builtin_return_address(0));
805 local_irq_restore(flags);
807 trace_kmem_cache_free(_RET_IP_, objp);
808 @@ -3864,7 +3910,7 @@ void kfree(const void *objp)
809 c = virt_to_cache(objp);
810 debug_check_no_locks_freed(objp, obj_size(c));
811 debug_check_no_obj_freed(objp, obj_size(c));
812 - __cache_free(c, (void *)objp);
813 + __cache_free(c, (void *)objp,__builtin_return_address(0));
814 local_irq_restore(flags);
816 EXPORT_SYMBOL(kfree);