linux-2.6-590-chopstix-intern.patch: Chopstix (PlanetLab) kernel hooks

Adds a CONFIG_CHOPSTIX option and a rec_event() function pointer,
defined and exported by kernel/sched.c, through which an out-of-tree
Chopstix module can receive events from probes in the block layer,
oprofile's sample path, mutex contention, the scheduler, and the slab
allocator.
diff -Nurb linux-2.6.22-580/arch/i386/Kconfig linux-2.6.22-590/arch/i386/Kconfig
--- linux-2.6.22-580/arch/i386/Kconfig  2008-04-30 09:29:26.000000000 -0400
+++ linux-2.6.22-590/arch/i386/Kconfig  2008-04-30 09:29:41.000000000 -0400
@@ -1217,6 +1217,14 @@
 
 source "arch/i386/oprofile/Kconfig"
 
+config CHOPSTIX
+       bool "Chopstix (PlanetLab)"
+       depends on MODULES && OPROFILE
+       help
+         Chopstix allows you to monitor various events by summarizing them
+         in lossy data structures and transferring these data structures
+         into user space. If in doubt, say "N".
+
 config KPROBES
        bool "Kprobes (EXPERIMENTAL)"
        depends on KALLSYMS && EXPERIMENTAL && MODULES
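
The hook this option controls is a single function pointer, rec_event,
defined and exported by kernel/sched.c at the end of this patch; every
probe below fires only while a module has armed it.  A minimal sketch of
such a consumer module, assuming only what the patch itself provides
(rec_event and struct event from the new <linux/arrays.h>); the module
body and the printk output are illustrative, not part of Chopstix:

/* chopstix_probe.c -- hypothetical consumer sketch, not part of this patch */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/arrays.h>

static void probe_rec_event(void *data, unsigned int count)
{
        struct event *e = data;

        /* Called synchronously from the probe sites, possibly in
         * interrupt context (bio_endio), so this must not sleep. */
        printk(KERN_DEBUG "chopstix: type %u count %u\n",
               e->event_type, count);
}

static int __init probe_init(void)
{
        rec_event = probe_rec_event;    /* arm every probe site */
        return 0;
}

static void __exit probe_exit(void)
{
        rec_event = NULL;               /* disarm before unload */
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");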
diff -Nurb linux-2.6.22-580/block/ll_rw_blk.c linux-2.6.22-590/block/ll_rw_blk.c
--- linux-2.6.22-580/block/ll_rw_blk.c  2008-04-30 09:29:21.000000000 -0400
+++ linux-2.6.22-590/block/ll_rw_blk.c  2008-04-30 09:29:41.000000000 -0400
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <linux/arrays.h>
 
 /*
  * for max sense size
@@ -3102,6 +3103,13 @@
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+extern void (*rec_event)(void *, unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -3220,7 +3228,23 @@
                                goto end_io;
                        }
                }
-
+#ifdef CONFIG_CHOPSTIX
+               if (rec_event) {
+                       struct event event;
+                       struct event_spec espec;
+                       unsigned long eip;
+
+                       espec.reason = 0; /* request */
+
+                       eip = (unsigned long)bio->bi_end_io;
+                       espec.pc = eip;
+                       event.event_data = &espec;
+                       event.event_type = 3;
+                       /* index in the event array currently set up */
+                       /* make sure the counters are loaded in the order we want them to show up */
+                       (*rec_event)(&event, bio->bi_size);
+               }
+#endif
                ret = q->make_request_fn(q, bio);
        } while (ret);
 }
diff -Nurb linux-2.6.22-580/drivers/oprofile/cpu_buffer.c linux-2.6.22-590/drivers/oprofile/cpu_buffer.c
--- linux-2.6.22-580/drivers/oprofile/cpu_buffer.c      2007-07-08 19:32:17.000000000 -0400
+++ linux-2.6.22-590/drivers/oprofile/cpu_buffer.c      2008-04-30 09:29:41.000000000 -0400
@@ -21,6 +21,7 @@
 #include <linux/oprofile.h>
 #include <linux/vmalloc.h>
 #include <linux/errno.h>
+#include <linux/arrays.h>
 
 #include "event_buffer.h"
 #include "cpu_buffer.h"
@@ -143,6 +144,17 @@
                b->head_pos = 0;
 }
 
+#ifdef CONFIG_CHOPSTIX
+
+struct event_spec {
+       unsigned int pc;
+       unsigned long dcookie;
+       unsigned count;
+};
+
+extern void (*rec_event)(void *, unsigned int);
+#endif
+
 static inline void
 add_sample(struct oprofile_cpu_buffer * cpu_buf,
            unsigned long pc, unsigned long event)
@@ -241,8 +254,25 @@
 {
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event esig;
+               struct event_spec espec;
+
+               esig.task = current;
+               espec.pc = pc;
+               espec.count = 1;
+               esig.event_data = &espec;
+               esig.event_type = event; /* index in the event array currently set up */
+               /* make sure the counters are loaded in the order we want them to show up */
+               (*rec_event)(&esig, 1);
+       } else {
        oprofile_add_ext_sample(pc, regs, event, is_kernel);
+       }
+#else
+       oprofile_add_ext_sample(pc, regs, event, is_kernel);
+#endif
 }
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
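
With the hook armed, timer and counter samples bypass oprofile's CPU
buffer entirely and are delivered through rec_event, with the sampled
counter's index reused as the event type.  Note that each instrumented
file declares its own private struct event_spec, and the one above has
no reason field, so a consumer has to mirror the layout per event type;
a hypothetical consumer-side mirror of this file's layout:

/* hypothetical mirror of cpu_buffer.c's private event_spec */
struct counter_sample_spec {
        unsigned int pc;        /* profile_pc() of the sample */
        unsigned long dcookie;
        unsigned count;
};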
diff -Nurb linux-2.6.22-580/fs/bio.c linux-2.6.22-590/fs/bio.c
--- linux-2.6.22-580/fs/bio.c   2007-07-08 19:32:17.000000000 -0400
+++ linux-2.6.22-590/fs/bio.c   2008-04-30 09:29:41.000000000 -0400
@@ -27,6 +27,7 @@
 #include <linux/workqueue.h>
 #include <linux/blktrace_api.h>
 #include <scsi/sg.h>           /* for struct sg_iovec */
+#include <linux/arrays.h>
 
 #define BIO_POOL_SIZE 2
 
@@ -999,6 +1001,14 @@
        }
 }
 
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
+extern void (*rec_event)(void *, unsigned int);
 /**
  * bio_endio - end I/O on a bio
  * @bio:       bio
@@ -1028,6 +1038,24 @@
        bio->bi_size -= bytes_done;
        bio->bi_sector += (bytes_done >> 9);
 
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event) {
+               struct event event;
+               struct event_spec espec;
+               unsigned long eip;
+
+               espec.reason = 1; /* response */
+
+               eip = (unsigned long)bio->bi_end_io;
+               espec.pc = eip;
+               event.event_data = &espec;
+               event.event_type = 3;
+               /* index in the event array currently set up */
+               /* make sure the counters are loaded in the order we want them to show up */
+               (*rec_event)(&event, bytes_done);
+       }
+#endif
+
        if (bio->bi_end_io)
                bio->bi_end_io(bio, bytes_done, error);
 }
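
Together with the generic_make_request() hunk in block/ll_rw_blk.c
above, type-3 events come in two flavors: reason 0 at submission
(weighted by bio->bi_size) and reason 1 here at completion (weighted by
bytes_done); in both, espec.pc records the bio's bi_end_io address,
which identifies the submitting subsystem rather than the individual
request.  A sketch of how a consumer might decode them (struct and
function names hypothetical):

#include <linux/kernel.h>
#include <linux/arrays.h>

/* mirrors the event_spec in block/ll_rw_blk.c and fs/bio.c */
struct io_spec {
        unsigned long pc;
        unsigned long dcookie;
        unsigned count;
        unsigned char reason;
};

static void handle_block_event(struct event *e, unsigned int bytes)
{
        struct io_spec *s = e->event_data;

        if (s->reason == 0)     /* submitted in generic_make_request() */
                printk(KERN_DEBUG "submit   %#lx, %u bytes\n", s->pc, bytes);
        else                    /* completed in bio_endio() */
                printk(KERN_DEBUG "complete %#lx, %u bytes\n", s->pc, bytes);
}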
diff -Nurb linux-2.6.22-580/fs/exec.c linux-2.6.22-590/fs/exec.c
--- linux-2.6.22-580/fs/exec.c  2008-04-30 09:29:26.000000000 -0400
+++ linux-2.6.22-590/fs/exec.c  2008-04-30 09:29:41.000000000 -0400
@@ -38,7 +38,7 @@
 #include <linux/binfmts.h>
 #include <linux/swap.h>
 #include <linux/utsname.h>
-#include <linux/pid_namespace.h>
+/*#include <linux/pid_namespace.h>*/
 #include <linux/module.h>
 #include <linux/namei.h>
 #include <linux/proc_fs.h>
@@ -52,6 +52,7 @@
 #include <linux/audit.h>
 #include <linux/signalfd.h>
 #include <linux/vs_memory.h>
+#include <linux/dcookies.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -488,6 +489,12 @@
 
        if (!err) {
                struct inode *inode = nd.dentry->d_inode;
+#ifdef CONFIG_CHOPSTIX
+               unsigned long cookie;
+               if (!nd.dentry->d_cookie)
+                       get_dcookie(nd.dentry, nd.mnt, &cookie);
+#endif
+
                file = ERR_PTR(-EACCES);
                if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
                    S_ISREG(inode->i_mode)) {
@@ -627,8 +634,10 @@
         * Reparenting needs write_lock on tasklist_lock,
         * so it is safe to do it under read_lock.
         */
+       /*
        if (unlikely(tsk->group_leader == child_reaper(tsk)))
                tsk->nsproxy->pid_ns->child_reaper = tsk;
+               */
 
        zap_other_threads(tsk);
        read_unlock(&tasklist_lock);
diff -Nurb linux-2.6.22-580/include/linux/arrays.h linux-2.6.22-590/include/linux/arrays.h
--- linux-2.6.22-580/include/linux/arrays.h     1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.22-590/include/linux/arrays.h     2008-04-30 09:29:41.000000000 -0400
@@ -0,0 +1,36 @@
+#ifndef __ARRAYS_H__
+#define __ARRAYS_H__
+#include <linux/list.h>
+
+#define SAMPLING_METHOD_DEFAULT 0
+#define SAMPLING_METHOD_LOG 1
+
+/* Every probe has an array handler */
+
+/* XXX - Optimize this structure */
+
+extern void (*rec_event)(void *, unsigned int);
+struct array_handler {
+       struct list_head link;
+       unsigned int (*hash_func)(void *);
+       unsigned int (*sampling_func)(void *, int, void *);
+       unsigned short size;
+       unsigned int threshold;
+       unsigned char **expcount;
+       unsigned int sampling_method;
+       unsigned int **arrays;
+       unsigned int arraysize;
+       unsigned int num_samples[2];
+       void **epoch_samples; /* size-sized lists of samples */
+       unsigned int (*serialize)(void *, void *);
+       unsigned char code[5];
+};
+
+struct event {
+       struct list_head link;
+       void *event_data;
+       unsigned int count;
+       unsigned int event_type;
+       struct task_struct *task;
+};
+#endif
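
Every probe site in this patch emits through these structures the same
way: fill a stack-allocated struct event pointing at a file-local spec,
pick an event_type index, and call through rec_event with a weight.  A
sketch of that pattern as a template for a new probe; the spec layout
copies the existing sites, while event_type 6 and the function name are
hypothetical:

#include <linux/sched.h>        /* current */
#include <linux/arrays.h>       /* struct event, rec_event */

struct my_spec {                /* file-local, like the other probes */
        unsigned long pc;
        unsigned long dcookie;
        unsigned count;
        unsigned char reason;
};

static inline void my_probe_hit(unsigned long pc, unsigned int weight)
{
#ifdef CONFIG_CHOPSTIX
        if (rec_event) {        /* armed only while the module is loaded */
                struct event event;
                struct my_spec spec;

                spec.pc = pc;
                spec.reason = 0;
                event.event_data = &spec;
                event.task = current;
                event.event_type = 6;   /* hypothetical: next unused index */
                (*rec_event)(&event, weight);
        }
#endif
}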
diff -Nurb linux-2.6.22-580/include/linux/mutex.h linux-2.6.22-590/include/linux/mutex.h
--- linux-2.6.22-580/include/linux/mutex.h      2007-07-08 19:32:17.000000000 -0400
+++ linux-2.6.22-590/include/linux/mutex.h      2008-04-30 09:45:43.000000000 -0400
@@ -53,6 +53,10 @@
        struct thread_info      *owner;
        const char              *name;
        void                    *magic;
+#else
+#ifdef CONFIG_CHOPSTIX
+       struct thread_info      *owner;
+#endif
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
diff -Nurb linux-2.6.22-580/include/linux/sched.h linux-2.6.22-590/include/linux/sched.h
--- linux-2.6.22-580/include/linux/sched.h      2008-04-30 09:29:26.000000000 -0400
+++ linux-2.6.22-590/include/linux/sched.h      2008-04-30 09:29:41.000000000 -0400
@@ -850,6 +850,10 @@
 #endif
        unsigned long sleep_avg;
        unsigned long long timestamp, last_ran;
+#ifdef CONFIG_CHOPSTIX
+       unsigned long last_interrupted, last_ran_j;
+#endif
+
        unsigned long long sched_time; /* sched_clock time spent running */
        enum sleep_type sleep_type;
 
diff -Nurb linux-2.6.22-580/kernel/mutex.c linux-2.6.22-590/kernel/mutex.c
--- linux-2.6.22-580/kernel/mutex.c     2007-07-08 19:32:17.000000000 -0400
+++ linux-2.6.22-590/kernel/mutex.c     2008-04-30 09:29:41.000000000 -0400
@@ -18,6 +18,16 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
+#include <linux/arrays.h>
+
+#ifdef CONFIG_CHOPSTIX
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -43,6 +53,9 @@
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
        atomic_set(&lock->count, 1);
+#ifdef CONFIG_CHOPSTIX
+       lock->owner = NULL;
+#endif
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
 
@@ -168,6 +182,24 @@
                }
                __set_task_state(task, state);
 
+#ifdef CONFIG_CHOPSTIX
+               if (rec_event) {
+                       if (lock->owner) {
+                               struct event event;
+                               struct event_spec espec;
+                               struct task_struct *p = lock->owner->task;
+
+                               espec.reason = 0; /* lock */
+                               espec.pc = (unsigned long)lock;
+                               event.event_data = &espec;
+                               event.task = p;
+                               event.event_type = 5;
+                               (*rec_event)(&event, 1);
+                       } else
+                               BUG();
+               }
+#endif
+
                /* didnt get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule();
@@ -177,6 +212,9 @@
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        debug_mutex_set_owner(lock, task_thread_info(task));
+#ifdef CONFIG_CHOPSTIX
+       lock->owner = task_thread_info(task);
+#endif
 
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
@@ -246,6 +286,22 @@
 
                debug_mutex_wake_waiter(lock, waiter);
 
+#ifdef CONFIG_CHOPSTIX
+               if (rec_event) {
+                       if (lock->owner) {
+                               struct event event;
+                               struct event_spec espec;
+
+                               espec.reason = 1; /* unlock */
+                               espec.pc = (unsigned long)lock;
+                               event.event_data = &espec;
+                               event.task = lock->owner->task;
+                               event.event_type = 5;
+                               (*rec_event)(&event, 1);
+                       } else
+                               BUG();
+               }
+#endif
                wake_up_process(waiter->task);
        }
 
diff -Nurb linux-2.6.22-580/kernel/sched.c linux-2.6.22-590/kernel/sched.c
--- linux-2.6.22-580/kernel/sched.c     2008-04-30 09:29:26.000000000 -0400
+++ linux-2.6.22-590/kernel/sched.c     2008-04-30 09:29:41.000000000 -0400
@@ -56,6 +56,7 @@
 
 #include <asm/tlb.h>
 #include <asm/unistd.h>
+#include <linux/arrays.h>
 #include <linux/vs_sched.h>
 #include <linux/vs_cvirt.h>
 
@@ -1741,6 +1743,10 @@
         * event cannot wake it up and insert it on the runqueue either.
         */
        p->state = TASK_RUNNING;
+#ifdef CONFIG_CHOPSTIX
+       p->last_interrupted = 0;
+       p->last_ran_j = jiffies;
+#endif
 
        /*
         * Make sure we do not leak PI boosting priority to the child:
@@ -3617,16 +3624,31 @@
 /*
  * schedule() is the main scheduler function.
  */
+
+#ifdef CONFIG_CHOPSTIX
+extern void (*rec_event)(void *, unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+#endif
+
 asmlinkage void __sched schedule(void)
 {
        struct task_struct *prev, *next;
        struct prio_array *array;
        struct list_head *queue;
        unsigned long long now;
        unsigned long run_time;
        int cpu, idx, new_prio;
        long *switch_count;
        struct rq *rq;
+#ifdef CONFIG_CHOPSTIX
+       unsigned long diff;
+       int sampling_reason;
+#endif
 
        /*
         * Test if we are atomic.  Since do_exit() needs to call into
@@ -3689,6 +3709,12 @@
                                vx_uninterruptible_inc(prev);
                        }
                        deactivate_task(prev, rq);
+#ifdef CONFIG_CHOPSTIX
+                       if (prev->state & TASK_UNINTERRUPTIBLE)
+                               prev->last_interrupted = jiffies;
+                       else if (prev->state & TASK_INTERRUPTIBLE)
+                               prev->last_interrupted = -1;
+#endif
                }
        }
 
@@ -3765,6 +3793,40 @@
                prev->sleep_avg = 0;
        prev->timestamp = prev->last_ran = now;
 
+#ifdef CONFIG_CHOPSTIX
+       /* Run only if the Chopstix module has hooked rec_event */
+       if (rec_event) {
+               prev->last_ran_j = jiffies;
+               if (next->last_interrupted != -1) {
+                       if (next->last_interrupted) {
+                               /* time since it blocked (uninterruptible) */
+                               diff = jiffies - next->last_interrupted;
+                               sampling_reason = 0;
+                       } else {
+                               /* time since it last ran */
+                               diff = jiffies - next->last_ran_j;
+                               sampling_reason = 1;
+                       }
+
+                       if (diff > HZ/5) {
+                               struct event event;
+                               struct event_spec espec;
+                               unsigned long eip;
+
+                               espec.reason = sampling_reason;
+                               eip = next->thread.esp & 4095;
+                               espec.pc = eip;
+                               event.event_data = &espec;
+                               event.task = next;
+                               event.event_type = 2;
+                               /* index in the event array currently set up */
+                               /* make sure the counters are loaded in the order we want them to show up */
+                               (*rec_event)(&event, diff);
+                       }
+               }
+               next->last_interrupted = 0;
+       }
+#endif
        sched_info_switch(prev, next);
        if (likely(prev != next)) {
                next->timestamp = next->last_ran = now;
@@ -7275,3 +7338,9 @@
 }
 
 #endif
+
+#ifdef CONFIG_CHOPSTIX
+void (*rec_event)(void *, unsigned int);
+EXPORT_SYMBOL(rec_event);
+EXPORT_SYMBOL(in_sched_functions);
+#endif
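
For scale: HZ/5 jiffies is a fifth of a second, so the scheduler probe
above reports only delays longer than 200 ms, whatever HZ is.  Reason 0
means the task had been blocked uninterruptibly since last_interrupted;
reason 1 means it had been runnable since last_ran_j.  The weight is
the delay in jiffies, and espec.pc carries the low 12 bits of the
task's saved kernel stack pointer.  A sketch of consumer-side decoding
(struct name hypothetical; jiffies_to_msecs() is from <linux/jiffies.h>):

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/arrays.h>

struct delay_spec {             /* mirrors kernel/sched.c's event_spec */
        unsigned long pc;
        unsigned long dcookie;
        unsigned count;
        unsigned char reason;
};

static void handle_delay_event(struct event *e, unsigned int jiffies_waited)
{
        struct delay_spec *s = e->event_data;
        unsigned int ms = jiffies_to_msecs(jiffies_waited); /* > 200 by construction */

        if (s->reason == 0)
                printk(KERN_DEBUG "%s blocked %u ms (uninterruptible)\n",
                       e->task->comm, ms);
        else
                printk(KERN_DEBUG "%s runnable %u ms before getting a CPU\n",
                       e->task->comm, ms);
}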
diff -Nurb linux-2.6.22-580/mm/slab.c linux-2.6.22-590/mm/slab.c
--- linux-2.6.22-580/mm/slab.c  2008-04-30 09:29:26.000000000 -0400
+++ linux-2.6.22-590/mm/slab.c  2008-04-30 09:29:41.000000000 -0400
@@ -110,11 +110,12 @@
 #include       <linux/fault-inject.h>
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
+#include       <linux/arrays.h>
 
 #include       <asm/cacheflush.h>
 #include       <asm/tlbflush.h>
 #include       <asm/page.h>
 
 /*
  * DEBUG       - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *               0 for faster, smaller code (especially in the critical paths).
@@ -249,6 +251,14 @@
        void *addr;
 };
 
+extern void (*rec_event)(void *, unsigned int);
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
 /*
  * struct array_cache
  *
@@ -3443,6 +3453,19 @@
        local_irq_restore(save_flags);
        objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
        prefetchw(objp);
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event && objp) {
+               struct event event;
+               struct event_spec espec;
+
+               espec.reason = 0; /* alloc */
+               espec.pc = (unsigned long)caller;
+               event.event_data = &espec;
+               event.task = current;
+               event.event_type = 4;
+               (*rec_event)(&event, cachep->buffer_size);
+       }
+#endif
 
        return objp;
 }
@@ -3549,13 +3572,26 @@
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released.  Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
 {
        struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
-       objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+       objp = cache_free_debugcheck(cachep, objp, caller);
        vx_slab_free(cachep);
+#ifdef CONFIG_CHOPSTIX
+       if (rec_event && objp) {
+               struct event event;
+               struct event_spec espec;
+
+               espec.reason = 1; /* free */
+               espec.pc = (unsigned long)caller;
+               event.event_data = &espec;
+               event.task = current;
+               event.event_type = 4;
+               (*rec_event)(&event, cachep->buffer_size);
+       }
+#endif
 
        if (cache_free_alien(cachep, objp))
                return;
@@ -3723,10 +3764,17 @@
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
+#ifdef CONFIG_CHOPSTIX
+void *__kmalloc(size_t size, gfp_t flags)
+{
+       return __do_kmalloc(size, flags, __builtin_return_address(0));
+}
+#else
 void *__kmalloc(size_t size, gfp_t flags)
 {
        return __do_kmalloc(size, flags, NULL);
 }
+#endif
 EXPORT_SYMBOL(__kmalloc);
 #endif
 
@@ -3792,7 +3840,7 @@
 
        local_irq_save(flags);
        debug_check_no_locks_freed(objp, obj_size(cachep));
-       __cache_free(cachep, objp);
+       __cache_free(cachep, objp, __builtin_return_address(0));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3817,7 +3865,7 @@
        kfree_debugcheck(objp);
        c = virt_to_cache(objp);
        debug_check_no_locks_freed(objp, obj_size(c));
-       __cache_free(c, (void *)objp);
+       __cache_free(c, (void *)objp, __builtin_return_address(0));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
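
Taken together, the patch defines five event sources, distinguished by
event_type and weighted by rec_event's second argument.  A hypothetical
consumer-side summary of the indices assigned above:

/* hypothetical: names for the event_type indices this patch assigns */
static const char *chopstix_event_name(unsigned int type)
{
        switch (type) {
        case 2:  return "sched-delay";  /* kernel/sched.c, weight = jiffies waited */
        case 3:  return "block-io";     /* ll_rw_blk.c/bio.c, weight = bytes */
        case 4:  return "slab";         /* mm/slab.c, weight = object size */
        case 5:  return "mutex";        /* kernel/mutex.c, weight = 1 */
        default: return "perfctr";      /* oprofile path: type = counter index */
        }
}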