Something that was bothering gcc and dhozac
linux-2.6-590-chopstix-intern.patch
diff -Nurb linux-2.6.22-580/drivers/oprofile/cpu_buffer.c linux-2.6.22-590/drivers/oprofile/cpu_buffer.c
--- linux-2.6.22-580/drivers/oprofile/cpu_buffer.c      2007-07-08 19:32:17.000000000 -0400
+++ linux-2.6.22-590/drivers/oprofile/cpu_buffer.c      2008-02-27 13:53:47.000000000 -0500
@@ -21,6 +21,7 @@
 #include <linux/oprofile.h>
 #include <linux/vmalloc.h>
 #include <linux/errno.h>
+#include <linux/arrays.h>
 
 #include "event_buffer.h"
 #include "cpu_buffer.h"
@@ -143,6 +144,14 @@
                b->head_pos = 0;
 }
 
+struct event_spec {
+       unsigned int pc;
+       unsigned long dcookie;
+       unsigned count;
+};
+
+extern void (*rec_event)(void *,unsigned int);
+
 static inline void
 add_sample(struct oprofile_cpu_buffer * cpu_buf,
            unsigned long pc, unsigned long event)
@@ -151,6 +160,7 @@
        entry->eip = pc;
        entry->event = event;
        increment_head(cpu_buf);
+
 }
 
 static inline void
@@ -237,12 +247,66 @@
        oprofile_end_trace(cpu_buf);
 }
 
+static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+{
+       int res = 0;
+       unsigned int len;
+       struct mm_struct *mm = get_task_mm(task);
+       if (!mm)
+               goto out;
+       if (!mm->arg_end)
+               goto out_mm;    /* Shh! No looking before we're done */
+
+       len = mm->arg_end - mm->arg_start;
+
+       if (len > PAGE_SIZE)
+               len = PAGE_SIZE;
+
+       res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+
+       // If the nul at the end of args has been overwritten, then
+       // assume application is using setproctitle(3).
+       if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
+               len = strnlen(buffer, res);
+               if (len < res) {
+                   res = len;
+               } else {
+                       len = mm->env_end - mm->env_start;
+                       if (len > PAGE_SIZE - res)
+                               len = PAGE_SIZE - res;
+                       res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
+                       res = strnlen(buffer, res);
+               }
+       }
+out_mm:
+       mmput(mm);
+out:
+       return res;
+}
+
+
+
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);
+       int res=0;
 
+       if (rec_event) {
+               struct event esig;
+               struct event_spec espec;
+               /*res = proc_pid_cmdline(current, espec->appname);*/
+               esig.task = current;
+               espec.pc=pc;
+               espec.count=1;
+               esig.event_data=&espec;
+               esig.event_type=event; /* index in the event array currently set up */
+                                       /* make sure the counters are loaded in the order we want them to show up*/
+               (*rec_event)(&esig, 1);
+       }
+       else {
        oprofile_add_ext_sample(pc, regs, event, is_kernel);
+       }
 }
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
diff -Nurb linux-2.6.22-580/fs/exec.c linux-2.6.22-590/fs/exec.c
--- linux-2.6.22-580/fs/exec.c  2008-02-27 13:46:38.000000000 -0500
+++ linux-2.6.22-590/fs/exec.c  2008-02-27 13:49:58.000000000 -0500
@@ -52,6 +52,7 @@
 #include <linux/audit.h>
 #include <linux/signalfd.h>
 #include <linux/vs_memory.h>
+#include <linux/dcookies.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -488,6 +489,10 @@
 
        if (!err) {
                struct inode *inode = nd.dentry->d_inode;
+               unsigned long cookie;
+               if (!nd.dentry->d_cookie)
+                       get_dcookie(nd.dentry, nd.mnt, &cookie);
+
                file = ERR_PTR(-EACCES);
                if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
                    S_ISREG(inode->i_mode)) {
diff -Nurb linux-2.6.22-580/include/linux/arrays.h linux-2.6.22-590/include/linux/arrays.h
--- linux-2.6.22-580/include/linux/arrays.h     1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.22-590/include/linux/arrays.h     2008-02-27 13:48:29.000000000 -0500
@@ -0,0 +1,35 @@
+#ifndef __ARRAYS_H__
+#define __ARRAYS_H__
+#include <linux/list.h>
+
+#define SAMPLING_METHOD_DEFAULT 0
+#define SAMPLING_METHOD_LOG 1
+
+/* Every probe has an array handler */
+
+/* XXX - Optimize this structure */
+
+struct array_handler {
+       struct list_head link;
+       unsigned int (*hash_func)(void *);
+       unsigned int (*sampling_func)(void *,int,void *);
+       unsigned short size;
+       unsigned int threshold;
+       unsigned char **expcount;
+       unsigned int sampling_method;
+       unsigned int **arrays;
+       unsigned int arraysize;
+       unsigned int num_samples[2];
+       void **epoch_samples; /* size-sized lists of samples */
+       unsigned int (*serialize)(void *, void *);
+       unsigned char code[5];
+};
+
+struct event {
+       struct list_head link;
+       void *event_data;
+       unsigned int count;
+       unsigned int event_type;
+       struct task_struct *task;
+};
+#endif
diff -Nurb linux-2.6.22-580/include/linux/sched.h linux-2.6.22-590/include/linux/sched.h
--- linux-2.6.22-580/include/linux/sched.h      2008-02-27 13:46:40.000000000 -0500
+++ linux-2.6.22-590/include/linux/sched.h      2008-02-27 13:48:29.000000000 -0500
@@ -849,7 +849,7 @@
        unsigned int btrace_seq;
 #endif
        unsigned long sleep_avg;
-       unsigned long long timestamp, last_ran;
+       unsigned long long timestamp, last_ran, last_interrupted, last_ran_j;
        unsigned long long sched_time; /* sched_clock time spent running */
        enum sleep_type sleep_type;
 
diff -Nurb linux-2.6.22-580/kernel/fork.c linux-2.6.22-590/kernel/fork.c
--- linux-2.6.22-580/kernel/fork.c      2008-02-27 13:46:40.000000000 -0500
+++ linux-2.6.22-590/kernel/fork.c      2008-02-27 13:48:29.000000000 -0500
@@ -197,6 +197,8 @@
        tsk->btrace_seq = 0;
 #endif
        tsk->splice_pipe = NULL;
+       //tsk->cmdline[0]='\0';
+       tsk->last_interrupted = 0;
        return tsk;
 }
 
diff -Nurb linux-2.6.22-580/kernel/sched.c linux-2.6.22-590/kernel/sched.c
--- linux-2.6.22-580/kernel/sched.c     2008-02-27 13:46:40.000000000 -0500
+++ linux-2.6.22-590/kernel/sched.c     2008-02-27 14:08:26.000000000 -0500
@@ -56,6 +56,7 @@
 
 #include <asm/tlb.h>
 #include <asm/unistd.h>
+#include <linux/arrays.h>
 #include <linux/vs_sched.h>
 #include <linux/vs_cvirt.h>
 
@@ -3608,6 +3609,8 @@
 
 #endif
 
+extern void (*rec_event)(void *,unsigned int);
+
 static inline int interactive_sleep(enum sleep_type sleep_type)
 {
        return (sleep_type == SLEEP_INTERACTIVE ||
@@ -3617,16 +3620,51 @@
 /*
  * schedule() is the main scheduler function.
  */
+
+struct event_spec {
+       unsigned long pc;
+       unsigned long dcookie;
+       unsigned count;
+       unsigned char reason;
+};
+
+#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
+#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
+
+static inline unsigned long my_get_wchan(struct task_struct *p)
+{
+        unsigned long ebp, esp, eip;
+        unsigned long stack_page;
+        int count = 0;
+        stack_page = (unsigned long)task_stack_page(p);
+        esp = p->thread.esp;
+        if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
+                return 0;
+        /* include/asm-i386/system.h:switch_to() pushes ebp last. */
+        ebp = *(unsigned long *) esp;
+        do {
+                if (ebp < stack_page || ebp > top_ebp+stack_page)
+                        return 0;
+                eip = *(unsigned long *) (ebp+4);
+                if (!in_sched_functions(eip))
+                        return eip;
+                ebp = *(unsigned long *) ebp;
+        } while (count++ < 16);
+        return 0;
+}
+/* CHOPSTIX */
+
 asmlinkage void __sched schedule(void)
 {
        struct task_struct *prev, *next;
        struct prio_array *array;
        struct list_head *queue;
        unsigned long long now;
-       unsigned long run_time;
+       unsigned long run_time, diff;
        int cpu, idx, new_prio;
        long *switch_count;
        struct rq *rq;
+       int sampling_reason;
 
        /*
         * Test if we are atomic.  Since do_exit() needs to call into
@@ -3680,6 +3718,7 @@
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                switch_count = &prev->nvcsw;
+
                if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
                                unlikely(signal_pending(prev))))
                        prev->state = TASK_RUNNING;
@@ -3689,6 +3728,9 @@
                                vx_uninterruptible_inc(prev);
                        }
                        deactivate_task(prev, rq);
+                       if (prev->state & TASK_INTERRUPTIBLE) {
+                               prev->last_interrupted=jiffies;
+                       }
                }
        }
 
@@ -3763,8 +3805,44 @@
        prev->sleep_avg -= run_time;
        if ((long)prev->sleep_avg <= 0)
                prev->sleep_avg = 0;
+
        prev->timestamp = prev->last_ran = now;
 
+       /* CHOPSTIX */
+
+       prev->last_ran_j = jiffies;
+       if (next->last_interrupted) {
+               diff = (jiffies-next->last_interrupted);
+               next->last_interrupted = 0;
+               sampling_reason = 0;
+       }
+       else {
+               diff = jiffies-next->last_ran_j;
+               sampling_reason = 1;
+       }
+
+       if (rec_event && (diff>HZ/5)) {
+               struct event event;
+               struct event_spec espec;
+               unsigned long eip;
+               unsigned int state = next->state;
+
+               espec.reason = sampling_reason;
+
+               next->state = 0;
+               eip = next->thread.esp;
+               next->state = state;
+
+               next->last_interrupted = 0;
+               event.event_data=&espec;
+               event.task=next;
+               espec.pc=eip;
+               event.event_type=2;
+               /* index in the event array currently set up */
+               /* make sure the counters are loaded in the order we want them to show up*/
+               (*rec_event)(&event, diff);
+       }
+
        sched_info_switch(prev, next);
        if (likely(prev != next)) {
                next->timestamp = next->last_ran = now;
@@ -7275,3 +7353,7 @@
 }
 
 #endif
+
+void (*rec_event)(void *,unsigned int);
+EXPORT_SYMBOL(rec_event);
+EXPORT_SYMBOL(in_sched_functions);
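
Note (not part of the patch): the patch only defines and exports the rec_event function pointer and the struct event type in <linux/arrays.h>; the Chopstix data-collection module that consumes them is not included here. The following is a minimal, hypothetical sketch of how such a module might arm the hook. The module name, handler body, and printk output are illustrative assumptions, and synchronization around setting/clearing the pointer is omitted.

/* chopstix_probe_sketch.c - hypothetical consumer of the rec_event hook */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/arrays.h>

/* Pointer defined in kernel/sched.c by this patch (EXPORT_SYMBOL). */
extern void (*rec_event)(void *, unsigned int);

/* Handler invoked from the patched probe sites (oprofile_add_sample, schedule).
 * "data" is a struct event filled in at the probe site; "count" is the sample
 * weight (1 for PMU samples, jiffies blocked for scheduler samples). */
static void chopstix_rec_event(void *data, unsigned int count)
{
        struct event *e = data;

        printk(KERN_DEBUG "chopstix: type=%u pid=%d count=%u\n",
               e->event_type, e->task ? e->task->pid : -1, count);
}

static int __init chopstix_probe_init(void)
{
        rec_event = chopstix_rec_event;  /* arm the hook */
        return 0;
}

static void __exit chopstix_probe_exit(void)
{
        rec_event = NULL;                /* disarm before unloading */
}

module_init(chopstix_probe_init);
module_exit(chopstix_probe_exit);
MODULE_LICENSE("GPL");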