1 /*
2  *  kernel/sched.c
3  *
4  *  Kernel scheduler and related syscalls
5  *
6  *  Copyright (C) 1991-2002  Linus Torvalds
7  *
8  *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9  *              make semaphores SMP safe
10  *  1998-11-19  Implemented schedule_timeout() and related stuff
11  *              by Andrea Arcangeli
12  *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
13  *              hybrid priority-list and round-robin design with
14  *              an array-switch method of distributing timeslices
15  *              and per-CPU runqueues.  Cleanups and useful suggestions
16  *              by Davide Libenzi, preemptible kernel bits by Robert Love.
17  *  2003-09-03  Interactivity tuning by Con Kolivas.
18  *  2004-04-02  Scheduler domains code by Nick Piggin
19  */
20 #include <linux/mm.h>
21 #include <linux/module.h>
22 #include <linux/nmi.h>
23 #include <linux/init.h>
24 #include <asm/uaccess.h>
25 #include <linux/highmem.h>
26 #include <linux/smp_lock.h>
27 #include <asm/mmu_context.h>
28 #include <linux/interrupt.h>
29 #include <linux/completion.h>
30 #include <linux/kernel_stat.h>
31 #include <linux/security.h>
32 #include <linux/notifier.h>
33 #include <linux/profile.h>
34 #include <linux/suspend.h>
35 #include <linux/blkdev.h>
36 #include <linux/delay.h>
37 #include <linux/smp.h>
38 #include <linux/timer.h>
39 #include <linux/rcupdate.h>
40 #include <linux/cpu.h>
41 #include <linux/percpu.h>
42 #include <linux/kthread.h>
43 #include <linux/seq_file.h>
44 #include <linux/syscalls.h>
45 #include <linux/times.h>
46 #include <linux/vserver/sched.h>
47 #include <linux/vs_base.h>
48 #include <linux/vs_context.h>
49 #include <linux/vs_cvirt.h>
50 #include <asm/tlb.h>
51
52 #include <asm/unistd.h>
53
54 #ifdef CONFIG_NUMA
55 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
56 #else
57 #define cpu_to_node_mask(cpu) (cpu_online_map)
58 #endif
59
60 /* used to soft spin in sched while dump is in progress */
61 unsigned long dump_oncpu;
62 EXPORT_SYMBOL(dump_oncpu);
63
64 /*
65  * Convert user-nice values [ -20 ... 0 ... 19 ]
66  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
67  * and back.
68  */
69 #define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
70 #define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
71 #define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
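/*
 * Illustrative mapping (assuming the usual MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140 from <linux/sched.h>):
 *
 *	NICE_TO_PRIO(-20) == 100	(highest non-RT static priority)
 *	NICE_TO_PRIO(  0) == 120	(the default)
 *	NICE_TO_PRIO( 19) == 139	(== MAX_PRIO-1, the lowest)
 *
 * PRIO_TO_NICE() simply inverts the mapping, so
 * PRIO_TO_NICE(NICE_TO_PRIO(n)) == n for any nice value n.
 */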
72
73 /*
74  * 'User priority' is the nice value converted to something we
75  * can work with better when scaling various scheduler parameters;
76  * it's a [ 0 ... 39 ] range.
77  */
78 #define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
79 #define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
80 #define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
81
82 /*
83  * Some helpers for converting nanosecond timing to jiffy resolution
84  */
85 #define NS_TO_JIFFIES(TIME)     ((TIME) / (1000000000 / HZ))
86 #define JIFFIES_TO_NS(TIME)     ((TIME) * (1000000000 / HZ))
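/*
 * For example, with HZ == 1000 one jiffy is 1000000 ns, so
 * NS_TO_JIFFIES(5000000) == 5 and JIFFIES_TO_NS(5) == 5000000;
 * with HZ == 100 the factor is 10000000 ns per jiffy.
 * (NS_TO_JIFFIES rounds down via integer division.)
 */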
87
88 /*
89  * These are the 'tuning knobs' of the scheduler:
90  *
91  * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
92  * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
93  * Timeslices get refilled after they expire.
94  */
95 #define MIN_TIMESLICE           max(5 * HZ / 1000, 1)
96 #define DEF_TIMESLICE           (100 * HZ / 1000)
97 #define ON_RUNQUEUE_WEIGHT       30
98 #define CHILD_PENALTY            95
99 #define PARENT_PENALTY          100
100 #define EXIT_WEIGHT               3
101 #define PRIO_BONUS_RATIO         25
102 #define MAX_BONUS               (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
103 #define INTERACTIVE_DELTA         2
104 #define MAX_SLEEP_AVG           (DEF_TIMESLICE * MAX_BONUS)
105 #define STARVATION_LIMIT        (MAX_SLEEP_AVG)
106 #define NS_MAX_SLEEP_AVG        (JIFFIES_TO_NS(MAX_SLEEP_AVG))
107 #define CREDIT_LIMIT            100
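/*
 * Derived values for reference (illustrative, assuming MAX_USER_PRIO == 40
 * and HZ == 1000):
 *
 *	MAX_BONUS        == 40 * 25 / 100       == 10
 *	DEF_TIMESLICE    == 100 * 1000 / 1000   == 100 jiffies (100 ms)
 *	MAX_SLEEP_AVG    == 100 * 10            == 1000 jiffies (1 s)
 *	NS_MAX_SLEEP_AVG == JIFFIES_TO_NS(1000) == 1000000000 ns
 */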
108
109 /*
110  * If a task is 'interactive' then we reinsert it in the active
111  * array after it has expired its current timeslice. (It will not
112  * continue to run immediately; it will still round-robin with
113  * other interactive tasks.)
114  *
115  * This part scales the interactivity limit depending on niceness.
116  *
117  * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
118  * Here are a few examples of different nice levels:
119  *
120  *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
121  *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
122  *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
123  *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
124  *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
125  *
126  * (The X axis represents the possible -5 ... 0 ... +5 dynamic
127  *  priority range a task can explore; a value of '1' means the
128  *  task is rated interactive.)
129  *
130  * I.e. nice +19 tasks can never get 'interactive' enough to be
131  * reinserted into the active array, and only heavily CPU-bound nice -20
132  * tasks will be expired. Default nice 0 tasks are somewhere in between:
133  * it takes some effort for them to get interactive, but it's not
134  * too hard.
135  */
136
137 #define CURRENT_BONUS(p) \
138         (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
139                 MAX_SLEEP_AVG)
140
141 #ifdef CONFIG_SMP
142 #define TIMESLICE_GRANULARITY(p)        (MIN_TIMESLICE * \
143                 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
144                         num_online_cpus())
145 #else
146 #define TIMESLICE_GRANULARITY(p)        (MIN_TIMESLICE * \
147                 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
148 #endif
149
150 #define SCALE(v1,v1_max,v2_max) \
151         (v1) * (v2_max) / (v1_max)
152
153 #define DELTA(p) \
154         (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
155
156 #define TASK_INTERACTIVE(p) \
157         ((p)->prio <= (p)->static_prio - DELTA(p))
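/*
 * Worked example (a sketch, using the derived values above): for a nice 0
 * task DELTA(p) == SCALE(0, 40, 10) + 2 == 2, so TASK_INTERACTIVE(p) holds
 * when p->prio <= p->static_prio - 2, i.e. when the task has earned a
 * dynamic-priority bonus of at least 2.  Given effective_prio() below,
 * that requires CURRENT_BONUS(p) >= MAX_BONUS/2 + 2 == 7, i.e. a
 * sleep_avg of at least 7/10 of MAX_SLEEP_AVG.
 */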
158
159 #define INTERACTIVE_SLEEP(p) \
160         (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
161                 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
162
163 #define HIGH_CREDIT(p) \
164         ((p)->interactive_credit > CREDIT_LIMIT)
165
166 #define LOW_CREDIT(p) \
167         ((p)->interactive_credit < -CREDIT_LIMIT)
168
169 #ifdef CONFIG_CKRM_CPU_SCHEDULE
170 /*
171  *  If the tasks belong to different classes, compare class priorities;
172  *  otherwise compare task priorities.
173  */
174 #define TASK_PREEMPTS_CURR(p, rq) \
175         ( ((p)->cpu_class != (rq)->curr->cpu_class) \
176           && ((rq)->curr != (rq)->idle) && ((p) != (rq)->idle )) \
177           ? class_preempts_curr((p),(rq)->curr)  \
178           : ((p)->prio < (rq)->curr->prio)
179 #else
180 #define TASK_PREEMPTS_CURR(p, rq) \
181         ((p)->prio < (rq)->curr->prio)
182 #endif
183
184 /*
185  * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
186  * to time slice values: [800ms ... 100ms ... 5ms]
187  *
188  * The higher a thread's priority, the bigger timeslices
189  * it gets during one round of execution. But even the lowest
190  * priority thread gets MIN_TIMESLICE worth of execution time.
191  */
192
193 #define SCALE_PRIO(x, prio) \
194         max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
195
196 unsigned int task_timeslice(task_t *p)
197 {
198         if (p->static_prio < NICE_TO_PRIO(0))
199                 return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
200         else
201                 return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
202 }
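/*
 * For example, with HZ == 1000 (so DEF_TIMESLICE == 100 jiffies):
 *
 *	nice -20 (static_prio 100): SCALE_PRIO(400, 100) == 400*40/20 == 800 ms
 *	nice   0 (static_prio 120): SCALE_PRIO(100, 120) == 100*20/20 == 100 ms
 *	nice +19 (static_prio 139): SCALE_PRIO(100, 139) == 100*1/20  ==   5 ms
 *
 * matching the [800ms ... 100ms ... 5ms] range described above.
 */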
203 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)       \
204                                 < (long long) (sd)->cache_hot_time)
205
206 /*
207  * These are the runqueue data structures:
208  */
209
210 typedef struct runqueue runqueue_t;
211 #include <linux/ckrm_classqueue.h>
212 #include <linux/ckrm_sched.h>
213
214 /*
215  * This is the main, per-CPU runqueue data structure.
216  *
217  * Locking rule: code that wants to lock multiple runqueues (such as
218  * the load-balancing or thread-migration code) must acquire the
219  * locks in ascending &runqueue order.
220  */
221 struct runqueue {
222         spinlock_t lock;
223
224         /*
225          * nr_running and cpu_load should be in the same cacheline because
226          * remote CPUs use both these fields when doing load calculation.
227          */
228         unsigned long nr_running;
229 #ifdef CONFIG_SMP
230         unsigned long cpu_load;
231 #endif
232         unsigned long long nr_switches;
233
234         /*
235          * This is part of a global counter where only the total sum
236          * over all CPUs matters. A task can increase this counter on
237          * one CPU and if it got migrated afterwards it may decrease
238          * it on another CPU. Always updated under the runqueue lock:
239          */
240         unsigned long nr_uninterruptible;
241
242         unsigned long expired_timestamp;
243         unsigned long long timestamp_last_tick;
244         task_t *curr, *idle;
245         struct mm_struct *prev_mm;
246 #ifdef CONFIG_CKRM_CPU_SCHEDULE
247         struct classqueue_struct classqueue;   
248         ckrm_load_t ckrm_load;
249 #else
250         prio_array_t *active, *expired, arrays[2];
251 #endif
252         int best_expired_prio;
253         atomic_t nr_iowait;
254
255 #ifdef CONFIG_SMP
256         struct sched_domain *sd;
257
258         /* For active balancing */
259         int active_balance;
260         int push_cpu;
261
262         task_t *migration_thread;
263         struct list_head migration_queue;
264 #endif
265
266 #ifdef CONFIG_VSERVER_HARDCPU
267         struct list_head hold_queue;
268         int idle_tokens;
269 #endif
270
271 #ifdef CONFIG_SCHEDSTATS
272         /* latency stats */
273         struct sched_info rq_sched_info;
274
275         /* sys_sched_yield() stats */
276         unsigned long yld_exp_empty;
277         unsigned long yld_act_empty;
278         unsigned long yld_both_empty;
279         unsigned long yld_cnt;
280
281         /* schedule() stats */
282         unsigned long sched_noswitch;
283         unsigned long sched_switch;
284         unsigned long sched_cnt;
285         unsigned long sched_goidle;
286
287         /* pull_task() stats */
288         unsigned long pt_gained[MAX_IDLE_TYPES];
289         unsigned long pt_lost[MAX_IDLE_TYPES];
290
291         /* active_load_balance() stats */
292         unsigned long alb_cnt;
293         unsigned long alb_lost;
294         unsigned long alb_gained;
295         unsigned long alb_failed;
296
297         /* try_to_wake_up() stats */
298         unsigned long ttwu_cnt;
299         unsigned long ttwu_attempts;
300         unsigned long ttwu_moved;
301
302         /* wake_up_new_task() stats */
303         unsigned long wunt_cnt;
304         unsigned long wunt_moved;
305
306         /* sched_migrate_task() stats */
307         unsigned long smt_cnt;
308
309         /* sched_balance_exec() stats */
310         unsigned long sbe_cnt;
311 #endif
312 };
313
314 static DEFINE_PER_CPU(struct runqueue, runqueues);
315
316 #define for_each_domain(cpu, domain) \
317         for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent)
318
319 #define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
320 #define this_rq()               (&__get_cpu_var(runqueues))
321 #define task_rq(p)              cpu_rq(task_cpu(p))
322 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
323
324 /*
325  * Default context-switch locking:
326  */
327 #ifndef prepare_arch_switch
328 # define prepare_arch_switch(rq, next)  do { } while (0)
329 # define finish_arch_switch(rq, next)   spin_unlock_irq(&(rq)->lock)
330 # define task_running(rq, p)            ((rq)->curr == (p))
331 #endif
332
333 /*
334  * task_rq_lock - lock the runqueue a given task resides on and disable
335  * interrupts.  Note the ordering: we can safely lookup the task_rq without
336  * explicitly disabling preemption.
337  */
338 static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
339         __acquires(rq->lock)
340 {
341         struct runqueue *rq;
342
343 repeat_lock_task:
344         local_irq_save(*flags);
345         rq = task_rq(p);
346         spin_lock(&rq->lock);
347         if (unlikely(rq != task_rq(p))) {
348                 spin_unlock_irqrestore(&rq->lock, *flags);
349                 goto repeat_lock_task;
350         }
351         return rq;
352 }
353
354 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
355         __releases(rq->lock)
356 {
357         spin_unlock_irqrestore(&rq->lock, *flags);
358 }
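/*
 * Typical usage pattern (illustrative sketch only):
 *
 *	unsigned long flags;
 *	runqueue_t *rq = task_rq_lock(p, &flags);
 *	... inspect or modify p's scheduling state under rq->lock ...
 *	task_rq_unlock(rq, &flags);
 *
 * The retry loop in task_rq_lock() copes with the task migrating to
 * another CPU between the task_rq() lookup and taking the lock.
 */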
359
360 #ifdef CONFIG_SCHEDSTATS
361 /*
362  * bump this up when changing the output format or the meaning of an existing
363  * format, so that tools can adapt (or abort)
364  */
365 #define SCHEDSTAT_VERSION 10
366
367 static int show_schedstat(struct seq_file *seq, void *v)
368 {
369         int cpu;
370         enum idle_type itype;
371
372         seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
373         seq_printf(seq, "timestamp %lu\n", jiffies);
374         for_each_online_cpu(cpu) {
375                 runqueue_t *rq = cpu_rq(cpu);
376 #ifdef CONFIG_SMP
377                 struct sched_domain *sd;
378                 int dcnt = 0;
379 #endif
380
381                 /* runqueue-specific stats */
382                 seq_printf(seq,
383                     "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu "
384                     "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
385                     cpu, rq->yld_both_empty,
386                     rq->yld_act_empty, rq->yld_exp_empty,
387                     rq->yld_cnt, rq->sched_noswitch,
388                     rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
389                     rq->alb_cnt, rq->alb_gained, rq->alb_lost,
390                     rq->alb_failed,
391                     rq->ttwu_cnt, rq->ttwu_moved, rq->ttwu_attempts,
392                     rq->wunt_cnt, rq->wunt_moved,
393                     rq->smt_cnt, rq->sbe_cnt, rq->rq_sched_info.cpu_time,
394                     rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
395
396                 for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES; itype++)
397                         seq_printf(seq, " %lu %lu", rq->pt_gained[itype],
398                                                     rq->pt_lost[itype]);
399                 seq_printf(seq, "\n");
400
401 #ifdef CONFIG_SMP
402                 /* domain-specific stats */
403                 for_each_domain(cpu, sd) {
404                         char mask_str[NR_CPUS];
405
406                         cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
407                         seq_printf(seq, "domain%d %s", dcnt++, mask_str);
408                         for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
409                                                 itype++) {
410                                 seq_printf(seq, " %lu %lu %lu %lu %lu",
411                                     sd->lb_cnt[itype],
412                                     sd->lb_failed[itype],
413                                     sd->lb_imbalance[itype],
414                                     sd->lb_nobusyq[itype],
415                                     sd->lb_nobusyg[itype]);
416                         }
417                         seq_printf(seq, " %lu %lu %lu %lu\n",
418                             sd->sbe_pushed, sd->sbe_attempts,
419                             sd->ttwu_wake_affine, sd->ttwu_wake_balance);
420                 }
421 #endif
422         }
423         return 0;
424 }
425
426 static int schedstat_open(struct inode *inode, struct file *file)
427 {
428         unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
429         char *buf = kmalloc(size, GFP_KERNEL);
430         struct seq_file *m;
431         int res;
432
433         if (!buf)
434                 return -ENOMEM;
435         res = single_open(file, show_schedstat, NULL);
436         if (!res) {
437                 m = file->private_data;
438                 m->buf = buf;
439                 m->size = size;
440         } else
441                 kfree(buf);
442         return res;
443 }
444
445 struct file_operations proc_schedstat_operations = {
446         .open    = schedstat_open,
447         .read    = seq_read,
448         .llseek  = seq_lseek,
449         .release = single_release,
450 };
451
452 # define schedstat_inc(rq, field)       do { (rq)->field++; } while (0)
453 # define schedstat_add(rq, field, amt)  do { (rq)->field += (amt); } while (0)
454 #else /* !CONFIG_SCHEDSTATS */
455 # define schedstat_inc(rq, field)       do { } while (0)
456 # define schedstat_add(rq, field, amt)  do { } while (0)
457 #endif
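/*
 * Illustrative use: a counter bump such as schedstat_inc(rq, ttwu_cnt)
 * (see try_to_wake_up() below) costs one increment when CONFIG_SCHEDSTATS
 * is enabled and compiles away to nothing when it is not.
 */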
458
459 /*
460  * this_rq_lock - lock this CPU's runqueue and disable interrupts.
461  */
462 static runqueue_t *this_rq_lock(void)
463         __acquires(rq->lock)
464 {
465         runqueue_t *rq;
466
467         local_irq_disable();
468         rq = this_rq();
469         spin_lock(&rq->lock);
470
471         return rq;
472 }
473
474 static inline void rq_unlock(runqueue_t *rq)
475         __releases(rq->lock)
476 {
477         spin_unlock_irq(&rq->lock);
478 }
479
480 #ifdef CONFIG_SCHEDSTATS
481 /*
482  * Called when a process is dequeued from the active array and given
483  * the cpu.  We should note that with the exception of interactive
484  * tasks, the expired queue will become the active queue after the active
485  * queue is empty, without explicitly dequeuing and requeuing tasks in the
486  * expired queue.  (Interactive tasks may be requeued directly to the
487  * active queue, thus delaying tasks in the expired queue from running;
488  * see scheduler_tick()).
489  *
490  * This function is only called from sched_info_arrive(), rather than
491  * dequeue_task(). Even though a task may be queued and dequeued multiple
492  * times as it is shuffled about, we're really interested in knowing how
493  * long it was from the *first* time it was queued to the time that it
494  * finally hit a cpu.
495  */
496 static inline void sched_info_dequeued(task_t *t)
497 {
498         t->sched_info.last_queued = 0;
499 }
500
501 /*
502  * Called when a task finally hits the cpu.  We can now calculate how
503  * long it was waiting to run.  We also note when it began so that we
504  * can keep stats on how long its timeslice is.
505  */
506 static inline void sched_info_arrive(task_t *t)
507 {
508         unsigned long now = jiffies, diff = 0;
509         struct runqueue *rq = task_rq(t);
510
511         if (t->sched_info.last_queued)
512                 diff = now - t->sched_info.last_queued;
513         sched_info_dequeued(t);
514         t->sched_info.run_delay += diff;
515         t->sched_info.last_arrival = now;
516         t->sched_info.pcnt++;
517
518         if (!rq)
519                 return;
520
521         rq->rq_sched_info.run_delay += diff;
522         rq->rq_sched_info.pcnt++;
523 }
524
525 /*
526  * Called when a process is queued into either the active or expired
527  * array.  The time is noted and later used to determine how long the
528  * task had to wait to reach the cpu.  Since the expired queue will
529  * become the active queue after the active queue is empty, without dequeuing
530  * and requeuing any tasks, we are interested in queuing to either. It
531  * is unusual but not impossible for tasks to be dequeued and immediately
532  * requeued in the same or another array: this can happen in sched_yield(),
533  * set_user_nice(), and even load_balance() as it moves tasks from runqueue
534  * to runqueue.
535  *
536  * This function is only called from enqueue_task(), and it only updates
537  * the timestamp if it is not already set.  It's assumed that
538  * sched_info_dequeued() will clear that stamp when appropriate.
539  */
540 static inline void sched_info_queued(task_t *t)
541 {
542         if (!t->sched_info.last_queued)
543                 t->sched_info.last_queued = jiffies;
544 }
545
546 /*
547  * Called when a process ceases being the active-running process, either
548  * voluntarily or involuntarily.  Now we can calculate how long we ran.
549  */
550 static inline void sched_info_depart(task_t *t)
551 {
552         struct runqueue *rq = task_rq(t);
553         unsigned long diff = jiffies - t->sched_info.last_arrival;
554
555         t->sched_info.cpu_time += diff;
556
557         if (rq)
558                 rq->rq_sched_info.cpu_time += diff;
559 }
560
561 /*
562  * Called when tasks are switched involuntarily due, typically, to expiring
563  * their time slice.  (This may also be called when switching to or from
564  * the idle task.)  We are only called when prev != next.
565  */
566 static inline void sched_info_switch(task_t *prev, task_t *next)
567 {
568         struct runqueue *rq = task_rq(prev);
569
570         /*
571          * prev now departs the cpu.  It's not interesting to record
572          * stats about how efficient we were at scheduling the idle
573          * process, however.
574          */
575         if (prev != rq->idle)
576                 sched_info_depart(prev);
577
578         if (next != rq->idle)
579                 sched_info_arrive(next);
580 }
581 #else
582 #define sched_info_queued(t)            do { } while (0)
583 #define sched_info_switch(t, next)      do { } while (0)
584 #endif /* CONFIG_SCHEDSTATS */
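/*
 * Illustrative timeline for the accounting above (times in jiffies):
 * a task queued at t=100 records last_queued=100; if it first gets a
 * CPU at t=130, sched_info_arrive() adds 30 to run_delay; if it is
 * switched out at t=150, sched_info_depart() adds 20 to cpu_time.
 */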
585
586 #ifdef CONFIG_CKRM_CPU_SCHEDULE
587 static inline ckrm_lrq_t *rq_get_next_class(struct runqueue *rq)
588 {
589         cq_node_t *node = classqueue_get_head(&rq->classqueue);
590         return ((node) ? class_list_entry(node) : NULL);
591 }
592
593 /*
594  * Return the cvt of the currently running class.
595  * If there is no currently running class, return 0.
596  * Assumes cpu is valid (cpu_online(cpu) == 1).
597  */
598 CVT_t get_local_cur_cvt(int cpu)
599 {
600         ckrm_lrq_t * lrq = rq_get_next_class(cpu_rq(cpu));
601
602         if (lrq)
603                 return lrq->local_cvt;
604         else    
605                 return 0;
606 }
607
608 static inline struct task_struct * rq_get_next_task(struct runqueue* rq) 
609 {
610         prio_array_t               *array;
611         struct task_struct         *next;
612         ckrm_lrq_t *queue;
613         int idx;
614         int cpu = smp_processor_id();
615
616         // It is guaranteed by the ( rq->nr_running > 0 ) check in
617         // schedule() that a task will be found.
618
619  retry_next_class:
620         queue = rq_get_next_class(rq);
621         // BUG_ON( !queue );
622
623         array = queue->active;
624         if (unlikely(!array->nr_active)) {
625                 queue->active = queue->expired;
626                 queue->expired = array;
627                 queue->expired_timestamp = 0;
628
629                 schedstat_inc(rq, sched_switch);
630                 if (queue->active->nr_active)
631                         set_top_priority(queue,
632                                          find_first_bit(queue->active->bitmap, MAX_PRIO));
633                 else {
634                         classqueue_dequeue(queue->classqueue,
635                                            &queue->classqueue_linkobj);
636                         cpu_demand_event(get_rq_local_stat(queue,cpu),CPU_DEMAND_DEQUEUE,0);
637                 }
638                 goto retry_next_class;                          
639         } else
640                 schedstat_inc(rq, sched_noswitch);
641         // BUG_ON(!array->nr_active);
642
643         idx = queue->top_priority;
644         // BUG_ON (idx == MAX_PRIO);
645         next = task_list_entry(array->queue[idx].next);
646         return next;
647 }
648 #else /*! CONFIG_CKRM_CPU_SCHEDULE*/
649 static inline struct task_struct * rq_get_next_task(struct runqueue* rq) 
650 {
651         prio_array_t *array;
652         struct list_head *queue;
653         int idx;
654
655         array = rq->active;
656         if (unlikely(!array->nr_active)) {
657                 /*
658                  * Switch the active and expired arrays.
659                  */
660                 schedstat_inc(rq, sched_switch);
661                 rq->active = rq->expired;
662                 rq->expired = array;
663                 array = rq->active;
664                 rq->expired_timestamp = 0;
665                 rq->best_expired_prio = MAX_PRIO;
666         } else 
667                 schedstat_inc(rq, sched_noswitch);
668
669         idx = sched_find_first_bit(array->bitmap);
670         queue = array->queue + idx;
671         return list_entry(queue->next, task_t, run_list);
672 }
673
674 static inline void class_enqueue_task(struct task_struct* p, prio_array_t *array) { }
675 static inline void class_dequeue_task(struct task_struct* p, prio_array_t *array) { }
676 static inline void init_cpu_classes(void) { }
677 #define rq_ckrm_load(rq) NULL
678 static inline void ckrm_sched_tick(int j,int this_cpu,void* name) {}
679 #endif  /* CONFIG_CKRM_CPU_SCHEDULE */
680
681 /*
682  * Adding/removing a task to/from a priority array:
683  */
684 static void dequeue_task(struct task_struct *p, prio_array_t *array)
685 {
686         array->nr_active--;
687         list_del(&p->run_list);
688         if (list_empty(array->queue + p->prio))
689                 __clear_bit(p->prio, array->bitmap);
690         class_dequeue_task(p,array);
691 }
692
693 static void enqueue_task(struct task_struct *p, prio_array_t *array)
694 {
695         sched_info_queued(p);
696         list_add_tail(&p->run_list, array->queue + p->prio);
697         __set_bit(p->prio, array->bitmap);
698         array->nr_active++;
699         p->array = array;
700         class_enqueue_task(p,array);
701 }
702
703 /*
704  * Used by the migration code - we pull tasks from the head of the
705  * remote queue so we want these tasks to show up at the head of the
706  * local queue:
707  */
708 static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
709 {
710         list_add(&p->run_list, array->queue + p->prio);
711         __set_bit(p->prio, array->bitmap);
712         array->nr_active++;
713         p->array = array;
714         class_enqueue_task(p,array);
715 }
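/*
 * Together these keep the O(1) invariant: bit p->prio in array->bitmap is
 * set if and only if array->queue[p->prio] is non-empty, so the scheduler
 * can pick the next task with a find-first-bit plus a list-head lookup
 * (see rq_get_next_task() above).
 */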
716
717 /*
718  * effective_prio - return the priority that is based on the static
719  * priority but is modified by bonuses/penalties.
720  *
721  * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
722  * into the -5 ... 0 ... +5 bonus/penalty range.
723  *
724  * We use 25% of the full 0...39 priority range so that:
725  *
726  * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
727  * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
728  *
729  * Both properties are important to certain workloads.
730  */
731 static int effective_prio(task_t *p)
732 {
733         int bonus, prio;
734
735         if (rt_task(p))
736                 return p->prio;
737
738         bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
739
740         prio = p->static_prio - bonus;
741
742 #ifdef CONFIG_VSERVER_HARDCPU
743         if (task_vx_flags(p, VXF_SCHED_PRIO, 0))
744                 prio += effective_vavavoom(p, MAX_USER_PRIO);
745 #endif
746
747         if (prio < MAX_RT_PRIO)
748                 prio = MAX_RT_PRIO;
749         if (prio > MAX_PRIO-1)
750                 prio = MAX_PRIO-1;
751         return prio;
752 }
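/*
 * Example (a sketch, with MAX_BONUS == 10): a nice 0 task (static_prio 120)
 * with a full sleep_avg gets CURRENT_BONUS == 10, i.e. bonus == +5 and
 * prio == 115; the same task with sleep_avg == 0 gets bonus == -5 and
 * prio == 125.  The clamping keeps the result in [MAX_RT_PRIO, MAX_PRIO-1].
 */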
753
754 /*
755  * __activate_task - move a task to the runqueue.
756  */
757 static inline void __activate_task(task_t *p, runqueue_t *rq)
758 {
759         enqueue_task(p, rq_active(p,rq));
760         rq->nr_running++;
761 }
762
763 /*
764  * __activate_idle_task - move idle task to the _front_ of runqueue.
765  */
766 static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
767 {
768         enqueue_task_head(p, rq_active(p,rq));
769         rq->nr_running++;
770 }
771
772 static void recalc_task_prio(task_t *p, unsigned long long now)
773 {
774         unsigned long long __sleep_time = now - p->timestamp;
775         unsigned long sleep_time;
776
777         if (__sleep_time > NS_MAX_SLEEP_AVG)
778                 sleep_time = NS_MAX_SLEEP_AVG;
779         else
780                 sleep_time = (unsigned long)__sleep_time;
781
782         if (likely(sleep_time > 0)) {
783                 /*
784                  * User tasks that sleep a long time are categorised as
785                  * idle and are given just enough interactive status to stay
786                  * active and to prevent them from suddenly becoming cpu hogs and starving
787                  * other processes.
788                  */
789                 if (p->mm && p->activated != -1 &&
790                         sleep_time > INTERACTIVE_SLEEP(p)) {
791                                 p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
792                                                 DEF_TIMESLICE);
793                                 if (!HIGH_CREDIT(p))
794                                         p->interactive_credit++;
795                 } else {
796                         /*
797                          * The lower the sleep avg a task has the more
798                          * rapidly it will rise with sleep time.
799                          */
800                         sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
801
802                         /*
803                          * Tasks with low interactive_credit are limited to
804                          * one timeslice worth of sleep avg bonus.
805                          */
806                         if (LOW_CREDIT(p) &&
807                             sleep_time > JIFFIES_TO_NS(task_timeslice(p)))
808                                 sleep_time = JIFFIES_TO_NS(task_timeslice(p));
809
810                         /*
811                          * Non high_credit tasks waking from uninterruptible
812                          * sleep are limited in their sleep_avg rise as they
813                          * are likely to be cpu hogs waiting on I/O
814                          */
815                         if (p->activated == -1 && !HIGH_CREDIT(p) && p->mm) {
816                                 if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
817                                         sleep_time = 0;
818                                 else if (p->sleep_avg + sleep_time >=
819                                                 INTERACTIVE_SLEEP(p)) {
820                                         p->sleep_avg = INTERACTIVE_SLEEP(p);
821                                         sleep_time = 0;
822                                 }
823                         }
824
825                         /*
826                          * This code gives a bonus to interactive tasks.
827                          *
828                          * The boost works by updating the 'average sleep time'
829                          * value here, based on ->timestamp. The more time a
830                          * task spends sleeping, the higher the average gets -
831                          * and the higher the priority boost gets as well.
832                          */
833                         p->sleep_avg += sleep_time;
834
835                         if (p->sleep_avg > NS_MAX_SLEEP_AVG) {
836                                 p->sleep_avg = NS_MAX_SLEEP_AVG;
837                                 if (!HIGH_CREDIT(p))
838                                         p->interactive_credit++;
839                         }
840                 }
841         }
842
843         p->prio = effective_prio(p);
844 }
845
846 /*
847  * activate_task - move a task to the runqueue and do priority recalculation
848  *
849  * Update all the scheduling statistics stuff. (sleep average
850  * calculation, priority modifiers, etc.)
851  */
852 static void activate_task(task_t *p, runqueue_t *rq, int local)
853 {
854         unsigned long long now;
855
856         now = sched_clock();
857 #ifdef CONFIG_SMP
858         if (!local) {
859                 /* Compensate for drifting sched_clock */
860                 runqueue_t *this_rq = this_rq();
861                 now = (now - this_rq->timestamp_last_tick)
862                         + rq->timestamp_last_tick;
863         }
864 #endif
865
866         recalc_task_prio(p, now);
867
868         /*
869          * This checks to make sure it's not an uninterruptible task
870          * that is now waking up.
871          */
872         if (!p->activated) {
873                 /*
874                  * Tasks which were woken up by interrupts (ie. hw events)
875                  * are most likely of interactive nature. So we give them
876                  * the credit of extending their sleep time to the period
877                  * of time they spend on the runqueue, waiting for execution
878                  * on a CPU, first time around:
879                  */
880                 if (in_interrupt())
881                         p->activated = 2;
882                 else {
883                         /*
884                          * Normal first-time wakeups get a credit too for
885                          * on-runqueue time, but it will be weighted down:
886                          */
887                         p->activated = 1;
888                 }
889         }
890         p->timestamp = now;
891
892         vx_activate_task(p);
893         __activate_task(p, rq);
894 }
895
896 /*
897  * deactivate_task - remove a task from the runqueue.
898  */
899 static void __deactivate_task(struct task_struct *p, runqueue_t *rq)
900 {
901         rq->nr_running--;
902         dequeue_task(p, p->array);
903
904         p->array = NULL;
905 }
906
907 static void deactivate_task(struct task_struct *p, runqueue_t *rq)
908 {
909         __deactivate_task(p, rq);
910         vx_deactivate_task(p);
911 }
912
913 /*
914  * resched_task - mark a task 'to be rescheduled now'.
915  *
916  * On UP this means the setting of the need_resched flag, on SMP it
917  * might also involve a cross-CPU call to trigger the scheduler on
918  * the target CPU.
919  */
920 #ifdef CONFIG_SMP
921 static void resched_task(task_t *p)
922 {
923         int need_resched, nrpolling;
924
925         BUG_ON(!spin_is_locked(&task_rq(p)->lock));
926
927         /* minimise the chance of sending an interrupt to poll_idle() */
928         nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
929         need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
930         nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
931
932         if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
933                 smp_send_reschedule(task_cpu(p));
934 }
935 #else
936 static inline void resched_task(task_t *p)
937 {
938         set_tsk_need_resched(p);
939 }
940 #endif
941
942 /**
943  * task_curr - is this task currently executing on a CPU?
944  * @p: the task in question.
945  */
946 inline int task_curr(const task_t *p)
947 {
948         return cpu_curr(task_cpu(p)) == p;
949 }
950
951 #ifdef CONFIG_SMP
952 enum request_type {
953         REQ_MOVE_TASK,
954         REQ_SET_DOMAIN,
955 };
956
957 typedef struct {
958         struct list_head list;
959         enum request_type type;
960
961         /* For REQ_MOVE_TASK */
962         task_t *task;
963         int dest_cpu;
964
965         /* For REQ_SET_DOMAIN */
966         struct sched_domain *sd;
967
968         struct completion done;
969 } migration_req_t;
970
971 /*
972  * The task's runqueue lock must be held.
973  * Returns true if you have to wait for migration thread.
974  */
975 static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
976 {
977         runqueue_t *rq = task_rq(p);
978
979         /*
980          * If the task is not on a runqueue (and not running), then
981          * it is sufficient to simply update the task's cpu field.
982          */
983         if (!p->array && !task_running(rq, p)) {
984                 set_task_cpu(p, dest_cpu);
985                 return 0;
986         }
987
988         init_completion(&req->done);
989         req->type = REQ_MOVE_TASK;
990         req->task = p;
991         req->dest_cpu = dest_cpu;
992         list_add(&req->list, &rq->migration_queue);
993         return 1;
994 }
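/*
 * Caller pattern (sketch): when migrate_task() returns 1 the caller is
 * expected to drop the runqueue lock, wake rq->migration_thread and
 * wait_for_completion(&req.done); see set_cpus_allowed() later in this
 * file for the canonical example.
 */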
995
996 /*
997  * wait_task_inactive - wait for a thread to unschedule.
998  *
999  * The caller must ensure that the task *will* unschedule sometime soon,
1000  * else this function might spin for a *long* time. This function can't
1001  * be called with interrupts off, or it may introduce deadlock with
1002  * smp_call_function() if an IPI is sent by the same process we are
1003  * waiting to become inactive.
1004  */
1005 void wait_task_inactive(task_t * p)
1006 {
1007         unsigned long flags;
1008         runqueue_t *rq;
1009         int preempted;
1010
1011 repeat:
1012         rq = task_rq_lock(p, &flags);
1013         /* Must be off runqueue entirely, not preempted. */
1014         if (unlikely(p->array)) {
1015                 /* If it's preempted, we yield.  It could be a while. */
1016                 preempted = !task_running(rq, p);
1017                 task_rq_unlock(rq, &flags);
1018                 cpu_relax();
1019                 if (preempted)
1020                         yield();
1021                 goto repeat;
1022         }
1023         task_rq_unlock(rq, &flags);
1024 }
1025
1026 /***
1027  * kick_process - kick a running thread to enter/exit the kernel
1028  * @p: the to-be-kicked thread
1029  *
1030  * Cause a process which is running on another CPU to enter
1031  * kernel-mode, without any delay. (to get signals handled.)
1032  */
1033 void kick_process(task_t *p)
1034 {
1035         int cpu;
1036
1037         preempt_disable();
1038         cpu = task_cpu(p);
1039         if ((cpu != smp_processor_id()) && task_curr(p))
1040                 smp_send_reschedule(cpu);
1041         preempt_enable();
1042 }
1043
1044 /*
1045  * Return a low guess at the load of a migration-source cpu.
1046  *
1047  * We want to under-estimate the load of migration sources, to
1048  * balance conservatively.
1049  */
1050 static inline unsigned long source_load(int cpu)
1051 {
1052         runqueue_t *rq = cpu_rq(cpu);
1053         unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
1054
1055         return min(rq->cpu_load, load_now);
1056 }
1057
1058 /*
1059  * Return a high guess at the load of a migration-target cpu
1060  */
1061 static inline unsigned long target_load(int cpu)
1062 {
1063         runqueue_t *rq = cpu_rq(cpu);
1064         unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
1065
1066         return max(rq->cpu_load, load_now);
1067 }
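/*
 * Example (illustrative, with SCHED_LOAD_SCALE == 128): a runqueue with
 * 3 runnable tasks and a decayed cpu_load of 500 gives load_now == 384,
 * so source_load() reports 384 (the optimistic, lower figure) while
 * target_load() reports 500 (the pessimistic, higher one).
 */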
1068
1069 #endif
1070
1071 /*
1072  * wake_idle() is useful especially on SMT architectures to wake a
1073  * task onto an idle sibling if we would otherwise wake it onto a
1074  * busy sibling.
1075  *
1076  * Returns the CPU we should wake onto.
1077  */
1078 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
1079 static int wake_idle(int cpu, task_t *p)
1080 {
1081         cpumask_t tmp;
1082         runqueue_t *rq = cpu_rq(cpu);
1083         struct sched_domain *sd;
1084         int i;
1085
1086         if (idle_cpu(cpu))
1087                 return cpu;
1088
1089         sd = rq->sd;
1090         if (!(sd->flags & SD_WAKE_IDLE))
1091                 return cpu;
1092
1093         cpus_and(tmp, sd->span, p->cpus_allowed);
1094
1095         for_each_cpu_mask(i, tmp) {
1096                 if (idle_cpu(i))
1097                         return i;
1098         }
1099
1100         return cpu;
1101 }
1102 #else
1103 static inline int wake_idle(int cpu, task_t *p)
1104 {
1105         return cpu;
1106 }
1107 #endif
1108
1109 /***
1110  * try_to_wake_up - wake up a thread
1111  * @p: the to-be-woken-up thread
1112  * @state: the mask of task states that can be woken
1113  * @sync: do a synchronous wakeup?
1114  *
1115  * Put it on the run-queue if it's not already there. The "current"
1116  * thread is always on the run-queue (except when the actual
1117  * re-schedule is in progress), and as such you're allowed to do
1118  * the simpler "current->state = TASK_RUNNING" to mark yourself
1119  * runnable without the overhead of this.
1120  *
1121  * returns failure only if the task is already active.
1122  */
1123 static int try_to_wake_up(task_t * p, unsigned int state, int sync)
1124 {
1125         int cpu, this_cpu, success = 0;
1126         unsigned long flags;
1127         long old_state;
1128         runqueue_t *rq;
1129 #ifdef CONFIG_SMP
1130         unsigned long load, this_load;
1131         struct sched_domain *sd;
1132         int new_cpu;
1133 #endif
1134
1135         rq = task_rq_lock(p, &flags);
1136         schedstat_inc(rq, ttwu_cnt);
1137         old_state = p->state;
1138         if (!(old_state & state))
1139                 goto out;
1140
1141         if (p->array)
1142                 goto out_running;
1143
1144         cpu = task_cpu(p);
1145         this_cpu = smp_processor_id();
1146
1147 #ifdef CONFIG_SMP
1148         if (unlikely(task_running(rq, p)))
1149                 goto out_activate;
1150
1151         new_cpu = cpu;
1152
1153         if (cpu == this_cpu || unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
1154                 goto out_set_cpu;
1155
1156         load = source_load(cpu);
1157         this_load = target_load(this_cpu);
1158
1159         /*
1160          * If sync wakeup then subtract the (maximum possible) effect of
1161          * the currently running task from the load of the current CPU:
1162          */
1163         if (sync)
1164                 this_load -= SCHED_LOAD_SCALE;
1165
1166         /* Don't pull the task off an idle CPU to a busy one */
1167         if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
1168                 goto out_set_cpu;
1169
1170         new_cpu = this_cpu; /* Wake to this CPU if we can */
1171
1172         /*
1173          * Scan domains for affine wakeup and passive balancing
1174          * possibilities.
1175          */
1176         for_each_domain(this_cpu, sd) {
1177                 unsigned int imbalance;
1178                 /*
1179                  * Start passive balancing when half the imbalance_pct
1180                  * limit is reached.
1181                  */
1182                 imbalance = sd->imbalance_pct + (sd->imbalance_pct - 100) / 2;
1183
1184                 if ((sd->flags & SD_WAKE_AFFINE) &&
1185                                 !task_hot(p, rq->timestamp_last_tick, sd)) {
1186                         /*
1187                          * This domain has SD_WAKE_AFFINE and p is cache cold
1188                          * in this domain.
1189                          */
1190                         if (cpu_isset(cpu, sd->span)) {
1191                                 schedstat_inc(sd, ttwu_wake_affine);
1192                                 goto out_set_cpu;
1193                         }
1194                 } else if ((sd->flags & SD_WAKE_BALANCE) &&
1195                                 imbalance*this_load <= 100*load) {
1196                         /*
1197                          * This domain has SD_WAKE_BALANCE and there is
1198                          * an imbalance.
1199                          */
1200                         if (cpu_isset(cpu, sd->span)) {
1201                                 schedstat_inc(sd, ttwu_wake_balance);
1202                                 goto out_set_cpu;
1203                         }
1204                 }
1205         }
1206
1207         new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
1208 out_set_cpu:
1209         schedstat_inc(rq, ttwu_attempts);
1210         new_cpu = wake_idle(new_cpu, p);
1211         if (new_cpu != cpu && cpu_isset(new_cpu, p->cpus_allowed)) {
1212                 schedstat_inc(rq, ttwu_moved);
1213                 set_task_cpu(p, new_cpu);
1214                 task_rq_unlock(rq, &flags);
1215                 /* might preempt at this point */
1216                 rq = task_rq_lock(p, &flags);
1217                 old_state = p->state;
1218                 if (!(old_state & state))
1219                         goto out;
1220                 if (p->array)
1221                         goto out_running;
1222
1223                 this_cpu = smp_processor_id();
1224                 cpu = task_cpu(p);
1225         }
1226
1227 out_activate:
1228 #endif /* CONFIG_SMP */
1229         if (old_state == TASK_UNINTERRUPTIBLE) {
1230                 rq->nr_uninterruptible--;
1231                 /*
1232                  * Tasks on involuntary sleep don't earn
1233                  * sleep_avg beyond just interactive state.
1234                  */
1235                 p->activated = -1;
1236         }
1237
1238         /*
1239          * Sync wakeups (i.e. those types of wakeups where the waker
1240          * has indicated that it will leave the CPU in short order)
1241          * don't trigger a preemption if the woken-up task will run on
1242          * this cpu. (In this case the 'I will reschedule' promise of
1243          * the waker guarantees that the freshly woken-up task is going
1244          * to be considered on this CPU.)
1245          */
1246         activate_task(p, rq, cpu == this_cpu);
1247         if (!sync || cpu != this_cpu) {
1248                 if (TASK_PREEMPTS_CURR(p, rq))
1249                         resched_task(rq->curr);
1250         }
1251         success = 1;
1252
1253 out_running:
1254         p->state = TASK_RUNNING;
1255 out:
1256         task_rq_unlock(rq, &flags);
1257
1258         return success;
1259 }
1260
1261 int fastcall wake_up_process(task_t * p)
1262 {
1263         return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
1264                                  TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
1265 }
1266
1267 EXPORT_SYMBOL(wake_up_process);
1268
1269 int fastcall wake_up_state(task_t *p, unsigned int state)
1270 {
1271         return try_to_wake_up(p, state, 0);
1272 }
1273
1274 #ifdef CONFIG_SMP
1275 static int find_idlest_cpu(struct task_struct *p, int this_cpu,
1276                            struct sched_domain *sd);
1277 #endif
1278
1279 /*
1280  * Perform scheduler related setup for a newly forked process p.
1281  * p is forked by current.
1282  */
1283 void fastcall sched_fork(task_t *p)
1284 {
1285         /*
1286          * We mark the process as running here, but have not actually
1287          * inserted it onto the runqueue yet. This guarantees that
1288          * nobody will actually run it, and a signal or other external
1289          * event cannot wake it up and insert it on the runqueue either.
1290          */
1291         p->state = TASK_RUNNING;
1292         INIT_LIST_HEAD(&p->run_list);
1293         p->array = NULL;
1294         spin_lock_init(&p->switch_lock);
1295 #ifdef CONFIG_SCHEDSTATS
1296         memset(&p->sched_info, 0, sizeof(p->sched_info));
1297 #endif
1298 #ifdef CONFIG_CKRM_CPU_SCHEDULE
1299         cpu_demand_event(&p->demand_stat,CPU_DEMAND_INIT,0);
1300 #endif
1301 #ifdef CONFIG_PREEMPT
1302         /*
1303          * During context-switch we hold precisely one spinlock, which
1304          * schedule_tail drops. (in the common case it's this_rq()->lock,
1305          * but it also can be p->switch_lock.) So we compensate with a count
1306          * of 1. Also, we want to start with kernel preemption disabled.
1307          */
1308         p->thread_info->preempt_count = 1;
1309 #endif
1310         /*
1311          * Share the timeslice between parent and child, so that the
1312          * total amount of pending timeslices in the system doesn't change,
1313          * resulting in more scheduling fairness.
1314          */
1315         local_irq_disable();
1316         p->time_slice = (current->time_slice + 1) >> 1;
1317         /*
1318          * The remainder of the first timeslice might be recovered by
1319          * the parent if the child exits early enough.
1320          */
1321         p->first_time_slice = 1;
1322         current->time_slice >>= 1;
1323         p->timestamp = sched_clock();
1324         if (unlikely(!current->time_slice)) {
1325                 /*
1326                  * This case is rare, it happens when the parent has only
1327                  * a single jiffy left from its timeslice. Taking the
1328                  * runqueue lock is not a problem.
1329                  */
1330                 current->time_slice = 1;
1331                 preempt_disable();
1332                 scheduler_tick(0, 0);
1333                 local_irq_enable();
1334                 preempt_enable();
1335         } else
1336                 local_irq_enable();
1337 }
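/*
 * Timeslice-split example (sketch): if the parent has 11 jiffies left,
 * the child starts with (11 + 1) >> 1 == 6 and the parent keeps
 * 11 >> 1 == 5, so the combined total stays at 11 and no timeslice is
 * created out of thin air.  first_time_slice lets sched_exit() hand the
 * child's share back if the child exits early.
 */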
1338
1339 /*
1340  * wake_up_new_task - wake up a newly created task for the first time.
1341  *
1342  * This function will do some initial scheduler statistics housekeeping
1343  * that must be done for every newly created context, then puts the task
1344  * on the runqueue and wakes it.
1345  */
1346 void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
1347 {
1348         unsigned long flags;
1349         int this_cpu, cpu;
1350         runqueue_t *rq, *this_rq;
1351
1352         rq = task_rq_lock(p, &flags);
1353         cpu = task_cpu(p);
1354         this_cpu = smp_processor_id();
1355
1356         BUG_ON(p->state != TASK_RUNNING);
1357
1358         schedstat_inc(rq, wunt_cnt);
1359         /*
1360          * We decrease the sleep average of forking parents
1361          * and children as well, to keep max-interactive tasks
1362          * from forking tasks that are max-interactive. The parent
1363          * (current) is done further down, under its lock.
1364          */
1365         p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
1366                 CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1367
1368         p->interactive_credit = 0;
1369
1370         p->prio = effective_prio(p);
1371
1372         vx_activate_task(p);
1373         if (likely(cpu == this_cpu)) {
1374                 if (!(clone_flags & CLONE_VM)) {
1375                         /*
1376                          * The VM isn't cloned, so we're in a good position to
1377                          * do child-runs-first in anticipation of an exec. This
1378                          * usually avoids a lot of COW overhead.
1379                          */
1380                         if (unlikely(!current->array))
1381                                 __activate_task(p, rq);
1382                         else {
1383                                 p->prio = current->prio;
1384                                 list_add_tail(&p->run_list, &current->run_list);
1385                                 p->array = current->array;
1386                                 p->array->nr_active++;
1387                                 rq->nr_running++;
1388                                 class_enqueue_task(p,p->array);
1389                         }
1390                         set_need_resched();
1391                 } else
1392                         /* Run child last */
1393                         __activate_task(p, rq);
1394                 /*
1395                  * We skip the following code due to cpu == this_cpu
1396                  *
1397                  *   task_rq_unlock(rq, &flags);
1398                  *   this_rq = task_rq_lock(current, &flags);
1399                  */
1400                 this_rq = rq;
1401         } else {
1402                 this_rq = cpu_rq(this_cpu);
1403
1404                 /*
1405                  * Not the local CPU - must adjust timestamp. This should
1406                  * get optimised away in the !CONFIG_SMP case.
1407                  */
1408                 p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
1409                                         + rq->timestamp_last_tick;
1410                 __activate_task(p, rq);
1411                 if (TASK_PREEMPTS_CURR(p, rq))
1412                         resched_task(rq->curr);
1413
1414                 schedstat_inc(rq, wunt_moved);
1415                 /*
1416                  * Parent and child are on different CPUs, now get the
1417                  * parent runqueue to update the parent's ->sleep_avg:
1418                  */
1419                 task_rq_unlock(rq, &flags);
1420                 this_rq = task_rq_lock(current, &flags);
1421         }
1422         current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
1423                 PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1424         task_rq_unlock(this_rq, &flags);
1425 }
1426
1427 /*
1428  * Potentially available exiting-child timeslices are
1429  * retrieved here - this way the parent does not get
1430  * penalized for creating too many threads.
1431  *
1432  * (this cannot be used to 'generate' timeslices
1433  * artificially, because any timeslice recovered here
1434  * was given away by the parent in the first place.)
1435  */
1436 void fastcall sched_exit(task_t * p)
1437 {
1438         unsigned long flags;
1439         runqueue_t *rq;
1440
1441         /*
1442          * If the child was a (relative-) CPU hog then decrease
1443          * the sleep_avg of the parent as well.
1444          */
1445         rq = task_rq_lock(p->parent, &flags);
1446         if (p->first_time_slice) {
1447                 p->parent->time_slice += p->time_slice;
1448                 if (unlikely(p->parent->time_slice > task_timeslice(p)))
1449                         p->parent->time_slice = task_timeslice(p);
1450         }
1451         if (p->sleep_avg < p->parent->sleep_avg)
1452                 p->parent->sleep_avg = p->parent->sleep_avg /
1453                 (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
1454                 (EXIT_WEIGHT + 1);
1455         task_rq_unlock(rq, &flags);
1456 }
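/*
 * Sleep-avg mixing example (sketch, EXIT_WEIGHT == 3): if the parent's
 * sleep_avg is 800 and the exiting child's is 400, the parent ends up
 * with 800/4*3 + 400/4 == 700, i.e. a 3:1 weighted average biased
 * towards the parent's own history.
 */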
1457
1458 /**
1459  * finish_task_switch - clean up after a task-switch
1460  * @prev: the thread we just switched away from.
1461  *
1462  * We enter this with the runqueue still locked, and finish_arch_switch()
1463  * will unlock it along with doing any other architecture-specific cleanup
1464  * actions.
1465  *
1466  * Note that we may have delayed dropping an mm in context_switch(). If
1467  * so, we finish that here outside of the runqueue lock.  (Doing it
1468  * with the lock held can cause deadlocks; see schedule() for
1469  * details.)
1470  */
1471 static void finish_task_switch(task_t *prev)
1472         __releases(rq->lock)
1473 {
1474         runqueue_t *rq = this_rq();
1475         struct mm_struct *mm = rq->prev_mm;
1476         unsigned long prev_task_flags;
1477
1478         rq->prev_mm = NULL;
1479
1480         /*
1481          * A task struct has one reference for the use as "current".
1482          * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
1483          * calls schedule one last time. The schedule call will never return,
1484          * and the scheduled task must drop that reference.
1485          * The test for EXIT_ZOMBIE must occur while the runqueue locks are
1486          * still held, otherwise prev could be scheduled on another cpu, die
1487          * there before we look at prev->state, and then the reference would
1488          * be dropped twice.
1489          *              Manfred Spraul <manfred@colorfullife.com>
1490          */
1491         prev_task_flags = prev->flags;
1492         finish_arch_switch(rq, prev);
1493         if (mm)
1494                 mmdrop(mm);
1495         if (unlikely(prev_task_flags & PF_DEAD))
1496                 put_task_struct(prev);
1497 }
1498
1499 /**
1500  * schedule_tail - first thing a freshly forked thread must call.
1501  * @prev: the thread we just switched away from.
1502  */
1503 asmlinkage void schedule_tail(task_t *prev)
1504         __releases(rq->lock)
1505 {
1506         finish_task_switch(prev);
1507
1508         if (current->set_child_tid)
1509                 put_user(current->pid, current->set_child_tid);
1510 }
1511
1512 /*
1513  * context_switch - switch to the new MM and the new
1514  * thread's register state.
1515  */
1516 static inline
1517 task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
1518 {
1519         struct mm_struct *mm = next->mm;
1520         struct mm_struct *oldmm = prev->active_mm;
1521
1522         if (unlikely(!mm)) {
1523                 next->active_mm = oldmm;
1524                 atomic_inc(&oldmm->mm_count);
1525                 enter_lazy_tlb(oldmm, next);
1526         } else
1527                 switch_mm(oldmm, mm, next);
1528
1529         if (unlikely(!prev->mm)) {
1530                 prev->active_mm = NULL;
1531                 WARN_ON(rq->prev_mm);
1532                 rq->prev_mm = oldmm;
1533         }
1534
1535         /* Here we just switch the register state and the stack. */
1536         switch_to(prev, next, prev);
1537
1538         return prev;
1539 }
1540
1541 /*
1542  * nr_running, nr_uninterruptible and nr_context_switches:
1543  *
1544  * externally visible scheduler statistics: current number of runnable
1545  * threads, current number of uninterruptible-sleeping threads, total
1546  * number of context switches performed since bootup.
1547  */
1548 unsigned long nr_running(void)
1549 {
1550         unsigned long i, sum = 0;
1551
1552         for_each_online_cpu(i)
1553                 sum += cpu_rq(i)->nr_running;
1554
1555         return sum;
1556 }
1557
1558 unsigned long nr_uninterruptible(void)
1559 {
1560         unsigned long i, sum = 0;
1561
1562         for_each_cpu(i)
1563                 sum += cpu_rq(i)->nr_uninterruptible;
1564
1565         /*
1566          * Since we read the counters lockless, it might be slightly
1567          * inaccurate. Do not allow it to go below zero though:
1568          */
1569         if (unlikely((long)sum < 0))
1570                 sum = 0;
1571
1572         return sum;
1573 }
1574
1575 unsigned long long nr_context_switches(void)
1576 {
1577         unsigned long long i, sum = 0;
1578
1579         for_each_cpu(i)
1580                 sum += cpu_rq(i)->nr_switches;
1581
1582         return sum;
1583 }
1584
1585 unsigned long nr_iowait(void)
1586 {
1587         unsigned long i, sum = 0;
1588
1589         for_each_cpu(i)
1590                 sum += atomic_read(&cpu_rq(i)->nr_iowait);
1591
1592         return sum;
1593 }
1594
1595 #ifdef CONFIG_SMP
1596
1597 /*
1598  * double_rq_lock - safely lock two runqueues
1599  *
1600  * Note this does not disable interrupts like task_rq_lock,
1601  * you need to do so manually before calling.
1602  */
1603 static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
1604         __acquires(rq1->lock)
1605         __acquires(rq2->lock)
1606 {
1607         if (rq1 == rq2) {
1608                 spin_lock(&rq1->lock);
1609                 __acquire(rq2->lock);   /* Fake it out ;) */
1610         } else {
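                /* Take the locks in ascending address order to avoid ABBA deadlock. */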
1611                 if (rq1 < rq2) {
1612                         spin_lock(&rq1->lock);
1613                         spin_lock(&rq2->lock);
1614                 } else {
1615                         spin_lock(&rq2->lock);
1616                         spin_lock(&rq1->lock);
1617                 }
1618         }
1619 }
1620
1621 /*
1622  * double_rq_unlock - safely unlock two runqueues
1623  *
1624  * Note this does not restore interrupts like task_rq_unlock,
1625  * you need to do so manually after calling.
1626  */
1627 static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
1628         __releases(rq1->lock)
1629         __releases(rq2->lock)
1630 {
1631         spin_unlock(&rq1->lock);
1632         if (rq1 != rq2)
1633                 spin_unlock(&rq2->lock);
1634         else
1635                 __release(rq2->lock);
1636 }
1637
1638 /*
1639  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1640  */
1641 static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
1642         __releases(this_rq->lock)
1643         __acquires(busiest->lock)
1644         __acquires(this_rq->lock)
1645 {
1646         if (unlikely(!spin_trylock(&busiest->lock))) {
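                /*
                 * Fast path failed; if necessary drop this_rq->lock so that
                 * both locks are always taken in ascending address order,
                 * matching double_rq_lock().
                 */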
1647                 if (busiest < this_rq) {
1648                         spin_unlock(&this_rq->lock);
1649                         spin_lock(&busiest->lock);
1650                         spin_lock(&this_rq->lock);
1651                 } else
1652                         spin_lock(&busiest->lock);
1653         }
1654 }
1655
1656 /*
1657  * find_idlest_cpu - find the least busy runqueue.
1658  */
1659 static int find_idlest_cpu(struct task_struct *p, int this_cpu,
1660                            struct sched_domain *sd)
1661 {
1662         unsigned long load, min_load, this_load;
1663         int i, min_cpu;
1664         cpumask_t mask;
1665
1666         min_cpu = UINT_MAX;
1667         min_load = ULONG_MAX;
1668
1669         cpus_and(mask, sd->span, p->cpus_allowed);
1670
1671         for_each_cpu_mask(i, mask) {
1672                 load = target_load(i);
1673
1674                 if (load < min_load) {
1675                         min_cpu = i;
1676                         min_load = load;
1677
1678                         /* break out early on an idle CPU: */
1679                         if (!min_load)
1680                                 break;
1681                 }
1682         }
1683
1684         /* account for the new task: add one task's worth of load (SCHED_LOAD_SCALE) */
1685         this_load = source_load(this_cpu) + SCHED_LOAD_SCALE;
1686
1687         /*
1688          * Would the addition of the new task to the current
1689          * CPU create an imbalance between this CPU and the
1690          * idlest CPU?
1691          *
1692          * Use half of the balancing threshold - new-context is
1693          * a good opportunity to balance.
1694          */
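        /*
         * For illustration (not part of the original source): assuming an
         * imbalance_pct of 125, the check below reduces to
         *      min_load * 112 < this_load * 100
         * i.e. the idlest CPU is preferred on roughly a 12% imbalance
         * instead of the usual 25%.
         */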
1695         if (min_load*(100 + (sd->imbalance_pct-100)/2) < this_load*100)
1696                 return min_cpu;
1697
1698         return this_cpu;
1699 }
1700
1701 /*
1702  * If dest_cpu is allowed for this process, migrate the task to it.
1703  * This is accomplished by forcing the cpu_allowed mask to only
1704  * allow dest_cpu, which will force the task onto dest_cpu.  Then
1705  * the cpu_allowed mask is restored.
1706  */
1707 static void sched_migrate_task(task_t *p, int dest_cpu)
1708 {
1709         migration_req_t req;
1710         runqueue_t *rq;
1711         unsigned long flags;
1712
1713         rq = task_rq_lock(p, &flags);
1714         if (!cpu_isset(dest_cpu, p->cpus_allowed)
1715             || unlikely(cpu_is_offline(dest_cpu)))
1716                 goto out;
1717
1718         schedstat_inc(rq, smt_cnt);
1719         /* force the process onto the specified CPU */
1720         if (migrate_task(p, dest_cpu, &req)) {
1721                 /* Need to wait for migration thread (might exit: take ref). */
1722                 struct task_struct *mt = rq->migration_thread;
1723                 get_task_struct(mt);
1724                 task_rq_unlock(rq, &flags);
1725                 wake_up_process(mt);
1726                 put_task_struct(mt);
1727                 wait_for_completion(&req.done);
1728                 return;
1729         }
1730 out:
1731         task_rq_unlock(rq, &flags);
1732 }
1733
1734 /*
1735  * sched_exec(): find the highest-level, exec-balance-capable
1736  * domain and try to migrate the task to the least loaded CPU.
1737  *
1738  * execve() is a valuable balancing opportunity, because at this point
1739  * the task has the smallest effective memory and cache footprint.
1740  */
1741 void sched_exec(void)
1742 {
1743         struct sched_domain *tmp, *sd = NULL;
1744         int new_cpu, this_cpu = get_cpu();
1745
1746         schedstat_inc(this_rq(), sbe_cnt);
1747         /* Prefer the current CPU if there's only this task running */
1748         if (this_rq()->nr_running <= 1)
1749                 goto out;
1750
1751         for_each_domain(this_cpu, tmp)
1752                 if (tmp->flags & SD_BALANCE_EXEC)
1753                         sd = tmp;
1754
1755         if (sd) {
1756                 schedstat_inc(sd, sbe_attempts);
1757                 new_cpu = find_idlest_cpu(current, this_cpu, sd);
1758                 if (new_cpu != this_cpu) {
1759                         schedstat_inc(sd, sbe_pushed);
1760                         put_cpu();
1761                         sched_migrate_task(current, new_cpu);
1762                         return;
1763                 }
1764         }
1765 out:
1766         put_cpu();
1767 }
1768
1769 /*
1770  * pull_task - move a task from a remote runqueue to the local runqueue.
1771  * Both runqueues must be locked.
1772  */
1773 static inline
1774 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
1775                runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
1776 {
1777         dequeue_task(p, src_array);
1778         src_rq->nr_running--;
1779         set_task_cpu(p, this_cpu);
1780         this_rq->nr_running++;
1781         enqueue_task(p, this_array);
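        /*
         * Rebase the timestamp from the source runqueue's clock onto the
         * destination's: per-CPU sched_clock() values need not be
         * synchronized, so keep the "age" of the timestamp constant
         * across the migration.
         */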
1782         p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
1783                                 + this_rq->timestamp_last_tick;
1784         /*
1785          * Note that idle threads have a prio of MAX_PRIO, so this test
1786          * is always true for them.
1787          */
1788         if (TASK_PREEMPTS_CURR(p, this_rq))
1789                 resched_task(this_rq->curr);
1790 }
1791
1792 /*
1793  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
1794  */
1795 static inline
1796 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
1797                      struct sched_domain *sd, enum idle_type idle)
1798 {
1799         /*
1800          * We do not migrate tasks that are:
1801          * 1) running (obviously), or
1802          * 2) cannot be migrated to this CPU due to cpus_allowed, or
1803          * 3) are cache-hot on their current CPU.
1804          */
1805         if (task_running(rq, p))
1806                 return 0;
1807         if (!cpu_isset(this_cpu, p->cpus_allowed))
1808                 return 0;
1809
1810         /* Respect cache-hotness unless repeated balance failures force aggressive migration */
1811         if (idle == NEWLY_IDLE ||
1812                         sd->nr_balance_failed < sd->cache_nice_tries) {
1813                 if (task_hot(p, rq->timestamp_last_tick, sd))
1814                         return 0;
1815         }
1816
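        /*
         * Not running, allowed on this_cpu, and either cache-cold or we
         * have failed balancing often enough to migrate aggressively.
         */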
1817         return 1;
1818 }
1819
1820 #ifdef CONFIG_CKRM_CPU_SCHEDULE
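/*
 * ckrm_preferred_task - is this task worth migrating for the remaining
 * imbalance?  Tasks whose load ("pressure") exceeds @max would overshoot
 * and reverse the balance; in the first phase of a busy (NOT_IDLE) search,
 * tasks at or below @min make too small a difference to bother with.
 */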
1821 static inline int ckrm_preferred_task(task_t *tmp,long min, long max, 
1822                                       int phase, enum idle_type idle)
1823 {
1824         long pressure = task_load(tmp);
1825         
1826         if (pressure > max) 
1827                 return 0;
1828
1829         if ((idle == NOT_IDLE) && ! phase && (pressure <= min))
1830                 return 0;
1831         return 1;
1832 }
1833
1834 /*
1835  * move tasks for a specific local class
1836  * returns the number of tasks pulled
1837  */
1838 static inline int ckrm_cls_move_tasks(ckrm_lrq_t* src_lrq,ckrm_lrq_t*dst_lrq,
1839                                       runqueue_t *this_rq,
1840                                       runqueue_t *busiest,
1841                                       struct sched_domain *sd,
1842                                       int this_cpu,
1843                                       enum idle_type idle,
1844                                       long* pressure_imbalance) 
1845 {
1846         prio_array_t *array, *dst_array;
1847         struct list_head *head, *curr;
1848         task_t *tmp;
1849         int idx;
1850         int pulled = 0;
1851         int phase = -1;
1852         long pressure_min, pressure_max;
1853         /* hzheng: magic: moving 90% of the imbalance is enough */
1854         long balance_min = *pressure_imbalance / 10; 
1855 /*
1856  * We don't want to migrate tasks that would reverse the balance,
1857  * nor tasks that make too small a difference.
1858  */
1859 #define CKRM_BALANCE_MAX_RATIO  100
1860 #define CKRM_BALANCE_MIN_RATIO  1
1861  start:
1862         phase ++;
1863         /*
1864          * We first consider expired tasks. Those will likely not be
1865          * executed in the near future, and they are most likely to
1866          * be cache-cold, thus switching CPUs has the least effect
1867          * on them.
1868          */
1869         if (src_lrq->expired->nr_active) {
1870                 array = src_lrq->expired;
1871                 dst_array = dst_lrq->expired;
1872         } else {
1873                 array = src_lrq->active;
1874                 dst_array = dst_lrq->active;
1875         }
1876         
1877  new_array:
1878         /* Start searching at priority 0: */
1879         idx = 0;
1880  skip_bitmap:
1881         if (!idx)
1882                 idx = sched_find_first_bit(array->bitmap);
1883         else
1884                 idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
1885         if (idx >= MAX_PRIO) {
1886                 if (array == src_lrq->expired && src_lrq->active->nr_active) {
1887                         array = src_lrq->active;
1888                         dst_array = dst_lrq->active;
1889                         goto new_array;
1890                 }
1891                 if (!phase && !pulled && (idle != IDLE))
1892                         goto start;     /* try again */
1893                 else
1894                         goto out;       /* finished search for this lrq */
1895         }
1896         
1897         head = array->queue + idx;
1898         curr = head->prev;
1899  skip_queue:
1900         tmp = list_entry(curr, task_t, run_list);
1901         
1902         curr = curr->prev;
1903         
1904         if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
1905                 if (curr != head)
1906                         goto skip_queue;
1907                 idx++;
1908                 goto skip_bitmap;
1909         }
1910
1911         pressure_min = *pressure_imbalance * CKRM_BALANCE_MIN_RATIO/100;
1912         pressure_max = *pressure_imbalance * CKRM_BALANCE_MAX_RATIO/100;
1913         /*
1914          * skip the tasks that will reverse the balance too much
1915          */
1916         if (ckrm_preferred_task(tmp,pressure_min,pressure_max,phase,idle)) {
1917                 *pressure_imbalance -= task_load(tmp);
1918                 pull_task(busiest, array, tmp, 
1919                           this_rq, dst_array, this_cpu);
1920                 pulled++;
1921
1922                 if (*pressure_imbalance <= balance_min)
1923                         goto out;
1924         }
1925                 
1926         if (curr != head)
1927                 goto skip_queue;
1928         idx++;
1929         goto skip_bitmap;
1930  out:          
1931         return pulled;
1932 }
1933
1934 static inline long ckrm_rq_imbalance(runqueue_t *this_rq,runqueue_t *dst_rq)
1935 {
1936         long imbalance;
1937         /*
1938          * Make sure that after balancing, imbalance' > -imbalance/2;
1939          * we don't want the imbalance to be reversed too much.
1940          */
1941         imbalance = pid_get_pressure(rq_ckrm_load(dst_rq),0) 
1942                 - pid_get_pressure(rq_ckrm_load(this_rq),1);
1943         imbalance /= 2;
1944         return imbalance;
1945 }
1946
1947 /*
1948  * Try to balance the two runqueues.
1949  *
1950  * Called with both runqueues locked.
1951  * When called, move_tasks() tries to move at least one task over.
1952  */
1953 static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
1954                       unsigned long max_nr_move, struct sched_domain *sd,
1955                       enum idle_type idle)
1956 {
1957         struct ckrm_cpu_class *clsptr,*vip_cls = NULL;
1958         ckrm_lrq_t* src_lrq,*dst_lrq;
1959         long pressure_imbalance, pressure_imbalance_old;
1960         int src_cpu = task_cpu(busiest->curr);
1961         struct list_head *list;
1962         int pulled = 0;
1963         long imbalance;
1964
1965         imbalance =  ckrm_rq_imbalance(this_rq,busiest);
1966
1967         if ((idle == NOT_IDLE && imbalance <= 0) || busiest->nr_running <= 1)
1968                 goto out;
1969
1970         //try to find the vip class
1971         list_for_each_entry(clsptr,&active_cpu_classes,links) {
1972                 src_lrq = get_ckrm_lrq(clsptr,src_cpu);
1973
1974                 if (! lrq_nr_running(src_lrq))
1975                         continue;
1976
1977                 if (!vip_cls ||
1978                     cpu_class_weight(vip_cls) < cpu_class_weight(clsptr)) {
1979                         vip_cls = clsptr;
1980                 }
1981         }
1982
1983         /*
1984          * do search from the most significant class
1985          * hopefully, less tasks will be migrated this way
1986          */
1987         clsptr = vip_cls;
1988
1989  move_class:
1990         if (! clsptr)
1991                 goto out;
1992         
1993
1994         src_lrq = get_ckrm_lrq(clsptr,src_cpu);
1995         if (! lrq_nr_running(src_lrq))
1996                 goto other_class;
1997         
1998         dst_lrq = get_ckrm_lrq(clsptr,this_cpu);
1999
2000         //how much pressure for this class should be transferred
2001         pressure_imbalance = src_lrq->lrq_load * imbalance/src_lrq->local_weight;
2002         if (pulled && ! pressure_imbalance) 
2003                 goto other_class;
2004         
2005         pressure_imbalance_old = pressure_imbalance;
2006         
2007         //move tasks
2008         pulled += 
2009                 ckrm_cls_move_tasks(src_lrq,dst_lrq,
2010                                     this_rq,
2011                                     busiest,
2012                                     sd,this_cpu,idle,
2013                                     &pressure_imbalance);
2014
2015         /* 
2016          * hzheng: 2 is another magic number:
2017          * stop balancing once the imbalance is at most 25% of the original
2018          */
2019         if (pressure_imbalance <= (pressure_imbalance_old >> 2))
2020                 goto out;
2021                 
2022         //update imbalance
2023         imbalance *= pressure_imbalance / pressure_imbalance_old;
2024  other_class:
2025         //who is next?
2026         list = clsptr->links.next;
2027         if (list == &active_cpu_classes)
2028                 list = list->next;
2029         clsptr = list_entry(list, typeof(*clsptr), links);
2030         if (clsptr != vip_cls)
2031                 goto move_class;
2032  out:
2033         return pulled;
2034 }
2035
2036 /**
2037  * ckrm_check_balance - is load balancing necessary?
2038  * return 0 if load balancing is not necessary;
2039  * otherwise return the average load of the system
2040  * also, update *nr_group
2041  *
2042  * heuristics:
2043  *   no load balancing if this CPU's load is above the average
2044  *   no load balancing if this CPU's load is far above the minimum
2045  * method:
2046  *   read the status of all the runqueues
2047  */
2048 static unsigned long ckrm_check_balance(struct sched_domain *sd, int this_cpu,
2049                                              enum idle_type idle, int* nr_group)
2050 {
2051         struct sched_group *group = sd->groups;
2052         unsigned long min_load, max_load, avg_load;
2053         unsigned long total_load, this_load, total_pwr;
2054
2055         max_load = this_load = total_load = total_pwr = 0;
2056         min_load = 0xFFFFFFFF;
2057         *nr_group = 0;
2058
2059         do {
2060                 cpumask_t tmp;
2061                 unsigned long load;
2062                 int local_group;
2063                 int i, nr_cpus = 0;
2064
2065                 /* Tally up the load of all CPUs in the group */
2066                 cpus_and(tmp, group->cpumask, cpu_online_map);
2067                 if (unlikely(cpus_empty(tmp)))
2068                         goto nextgroup;
2069
2070                 avg_load = 0;
2071                 local_group = cpu_isset(this_cpu, group->cpumask);
2072
2073                 for_each_cpu_mask(i, tmp) {
2074                         load = pid_get_pressure(rq_ckrm_load(cpu_rq(i)),local_group);
2075                         nr_cpus++;
2076                         avg_load += load;
2077                 }
2078
2079                 if (!nr_cpus)
2080                         goto nextgroup;
2081
2082                 total_load += avg_load;
2083                 total_pwr += group->cpu_power;
2084
2085                 /* Adjust by relative CPU power of the group */
2086                 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
2087
2088                 if (local_group) {
2089                         this_load = avg_load;
2090                         goto nextgroup;
2091                 } else if (avg_load > max_load) {
2092                         max_load = avg_load;
2093                 }      
2094                 if (avg_load < min_load) {
2095                         min_load = avg_load;
2096                 }
2097 nextgroup:
2098                 group = group->next;
2099                 *nr_group = *nr_group + 1;
2100         } while (group != sd->groups);
2101
2102         if (!max_load || this_load >= max_load)
2103                 goto out_balanced;
2104
2105         avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
2106
2107         /* hzheng: debugging: 105 and 70 are magic numbers; this
2108          * should use sd->imbalance_pct instead, i.e.
2109          *      100*max_load <= sd->imbalance_pct*this_load
2110          */
2111         if (this_load > avg_load 
2112             || 100*max_load < 105*this_load
2113             || 100*min_load < 70*this_load
2114             )
2115                 goto out_balanced;
2116
2117         return avg_load;
2118  out_balanced:
2119         return 0;
2120 }
2121
2122 /**
2123  * ckrm_find_busy_queue - any group with above-average load is considered
2124  * busy; find the busiest runqueue within one of the busy groups
2125  */
2126 static runqueue_t *
2127 ckrm_find_busy_queue(struct sched_domain *sd, int this_cpu,
2128                      unsigned long avg_load, enum idle_type idle,
2129                      int nr_group)
2130 {
2131         struct sched_group *group;
2132         runqueue_t * busiest=NULL;
2133         unsigned long rand;
2134         
2135         group = sd->groups;
2136         rand = get_ckrm_rand(nr_group);
2137         nr_group = 0;
2138
2139         do {
2140                 unsigned long load,total_load,max_load;
2141                 cpumask_t tmp;
2142                 int i;
2143                 runqueue_t * grp_busiest;
2144
2145                 cpus_and(tmp, group->cpumask, cpu_online_map);
2146                 if (unlikely(cpus_empty(tmp)))
2147                         goto find_nextgroup;
2148
2149                 total_load = 0;
2150                 max_load = 0;
2151                 grp_busiest = NULL;
2152                 for_each_cpu_mask(i, tmp) {
2153                         load = pid_get_pressure(rq_ckrm_load(cpu_rq(i)),0);
2154                         total_load += load;
2155                         if (load > max_load) {
2156                                 max_load = load;
2157                                 grp_busiest = cpu_rq(i);
2158                         }                               
2159                 }
2160
2161                 total_load = (total_load * SCHED_LOAD_SCALE) / group->cpu_power;
2162                 if (total_load > avg_load) {
2163                         busiest = grp_busiest;
2164                         if (nr_group >= rand)
2165                                 break;
2166                 }
2167         find_nextgroup:         
2168                 group = group->next;
2169                 nr_group ++;
2170         } while (group != sd->groups);
2171
2172         return busiest;
2173 }
2174
2175 /**
2176  * ckrm_load_balance - pressure-based load balancing algorithm used by CKRM
2177  */
2178 static int ckrm_load_balance(int this_cpu, runqueue_t *this_rq,
2179                         struct sched_domain *sd, enum idle_type idle)
2180 {
2181         runqueue_t *busiest;
2182         unsigned long avg_load;
2183         int nr_moved,nr_group;
2184
2185         avg_load = ckrm_check_balance(sd, this_cpu, idle, &nr_group);
2186         if (! avg_load)
2187                 goto out_balanced;
2188
2189         busiest = ckrm_find_busy_queue(sd,this_cpu,avg_load,idle,nr_group);
2190         if (! busiest)
2191                 goto out_balanced;
2192         /*
2193          * This should be "impossible", but since load
2194          * balancing is inherently racy and statistical,
2195          * it could happen in theory.
2196          */
2197         if (unlikely(busiest == this_rq)) {
2198                 WARN_ON(1);
2199                 goto out_balanced;
2200         }
2201
2202         nr_moved = 0;
2203         if (busiest->nr_running > 1) {
2204                 /*
2205                  * Attempt to move tasks. If find_busiest_group has found
2206                  * an imbalance but busiest->nr_running <= 1, the group is
2207                  * still unbalanced. nr_moved simply stays zero, so it is
2208                  * correctly treated as an imbalance.
2209                  */
2210                 double_lock_balance(this_rq, busiest);
2211                 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2212                                       0,sd, idle);              
2213                 spin_unlock(&busiest->lock);
2214                 if (nr_moved) {
2215                         adjust_local_weight();
2216                 }
2217         }
2218
2219         if (!nr_moved)
2220                 sd->nr_balance_failed++;
2221         else
2222                 sd->nr_balance_failed = 0;
2223
2224         /* We were unbalanced, so reset the balancing interval */
2225         sd->balance_interval = sd->min_interval;
2226
2227         return nr_moved;
2228
2229 out_balanced:
2230         /* tune up the balancing interval */
2231         if (sd->balance_interval < sd->max_interval)
2232                 sd->balance_interval *= 2;
2233
2234         return 0;
2235 }
2236
2237 /*
2238  * this_rq->lock is already held
2239  */
2240 static inline int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2241                                        struct sched_domain *sd)
2242 {
2243         int ret;
2244         read_lock(&class_list_lock);
2245         ret = ckrm_load_balance(this_cpu,this_rq,sd,NEWLY_IDLE);
2246         read_unlock(&class_list_lock);
2247         return ret;
2248 }
2249
2250 static inline int load_balance(int this_cpu, runqueue_t *this_rq,
2251                         struct sched_domain *sd, enum idle_type idle)
2252 {
2253         int ret;
2254
2255         spin_lock(&this_rq->lock);
2256         read_lock(&class_list_lock);
2257         ret = ckrm_load_balance(this_cpu, this_rq, sd, idle);
2258         read_unlock(&class_list_lock);
2259         spin_unlock(&this_rq->lock);
2260         return ret;
2261 }
2262 #else /*! CONFIG_CKRM_CPU_SCHEDULE */
2263 /*
2264  * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
2265  * as part of a balancing operation within "domain". Returns the number of
2266  * tasks moved.
2267  *
2268  * Called with both runqueues locked.
2269  */
2270 static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
2271                       unsigned long max_nr_move, struct sched_domain *sd,
2272                       enum idle_type idle)
2273 {
2274         prio_array_t *array, *dst_array;
2275         struct list_head *head, *curr;
2276         int idx, pulled = 0;
2277         task_t *tmp;
2278
2279         if (max_nr_move <= 0 || busiest->nr_running <= 1)
2280                 goto out;
2281
2282         /*
2283          * We first consider expired tasks. Those will likely not be
2284          * executed in the near future, and they are most likely to
2285          * be cache-cold, thus switching CPUs has the least effect
2286          * on them.
2287          */
2288         if (busiest->expired->nr_active) {
2289                 array = busiest->expired;
2290                 dst_array = this_rq->expired;
2291         } else {
2292                 array = busiest->active;
2293                 dst_array = this_rq->active;
2294         }
2295
2296 new_array:
2297         /* Start searching at priority 0: */
2298         idx = 0;
2299 skip_bitmap:
2300         if (!idx)
2301                 idx = sched_find_first_bit(array->bitmap);
2302         else
2303                 idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
2304         if (idx >= MAX_PRIO) {
2305                 if (array == busiest->expired && busiest->active->nr_active) {
2306                         array = busiest->active;
2307                         dst_array = this_rq->active;
2308                         goto new_array;
2309                 }
2310                 goto out;
2311         }
2312
2313         head = array->queue + idx;
2314         curr = head->prev;
2315 skip_queue:
2316         tmp = list_entry(curr, task_t, run_list);
2317
2318         curr = curr->prev;
2319
2320         if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
2321                 if (curr != head)
2322                         goto skip_queue;
2323                 idx++;
2324                 goto skip_bitmap;
2325         }
2326
2327         /*
2328          * Right now, this is the only place pull_task() is called,
2329          * so we can safely collect pull_task() stats here rather than
2330          * inside pull_task().
2331          */
2332         schedstat_inc(this_rq, pt_gained[idle]);
2333         schedstat_inc(busiest, pt_lost[idle]);
2334
2335         pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
2336         pulled++;
2337
2338         /* We only want to steal up to the prescribed number of tasks. */
2339         if (pulled < max_nr_move) {
2340                 if (curr != head)
2341                         goto skip_queue;
2342                 idx++;
2343                 goto skip_bitmap;
2344         }
2345 out:
2346         return pulled;
2347 }
2348
2349 /*
2350  * find_busiest_group finds and returns the busiest CPU group within the
2351  * domain. It calculates and returns the number of tasks which should be
2352  * moved to restore balance via the imbalance parameter.
2353  */
2354 static struct sched_group *
2355 find_busiest_group(struct sched_domain *sd, int this_cpu,
2356                    unsigned long *imbalance, enum idle_type idle)
2357 {
2358         struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
2359         unsigned long max_load, avg_load, total_load, this_load, total_pwr;
2360
2361         max_load = this_load = total_load = total_pwr = 0;
2362
2363         do {
2364                 unsigned long load;
2365                 int local_group;
2366                 int i, nr_cpus = 0;
2367
2368                 local_group = cpu_isset(this_cpu, group->cpumask);
2369
2370                 /* Tally up the load of all CPUs in the group */
2371                 avg_load = 0;
2372
2373                 for_each_cpu_mask(i, group->cpumask) {
2374                         /* Bias balancing toward cpus of our domain */
2375                         if (local_group)
2376                                 load = target_load(i);
2377                         else
2378                                 load = source_load(i);
2379
2380                         nr_cpus++;
2381                         avg_load += load;
2382                 }
2383
2384                 if (!nr_cpus)
2385                         goto nextgroup;
2386
2387                 total_load += avg_load;
2388                 total_pwr += group->cpu_power;
2389
2390                 /* Adjust by relative CPU power of the group */
2391                 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
2392
2393                 if (local_group) {
2394                         this_load = avg_load;
2395                         this = group;
2396                         goto nextgroup;
2397                 } else if (avg_load > max_load) {
2398                         max_load = avg_load;
2399                         busiest = group;
2400                 }
2401 nextgroup:
2402                 group = group->next;
2403         } while (group != sd->groups);
2404
2405         if (!busiest || this_load >= max_load)
2406                 goto out_balanced;
2407
2408         avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
2409
2410         if (this_load >= avg_load ||
2411                         100*max_load <= sd->imbalance_pct*this_load)
2412                 goto out_balanced;
2413
2414         /*
2415          * We're trying to get all the cpus to the average_load, so we don't
2416          * want to push ourselves above the average load, nor do we wish to
2417          * reduce the max loaded cpu below the average load, as either of these
2418          * actions would just result in more rebalancing later, and ping-pong
2419          * tasks around. Thus we look for the minimum possible imbalance.
2420          * Negative imbalances (*we* are more loaded than anyone else) will
2421          * be counted as no imbalance for these purposes -- we can't fix that
2422          * by pulling tasks to us.  Be careful of negative numbers as they'll
2423          * appear as very large values with unsigned longs.
2424          */
2425         *imbalance = min(max_load - avg_load, avg_load - this_load);
2426
2427         /* How much load to actually move to equalise the imbalance */
2428         *imbalance = (*imbalance * min(busiest->cpu_power, this->cpu_power))
2429                                 / SCHED_LOAD_SCALE;
2430
2431         if (*imbalance < SCHED_LOAD_SCALE - 1) {
2432                 unsigned long pwr_now = 0, pwr_move = 0;
2433                 unsigned long tmp;
2434
2435                 if (max_load - this_load >= SCHED_LOAD_SCALE*2) {
2436                         *imbalance = 1;
2437                         return busiest;
2438                 }
2439
2440                 /*
2441                  * OK, we don't have enough imbalance to justify moving tasks,
2442                  * however we may be able to increase total CPU power used by
2443                  * moving them.
2444                  */
2445
2446                 pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load);
2447                 pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load);
2448                 pwr_now /= SCHED_LOAD_SCALE;
2449
2450                 /* Amount of load we'd subtract */
2451                 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power;
2452                 if (max_load > tmp)
2453                         pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE,
2454                                                         max_load - tmp);
2455
2456                 /* Amount of load we'd add */
2457                 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
2458                 if (max_load < tmp)
2459                         tmp = max_load;
2460                 pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp);
2461                 pwr_move /= SCHED_LOAD_SCALE;
2462
2463                 /* Move if we gain another 8th of a CPU worth of throughput */
2464                 if (pwr_move < pwr_now + SCHED_LOAD_SCALE / 8)
2465                         goto out_balanced;
2466
2467                 *imbalance = 1;
2468                 return busiest;
2469         }
2470
2471         /* Get rid of the scaling factor, rounding down as we divide */
2472         *imbalance = (*imbalance + 1) / SCHED_LOAD_SCALE;
2473
2474         return busiest;
2475
2476 out_balanced:
2477         if (busiest && (idle == NEWLY_IDLE ||
2478                         (idle == SCHED_IDLE && max_load > SCHED_LOAD_SCALE)) ) {
2479                 *imbalance = 1;
2480                 return busiest;
2481         }
2482
2483         *imbalance = 0;
2484         return NULL;
2485 }
2486
2487 /*
2488  * find_busiest_queue - find the busiest runqueue among the cpus in group.
2489  */
2490 static runqueue_t *find_busiest_queue(struct sched_group *group)
2491 {
2492         unsigned long load, max_load = 0;
2493         runqueue_t *busiest = NULL;
2494         int i;
2495
2496         for_each_cpu_mask(i, group->cpumask) {
2497                 load = source_load(i);
2498
2499                 if (load > max_load) {
2500                         max_load = load;
2501                         busiest = cpu_rq(i);
2502                 }
2503         }
2504
2505         return busiest;
2506 }
2507
2508 /*
2509  * Check this_cpu to ensure it is balanced within domain. Attempt to move
2510  * tasks if there is an imbalance.
2511  *
2512  * Called with this_rq unlocked.
2513  */
2514 static int load_balance(int this_cpu, runqueue_t *this_rq,
2515                         struct sched_domain *sd, enum idle_type idle)
2516 {
2517         struct sched_group *group;
2518         runqueue_t *busiest;
2519         unsigned long imbalance;
2520         int nr_moved;
2521
2522         spin_lock(&this_rq->lock);
2523         schedstat_inc(sd, lb_cnt[idle]);
2524
2525         group = find_busiest_group(sd, this_cpu, &imbalance, idle);
2526         if (!group) {
2527                 schedstat_inc(sd, lb_nobusyg[idle]);
2528                 goto out_balanced;
2529         }
2530
2531         busiest = find_busiest_queue(group);
2532         if (!busiest) {
2533                 schedstat_inc(sd, lb_nobusyq[idle]);
2534                 goto out_balanced;
2535         }
2536
2537         /*
2538          * This should be "impossible", but since load
2539          * balancing is inherently racy and statistical,
2540          * it could happen in theory.
2541          */
2542         if (unlikely(busiest == this_rq)) {
2543                 WARN_ON(1);
2544                 goto out_balanced;
2545         }
2546
2547         schedstat_add(sd, lb_imbalance[idle], imbalance);
2548
2549         nr_moved = 0;
2550         if (busiest->nr_running > 1) {
2551                 /*
2552                  * Attempt to move tasks. If find_busiest_group has found
2553                  * an imbalance but busiest->nr_running <= 1, the group is
2554                  * still unbalanced. nr_moved simply stays zero, so it is
2555                  * correctly treated as an imbalance.
2556                  */
2557                 double_lock_balance(this_rq, busiest);
2558                 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2559                                                 imbalance, sd, idle);
2560                 spin_unlock(&busiest->lock);
2561         }
2562         spin_unlock(&this_rq->lock);
2563
2564         if (!nr_moved) {
2565                 schedstat_inc(sd, lb_failed[idle]);
2566                 sd->nr_balance_failed++;
2567
2568                 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
2569                         int wake = 0;
2570
2571                         spin_lock(&busiest->lock);
2572                         if (!busiest->active_balance) {
2573                                 busiest->active_balance = 1;
2574                                 busiest->push_cpu = this_cpu;
2575                                 wake = 1;
2576                         }
2577                         spin_unlock(&busiest->lock);
2578                         if (wake)
2579                                 wake_up_process(busiest->migration_thread);
2580
2581                         /*
2582                          * We've kicked active balancing, reset the failure
2583                          * counter.
2584                          */
2585                         sd->nr_balance_failed = sd->cache_nice_tries;
2586                 }
2587
2588                 /*
2589                  * We were unbalanced, but unsuccessful in move_tasks(),
2590                  * so bump the balance_interval to lessen the lock contention.
2591                  */
2592                 if (sd->balance_interval < sd->max_interval)
2593                         sd->balance_interval++;
2594         } else {
2595                 sd->nr_balance_failed = 0;
2596
2597                 /* We were unbalanced, so reset the balancing interval */
2598                 sd->balance_interval = sd->min_interval;
2599         }
2600
2601         return nr_moved;
2602
2603 out_balanced:
2604         spin_unlock(&this_rq->lock);
2605
2606         /* tune up the balancing interval */
2607         if (sd->balance_interval < sd->max_interval)
2608                 sd->balance_interval *= 2;
2609
2610         return 0;
2611 }
2612
2613 /*
2614  * Check this_cpu to ensure it is balanced within domain. Attempt to move
2615  * tasks if there is an imbalance.
2616  *
2617  * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
2618  * this_rq is locked.
2619  */
2620 static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2621                                 struct sched_domain *sd)
2622 {
2623         struct sched_group *group;
2624         runqueue_t *busiest = NULL;
2625         unsigned long imbalance;
2626         int nr_moved = 0;
2627
2628         schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
2629         group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
2630         if (!group) {
2631                 schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
2632                 goto out;
2633         }
2634
2635         busiest = find_busiest_queue(group);
2636         if (!busiest || busiest == this_rq) {
2637                 schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
2638                 goto out;
2639         }
2640
2641         /* Attempt to move tasks */
2642         double_lock_balance(this_rq, busiest);
2643
2644         schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
2645         nr_moved = move_tasks(this_rq, this_cpu, busiest,
2646                                         imbalance, sd, NEWLY_IDLE);
2647         if (!nr_moved)
2648                 schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
2649
2650         spin_unlock(&busiest->lock);
2651
2652 out:
2653         return nr_moved;
2654 }
2655 #endif /* CONFIG_CKRM_CPU_SCHEDULE*/
2656
2657
2658 /*
2659  * idle_balance is called by schedule() if this_cpu is about to become
2660  * idle. Attempts to pull tasks from other CPUs.
2661  */
2662 static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
2663 {
2664         struct sched_domain *sd;
2665
2666         for_each_domain(this_cpu, sd) {
2667                 if (sd->flags & SD_BALANCE_NEWIDLE) {
2668                         if (load_balance_newidle(this_cpu, this_rq, sd)) {
2669                                 /* We've pulled tasks over so stop searching */
2670                                 break;
2671                         }
2672                 }
2673         }
2674 }
2675
2676 #ifdef CONFIG_SCHED_SMT
2677 static int cpu_and_siblings_are_idle(int cpu)
2678 {
2679         int sib;
2680         for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
2681                 if (idle_cpu(sib))
2682                         continue;
2683                 return 0;
2684         }
2685
2686         return 1;
2687 }
2688 #else
2689 #define cpu_and_siblings_are_idle(A) idle_cpu(A)
2690 #endif
2691
2692
2693 /*
2694  * active_load_balance is run by migration threads. It pushes running tasks
2695  * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
2696  * running on each physical CPU where possible, and avoids physical /
2697  * logical imbalances.
2698  *
2699  * Called with busiest_rq locked.
2700  */
2701 static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
2702 {
2703         struct sched_domain *sd;
2704         struct sched_group *cpu_group;
2705         cpumask_t visited_cpus;
2706
2707         schedstat_inc(busiest_rq, alb_cnt);
2708         /*
2709          * Search for suitable CPUs to push tasks to in successively higher
2710          * domains with SD_LOAD_BALANCE set.
2711          */
2712         visited_cpus = CPU_MASK_NONE;
2713         for_each_domain(busiest_cpu, sd) {
2714                 if (!(sd->flags & SD_LOAD_BALANCE) || busiest_rq->nr_running <= 1)
2715                         break; /* no more domains to search or no more tasks to move */
2716
2717                 cpu_group = sd->groups;
2718                 do { /* sched_groups should either use list_heads or be merged into the domains structure */
2719                         int cpu, target_cpu = -1;
2720                         runqueue_t *target_rq;
2721
2722                         for_each_cpu_mask(cpu, cpu_group->cpumask) {
2723                                 if (cpu_isset(cpu, visited_cpus) || cpu == busiest_cpu ||
2724                                     !cpu_and_siblings_are_idle(cpu)) {
2725                                         cpu_set(cpu, visited_cpus);
2726                                         continue;
2727                                 }
2728                                 target_cpu = cpu;
2729                                 break;
2730                         }
2731                         if (target_cpu == -1)
2732                                 goto next_group; /* failed to find a suitable target cpu in this domain */
2733
2734                         target_rq = cpu_rq(target_cpu);
2735
2736                         /*
2737                          * This condition is "impossible"; if it occurs we need to fix it.
2738                          * Reported by Bjorn Helgaas on a 128-cpu setup.
2739                          */
2740                         BUG_ON(busiest_rq == target_rq);
2741
2742                         /* move a task from busiest_rq to target_rq */
2743                         double_lock_balance(busiest_rq, target_rq);
2744                         if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE)) {
2745                                 schedstat_inc(busiest_rq, alb_lost);
2746                                 schedstat_inc(target_rq, alb_gained);
2747                         } else {
2748                                 schedstat_inc(busiest_rq, alb_failed);
2749                         }
2750                         spin_unlock(&target_rq->lock);
2751 next_group:
2752                         cpu_group = cpu_group->next;
2753                 } while (cpu_group != sd->groups && busiest_rq->nr_running > 1);
2754         }
2755 }
2756
2757 /*
2758  * rebalance_tick will get called every timer tick, on every CPU.
2759  *
2760  * It checks each scheduling domain to see if it is due to be balanced,
2761  * and initiates a balancing operation if so.
2762  *
2763  * Balancing parameters are set up in arch_init_sched_domains.
2764  */
2765
2766 /* Don't have all balancing operations going off at once */
2767 #define CPU_OFFSET(cpu) (HZ * (cpu) / NR_CPUS)
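/*
 * For example (values purely illustrative): with HZ=1000 and NR_CPUS=8,
 * CPU 3 evaluates its balancing intervals 375 jiffies after CPU 0, so
 * the rebalance work is staggered across CPUs instead of firing on every
 * CPU in the same tick.
 */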
2768
2769 static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
2770                            enum idle_type idle)
2771 {
2772         unsigned long old_load, this_load;
2773         unsigned long j = jiffies + CPU_OFFSET(this_cpu);
2774         struct sched_domain *sd;
2775
2776         /* Update our load */
2777         old_load = this_rq->cpu_load;
2778         this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
2779         /*
2780          * Round up the averaging division if load is increasing. This
2781          * prevents us from getting stuck on 9 if the load is 10, for
2782          * example.
2783          */
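        /*
         * Worked example: with old_load 9 and this_load 10, (9 + 10)/2
         * truncates back to 9 every tick; bumping old_load to 10 first
         * gives (10 + 10)/2 = 10.
         */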
2784         if (this_load > old_load)
2785                 old_load++;
2786         this_rq->cpu_load = (old_load + this_load) / 2;
2787
2788         for_each_domain(this_cpu, sd) {
2789                 unsigned long interval;
2790
2791                 if (!(sd->flags & SD_LOAD_BALANCE))
2792                         continue;
2793
2794                 interval = sd->balance_interval;
2795                 if (idle != SCHED_IDLE)
2796                         interval *= sd->busy_factor;
2797
2798                 /* scale ms to jiffies */
2799                 interval = msecs_to_jiffies(interval);
2800                 if (unlikely(!interval))
2801                         interval = 1;
2802
2803                 if (j - sd->last_balance >= interval) {
2804                         if (load_balance(this_cpu, this_rq, sd, idle)) {
2805                                 /* We've pulled tasks over so no longer idle */
2806                                 idle = NOT_IDLE;
2807                         }
2808                         sd->last_balance += interval;
2809                 }
2810         }
2811 }
2812 #else /* SMP*/
2813 /*
2814  * on UP we do not need to balance between CPUs:
2815  */
2816 static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
2817 {
2818 }
2819 static inline void idle_balance(int cpu, runqueue_t *rq)
2820 {
2821 }
2822 #endif
2823
2824 static inline int wake_priority_sleeper(runqueue_t *rq)
2825 {
2826         int ret = 0;
2827 #ifdef CONFIG_SCHED_SMT
2828         spin_lock(&rq->lock);
2829         /*
2830          * If an SMT sibling task has been put to sleep for priority
2831          * reasons, reschedule the idle task to see if it can now run.
2832          */
2833         if (rq->nr_running) {
2834                 resched_task(rq->idle);
2835                 ret = 1;
2836         }
2837         spin_unlock(&rq->lock);
2838 #endif
2839         return ret;
2840 }
2841
2842 DEFINE_PER_CPU(struct kernel_stat, kstat);
2843 EXPORT_PER_CPU_SYMBOL(kstat);
2844
2845 /*
2846  * We place interactive tasks back into the active array, if possible.
2847  *
2848  * To guarantee that this does not starve expired tasks we ignore the
2849  * interactivity of a task if the first expired task had to wait more
2850  * than a 'reasonable' amount of time. This deadline timeout is
2851  * load-dependent, as the frequency of array switches decreases with
2852  * an increasing number of running tasks. We also ignore the interactivity
2853  * if a better static_prio task has expired:
2854  */
2855
2856 #ifndef CONFIG_CKRM_CPU_SCHEDULE
2857 #define EXPIRED_STARVING(rq) \
2858         ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
2859                 (jiffies - (rq)->expired_timestamp >= \
2860                         STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
2861                         ((rq)->curr->static_prio > (rq)->best_expired_prio))
2862 #else
2863 #define EXPIRED_STARVING(rq) \
2864                 (STARVATION_LIMIT && ((rq)->expired_timestamp && \
2865                 (jiffies - (rq)->expired_timestamp >= \
2866                         STARVATION_LIMIT * (lrq_nr_running(rq)) + 1)))
2867 #endif
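/*
 * In other words: the expired array counts as starving once its oldest
 * task has waited longer than STARVATION_LIMIT ticks per runnable task
 * (and, in the non-CKRM case, also whenever the currently running task
 * has a worse static priority than the best task already expired).
 */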
2868
2869 /*
2870  * This function gets called by the timer code, with HZ frequency.
2871  * We call it with interrupts disabled.
2872  *
2873  * It also gets called by the fork code, when changing the parent's
2874  * timeslices.
2875  */
2876 void scheduler_tick(int user_ticks, int sys_ticks)
2877 {
2878         int cpu = smp_processor_id();
2879         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2880         runqueue_t *rq = this_rq();
2881         task_t *p = current;
2882         struct vx_info *vxi = p->vx_info;
2883
2884         rq->timestamp_last_tick = sched_clock();
2885
2886         if (rcu_pending(cpu))
2887                 rcu_check_callbacks(cpu, user_ticks);
2888
2889
2890         if (vxi) {
2891                 vxi->sched.cpu[cpu].user_ticks += user_ticks;
2892                 vxi->sched.cpu[cpu].sys_ticks += sys_ticks;
2893         }
2894
2895         /* note: this timer irq context must be accounted for as well */
2896         if (hardirq_count() - HARDIRQ_OFFSET) {
2897                 cpustat->irq += sys_ticks;
2898                 sys_ticks = 0;
2899         } else if (softirq_count()) {
2900                 cpustat->softirq += sys_ticks;
2901                 sys_ticks = 0;
2902         }
2903
2904         if (p == rq->idle) {
2905                 if (atomic_read(&rq->nr_iowait) > 0)
2906                         cpustat->iowait += sys_ticks;
2907                         // vx_cpustat_acc(vxi, iowait, cpu, cpustat, sys_ticks);
2908                 else
2909                         cpustat->idle += sys_ticks;
2910                         // vx_cpustat_acc(vxi, idle, cpu, cpustat, sys_ticks);
2911
2912                 if (wake_priority_sleeper(rq))
2913                         goto out;
2914                 ckrm_sched_tick(jiffies,cpu,rq_ckrm_load(rq));
2915
2916 #ifdef CONFIG_VSERVER_HARDCPU_IDLE
2917                 if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
2918                         set_need_resched();
2919 #endif
2920                 rebalance_tick(cpu, rq, SCHED_IDLE);
2921                 return;
2922         }
2923         if (TASK_NICE(p) > 0)
2924                 cpustat->nice += user_ticks;
2925         else
2926                 cpustat->user += user_ticks;
2927         cpustat->system += sys_ticks;
2928
2929         /* Task might have expired already, but not scheduled off yet */
2930         if (p->array != rq_active(p,rq)) {
2931                 set_tsk_need_resched(p);
2932                 goto out;
2933         }
2934         spin_lock(&rq->lock);
2935         /*
2936          * The task was running during this tick - update the
2937          * time slice counter. Note: we do not update a thread's
2938          * priority until it either goes to sleep or uses up its
2939          * timeslice. This makes it possible for interactive tasks
2940          * to use up their timeslices at their highest priority levels.
2941          */
2942         if (rt_task(p)) {
2943                 /*
2944                  * RR tasks need a special form of timeslice management.
2945                  * FIFO tasks have no timeslices.
2946                  */
2947                 if ((p->policy == SCHED_RR) && !--p->time_slice) {
2948                         p->time_slice = task_timeslice(p);
2949                         p->first_time_slice = 0;
2950                         set_tsk_need_resched(p);
2951
2952                         /* put it at the end of the queue: */
2953                         dequeue_task(p, rq_active(p,rq));
2954                         enqueue_task(p, rq_active(p,rq));
2955                 }
2956                 goto out_unlock;
2957         }
2958         if (vx_need_resched(p)) {
2959 #ifdef CONFIG_CKRM_CPU_SCHEDULE
2960                 /* Hubertus ... we can abstract this out */
2961                 ckrm_lrq_t* rq = get_task_lrq(p);
2962 #endif
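                /*
                 * Note: under CKRM the declaration above deliberately
                 * shadows the outer runqueue 'rq', so the rq->active,
                 * rq->expired and rq->expired_timestamp accesses below
                 * operate on the per-class local runqueue.
                 */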
2963                 dequeue_task(p, rq->active);
2964                 set_tsk_need_resched(p);
2965                 p->prio = effective_prio(p);
2966                 p->time_slice = task_timeslice(p);
2967                 p->first_time_slice = 0;
2968
2969                 if (!rq->expired_timestamp)
2970                         rq->expired_timestamp = jiffies;
2971                 if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
2972                         enqueue_task(p, rq->expired);
2973                         if (p->static_prio < this_rq()->best_expired_prio)
2974                                 this_rq()->best_expired_prio = p->static_prio;
2975                 } else
2976                         enqueue_task(p, rq->active);
2977         } else {
2978                 /*
2979                  * Prevent a too long timeslice allowing a task to monopolize
2980                  * the CPU. We do this by splitting up the timeslice into
2981                  * smaller pieces.
2982                  *
2983                  * Note: this does not mean the task's timeslices expire or
2984                  * get lost in any way, they just might be preempted by
2985                  * another task of equal priority. (one with higher
2986                  * priority would have preempted this task already.) We
2987                  * requeue this task to the end of the list on this priority
2988                  * level, which is in essence a round-robin of tasks with
2989                  * equal priority.
2990                  *
2991                  * This only applies to tasks in the interactive
2992                  * delta range with at least TIMESLICE_GRANULARITY to requeue.
2993                  */
2994                 if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
2995                         p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
2996                         (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
2997                         (p->array == rq_active(p,rq))) {
2998
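                        /*
                         * This fires every TIMESLICE_GRANULARITY ticks of
                         * CPU time consumed while at least one full granule
                         * of the slice remains: rotate the task to the back
                         * of its priority queue.
                         */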
2999                         dequeue_task(p, rq_active(p,rq));
3000                         set_tsk_need_resched(p);
3001                         p->prio = effective_prio(p);
3002                         enqueue_task(p, rq_active(p,rq));
3003                 }
3004         }
3005 out_unlock:
3006         spin_unlock(&rq->lock);
3007 out:
3008         ckrm_sched_tick(jiffies,cpu,rq_ckrm_load(rq));
3009         rebalance_tick(cpu, rq, NOT_IDLE);
3010 }
3011
3012 #ifdef CONFIG_SCHED_SMT
3013 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
3014 {
3015         struct sched_domain *sd = this_rq->sd;
3016         cpumask_t sibling_map;
3017         int i;
3018
3019         if (!(sd->flags & SD_SHARE_CPUPOWER))
3020                 return;
3021
3035         /*
3036          * Unlock the current runqueue because we have to lock in
3037          * CPU order to avoid deadlocks. Caller knows that we might
3038          * unlock. We keep IRQs disabled.
3039          */
3040         spin_unlock(&this_rq->lock);
3041
3042         sibling_map = sd->span;
3043
3044         for_each_cpu_mask(i, sibling_map)
3045                 spin_lock(&cpu_rq(i)->lock);
3046         /*
3047          * We clear this CPU from the mask. This both simplifies the
3048          * inner loop and keeps this_rq locked when we exit:
3049          */
3050         cpu_clear(this_cpu, sibling_map);
3051
3052         for_each_cpu_mask(i, sibling_map) {
3053                 runqueue_t *smt_rq = cpu_rq(i);
3054
3055                 /*
3056                  * If an SMT sibling task is sleeping due to priority
3057                  * reasons, wake it up now.
3058                  */
3059                 if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running)
3060                         resched_task(smt_rq->idle);
3061         }
3062
3063         for_each_cpu_mask(i, sibling_map)
3064                 spin_unlock(&cpu_rq(i)->lock);
3065         /*
3066          * We exit with this_cpu's rq still held and IRQs
3067          * still disabled:
3068          */
3069 }
3070
3071 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
3072 {
3073         struct sched_domain *sd = this_rq->sd;
3074         cpumask_t sibling_map;
3075         prio_array_t *array;
3076         int ret = 0, i;
3077         task_t *p;
3078
3079         if (!(sd->flags & SD_SHARE_CPUPOWER))
3080                 return 0;
3081
3082         /*
3083          * The same locking rules and details apply as for
3084          * wake_sleeping_dependent():
3085          */
3086         spin_unlock(&this_rq->lock);
3087         sibling_map = sd->span;
3088         for_each_cpu_mask(i, sibling_map)
3089                 spin_lock(&cpu_rq(i)->lock);
3090         cpu_clear(this_cpu, sibling_map);
3091
3092         /*
3093          * Establish next task to be run - it might have gone away because
3094          * we released the runqueue lock above:
3095          */
3096         if (!this_rq->nr_running)
3097                 goto out_unlock;
3098         array = this_rq->active;
3099         if (!array->nr_active)
3100                 array = this_rq->expired;
3101         BUG_ON(!array->nr_active);
3102
3103         p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
3104                 task_t, run_list);
3105
3106         for_each_cpu_mask(i, sibling_map) {
3107                 runqueue_t *smt_rq = cpu_rq(i);
3108                 task_t *smt_curr = smt_rq->curr;
3109
3110                 /*
3111                  * If a user task with lower static priority than the
3112                  * running task on the SMT sibling is trying to schedule,
3113                  * delay it till there is proportionately less timeslice
3114                  * left of the sibling task to prevent a lower priority
3115                  * task from using an unfair proportion of the
3116                  * physical cpu's resources. -ck
3117                  */
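                /*
                 * Numeric sketch of the check below (the per_cpu_gain
                 * value is only assumed here; it comes from the sched
                 * domain setup): with sd->per_cpu_gain == 25, a sibling
                 * task that still has 100 ticks left is weighted as
                 * 100 * (100 - 25) / 100 = 75.  If p's full
                 * task_timeslice() is only 60 ticks, 75 > 60 holds and
                 * ret is set, so this CPU runs its idle thread for now
                 * rather than letting the lower-priority p eat into the
                 * shared core.  The second check below is the mirror
                 * image and instead reschedules the sibling when p is
                 * the higher-priority task.
                 */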
3118                 if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
3119                         task_timeslice(p) || rt_task(smt_curr)) &&
3120                         p->mm && smt_curr->mm && !rt_task(p))
3121                                 ret = 1;
3122
3123                 /*
3124                  * Reschedule a lower priority task on the SMT sibling,
3125                  * or wake it up if it has been put to sleep for priority
3126                  * reasons.
3127                  */
3128                 if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
3129                         task_timeslice(smt_curr) || rt_task(p)) &&
3130                         smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
3131                         (smt_curr == smt_rq->idle && smt_rq->nr_running))
3132                                 resched_task(smt_curr);
3133         }
3134 out_unlock:
3135         for_each_cpu_mask(i, sibling_map)
3136                 spin_unlock(&cpu_rq(i)->lock);
3137         return ret;
3138 }
3139 #else
3140 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
3141 {
3142 }
3143
3144 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
3145 {
3146         return 0;
3147 }
3148 #endif
3149
3150 /*
3151  * schedule() is the main scheduler function.
3152  */
3153 asmlinkage void __sched schedule(void)
3154 {
3155         long *switch_count;
3156         task_t *prev, *next;
3157         runqueue_t *rq;
3158         prio_array_t *array;
3159         unsigned long long now;
3160         unsigned long run_time;
3161         int cpu;
3162 #ifdef  CONFIG_VSERVER_HARDCPU
3163         struct vx_info *vxi;
3164         int maxidle = -HZ;
3165 #endif
3166
3167         /*
3168          * If a crash dump is in progress, the other CPUs
3169          * need to wait until it completes.
3170          * NB: this code is optimized away for kernels without
3171          * dumping enabled.
3172          */
3173          if (unlikely(dump_oncpu))
3174                  goto dump_scheduling_disabled;
3175
3176
3177         /*
3178          * Test if we are atomic.  Since do_exit() needs to call into
3179          * schedule() atomically, we ignore that path for now.
3180          * Otherwise, whine if we are scheduling when we should not be.
3181          */
3182         if (likely(!(current->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)))) {
3183                 if (unlikely(in_atomic())) {
3184                         printk(KERN_ERR "scheduling while atomic: "
3185                                 "%s/0x%08x/%d\n",
3186                                 current->comm, preempt_count(), current->pid);
3187                         dump_stack();
3188                 }
3189         }
3190         profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3191
3192 need_resched:
3193         preempt_disable();
3194         prev = current;
3195         release_kernel_lock(prev);
3196 need_resched_nonpreemptible:
3197         rq = this_rq();
3198
3199         /*
3200          * The idle thread is not allowed to schedule!
3201          * Remove this check after it has been exercised a bit.
3202          */
3203         if (unlikely(current == rq->idle) && current->state != TASK_RUNNING) {
3204                 printk(KERN_ERR "bad: scheduling from the idle thread!\n");
3205                 dump_stack();
3206         }
3207
3208         schedstat_inc(rq, sched_cnt);
3209         now = sched_clock();
3210         if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG))
3211                 run_time = now - prev->timestamp;
3212         else
3213                 run_time = NS_MAX_SLEEP_AVG;
3214
3215         /*
3216          * Tasks with interactive credits get charged less run_time
3217          * at high sleep_avg to delay them losing their interactive
3218          * status
3219          */
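        /*
         * Illustration (the bonus scale is this scheduler's usual 0..10
         * range): a high-credit task whose sleep_avg is near the maximum
         * has CURRENT_BONUS() close to 10, so only about a tenth of the
         * time it just ran is charged against its sleep_avg further down;
         * a task with an empty sleep_avg is charged in full.  The "?: 1"
         * in the division guards against a zero bonus.
         */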
3220         if (HIGH_CREDIT(prev))
3221                 run_time /= (CURRENT_BONUS(prev) ? : 1);
3222
3223         spin_lock_irq(&rq->lock);
3224
3225 #ifdef CONFIG_CKRM_CPU_SCHEDULE
3226         if (prev != rq->idle) {
3227                 unsigned long long run = now - prev->timestamp;
3228                 ckrm_lrq_t * lrq = get_task_lrq(prev);
3229
3230                 lrq->lrq_load -= task_load(prev);
3231                 cpu_demand_event(&prev->demand_stat,CPU_DEMAND_DESCHEDULE,run);
3232                 lrq->lrq_load += task_load(prev);
3233
3234                 cpu_demand_event(get_task_lrq_stat(prev),CPU_DEMAND_DESCHEDULE,run);
3235                 update_local_cvt(prev, run);
3236         }
3237 #endif
3238
3239         if (unlikely(current->flags & PF_DEAD))
3240                 current->state = EXIT_DEAD;
3241         /*
3242          * If we are entering off of a kernel preemption, go straight
3243          * to picking the next task.
3244          */
3245         switch_count = &prev->nivcsw;
3246         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3247                 switch_count = &prev->nvcsw;
3248                 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
3249                                 unlikely(signal_pending(prev))))
3250                         prev->state = TASK_RUNNING;
3251                 else {
3252                         if (prev->state == TASK_UNINTERRUPTIBLE)
3253                                 rq->nr_uninterruptible++;
3254                         deactivate_task(prev, rq);
3255                 }
3256         }
3257
3258 #ifdef CONFIG_VSERVER_HARDCPU
3259         if (!list_empty(&rq->hold_queue)) {
3260                 struct list_head *l, *n;
3261                 int ret;
3262
3263                 vxi = NULL;
3264                 list_for_each_safe(l, n, &rq->hold_queue) {
3265                         next = list_entry(l, task_t, run_list);
3266                         if (vxi == next->vx_info)
3267                                 continue;
3268
3269                         vxi = next->vx_info;
3270                         ret = vx_tokens_recalc(vxi);
3271                         // tokens = vx_tokens_avail(next);
3272
3273                         if (ret > 0) {
3274                                 list_del(&next->run_list);
3275                                 next->state &= ~TASK_ONHOLD;
3276                                 // one less waiting
3277                                 vx_onhold_dec(vxi);
3278                                 array = rq->expired;
3279                                 next->prio = MAX_PRIO-1;
3280                                 enqueue_task(next, array);
3281                                 rq->nr_running++;
3282                                 if (next->static_prio < rq->best_expired_prio)
3283                                         rq->best_expired_prio = next->static_prio;
3284
3285                                 // printk("··· %8lu unhold %p [%d]\n", jiffies, next, next->prio);
3286                                 break;
3287                         }
3288                         if ((ret < 0) && (maxidle < ret))
3289                                 maxidle = ret;
3290                 }
3291         }
3292         rq->idle_tokens = -maxidle;
3293
3294 pick_next:
3295 #endif
3296
3297         cpu = smp_processor_id();
3298         if (unlikely(!rq->nr_running)) {
3299 go_idle:
3300                 idle_balance(cpu, rq);
3301                 if (!rq->nr_running) {
3302                         next = rq->idle;
3303                         rq->expired_timestamp = 0;
3304                         wake_sleeping_dependent(cpu, rq);
3305                         /*
3306                          * wake_sleeping_dependent() might have released
3307                          * the runqueue, so break out if we got new
3308                          * tasks meanwhile:
3309                          */
3310                         if (!rq->nr_running)
3311                                 goto switch_tasks;
3312                 }
3313         } else {
3314                 if (dependent_sleeper(cpu, rq)) {
3315                         next = rq->idle;
3316                         goto switch_tasks;
3317                 }
3318                 /*
3319                  * dependent_sleeper() releases and reacquires the runqueue
3320                  * lock, hence go into the idle loop if the rq went
3321                  * empty meanwhile:
3322                  */
3323                 if (unlikely(!rq->nr_running))
3324                         goto go_idle;
3325         }
3326
3327         /* MEF: CKRM refactored code into rq_get_next_task(); make
3328          * sure that, when upgrading, changes are reflected in both
3329          * versions of the code.
3330          */
3331         next = rq_get_next_task(rq);
3332
3333 #ifdef  CONFIG_VSERVER_HARDCPU
3334         vxi = next->vx_info;
3335         if (vx_info_flags(vxi, VXF_SCHED_PAUSE|VXF_SCHED_HARD, 0)) {
3336                 int ret = vx_tokens_recalc(vxi);
3337
3338                 if (unlikely(ret <= 0)) {
3339                         if (ret && (rq->idle_tokens > -ret))
3340                                 rq->idle_tokens = -ret;
3341                         __deactivate_task(next, rq);
3342                         recalc_task_prio(next, now);
3343                         // a new one on hold
3344                         vx_onhold_inc(vxi);
3345                         next->state |= TASK_ONHOLD;
3346                         list_add_tail(&next->run_list, &rq->hold_queue);
3347                         //printk("··· %8lu hold   %p [%d]\n", jiffies, next, next->prio);
3348                         goto pick_next;
3349                 }
3350         }
3351 #endif
3352
3353         if (!rt_task(next) && next->activated > 0) {
3354                 unsigned long long delta = now - next->timestamp;
3355
3356                 if (next->activated == 1)
3357                         delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
3358
3359                 array = next->array;
3360                 dequeue_task(next, array);
3361                 recalc_task_prio(next, next->timestamp + delta);
3362                 enqueue_task(next, array);
3363         }
3364         next->activated = 0;
3365 switch_tasks:
3366         if (next == rq->idle)
3367                 schedstat_inc(rq, sched_goidle);
3368         prefetch(next);
3369         clear_tsk_need_resched(prev);
3370         rcu_qsctr_inc(task_cpu(prev));
3371
3372         prev->sleep_avg -= run_time;
3373         if ((long)prev->sleep_avg <= 0) {
3374                 prev->sleep_avg = 0;
3375                 if (!(HIGH_CREDIT(prev) || LOW_CREDIT(prev)))
3376                         prev->interactive_credit--;
3377         }
3378         prev->timestamp = prev->last_ran = now;
3379
3380         sched_info_switch(prev, next);
3381         if (likely(prev != next)) {
3382                 next->timestamp = now;
3383                 rq->nr_switches++;
3384                 rq->curr = next;
3385                 ++*switch_count;
3386
3387                 prepare_arch_switch(rq, next);
3388                 prev = context_switch(rq, prev, next);
3389                 barrier();
3390
3391                 finish_task_switch(prev);
3392         } else
3393                 spin_unlock_irq(&rq->lock);
3394
3395         prev = current;
3396         if (unlikely(reacquire_kernel_lock(prev) < 0))
3397                 goto need_resched_nonpreemptible;
3398         preempt_enable_no_resched();
3399         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3400                 goto need_resched;
3401
3402         return;
3403
3404  dump_scheduling_disabled:
3405         /* allow scheduling only if this is the dumping cpu */
3406         if (dump_oncpu != smp_processor_id()+1) {
3407                 while (dump_oncpu)
3408                         cpu_relax();
3409         }
3410         return;
3411 }
3412
3413 EXPORT_SYMBOL(schedule);
3414 #ifdef CONFIG_PREEMPT
3415 /*
3416  * this is the entry point to schedule() from in-kernel preemption
3417  * off of preempt_enable.  Kernel preemptions off return from interrupt
3418  * occur there and call schedule() directly.
3419  */
3420 asmlinkage void __sched preempt_schedule(void)
3421 {
3422         struct thread_info *ti = current_thread_info();
3423
3424         /*
3425          * If there is a non-zero preempt_count or interrupts are disabled,
3426          * we do not want to preempt the current task.  Just return..
3427          */
3428         if (unlikely(ti->preempt_count || irqs_disabled()))
3429                 return;
3430
3431 need_resched:
3432         ti->preempt_count = PREEMPT_ACTIVE;
3433         schedule();
3434         ti->preempt_count = 0;
3435
3436         /* we could miss a preemption opportunity between schedule and now */
3437         barrier();
3438         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3439                 goto need_resched;
3440 }
3441
3442 EXPORT_SYMBOL(preempt_schedule);
3443 #endif /* CONFIG_PREEMPT */
3444
3445 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
3446 {
3447         task_t *p = curr->task;
3448         return try_to_wake_up(p, mode, sync);
3449 }
3450
3451 EXPORT_SYMBOL(default_wake_function);
3452
3453 /*
3454  * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
3455  * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
3456  * number) then we wake all the non-exclusive tasks and one exclusive task.
3457  *
3458  * There are circumstances in which we can try to wake a task which has already
3459  * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
3460  * zero in this (rare) case, and we handle it by continuing to scan the queue.
3461  */
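/*
 * Example of the nr_exclusive accounting below: with a queue holding
 * [A (non-exclusive), B (non-exclusive), C (exclusive), D (exclusive)]
 * and nr_exclusive == 1, the walk wakes A and B (WQ_FLAG_EXCLUSIVE is not
 * set, so the counter is untouched), then wakes C, whose successful
 * wakeup drops nr_exclusive to zero and ends the scan, so D stays asleep.
 */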
3462 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3463                              int nr_exclusive, int sync, void *key)
3464 {
3465         struct list_head *tmp, *next;
3466
3467         list_for_each_safe(tmp, next, &q->task_list) {
3468                 wait_queue_t *curr;
3469                 unsigned flags;
3470                 curr = list_entry(tmp, wait_queue_t, task_list);
3471                 flags = curr->flags;
3472                 if (curr->func(curr, mode, sync, key) &&
3473                     (flags & WQ_FLAG_EXCLUSIVE) &&
3474                     !--nr_exclusive)
3475                         break;
3476         }
3477 }
3478
3479 /**
3480  * __wake_up - wake up threads blocked on a waitqueue.
3481  * @q: the waitqueue
3482  * @mode: which threads
3483  * @nr_exclusive: how many wake-one or wake-many threads to wake up
3484  */
3485 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
3486                                 int nr_exclusive, void *key)
3487 {
3488         unsigned long flags;
3489
3490         spin_lock_irqsave(&q->lock, flags);
3491         __wake_up_common(q, mode, nr_exclusive, 0, key);
3492         spin_unlock_irqrestore(&q->lock, flags);
3493 }
3494
3495 EXPORT_SYMBOL(__wake_up);
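/*
 * Minimal usage sketch for the wakeup side (illustrative only; my_wq and
 * my_cond are made-up names, wait_event_interruptible() is the usual
 * companion macro from <linux/wait.h>):
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *      static int my_cond;
 *
 *      // waiter
 *      wait_event_interruptible(my_wq, my_cond != 0);
 *
 *      // waker
 *      my_cond = 1;
 *      wake_up(&my_wq);
 *
 * wake_up() funnels into __wake_up() above with nr_exclusive == 1, i.e.
 * all non-exclusive waiters plus at most one exclusive waiter are woken.
 */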
3496
3497 /*
3498  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3499  */
3500 void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
3501 {
3502         __wake_up_common(q, mode, 1, 0, NULL);
3503 }
3504
3505 /**
3506  * __wake_up_sync - wake up threads blocked on a waitqueue.
3507  * @q: the waitqueue
3508  * @mode: which threads
3509  * @nr_exclusive: how many wake-one or wake-many threads to wake up
3510  *
3511  * The sync wakeup differs in that the waker knows that it will schedule
3512  * away soon, so while the target thread will be woken up, it will not
3513  * be migrated to another CPU - ie. the two threads are 'synchronized'
3514  * with each other. This can prevent needless bouncing between CPUs.
3515  *
3516  * On UP it can prevent extra preemption.
3517  */
3518 void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3519 {
3520         unsigned long flags;
3521         int sync = 1;
3522
3523         if (unlikely(!q))
3524                 return;
3525
3526         if (unlikely(!nr_exclusive))
3527                 sync = 0;
3528
3529         spin_lock_irqsave(&q->lock, flags);
3530         __wake_up_common(q, mode, nr_exclusive, sync, NULL);
3531         spin_unlock_irqrestore(&q->lock, flags);
3532 }
3533 EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
3534
3535 void fastcall complete(struct completion *x)
3536 {
3537         unsigned long flags;
3538
3539         spin_lock_irqsave(&x->wait.lock, flags);
3540         x->done++;
3541         __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
3542                          1, 0, NULL);
3543         spin_unlock_irqrestore(&x->wait.lock, flags);
3544 }
3545 EXPORT_SYMBOL(complete);
3546
3547 void fastcall complete_all(struct completion *x)
3548 {
3549         unsigned long flags;
3550
3551         spin_lock_irqsave(&x->wait.lock, flags);
3552         x->done += UINT_MAX/2;
3553         __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
3554                          0, 0, NULL);
3555         spin_unlock_irqrestore(&x->wait.lock, flags);
3556 }
3557 EXPORT_SYMBOL(complete_all);
3558
3559 void fastcall __sched wait_for_completion(struct completion *x)
3560 {
3561         might_sleep();
3562         spin_lock_irq(&x->wait.lock);
3563         if (!x->done) {
3564                 DECLARE_WAITQUEUE(wait, current);
3565
3566                 wait.flags |= WQ_FLAG_EXCLUSIVE;
3567                 __add_wait_queue_tail(&x->wait, &wait);
3568                 do {
3569                         __set_current_state(TASK_UNINTERRUPTIBLE);
3570                         spin_unlock_irq(&x->wait.lock);
3571                         schedule();
3572                         spin_lock_irq(&x->wait.lock);
3573                 } while (!x->done);
3574                 __remove_wait_queue(&x->wait, &wait);
3575         }
3576         x->done--;
3577         spin_unlock_irq(&x->wait.lock);
3578 }
3579 EXPORT_SYMBOL(wait_for_completion);
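/*
 * Typical pairing for the completion API above (a sketch; do_setup() and
 * setup_done are made-up names):
 *
 *      static DECLARE_COMPLETION(setup_done);
 *
 *      // worker side
 *      do_setup();
 *      complete(&setup_done);
 *
 *      // waiting side, sleeps uninterruptibly until ->done is nonzero
 *      wait_for_completion(&setup_done);
 *
 * complete() releases one exclusive waiter per call; complete_all()
 * releases every present and future waiter, which is why it bumps
 * ->done by UINT_MAX/2 rather than by one.
 */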
3580
3581 #define SLEEP_ON_VAR                                    \
3582         unsigned long flags;                            \
3583         wait_queue_t wait;                              \
3584         init_waitqueue_entry(&wait, current);
3585
3586 #define SLEEP_ON_HEAD                                   \
3587         spin_lock_irqsave(&q->lock,flags);              \
3588         __add_wait_queue(q, &wait);                     \
3589         spin_unlock(&q->lock);
3590
3591 #define SLEEP_ON_TAIL                                   \
3592         spin_lock_irq(&q->lock);                        \
3593         __remove_wait_queue(q, &wait);                  \
3594         spin_unlock_irqrestore(&q->lock, flags);
3595
3596 #define SLEEP_ON_BKLCHECK                               \
3597         if (unlikely(!kernel_locked()) &&               \
3598             sleep_on_bkl_warnings < 10) {               \
3599                 sleep_on_bkl_warnings++;                \
3600                 WARN_ON(1);                             \
3601         }
3602
3603 static int sleep_on_bkl_warnings;
3604
3605 void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
3606 {
3607         SLEEP_ON_VAR
3608
3609         SLEEP_ON_BKLCHECK
3610
3611         current->state = TASK_INTERRUPTIBLE;
3612
3613         SLEEP_ON_HEAD
3614         schedule();
3615         SLEEP_ON_TAIL
3616 }
3617
3618 EXPORT_SYMBOL(interruptible_sleep_on);
3619
3620 long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3621 {
3622         SLEEP_ON_VAR
3623
3624         SLEEP_ON_BKLCHECK
3625
3626         current->state = TASK_INTERRUPTIBLE;
3627
3628         SLEEP_ON_HEAD
3629         timeout = schedule_timeout(timeout);
3630         SLEEP_ON_TAIL
3631
3632         return timeout;
3633 }
3634
3635 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3636
3637 long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3638 {
3639         SLEEP_ON_VAR
3640
3641         SLEEP_ON_BKLCHECK
3642
3643         current->state = TASK_UNINTERRUPTIBLE;
3644
3645         SLEEP_ON_HEAD
3646         timeout = schedule_timeout(timeout);
3647         SLEEP_ON_TAIL
3648
3649         return timeout;
3650 }
3651
3652 EXPORT_SYMBOL(sleep_on_timeout);
3653
3654 void set_user_nice(task_t *p, long nice)
3655 {
3656         unsigned long flags;
3657         prio_array_t *array;
3658         runqueue_t *rq;
3659         int old_prio, new_prio, delta;
3660
3661         if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3662                 return;
3663         /*
3664          * We have to be careful, if called from sys_setpriority(),
3665          * the task might be in the middle of scheduling on another CPU.
3666          */
3667         rq = task_rq_lock(p, &flags);
3668         /*
3669          * The RT priorities are set via setscheduler(), but we still
3670          * allow the 'normal' nice value to be set - but as expected
3671          * it won't have any effect on scheduling as long as the task
3672          * is not SCHED_NORMAL:
3673          */
3674         if (rt_task(p)) {
3675                 p->static_prio = NICE_TO_PRIO(nice);
3676                 goto out_unlock;
3677         }
3678         array = p->array;
3679         if (array)
3680                 dequeue_task(p, array);
3681
3682         old_prio = p->prio;
3683         new_prio = NICE_TO_PRIO(nice);
3684         delta = new_prio - old_prio;
3685         p->static_prio = NICE_TO_PRIO(nice);
3686         p->prio += delta;
3687
3688         if (array) {
3689                 enqueue_task(p, array);
3690                 /*
3691                  * If the task increased its priority or is running and
3692                  * lowered its priority, then reschedule its CPU:
3693                  */
3694                 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3695                         resched_task(rq->curr);
3696         }
3697 out_unlock:
3698         task_rq_unlock(rq, &flags);
3699 }
3700
3701 EXPORT_SYMBOL(set_user_nice);
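/*
 * Worked example for set_user_nice(), assuming a SCHED_NORMAL task with
 * no interactivity bonus (with MAX_RT_PRIO == 100, nice 0 maps to
 * prio == static_prio == 120): renicing it to -5 gives new_prio = 115
 * and delta = -5, so the task is requeued on the 115 list and, because
 * delta < 0, resched_task() pokes the runqueue's current task so the
 * boosted task can preempt it.  The delta > 0 case only forces a
 * reschedule when the reniced task is the one currently running.
 */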
3702
3703 #ifdef __ARCH_WANT_SYS_NICE
3704
3705 /*
3706  * sys_nice - change the priority of the current process.
3707  * @increment: priority increment
3708  *
3709  * sys_setpriority is a more generic, but much slower function that
3710  * does similar things.
3711  */
3712 asmlinkage long sys_nice(int increment)
3713 {
3714         int retval;
3715         long nice;
3716
3717         /*
3718          * Setpriority might change our priority at the same moment.
3719          * We don't have to worry. Conceptually one call occurs first
3720          * and we have a single winner.
3721          */
3722         if (increment < 0) {
3723                 if (vx_flags(VXF_IGNEG_NICE, 0))
3724                         return 0;
3725                 if (!capable(CAP_SYS_NICE))
3726                         return -EPERM;
3727                 if (increment < -40)
3728                         increment = -40;
3729         }
3730         if (increment > 40)
3731                 increment = 40;
3732
3733         nice = PRIO_TO_NICE(current->static_prio) + increment;
3734         if (nice < -20)
3735                 nice = -20;
3736         if (nice > 19)
3737                 nice = 19;
3738
3739         retval = security_task_setnice(current, nice);
3740         if (retval)
3741                 return retval;
3742
3743         set_user_nice(current, nice);
3744         return 0;
3745 }
3746
3747 #endif
3748
3749 /**
3750  * task_prio - return the priority value of a given task.
3751  * @p: the task in question.
3752  *
3753  * This is the priority value as seen by users in /proc.
3754  * RT tasks map to negative values (prio minus MAX_RT_PRIO),
3755  * normal tasks to the [0 ... MAX_USER_PRIO-1] range.
3756  */
3757 int task_prio(const task_t *p)
3758 {
3759         return p->prio - MAX_RT_PRIO;
3760 }
3761
3762 /**
3763  * task_nice - return the nice value of a given task.
3764  * @p: the task in question.
3765  */
3766 int task_nice(const task_t *p)
3767 {
3768         return TASK_NICE(p);
3769 }
3770
3771 /**
3772  * idle_cpu - is a given cpu idle currently?
3773  * @cpu: the processor in question.
3774  */
3775 int idle_cpu(int cpu)
3776 {
3777         return cpu_curr(cpu) == cpu_rq(cpu)->idle;
3778 }
3779
3780 EXPORT_SYMBOL_GPL(idle_cpu);
3781
3782 /**
3783  * find_process_by_pid - find a process with a matching PID value.
3784  * @pid: the pid in question.
3785  */
3786 static inline task_t *find_process_by_pid(pid_t pid)
3787 {
3788         return pid ? find_task_by_pid(pid) : current;
3789 }
3790
3791 /* Actually do priority change: must hold rq lock. */
3792 static void __setscheduler(struct task_struct *p, int policy, int prio)
3793 {
3794         BUG_ON(p->array);
3795         p->policy = policy;
3796         p->rt_priority = prio;
3797         if (policy != SCHED_NORMAL)
3798                 p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
3799         else
3800                 p->prio = p->static_prio;
3801 }
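/*
 * Mapping illustration for __setscheduler() (assuming the usual
 * MAX_USER_RT_PRIO of 100): SCHED_FIFO or SCHED_RR with rt_priority 1
 * yields p->prio == 98, rt_priority 99 yields p->prio == 0 (the
 * strongest), while SCHED_NORMAL ignores rt_priority and falls back to
 * the nice-derived static_prio.
 */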
3802
3803 /*
3804  * setscheduler - change the scheduling policy and/or RT priority of a thread.
3805  */
3806 static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3807 {
3808         struct sched_param lp;
3809         int retval = -EINVAL;
3810         int oldprio, oldpolicy = -1;
3811         prio_array_t *array;
3812         unsigned long flags;
3813         runqueue_t *rq;
3814         task_t *p;
3815
3816         if (!param || pid < 0)
3817                 goto out_nounlock;
3818
3819         retval = -EFAULT;
3820         if (copy_from_user(&lp, param, sizeof(struct sched_param)))
3821                 goto out_nounlock;
3822
3823         /*
3824          * We play safe to avoid deadlocks.
3825          */
3826         read_lock_irq(&tasklist_lock);
3827
3828         p = find_process_by_pid(pid);
3829
3830         retval = -ESRCH;
3831         if (!p)
3832                 goto out_unlock;
3833 recheck:
3834         /* double check policy once rq lock held */
3835         if (policy < 0)
3836                 policy = oldpolicy = p->policy;
3837         else {
3838                 retval = -EINVAL;
3839                 if (policy != SCHED_FIFO && policy != SCHED_RR &&
3840                                 policy != SCHED_NORMAL)
3841                         goto out_unlock;
3842         }
3843         /*
3844          * Valid priorities for SCHED_FIFO and SCHED_RR are
3845          * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
3846          */
3847         retval = -EINVAL;
3848         if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
3849                 goto out_unlock;
3850         if ((policy == SCHED_NORMAL) != (lp.sched_priority == 0))
3851                 goto out_unlock;
3852
3853         retval = -EPERM;
3854         if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
3855             !capable(CAP_SYS_NICE))
3856                 goto out_unlock;
3857         if ((current->euid != p->euid) && (current->euid != p->uid) &&
3858             !capable(CAP_SYS_NICE))
3859                 goto out_unlock;
3860
3861         retval = security_task_setscheduler(p, policy, &lp);
3862         if (retval)
3863                 goto out_unlock;
3864         /*
3865          * To be able to change p->policy safely, the appropriate
3866          * runqueue lock must be held.
3867          */
3868         rq = task_rq_lock(p, &flags);
3869         /* recheck policy now with rq lock held */
3870         if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3871                 policy = oldpolicy = -1;
3872                 task_rq_unlock(rq, &flags);
3873                 goto recheck;
3874         }
3875         array = p->array;
3876         if (array)
3877                 deactivate_task(p, task_rq(p));
3878         retval = 0;
3879         oldprio = p->prio;
3880         __setscheduler(p, policy, lp.sched_priority);
3881         if (array) {
3882                 vx_activate_task(p);
3883                 __activate_task(p, task_rq(p));
3884                 /*
3885                  * Reschedule if we are currently running on this runqueue and
3886                  * our priority decreased, or if we are not currently running on
3887                  * this runqueue and our priority is higher than the current's
3888                  */
3889                 if (task_running(rq, p)) {
3890                         if (p->prio > oldprio)
3891                                 resched_task(rq->curr);
3892                 } else if (TASK_PREEMPTS_CURR(p, rq))
3893                         resched_task(rq->curr);
3894         }
3895         task_rq_unlock(rq, &flags);
3896 out_unlock:
3897         read_unlock_irq(&tasklist_lock);
3898 out_nounlock:
3899         return retval;
3900 }
3901
3902 /**
3903  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3904  * @pid: the pid in question.
3905  * @policy: new policy
3906  * @param: structure containing the new RT priority.
3907  */
3908 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
3909                                        struct sched_param __user *param)
3910 {
3911         return setscheduler(pid, policy, param);
3912 }
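/*
 * Userspace sketch for the syscall above (the calls shown are the
 * standard libc wrappers, not anything defined in this file):
 *
 *      #include <sched.h>
 *
 *      struct sched_param sp = { .sched_priority = 10 };
 *
 *      if (sched_setscheduler(pid, SCHED_FIFO, &sp) < 0)
 *              perror("sched_setscheduler");
 *
 * A valid priority can be picked from sched_get_priority_min()/_max()
 * (see further down); the kernel side rejects out-of-range values with
 * -EINVAL and requires CAP_SYS_NICE for the RT classes.
 */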
3913
3914 /**
3915  * sys_sched_setparam - set/change the RT priority of a thread
3916  * @pid: the pid in question.
3917  * @param: structure containing the new RT priority.
3918  */
3919 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
3920 {
3921         return setscheduler(pid, -1, param);
3922 }
3923
3924 /**
3925  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3926  * @pid: the pid in question.
3927  */
3928 asmlinkage long sys_sched_getscheduler(pid_t pid)
3929 {
3930         int retval = -EINVAL;
3931         task_t *p;
3932
3933         if (pid < 0)
3934                 goto out_nounlock;
3935
3936         retval = -ESRCH;
3937         read_lock(&tasklist_lock);
3938         p = find_process_by_pid(pid);
3939         if (p) {
3940                 retval = security_task_getscheduler(p);
3941                 if (!retval)
3942                         retval = p->policy;
3943         }
3944         read_unlock(&tasklist_lock);
3945
3946 out_nounlock:
3947         return retval;
3948 }
3949
3950 /**
3951  * sys_sched_getparam - get the RT priority of a thread
3952  * @pid: the pid in question.
3953  * @param: structure containing the RT priority.
3954  */
3955 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
3956 {
3957         struct sched_param lp;
3958         int retval = -EINVAL;
3959         task_t *p;
3960
3961         if (!param || pid < 0)
3962                 goto out_nounlock;
3963
3964         read_lock(&tasklist_lock);
3965         p = find_process_by_pid(pid);
3966         retval = -ESRCH;
3967         if (!p)
3968                 goto out_unlock;
3969
3970         retval = security_task_getscheduler(p);
3971         if (retval)
3972                 goto out_unlock;
3973
3974         lp.sched_priority = p->rt_priority;
3975         read_unlock(&tasklist_lock);
3976
3977         /*
3978          * This one might sleep, we cannot do it with a spinlock held ...
3979          */
3980         retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3981
3982 out_nounlock:
3983         return retval;
3984
3985 out_unlock:
3986         read_unlock(&tasklist_lock);
3987         return retval;
3988 }
3989
3990 long sched_setaffinity(pid_t pid, cpumask_t new_mask)
3991 {
3992         task_t *p;
3993         int retval;
3994
3995         lock_cpu_hotplug();
3996         read_lock(&tasklist_lock);
3997
3998         p = find_process_by_pid(pid);
3999         if (!p) {
4000                 read_unlock(&tasklist_lock);
4001                 unlock_cpu_hotplug();
4002                 return -ESRCH;
4003         }
4004
4005         /*
4006          * It is not safe to call set_cpus_allowed with the
4007          * tasklist_lock held.  We will bump the task_struct's
4008          * usage count and then drop tasklist_lock.
4009          */
4010         get_task_struct(p);
4011         read_unlock(&tasklist_lock);
4012
4013         retval = -EPERM;
4014         if ((current->euid != p->euid) && (current->euid != p->uid) &&
4015                         !capable(CAP_SYS_NICE))
4016                 goto out_unlock;
4017
4018         retval = set_cpus_allowed(p, new_mask);
4019
4020 out_unlock:
4021         put_task_struct(p);
4022         unlock_cpu_hotplug();
4023         return retval;
4024 }
4025
4026 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4027                              cpumask_t *new_mask)
4028 {
4029         if (len < sizeof(cpumask_t)) {
4030                 memset(new_mask, 0, sizeof(cpumask_t));
4031         } else if (len > sizeof(cpumask_t)) {
4032                 len = sizeof(cpumask_t);
4033         }
4034         return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4035 }
4036
4037 /**
4038  * sys_sched_setaffinity - set the cpu affinity of a process
4039  * @pid: pid of the process
4040  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4041  * @user_mask_ptr: user-space pointer to the new cpu mask
4042  */
4043 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
4044                                       unsigned long __user *user_mask_ptr)
4045 {
4046         cpumask_t new_mask;
4047         int retval;
4048
4049         retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
4050         if (retval)
4051                 return retval;
4052
4053         return sched_setaffinity(pid, new_mask);
4054 }
4055
4056 /*
4057  * Represents all CPUs present in the system.
4058  * In systems capable of hotplug, this map could dynamically grow
4059  * as new CPUs are detected in the system via any platform-specific
4060  * method, such as ACPI.
4061  */
4062
4063 cpumask_t cpu_present_map;
4064 EXPORT_SYMBOL(cpu_present_map);
4065
4066 #ifndef CONFIG_SMP
4067 cpumask_t cpu_online_map = CPU_MASK_ALL;
4068 cpumask_t cpu_possible_map = CPU_MASK_ALL;
4069 #endif
4070
4071 long sched_getaffinity(pid_t pid, cpumask_t *mask)
4072 {
4073         int retval;
4074         task_t *p;
4075
4076         lock_cpu_hotplug();
4077         read_lock(&tasklist_lock);
4078
4079         retval = -ESRCH;
4080         p = find_process_by_pid(pid);
4081         if (!p)
4082                 goto out_unlock;
4083
4084         retval = 0;
4085         cpus_and(*mask, p->cpus_allowed, cpu_possible_map);
4086
4087 out_unlock:
4088         read_unlock(&tasklist_lock);
4089         unlock_cpu_hotplug();
4090         if (retval)
4091                 return retval;
4092
4093         return 0;
4094 }
4095
4096 /**
4097  * sys_sched_getaffinity - get the cpu affinity of a process
4098  * @pid: pid of the process
4099  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4100  * @user_mask_ptr: user-space pointer to hold the current cpu mask
4101  */
4102 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
4103                                       unsigned long __user *user_mask_ptr)
4104 {
4105         int ret;
4106         cpumask_t mask;
4107
4108         if (len < sizeof(cpumask_t))
4109                 return -EINVAL;
4110
4111         ret = sched_getaffinity(pid, &mask);
4112         if (ret < 0)
4113                 return ret;
4114
4115         if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
4116                 return -EFAULT;
4117
4118         return sizeof(cpumask_t);
4119 }
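/*
 * Illustrative userspace counterpart for the two affinity syscalls
 * (the glibc wrappers and cpu_set_t macros shown here are assumptions
 * about the caller's environment, not part of this file):
 *
 *      cpu_set_t set;
 *
 *      CPU_ZERO(&set);
 *      CPU_SET(0, &set);                       // CPU 0 only
 *      sched_setaffinity(pid, sizeof(set), &set);
 *
 *      sched_getaffinity(pid, sizeof(set), &set);
 *
 * CPU_ISSET(0, &set) then tells whether pid may run on CPU 0.  Note that
 * sys_sched_getaffinity() returns sizeof(cpumask_t) on success and
 * rejects user buffers smaller than that with -EINVAL.
 */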
4120
4121 /**
4122  * sys_sched_yield - yield the current processor to other threads.
4123  *
4124  * this function yields the current CPU by moving the calling thread
4125  * to the expired array. If there are no other threads running on this
4126  * CPU then this function will return.
4127  */
4128 asmlinkage long sys_sched_yield(void)
4129 {
4130         runqueue_t *rq = this_rq_lock();
4131         prio_array_t *array = current->array;
4132         prio_array_t *target = rq_expired(current,rq);
4133
4134         schedstat_inc(rq, yld_cnt);
4135         /*
4136          * We implement yielding by moving the task into the expired
4137          * queue.
4138          *
4139          * (special rule: RT tasks will just roundrobin in the active
4140          *  array.)
4141          */
4142         if (rt_task(current))
4143                 target = rq_active(current,rq);
4144
4145 #warning MEF need to fix up SCHEDSTATS code, but I hope this is fixed by the 2.6.10 CKRM patch
4146 #ifdef CONFIG_SCHEDSTATS
4147         if (current->array->nr_active == 1) {
4148                 schedstat_inc(rq, yld_act_empty);
4149                 if (!rq->expired->nr_active)
4150                         schedstat_inc(rq, yld_both_empty);
4151         } else if (!rq->expired->nr_active)
4152                 schedstat_inc(rq, yld_exp_empty);
4153 #endif
4154
4155         dequeue_task(current, array);
4156         enqueue_task(current, target);
4157
4158         /*
4159          * Since we are going to call schedule() anyway, there's
4160          * no need to preempt or enable interrupts:
4161          */
4162         __release(rq->lock);
4163         _raw_spin_unlock(&rq->lock);
4164         preempt_enable_no_resched();
4165
4166         schedule();
4167
4168         return 0;
4169 }
4170
4171 void __sched __cond_resched(void)
4172 {
4173         set_current_state(TASK_RUNNING);
4174         schedule();
4175 }
4176
4177 EXPORT_SYMBOL(__cond_resched);
4178
4179 /**
4180  * yield - yield the current processor to other threads.
4181  *
4182  * this is a shortcut for kernel-space yielding - it marks the
4183  * thread runnable and calls sys_sched_yield().
4184  */
4185 void __sched yield(void)
4186 {
4187         set_current_state(TASK_RUNNING);
4188         sys_sched_yield();
4189 }
4190
4191 EXPORT_SYMBOL(yield);
4192
4193 /*
4194  * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
4195  * that process accounting knows that this is a task in IO wait state.
4196  *
4197  * But don't do that if it is a deliberate, throttling IO wait (this task
4198  * has set its backing_dev_info: the queue against which it should throttle)
4199  */
4200 void __sched io_schedule(void)
4201 {
4202         struct runqueue *rq = this_rq();
4203
4204         atomic_inc(&rq->nr_iowait);
4205         schedule();
4206         atomic_dec(&rq->nr_iowait);
4207 }
4208
4209 EXPORT_SYMBOL(io_schedule);
4210
4211 long __sched io_schedule_timeout(long timeout)
4212 {
4213         struct runqueue *rq = this_rq();
4214         long ret;
4215
4216         atomic_inc(&rq->nr_iowait);
4217         ret = schedule_timeout(timeout);
4218         atomic_dec(&rq->nr_iowait);
4219         return ret;
4220 }
4221
4222 /**
4223  * sys_sched_get_priority_max - return maximum RT priority.
4224  * @policy: scheduling class.
4225  *
4226  * this syscall returns the maximum rt_priority that can be used
4227  * by a given scheduling class.
4228  */
4229 asmlinkage long sys_sched_get_priority_max(int policy)
4230 {
4231         int ret = -EINVAL;
4232
4233         switch (policy) {
4234         case SCHED_FIFO:
4235         case SCHED_RR:
4236                 ret = MAX_USER_RT_PRIO-1;
4237                 break;
4238         case SCHED_NORMAL:
4239                 ret = 0;
4240                 break;
4241         }
4242         return ret;
4243 }
4244
4245 /**
4246  * sys_sched_get_priority_min - return minimum RT priority.
4247  * @policy: scheduling class.
4248  *
4249  * this syscall returns the minimum rt_priority that can be used
4250  * by a given scheduling class.
4251  */
4252 asmlinkage long sys_sched_get_priority_min(int policy)
4253 {
4254         int ret = -EINVAL;
4255
4256         switch (policy) {
4257         case SCHED_FIFO:
4258         case SCHED_RR:
4259                 ret = 1;
4260                 break;
4261         case SCHED_NORMAL:
4262                 ret = 0;
4263         }
4264         return ret;
4265 }
4266
4267 /**
4268  * sys_sched_rr_get_interval - return the default timeslice of a process.
4269  * @pid: pid of the process.
4270  * @interval: userspace pointer to the timeslice value.
4271  *
4272  * this syscall writes the default timeslice value of a given process
4273  * into the user-space timespec buffer. A value of '0' means infinity.
4274  */
4275 asmlinkage
4276 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
4277 {
4278         int retval = -EINVAL;
4279         struct timespec t;
4280         task_t *p;
4281
4282         if (pid < 0)
4283                 goto out_nounlock;
4284
4285         retval = -ESRCH;
4286         read_lock(&tasklist_lock);
4287         p = find_process_by_pid(pid);
4288         if (!p)
4289                 goto out_unlock;
4290
4291         retval = security_task_getscheduler(p);
4292         if (retval)
4293                 goto out_unlock;
4294
4295         jiffies_to_timespec(p->policy & SCHED_FIFO ?
4296                                 0 : task_timeslice(p), &t);
4297         read_unlock(&tasklist_lock);
4298         retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4299 out_nounlock:
4300         return retval;
4301 out_unlock:
4302         read_unlock(&tasklist_lock);
4303         return retval;
4304 }
4305
4306 static inline struct task_struct *eldest_child(struct task_struct *p)
4307 {
4308         if (list_empty(&p->children)) return NULL;
4309         return list_entry(p->children.next,struct task_struct,sibling);
4310 }
4311
4312 static inline struct task_struct *older_sibling(struct task_struct *p)
4313 {
4314         if (p->sibling.prev==&p->parent->children) return NULL;
4315         return list_entry(p->sibling.prev,struct task_struct,sibling);
4316 }
4317
4318 static inline struct task_struct *younger_sibling(struct task_struct *p)
4319 {
4320         if (p->sibling.next==&p->parent->children) return NULL;
4321         return list_entry(p->sibling.next,struct task_struct,sibling);
4322 }
4323
4324 static void show_task(task_t * p)
4325 {
4326         task_t *relative;
4327         unsigned state;
4328         unsigned long free = 0;
4329         static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
4330
4331         printk("%-13.13s ", p->comm);
4332         state = p->state ? __ffs(p->state) + 1 : 0;
4333         if (state < ARRAY_SIZE(stat_nam))
4334                 printk(stat_nam[state]);
4335         else
4336                 printk("?");
4337 #if (BITS_PER_LONG == 32)
4338         if (state == TASK_RUNNING)
4339                 printk(" running ");
4340         else
4341                 printk(" %08lX ", thread_saved_pc(p));
4342 #else
4343         if (state == TASK_RUNNING)
4344                 printk("  running task   ");
4345         else
4346                 printk(" %016lx ", thread_saved_pc(p));
4347 #endif
4348 #ifdef CONFIG_DEBUG_STACK_USAGE
4349         {
4350                 unsigned long * n = (unsigned long *) (p->thread_info+1);
4351                 while (!*n)
4352                         n++;
4353                 free = (unsigned long) n - (unsigned long)(p->thread_info+1);
4354         }
4355 #endif
4356         printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
4357         if ((relative = eldest_child(p)))
4358                 printk("%5d ", relative->pid);
4359         else
4360                 printk("      ");
4361         if ((relative = younger_sibling(p)))
4362                 printk("%7d", relative->pid);
4363         else
4364                 printk("       ");
4365         if ((relative = older_sibling(p)))
4366                 printk(" %5d", relative->pid);
4367         else
4368                 printk("      ");
4369         if (!p->mm)
4370                 printk(" (L-TLB)\n");
4371         else
4372                 printk(" (NOTLB)\n");
4373
4374         if (state != TASK_RUNNING)
4375                 show_stack(p, NULL);
4376 }
4377
4378 void show_state(void)
4379 {
4380         task_t *g, *p;
4381
4382 #if (BITS_PER_LONG == 32)
4383         printk("\n"
4384                "                                               sibling\n");
4385         printk("  task             PC      pid father child younger older\n");
4386 #else
4387         printk("\n"
4388                "                                                       sibling\n");
4389         printk("  task                 PC          pid father child younger older\n");
4390 #endif
4391         read_lock(&tasklist_lock);
4392         do_each_thread(g, p) {
4393                 /*
4394                  * reset the NMI-timeout; listing all tasks on a slow
4395                  * console might take a lot of time:
4396                  */
4397                 touch_nmi_watchdog();
4398                 show_task(p);
4399         } while_each_thread(g, p);
4400
4401         read_unlock(&tasklist_lock);
4402 }
4403
4404 EXPORT_SYMBOL_GPL(show_state);
4405
4406 void __devinit init_idle(task_t *idle, int cpu)
4407 {
4408         runqueue_t *rq = cpu_rq(cpu);
4409         unsigned long flags;
4410
4411         idle->sleep_avg = 0;
4412         idle->interactive_credit = 0;
4413         idle->array = NULL;
4414         idle->prio = MAX_PRIO;
4415         idle->state = TASK_RUNNING;
4416         set_task_cpu(idle, cpu);
4417
4418 #ifdef CONFIG_CKRM_CPU_SCHEDULE
4419         cpu_demand_event(&(idle->demand_stat),CPU_DEMAND_INIT,0);
4420         idle->cpu_class = get_default_cpu_class();
4421         idle->array = NULL;
4422 #endif
4423
4424         spin_lock_irqsave(&rq->lock, flags);
4425         rq->curr = rq->idle = idle;
4426         set_tsk_need_resched(idle);
4427         spin_unlock_irqrestore(&rq->lock, flags);
4428
4429         /* Set the preempt count _outside_ the spinlocks! */
4430 #ifdef CONFIG_PREEMPT
4431         idle->thread_info->preempt_count = (idle->lock_depth >= 0);
4432 #else
4433         idle->thread_info->preempt_count = 0;
4434 #endif
4435 }
4436
4437 /*
4438  * In a system that switches off the HZ timer nohz_cpu_mask
4439  * indicates which cpus entered this state. This is used
4440  * in the rcu update to wait only for active cpus. For system
4441  * in the rcu update to wait only for active cpus. For systems
4442  * always be CPU_MASK_NONE.
4443  */
4444 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
4445
4446 #ifdef CONFIG_SMP
4447 /*
4448  * This is how migration works:
4449  *
4450  * 1) we queue a migration_req_t structure in the source CPU's
4451  *    runqueue and wake up that CPU's migration thread.
4452  * 2) we down() the locked semaphore => thread blocks.
4453  * 3) migration thread wakes up (implicitly it forces the migrated
4454  *    thread off the CPU)
4455  * 4) it gets the migration request and checks whether the migrated
4456  *    task is still in the wrong runqueue.
4457  * 5) if it's in the wrong runqueue then the migration thread removes
4458  *    it and puts it into the right queue.
4459  * 6) migration thread up()s the semaphore.
4460  * 7) we wake up and the migration is done.
4461  */
4462
4463 /*
4464  * Change a given task's CPU affinity. Migrate the thread to a
4465  * proper CPU and schedule it away if the CPU it's executing on
4466  * is removed from the allowed bitmask.
4467  *
4468  * NOTE: the caller must have a valid reference to the task, the
4469  * task must not exit() & deallocate itself prematurely.  The
4470  * call is not atomic; no spinlocks may be held.
4471  */
4472 int set_cpus_allowed(task_t *p, cpumask_t new_mask)
4473 {
4474         unsigned long flags;
4475         int ret = 0;
4476         migration_req_t req;
4477         runqueue_t *rq;
4478
4479         rq = task_rq_lock(p, &flags);
4480         if (!cpus_intersects(new_mask, cpu_online_map)) {
4481                 ret = -EINVAL;
4482                 goto out;
4483         }
4484
4485         p->cpus_allowed = new_mask;
4486         /* Can the task run on the task's current CPU? If so, we're done */
4487         if (cpu_isset(task_cpu(p), new_mask))
4488                 goto out;
4489
4490         if (migrate_task(p, any_online_cpu(new_mask), &req)) {
4491                 /* Need help from migration thread: drop lock and wait. */
4492                 task_rq_unlock(rq, &flags);
4493                 wake_up_process(rq->migration_thread);
4494                 wait_for_completion(&req.done);
4495                 tlb_migrate_finish(p->mm);
4496                 return 0;
4497         }
4498 out:
4499         task_rq_unlock(rq, &flags);
4500         return ret;
4501 }
4502
4503 EXPORT_SYMBOL_GPL(set_cpus_allowed);
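/*
 * In-kernel usage sketch for set_cpus_allowed() (illustrative only; the
 * worker function and do_work_on() are made-up names):
 *
 *      static int my_percpu_worker(void *arg)
 *      {
 *              int cpu = (long)arg;
 *
 *              // Pin ourselves to one CPU; this may sleep while the
 *              // migration thread moves us.
 *              if (set_cpus_allowed(current, cpumask_of_cpu(cpu)) < 0)
 *                      return -EINVAL;
 *              return do_work_on(cpu);
 *      }
 *
 * If the new mask excludes the CPU the task currently runs on, the
 * request is queued to that CPU's migration thread and the caller
 * sleeps in wait_for_completion() until the move has happened, so this
 * must not be called from atomic context.
 */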
4504
4505 /*
4506  * Move (not current) task off this cpu, onto dest cpu.  We're doing
4507  * this because either it can't run here any more (set_cpus_allowed()
4508  * away from this CPU, or CPU going down), or because we're
4509  * attempting to rebalance this task on exec (sched_exec).
4510  *
4511  * So we race with normal scheduler movements, but that's OK, as long
4512  * as the task is no longer on this CPU.
4513  */
4514 static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4515 {
4516         runqueue_t *rq_dest, *rq_src;
4517
4518         if (unlikely(cpu_is_offline(dest_cpu)))
4519                 return;
4520
4521         rq_src = cpu_rq(src_cpu);
4522         rq_dest = cpu_rq(dest_cpu);
4523
4524         double_rq_lock(rq_src, rq_dest);
4525         /* Already moved. */
4526         if (task_cpu(p) != src_cpu)
4527                 goto out;
4528         /* Affinity changed (again). */
4529         if (!cpu_isset(dest_cpu, p->cpus_allowed))
4530                 goto out;
4531
4532         if (p->array) {
4533                 /*
4534                  * Sync timestamp with rq_dest's before activating.
4535                  * The same thing could be achieved by doing this step
4536                  * afterwards, and pretending it was a local activate.
4537                  * This way is cleaner and logically correct.
4538                  */
4539                 p->timestamp = p->timestamp - rq_src->timestamp_last_tick
4540                                 + rq_dest->timestamp_last_tick;
4541                 deactivate_task(p, rq_src);
4542                 set_task_cpu(p, dest_cpu);
4543                 activate_task(p, rq_dest, 0);
4544                 if (TASK_PREEMPTS_CURR(p, rq_dest))
4545                         resched_task(rq_dest->curr);
4546         } else
4547                 set_task_cpu(p, dest_cpu);
4548
4549 out:
4550         double_rq_unlock(rq_src, rq_dest);
4551 }
4552
4553 /*
4554  * migration_thread - this is a highprio system thread that performs
4555  * thread migration by bumping thread off CPU then 'pushing' onto
4556  * another runqueue.
4557  */
4558 static int migration_thread(void * data)
4559 {
4560         runqueue_t *rq;
4561         int cpu = (long)data;
4562
4563         rq = cpu_rq(cpu);
4564         BUG_ON(rq->migration_thread != current);
4565
4566         set_current_state(TASK_INTERRUPTIBLE);
4567         while (!kthread_should_stop()) {
4568                 struct list_head *head;
4569                 migration_req_t *req;
4570
4571                 if (current->flags & PF_FREEZE)
4572                         refrigerator(PF_FREEZE);
4573
4574                 spin_lock_irq(&rq->lock);
4575
4576                 if (cpu_is_offline(cpu)) {
4577                         spin_unlock_irq(&rq->lock);
4578                         goto wait_to_die;
4579                 }
4580
4581                 if (rq->active_balance) {
4582                         active_load_balance(rq, cpu);
4583                         rq->active_balance = 0;
4584                 }
4585
4586                 head = &rq->migration_queue;
4587
4588                 if (list_empty(head)) {
4589                         spin_unlock_irq(&rq->lock);
4590                         schedule();
4591                         set_current_state(TASK_INTERRUPTIBLE);
4592                         continue;
4593                 }
4594                 req = list_entry(head->next, migration_req_t, list);
4595                 list_del_init(head->next);
4596
4597                 if (req->type == REQ_MOVE_TASK) {
4598                         spin_unlock(&rq->lock);
4599                         __migrate_task(req->task, smp_processor_id(),
4600                                         req->dest_cpu);
4601                         local_irq_enable();
4602                 } else if (req->type == REQ_SET_DOMAIN) {
4603                         rq->sd = req->sd;
4604                         spin_unlock_irq(&rq->lock);
4605                 } else {
4606                         spin_unlock_irq(&rq->lock);
4607                         WARN_ON(1);
4608                 }
4609
4610                 complete(&req->done);
4611         }
4612         __set_current_state(TASK_RUNNING);
4613         return 0;
4614
4615 wait_to_die:
4616         /* Wait for kthread_stop */
4617         set_current_state(TASK_INTERRUPTIBLE);
4618         while (!kthread_should_stop()) {
4619                 schedule();
4620                 set_current_state(TASK_INTERRUPTIBLE);
4621         }
4622         __set_current_state(TASK_RUNNING);
4623         return 0;
4624 }
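
/*
 * Illustrative sketch: how a caller can hand a task to the migration
 * thread above.  The function name is invented for the example; in this
 * file the real enqueueing is done by the CPU-affinity/migration paths,
 * which follow the same pattern.
 */
static void example_queue_migration(task_t *p, int dest_cpu)
{
        migration_req_t req;
        runqueue_t *rq;
        unsigned long flags;

        /* Describe the move and prepare a completion to wait on. */
        req.type = REQ_MOVE_TASK;
        req.task = p;
        req.dest_cpu = dest_cpu;
        init_completion(&req.done);

        /* Queue the request on the task's current runqueue. */
        rq = task_rq_lock(p, &flags);
        list_add(&req.list, &rq->migration_queue);
        task_rq_unlock(rq, &flags);

        /* Kick the per-CPU migration thread and wait until it is done. */
        wake_up_process(rq->migration_thread);
        wait_for_completion(&req.done);
}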
4625
4626 #ifdef CONFIG_HOTPLUG_CPU
4627 /* Figure out where a task on a dead CPU should go; use force if necessary. */
4628 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
4629 {
4630         int dest_cpu;
4631         cpumask_t mask;
4632
4633         /* On same node? */
4634         mask = node_to_cpumask(cpu_to_node(dead_cpu));
4635         cpus_and(mask, mask, tsk->cpus_allowed);
4636         dest_cpu = any_online_cpu(mask);
4637
4638         /* On any allowed CPU? */
4639         if (dest_cpu == NR_CPUS)
4640                 dest_cpu = any_online_cpu(tsk->cpus_allowed);
4641
4642         /* No more Mr. Nice Guy. */
4643         if (dest_cpu == NR_CPUS) {
4644                 cpus_setall(tsk->cpus_allowed);
4645                 dest_cpu = any_online_cpu(tsk->cpus_allowed);
4646
4647                 /*
4648                  * Don't tell them about moving exiting tasks or
4649                  * kernel threads (both mm NULL), since they never
4650                  * leave kernel.
4651                  */
4652                 if (tsk->mm && printk_ratelimit())
4653                         printk(KERN_INFO "process %d (%s) no "
4654                                "longer affine to cpu%d\n",
4655                                tsk->pid, tsk->comm, dead_cpu);
4656         }
4657         __migrate_task(tsk, dead_cpu, dest_cpu);
4658 }
4659
4660 /*
4661  * While a dead CPU has no uninterruptible tasks queued at this point,
4662  * it might still have a nonzero ->nr_uninterruptible counter, because
4663  * for performance reasons the counter is not strictly tracking tasks to
4664  * their home CPUs. So we just add the counter to another CPU's counter,
4665  * to keep the global sum constant after CPU-down:
4666  */
4667 static void migrate_nr_uninterruptible(runqueue_t *rq_src)
4668 {
4669         runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
4670         unsigned long flags;
4671
4672         local_irq_save(flags);
4673         double_rq_lock(rq_src, rq_dest);
4674         rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
4675         rq_src->nr_uninterruptible = 0;
4676         double_rq_unlock(rq_src, rq_dest);
4677         local_irq_restore(flags);
4678 }
4679
4680 /* Run through task list and migrate tasks from the dead cpu. */
4681 static void migrate_live_tasks(int src_cpu)
4682 {
4683         struct task_struct *tsk, *t;
4684
4685         write_lock_irq(&tasklist_lock);
4686
4687         do_each_thread(t, tsk) {
4688                 if (tsk == current)
4689                         continue;
4690
4691                 if (task_cpu(tsk) == src_cpu)
4692                         move_task_off_dead_cpu(src_cpu, tsk);
4693         } while_each_thread(t, tsk);
4694
4695         write_unlock_irq(&tasklist_lock);
4696 }
4697
4698 /* Schedules the idle task to be the next runnable task on the current CPU.
4699  * It does so by boosting its priority to the highest possible and adding it
4700  * to the _front_ of the runqueue. Used by the CPU offline code.
4701  */
4702 void sched_idle_next(void)
4703 {
4704         int cpu = smp_processor_id();
4705         runqueue_t *rq = this_rq();
4706         struct task_struct *p = rq->idle;
4707         unsigned long flags;
4708
4709         /* cpu has to be offline */
4710         BUG_ON(cpu_online(cpu));
4711
4712         /* Strictly not necessary, since the rest of the CPUs are stopped by now
4713          * and interrupts are disabled on the current cpu.
4714          */
4715         spin_lock_irqsave(&rq->lock, flags);
4716
4717         __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
4718         /* Add the idle task to the _front_ of its priority queue */
4719         __activate_idle_task(p, rq);
4720
4721         spin_unlock_irqrestore(&rq->lock, flags);
4722 }
4723
4724 static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
4725 {
4726         struct runqueue *rq = cpu_rq(dead_cpu);
4727
4728         /* Must be exiting, otherwise would be on tasklist. */
4729         BUG_ON(tsk->exit_state != EXIT_ZOMBIE && tsk->exit_state != EXIT_DEAD);
4730
4731         /* Cannot have done final schedule yet: would have vanished. */
4732         BUG_ON(tsk->flags & PF_DEAD);
4733
4734         get_task_struct(tsk);
4735
4736         /*
4737          * Drop lock around migration; if someone else moves it,
4738          * that's OK.  No task can be added to this CPU, so iteration is
4739          * fine.
4740          */
4741         spin_unlock_irq(&rq->lock);
4742         move_task_off_dead_cpu(dead_cpu, tsk);
4743         spin_lock_irq(&rq->lock);
4744
4745         put_task_struct(tsk);
4746 }
4747
4748 /* release_task() removes task from tasklist, so we won't find dead tasks. */
4749 static void migrate_dead_tasks(unsigned int dead_cpu)
4750 {
4751         unsigned arr, i;
4752         struct runqueue *rq = cpu_rq(dead_cpu);
4753
4754         for (arr = 0; arr < 2; arr++) {
4755                 for (i = 0; i < MAX_PRIO; i++) {
4756                         struct list_head *list = &rq->arrays[arr].queue[i];
4757                         while (!list_empty(list))
4758                                 migrate_dead(dead_cpu,
4759                                              list_entry(list->next, task_t,
4760                                                         run_list));
4761                 }
4762         }
4763 }
4764 #endif /* CONFIG_HOTPLUG_CPU */
4765
4766 /*
4767  * migration_call - callback that gets triggered when a CPU is added.
4768  * Here we can start up the necessary migration thread for the new CPU.
4769  */
4770 static int migration_call(struct notifier_block *nfb, unsigned long action,
4771                           void *hcpu)
4772 {
4773         int cpu = (long)hcpu;
4774         struct task_struct *p;
4775         struct runqueue *rq;
4776         unsigned long flags;
4777
4778         switch (action) {
4779         case CPU_UP_PREPARE:
4780                 p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
4781                 if (IS_ERR(p))
4782                         return NOTIFY_BAD;
4783                 p->flags |= PF_NOFREEZE;
4784                 kthread_bind(p, cpu);
4785                 /* Must be high prio: stop_machine expects to yield to it. */
4786                 rq = task_rq_lock(p, &flags);
4787                 __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
4788                 task_rq_unlock(rq, &flags);
4789                 cpu_rq(cpu)->migration_thread = p;
4790                 break;
4791         case CPU_ONLINE:
4792                 /* Strictly unnecessary, as the first user will wake it. */
4793                 wake_up_process(cpu_rq(cpu)->migration_thread);
4794                 break;
4795 #ifdef CONFIG_HOTPLUG_CPU
4796         case CPU_UP_CANCELED:
4797                 /* Unbind it from offline cpu so it can run.  Fall thru. */
4798                 kthread_bind(cpu_rq(cpu)->migration_thread,smp_processor_id());
4799                 kthread_stop(cpu_rq(cpu)->migration_thread);
4800                 cpu_rq(cpu)->migration_thread = NULL;
4801                 break;
4802         case CPU_DEAD:
4803                 migrate_live_tasks(cpu);
4804                 rq = cpu_rq(cpu);
4805                 kthread_stop(rq->migration_thread);
4806                 rq->migration_thread = NULL;
4807                 /* Idle task back to normal (off runqueue, low prio) */
4808                 rq = task_rq_lock(rq->idle, &flags);
4809                 deactivate_task(rq->idle, rq);
4810                 rq->idle->static_prio = MAX_PRIO;
4811                 __setscheduler(rq->idle, SCHED_NORMAL, 0);
4812                 migrate_dead_tasks(cpu);
4813                 task_rq_unlock(rq, &flags);
4814                 migrate_nr_uninterruptible(rq);
4815                 BUG_ON(rq->nr_running != 0);
4816
4817                 /* No need to migrate the tasks: it was best-effort if
4818                  * they didn't do lock_cpu_hotplug().  Just wake up
4819                  * the requestors. */
4820                 spin_lock_irq(&rq->lock);
4821                 while (!list_empty(&rq->migration_queue)) {
4822                         migration_req_t *req;
4823                         req = list_entry(rq->migration_queue.next,
4824                                          migration_req_t, list);
4825                         BUG_ON(req->type != REQ_MOVE_TASK);
4826                         list_del_init(&req->list);
4827                         complete(&req->done);
4828                 }
4829                 spin_unlock_irq(&rq->lock);
4830                 break;
4831 #endif
4832         }
4833         return NOTIFY_OK;
4834 }
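
/*
 * Illustrative skeleton of the notifier pattern used by migration_call()
 * above; the names are invented for the example.  A callback receives the
 * hotplug action plus the CPU number (passed as a pointer-sized integer)
 * and returns NOTIFY_OK, NOTIFY_BAD or NOTIFY_DONE.
 */
static int example_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                /* set up per-cpu state before the CPU starts running */
                printk(KERN_INFO "cpu%d coming up\n", cpu);
                break;
        case CPU_ONLINE:
                /* the CPU is now online; start using it */
                printk(KERN_INFO "cpu%d online\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
        .notifier_call = example_cpu_callback,
};

/* registration, typically from an __init function:
 *      register_cpu_notifier(&example_cpu_notifier);
 */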
4835
4836 /* Register at highest priority so that task migration (migrate_all_tasks)
4837  * happens before everything else.
4838  */
4839 static struct notifier_block __devinitdata migration_notifier = {
4840         .notifier_call = migration_call,
4841         .priority = 10
4842 };
4843
4844 int __init migration_init(void)
4845 {
4846         void *cpu = (void *)(long)smp_processor_id();
4847         /* Start one for boot CPU. */
4848         migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
4849         migration_call(&migration_notifier, CPU_ONLINE, cpu);
4850         register_cpu_notifier(&migration_notifier);
4851         return 0;
4852 }
4853 #endif
4854
4855 #ifdef CONFIG_SMP
4856 /*
4857  * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
4858  * hold the hotplug lock.
4859  */
4860 void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
4861 {
4862         migration_req_t req;
4863         unsigned long flags;
4864         runqueue_t *rq = cpu_rq(cpu);
4865         int local = 1;
4866
4867         spin_lock_irqsave(&rq->lock, flags);
4868
4869         if (cpu == smp_processor_id() || !cpu_online(cpu)) {
4870                 rq->sd = sd;
4871         } else {
4872                 init_completion(&req.done);
4873                 req.type = REQ_SET_DOMAIN;
4874                 req.sd = sd;
4875                 list_add(&req.list, &rq->migration_queue);
4876                 local = 0;
4877         }
4878
4879         spin_unlock_irqrestore(&rq->lock, flags);
4880
4881         if (!local) {
4882                 wake_up_process(rq->migration_thread);
4883                 wait_for_completion(&req.done);
4884         }
4885 }
4886
4887 /* cpus with isolated domains */
4888 cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
4889
4890 /* Setup the mask of cpus configured for isolated domains */
4891 static int __init isolated_cpu_setup(char *str)
4892 {
4893         int ints[NR_CPUS], i;
4894
4895         str = get_options(str, ARRAY_SIZE(ints), ints);
4896         cpus_clear(cpu_isolated_map);
4897         for (i = 1; i <= ints[0]; i++)
4898                 cpu_set(ints[i], cpu_isolated_map);
4899         return 1;
4900 }
4901
4902 __setup ("isolcpus=", isolated_cpu_setup);
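
/*
 * Example: booting with "isolcpus=2,3" removes CPUs 2 and 3 from the
 * default map used below, so they are never attached to a balancing
 * domain and only run tasks explicitly bound to them; the CPU numbers
 * are just an illustration.
 */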
4903
4904 /*
4905  * init_sched_build_groups takes an array of groups, the cpumask we wish
4906  * to span, and a pointer to a function which identifies what group a CPU
4907  * belongs to. The return value of group_fn must be a valid index into the
4908  * groups[] array, and must be >= 0 and < NR_CPUS (because we keep track of
4909  * groups covered with a cpumask_t).
4910  *
4911  * init_sched_build_groups will build a circular linked list of the groups
4912  * covered by the given span, and will set each group's ->cpumask correctly,
4913  * and ->cpu_power to 0.
4914  */
4915 void __devinit init_sched_build_groups(struct sched_group groups[],
4916                         cpumask_t span, int (*group_fn)(int cpu))
4917 {
4918         struct sched_group *first = NULL, *last = NULL;
4919         cpumask_t covered = CPU_MASK_NONE;
4920         int i;
4921
4922         for_each_cpu_mask(i, span) {
4923                 int group = group_fn(i);
4924                 struct sched_group *sg = &groups[group];
4925                 int j;
4926
4927                 if (cpu_isset(i, covered))
4928                         continue;
4929
4930                 sg->cpumask = CPU_MASK_NONE;
4931                 sg->cpu_power = 0;
4932
4933                 for_each_cpu_mask(j, span) {
4934                         if (group_fn(j) != group)
4935                                 continue;
4936
4937                         cpu_set(j, covered);
4938                         cpu_set(j, sg->cpumask);
4939                 }
4940                 if (!first)
4941                         first = sg;
4942                 if (last)
4943                         last->next = sg;
4944                 last = sg;
4945         }
4946         last->next = first;
4947 }
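
/*
 * Illustrative sketch: a trivial group_fn and how it would be handed to
 * init_sched_build_groups().  The names and the pair-wise grouping are
 * invented for the example.
 */
static struct sched_group example_groups[NR_CPUS];

static int example_cpu_to_group(int cpu)
{
        return cpu / 2;         /* CPUs 0,1 -> group 0; CPUs 2,3 -> group 1; ... */
}

/*
 * Then, with the hotplug lock held:
 *
 *      init_sched_build_groups(example_groups, cpu_online_map,
 *                                      &example_cpu_to_group);
 *
 * links the groups covering cpu_online_map into a circular list, sets
 * each group's ->cpumask and zeroes ->cpu_power for the caller to fill in.
 */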
4948
4949
4950 #ifdef ARCH_HAS_SCHED_DOMAIN
4951 extern void __devinit arch_init_sched_domains(void);
4952 extern void __devinit arch_destroy_sched_domains(void);
4953 #else
4954 #ifdef CONFIG_SCHED_SMT
4955 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
4956 static struct sched_group sched_group_cpus[NR_CPUS];
4957 static int __devinit cpu_to_cpu_group(int cpu)
4958 {
4959         return cpu;
4960 }
4961 #endif
4962
4963 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
4964 static struct sched_group sched_group_phys[NR_CPUS];
4965 static int __devinit cpu_to_phys_group(int cpu)
4966 {
4967 #ifdef CONFIG_SCHED_SMT
4968         return first_cpu(cpu_sibling_map[cpu]);
4969 #else
4970         return cpu;
4971 #endif
4972 }
4973
4974 #ifdef CONFIG_NUMA
4975
4976 static DEFINE_PER_CPU(struct sched_domain, node_domains);
4977 static struct sched_group sched_group_nodes[MAX_NUMNODES];
4978 static int __devinit cpu_to_node_group(int cpu)
4979 {
4980         return cpu_to_node(cpu);
4981 }
4982 #endif
4983
4984 #if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
4985 /*
4986  * The domains setup code relies on siblings not spanning
4987  * multiple nodes. Make sure the architecture has a proper
4988  * siblings map:
4989  */
4990 static void check_sibling_maps(void)
4991 {
4992         int i, j;
4993
4994         for_each_online_cpu(i) {
4995                 for_each_cpu_mask(j, cpu_sibling_map[i]) {
4996                         if (cpu_to_node(i) != cpu_to_node(j)) {
4997                                 printk(KERN_INFO "warning: CPU %d siblings map "
4998                                         "to different node - isolating "
4999                                         "them.\n", i);
5000                                 cpu_sibling_map[i] = cpumask_of_cpu(i);
5001                                 break;
5002                         }
5003                 }
5004         }
5005 }
5006 #endif
5007
5008 /*
5009  * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
5010  */
5011 static void __devinit arch_init_sched_domains(void)
5012 {
5013         int i;
5014         cpumask_t cpu_default_map;
5015
5016 #if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
5017         check_sibling_maps();
5018 #endif
5019         /*
5020          * Setup mask for cpus without special case scheduling requirements.
5021          * For now this just excludes isolated cpus, but could be used to
5022          * exclude other special cases in the future.
5023          */
5024         cpus_complement(cpu_default_map, cpu_isolated_map);
5025         cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
5026
5027         /*
5028          * Set up domains. Isolated domains just stay on the dummy domain.
5029          */
5030         for_each_cpu_mask(i, cpu_default_map) {
5031                 int group;
5032                 struct sched_domain *sd = NULL, *p;
5033                 cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
5034
5035                 cpus_and(nodemask, nodemask, cpu_default_map);
5036
5037 #ifdef CONFIG_NUMA
5038                 sd = &per_cpu(node_domains, i);
5039                 group = cpu_to_node_group(i);
5040                 *sd = SD_NODE_INIT;
5041                 sd->span = cpu_default_map;
5042                 sd->groups = &sched_group_nodes[group];
5043 #endif
5044
5045                 p = sd;
5046                 sd = &per_cpu(phys_domains, i);
5047                 group = cpu_to_phys_group(i);
5048                 *sd = SD_CPU_INIT;
5049                 sd->span = nodemask;
5050                 sd->parent = p;
5051                 sd->groups = &sched_group_phys[group];
5052
5053 #ifdef CONFIG_SCHED_SMT
5054                 p = sd;
5055                 sd = &per_cpu(cpu_domains, i);
5056                 group = cpu_to_cpu_group(i);
5057                 *sd = SD_SIBLING_INIT;
5058                 sd->span = cpu_sibling_map[i];
5059                 cpus_and(sd->span, sd->span, cpu_default_map);
5060                 sd->parent = p;
5061                 sd->groups = &sched_group_cpus[group];
5062 #endif
5063         }
5064
5065 #ifdef CONFIG_SCHED_SMT
5066         /* Set up CPU (sibling) groups */
5067         for_each_online_cpu(i) {
5068                 cpumask_t this_sibling_map = cpu_sibling_map[i];
5069                 cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
5070                 if (i != first_cpu(this_sibling_map))
5071                         continue;
5072
5073                 init_sched_build_groups(sched_group_cpus, this_sibling_map,
5074                                                 &cpu_to_cpu_group);
5075         }
5076 #endif
5077
5078         /* Set up physical groups */
5079         for (i = 0; i < MAX_NUMNODES; i++) {
5080                 cpumask_t nodemask = node_to_cpumask(i);
5081
5082                 cpus_and(nodemask, nodemask, cpu_default_map);
5083                 if (cpus_empty(nodemask))
5084                         continue;
5085
5086                 init_sched_build_groups(sched_group_phys, nodemask,
5087                                                 &cpu_to_phys_group);
5088         }
5089
5090
5091 #ifdef CONFIG_NUMA
5092         /* Set up node groups */
5093         init_sched_build_groups(sched_group_nodes, cpu_default_map,
5094                                         &cpu_to_node_group);
5095 #endif
5096
5097
5098         /* Calculate CPU power for physical packages and nodes */
5099         for_each_cpu_mask(i, cpu_default_map) {
5100                 int power;
5101                 struct sched_domain *sd;
5102 #ifdef CONFIG_SCHED_SMT
5103                 sd = &per_cpu(cpu_domains, i);
5104                 power = SCHED_LOAD_SCALE;
5105                 sd->groups->cpu_power = power;
5106 #endif
5107
5108                 sd = &per_cpu(phys_domains, i);
5109                 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
5110                                 (cpus_weight(sd->groups->cpumask)-1) / 10;
5111                 sd->groups->cpu_power = power;
5112
5113
5114 #ifdef CONFIG_NUMA
5115                 if (i == first_cpu(sd->groups->cpumask)) {
5116                         /* Only add "power" once for each physical package. */
5117                         sd = &per_cpu(node_domains, i);
5118                         sd->groups->cpu_power += power;
5119                 }
5120 #endif
5121         }
5122
5123         /* Attach the domains */
5124         for_each_online_cpu(i) {
5125                 struct sched_domain *sd;
5126 #ifdef CONFIG_SCHED_SMT
5127                 sd = &per_cpu(cpu_domains, i);
5128 #else
5129                 sd = &per_cpu(phys_domains, i);
5130 #endif
5131                 cpu_attach_domain(sd, i);
5132         }
5134 }
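
/*
 * For illustration: on a hypothetical machine with CONFIG_SCHED_SMT and
 * CONFIG_NUMA both enabled, the code above leaves each CPU i with a
 * three-level hierarchy, built bottom-up:
 *
 *      per_cpu(cpu_domains, i)             span: i's sibling (hyperthread) set
 *        parent: per_cpu(phys_domains, i)  span: the CPUs of i's node
 *          parent: per_cpu(node_domains, i)  span: all of cpu_default_map
 *
 * cpu_attach_domain() publishes only the lowest level as rq->sd; the
 * balancing code reaches the higher levels through ->parent.
 */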
5135
5136 #ifdef CONFIG_HOTPLUG_CPU
5137 static void __devinit arch_destroy_sched_domains(void)
5138 {
5139         /* Do nothing: everything is statically allocated. */
5140 }
5141 #endif
5142
5143 #endif /* ARCH_HAS_SCHED_DOMAIN */
5144
5145 #define SCHED_DOMAIN_DEBUG
5146 #ifdef SCHED_DOMAIN_DEBUG
5147 static void sched_domain_debug(void)
5148 {
5149         int i;
5150
5151         for_each_online_cpu(i) {
5152                 runqueue_t *rq = cpu_rq(i);
5153                 struct sched_domain *sd;
5154                 int level = 0;
5155
5156                 sd = rq->sd;
5157
5158                 printk(KERN_DEBUG "CPU%d:\n", i);
5159
5160                 do {
5161                         int j;
5162                         char str[NR_CPUS];
5163                         struct sched_group *group = sd->groups;
5164                         cpumask_t groupmask;
5165
5166                         cpumask_scnprintf(str, NR_CPUS, sd->span);
5167                         cpus_clear(groupmask);
5168
5169                         printk(KERN_DEBUG);
5170                         for (j = 0; j < level + 1; j++)
5171                                 printk(" ");
5172                         printk("domain %d: ", level);
5173
5174                         if (!(sd->flags & SD_LOAD_BALANCE)) {
5175                                 printk("does not load-balance");
5176                                 if (sd->parent)
5177                                         printk(" ERROR !SD_LOAD_BALANCE domain has parent");
5178                                 printk("\n");
5179                                 break;
5180                         }
5181
5182                         printk("span %s\n", str);
5183
5184                         if (!cpu_isset(i, sd->span))
5185                                 printk(KERN_DEBUG "ERROR domain->span does not contain CPU%d\n", i);
5186                         if (!cpu_isset(i, group->cpumask))
5187                                 printk(KERN_DEBUG "ERROR domain->groups does not contain CPU%d\n", i);
5188
5189                         printk(KERN_DEBUG);
5190                         for (j = 0; j < level + 2; j++)
5191                                 printk(" ");
5192                         printk("groups:");
5193                         do {
5194                                 if (!group) {
5195                                         printk(" ERROR: NULL");
5196                                         break;
5197                                 }
5198                                 
5199                                 if (!group->cpu_power)
5200                                         printk(KERN_DEBUG "ERROR group->cpu_power not set\n");
5201
5202                                 if (!cpus_weight(group->cpumask))
5203                                         printk(" ERROR empty group:");
5204
5205                                 if (cpus_intersects(groupmask, group->cpumask))
5206                                         printk(" ERROR repeated CPUs:");
5207
5208                                 cpus_or(groupmask, groupmask, group->cpumask);
5209
5210                                 cpumask_scnprintf(str, NR_CPUS, group->cpumask);
5211                                 printk(" %s", str);
5212
5213                                 group = group->next;
5214                         } while (group != sd->groups);
5215                         printk("\n");
5216
5217                         if (!cpus_equal(sd->span, groupmask))
5218                                 printk(KERN_DEBUG "ERROR groups don't span domain->span\n");
5219
5220                         level++;
5221                         sd = sd->parent;
5222
5223                         if (sd) {
5224                                 if (!cpus_subset(groupmask, sd->span))
5225                                         printk(KERN_DEBUG "ERROR parent span is not a superset of domain->span\n");
5226                         }
5227
5228                 } while (sd);
5229         }
5230 }
5231 #else
5232 #define sched_domain_debug() {}
5233 #endif
5234
5235 /*
5236  * Initial dummy domain for early boot and for hotplug cpu. Being static,
5237  * it is initialized to zero, so all balancing flags are cleared, which is
5238  * what we want.
5239  */
5240 static struct sched_domain sched_domain_dummy;
5241
5242 #ifdef CONFIG_HOTPLUG_CPU
5243 /*
5244  * Force a reinitialization of the sched domains hierarchy.  The domains
5245  * and groups cannot be updated in place without racing with the balancing
5246  * code, so we temporarily attach all running cpus to a "dummy" domain
5247  * which will prevent rebalancing while the sched domains are recalculated.
5248  */
5249 static int update_sched_domains(struct notifier_block *nfb,
5250                                 unsigned long action, void *hcpu)
5251 {
5252         int i;
5253
5254         switch (action) {
5255         case CPU_UP_PREPARE:
5256         case CPU_DOWN_PREPARE:
5257                 for_each_online_cpu(i)
5258                         cpu_attach_domain(&sched_domain_dummy, i);
5259                 arch_destroy_sched_domains();
5260                 return NOTIFY_OK;
5261
5262         case CPU_UP_CANCELED:
5263         case CPU_DOWN_FAILED:
5264         case CPU_ONLINE:
5265         case CPU_DEAD:
5266                 /*
5267                  * Fall through and re-initialise the domains.
5268                  */
5269                 break;
5270         default:
5271                 return NOTIFY_DONE;
5272         }
5273
5274         /* The hotplug lock is already held by cpu_up/cpu_down */
5275         arch_init_sched_domains();
5276
5277         sched_domain_debug();
5278
5279         return NOTIFY_OK;
5280 }
5281 #endif
5282
5283 void __init sched_init_smp(void)
5284 {
5285         lock_cpu_hotplug();
5286         arch_init_sched_domains();
5287         sched_domain_debug();
5288         unlock_cpu_hotplug();
5289         /* XXX: Theoretical race here - CPU may be hotplugged now */
5290         hotcpu_notifier(update_sched_domains, 0);
5291 }
5292 #else
5293 void __init sched_init_smp(void)
5294 {
5295 }
5296 #endif /* CONFIG_SMP */
5297
5298 int in_sched_functions(unsigned long addr)
5299 {
5300         /* Linker adds these: start and end of __sched functions */
5301         extern char __sched_text_start[], __sched_text_end[];
5302         return in_lock_functions(addr) ||
5303                 (addr >= (unsigned long)__sched_text_start
5304                 && addr < (unsigned long)__sched_text_end);
5305 }
5306
5307 void __init sched_init(void)
5308 {
5309         runqueue_t *rq;
5310         int i;
5311
5312         init_cpu_classes();
5313
5314         for (i = 0; i < NR_CPUS; i++) {
5315 #ifndef CONFIG_CKRM_CPU_SCHEDULE
5316                 int j, k;
5317                 prio_array_t *array;
5318
5319                 rq = cpu_rq(i);
5320                 spin_lock_init(&rq->lock);
5321
5322                 for (j = 0; j < 2; j++) {
5323                         array = rq->arrays + j;
5324                         for (k = 0; k < MAX_PRIO; k++) {
5325                                 INIT_LIST_HEAD(array->queue + k);
5326                                 __clear_bit(k, array->bitmap);
5327                         }
5328                         // delimiter for bitsearch
5329                         __set_bit(MAX_PRIO, array->bitmap);
5330                 }
5331
5332                 rq->active = rq->arrays;
5333                 rq->expired = rq->arrays + 1;
5334                 rq->best_expired_prio = MAX_PRIO;
5335
5336 #else
5337                 rq = cpu_rq(i);
5338                 spin_lock_init(&rq->lock);
5339 #endif
5340
5341 #ifdef CONFIG_SMP
5342                 rq->sd = &sched_domain_dummy;
5343                 rq->cpu_load = 0;
5344 #ifdef CONFIG_CKRM_CPU_SCHEDULE
5345                 ckrm_load_init(rq_ckrm_load(rq));
5346 #endif
5347                 rq->active_balance = 0;
5348                 rq->push_cpu = 0;
5349                 rq->migration_thread = NULL;
5350                 INIT_LIST_HEAD(&rq->migration_queue);
5351 #endif
5352 #ifdef CONFIG_VSERVER_HARDCPU
5353                 INIT_LIST_HEAD(&rq->hold_queue);
5354 #endif
5355                 atomic_set(&rq->nr_iowait, 0);
5356
5357         }
5358
5359         /*
5360          * The boot idle thread does lazy MMU switching as well:
5361          */
5362         atomic_inc(&init_mm.mm_count);
5363         enter_lazy_tlb(&init_mm, current);
5364
5365         /*
5366          * Make us the idle thread. Technically, schedule() should not be
5367          * called from this thread; however, somewhere below it might be,
5368          * and because we are the idle thread, we just pick up running again
5369          * when this runqueue becomes "idle".
5370          */
5371         init_idle(current, smp_processor_id());
5372 }
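
/*
 * For illustration: the delimiter bit set at MAX_PRIO in sched_init()
 * above guarantees that a bitmap search over a priority array always
 * finds a set bit, e.g.
 *
 *      idx = sched_find_first_bit(array->bitmap);
 *
 * yields the highest runnable priority, or MAX_PRIO when the array is
 * empty, so the scheduler never scans past the end of the bitmap.
 */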
5373
5374 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
5375 void __might_sleep(char *file, int line)
5376 {
5377 #if defined(in_atomic)
5378         static unsigned long prev_jiffy;        /* ratelimiting */
5379
5380         if ((in_atomic() || irqs_disabled()) &&
5381             system_state == SYSTEM_RUNNING) {
5382                 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
5383                         return;
5384                 prev_jiffy = jiffies;
5385                 printk(KERN_ERR "Debug: sleeping function called from invalid"
5386                                 " context at %s:%d\n", file, line);
5387                 printk("in_atomic():%d, irqs_disabled():%d\n",
5388                         in_atomic(), irqs_disabled());
5389                 dump_stack();
5390         }
5391 #endif
5392 }
5393 EXPORT_SYMBOL(__might_sleep);
5394 #endif
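
/*
 * Illustrative sketch: callers normally reach __might_sleep() through the
 * might_sleep() macro from <linux/kernel.h>, placed at the top of
 * functions that can block.  The helper below is invented for the example.
 */
static void example_blocking_helper(void)
{
        might_sleep();  /* complains (ratelimited) if called in atomic context */

        /* ... code that may sleep, e.g. a GFP_KERNEL allocation ... */
}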
5395
5396 #ifdef CONFIG_CKRM_CPU_SCHEDULE
5397 /**
5398  * Return the classqueue object of a given processor.
5399  */
5400 struct classqueue_struct * get_cpu_classqueue(int cpu)
5401 {
5402         return (& (cpu_rq(cpu)->classqueue) );
5403 }
5404
5405 /**
5406  * _ckrm_cpu_change_class - change the class of a task
5407  */
5408 void _ckrm_cpu_change_class(task_t *tsk, struct ckrm_cpu_class *newcls)
5409 {
5410         prio_array_t *array;
5411         struct runqueue *rq;
5412         unsigned long flags;
5413
5414         rq = task_rq_lock(tsk,&flags); 
5415         array = tsk->array;
5416         if (array) {
5417                 dequeue_task(tsk,array);
5418                 tsk->cpu_class = newcls;
5419                 enqueue_task(tsk,rq_active(tsk,rq));
5420         } else
5421                 tsk->cpu_class = newcls;
5422
5423         task_rq_unlock(rq,&flags);
5424 }
5425 #endif
5426
5427 #ifdef CONFIG_MAGIC_SYSRQ
5428 void normalize_rt_tasks(void)
5429 {
5430         struct task_struct *p;
5431         prio_array_t *array;
5432         unsigned long flags;
5433         runqueue_t *rq;
5434
5435         read_lock_irq(&tasklist_lock);
5436         for_each_process (p) {
5437                 if (!rt_task(p))
5438                         continue;
5439
5440                 rq = task_rq_lock(p, &flags);
5441
5442                 array = p->array;
5443                 if (array)
5444                         deactivate_task(p, task_rq(p));
5445                 __setscheduler(p, SCHED_NORMAL, 0);
5446                 if (array) {
5447                         __activate_task(p, task_rq(p));
5448                         resched_task(rq->curr);
5449                 }
5450
5451                 task_rq_unlock(rq, &flags);
5452         }
5453         read_unlock_irq(&tasklist_lock);
5454 }
5455
5456 #endif /* CONFIG_MAGIC_SYSRQ */