patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / kernel / sched.c
1 /*
2  *  kernel/sched.c
3  *
4  *  Kernel scheduler and related syscalls
5  *
6  *  Copyright (C) 1991-2002  Linus Torvalds
7  *
8  *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9  *              make semaphores SMP safe
10  *  1998-11-19  Implemented schedule_timeout() and related stuff
11  *              by Andrea Arcangeli
12  *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
13  *              hybrid priority-list and round-robin design with
14  *              an array-switch method of distributing timeslices
15  *              and per-CPU runqueues.  Cleanups and useful suggestions
16  *              by Davide Libenzi, preemptible kernel bits by Robert Love.
17  *  2003-09-03  Interactivity tuning by Con Kolivas.
18  *  2004-04-02  Scheduler domains code by Nick Piggin
19  */
20
21 #include <linux/mm.h>
22 #include <linux/module.h>
23 #include <linux/nmi.h>
24 #include <linux/init.h>
25 #include <asm/uaccess.h>
26 #include <linux/highmem.h>
27 #include <linux/smp_lock.h>
28 #include <asm/mmu_context.h>
29 #include <linux/interrupt.h>
30 #include <linux/completion.h>
31 #include <linux/kernel_stat.h>
32 #include <linux/security.h>
33 #include <linux/notifier.h>
34 #include <linux/suspend.h>
35 #include <linux/blkdev.h>
36 #include <linux/delay.h>
37 #include <linux/smp.h>
38 #include <linux/timer.h>
39 #include <linux/rcupdate.h>
40 #include <linux/cpu.h>
41 #include <linux/percpu.h>
42 #include <linux/kthread.h>
43 #include <linux/vserver/sched.h>
44 #include <linux/vs_base.h>
45
46 #include <asm/unistd.h>
47
48 #ifdef CONFIG_NUMA
49 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
50 #else
51 #define cpu_to_node_mask(cpu) (cpu_online_map)
52 #endif
53
54 /*
55  * Convert user-nice values [ -20 ... 0 ... 19 ]
56  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
57  * and back.
58  */
59 #define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
60 #define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
61 #define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
62
63 /*
64  * 'User priority' is the nice value converted to something we
65  * can work with better when scaling various scheduler parameters,
66  * it's a [ 0 ... 39 ] range.
67  */
68 #define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
69 #define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
70 #define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
71 #define AVG_TIMESLICE   (MIN_TIMESLICE + ((MAX_TIMESLICE - MIN_TIMESLICE) *\
72                         (MAX_PRIO-1-NICE_TO_PRIO(0))/(MAX_USER_PRIO - 1)))
73
74 /*
75  * Some helpers for converting nanosecond timing to jiffy resolution
76  */
77 #define NS_TO_JIFFIES(TIME)     ((TIME) / (1000000000 / HZ))
78 #define JIFFIES_TO_NS(TIME)     ((TIME) * (1000000000 / HZ))
79
80 /*
81  * These are the 'tuning knobs' of the scheduler:
82  *
83  * Minimum timeslice is 10 msecs, default timeslice is 100 msecs,
84  * maximum timeslice is 200 msecs. Timeslices get refilled after
85  * they expire.
86  */
87 #define MIN_TIMESLICE           ( 10 * HZ / 1000)
88 #define MAX_TIMESLICE           (200 * HZ / 1000)
89 #define ON_RUNQUEUE_WEIGHT       30
90 #define CHILD_PENALTY            95
91 #define PARENT_PENALTY          100
92 #define EXIT_WEIGHT               3
93 #define PRIO_BONUS_RATIO         25
94 #define MAX_BONUS               (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
95 #define INTERACTIVE_DELTA         2
96 #define MAX_SLEEP_AVG           (AVG_TIMESLICE * MAX_BONUS)
97 #define STARVATION_LIMIT        (MAX_SLEEP_AVG)
98 #define NS_MAX_SLEEP_AVG        (JIFFIES_TO_NS(MAX_SLEEP_AVG))
99 #define CREDIT_LIMIT            100
100
101 /*
102  * If a task is 'interactive' then we reinsert it in the active
103  * array after it has expired its current timeslice. (it will not
104  * continue to run immediately, it will still roundrobin with
105  * other interactive tasks.)
106  *
107  * This part scales the interactivity limit depending on niceness.
108  *
109  * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
110  * Here are a few examples of different nice levels:
111  *
112  *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
113  *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
114  *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
115  *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
116  *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
117  *
118  * (the X axis represents the possible -5 ... 0 ... +5 dynamic
119  *  priority range a task can explore, a value of '1' means the
120  *  task is rated interactive.)
121  *
122  * Ie. nice +19 tasks can never get 'interactive' enough to be
123  * reinserted into the active array. And only heavily CPU-hog nice -20
124  * tasks will be expired. Default nice 0 tasks are somewhere between,
125  * it takes some effort for them to get interactive, but it's not
126  * too hard.
127  */
128
129 #define CURRENT_BONUS(p) \
130         (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
131                 MAX_SLEEP_AVG)
132
133 #ifdef CONFIG_SMP
134 #define TIMESLICE_GRANULARITY(p)        (MIN_TIMESLICE * \
135                 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
136                         num_online_cpus())
137 #else
138 #define TIMESLICE_GRANULARITY(p)        (MIN_TIMESLICE * \
139                 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
140 #endif
141
142 #define SCALE(v1,v1_max,v2_max) \
143         (v1) * (v2_max) / (v1_max)
144
145 #define DELTA(p) \
146         (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
147
148 #define TASK_INTERACTIVE(p) \
149         ((p)->prio <= (p)->static_prio - DELTA(p))
150
151 #define INTERACTIVE_SLEEP(p) \
152         (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
153                 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
154
155 #define HIGH_CREDIT(p) \
156         ((p)->interactive_credit > CREDIT_LIMIT)
157
158 #define LOW_CREDIT(p) \
159         ((p)->interactive_credit < -CREDIT_LIMIT)
160
161 #define TASK_PREEMPTS_CURR(p, rq) \
162         ((p)->prio < (rq)->curr->prio)
163
164 /*
165  * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
166  * to time slice values.
167  *
168  * The higher a thread's priority, the bigger timeslices
169  * it gets during one round of execution. But even the lowest
170  * priority thread gets MIN_TIMESLICE worth of execution time.
171  *
172  * task_timeslice() is the interface that is used by the scheduler.
173  */
174
175 #define BASE_TIMESLICE(p) (MIN_TIMESLICE + \
176                 ((MAX_TIMESLICE - MIN_TIMESLICE) * \
177                         (MAX_PRIO-1 - (p)->static_prio) / (MAX_USER_PRIO-1)))
178
179 static unsigned int task_timeslice(task_t *p)
180 {
181         return BASE_TIMESLICE(p);
182 }
183
184 #define task_hot(p, now, sd) ((now) - (p)->timestamp < (sd)->cache_hot_time)
185
186 /*
187  * These are the runqueue data structures:
188  */
189
190 #define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
191
192 typedef struct runqueue runqueue_t;
193
194 struct prio_array {
195         unsigned int nr_active;
196         unsigned long bitmap[BITMAP_SIZE];
197         struct list_head queue[MAX_PRIO];
198 };
199
200 /*
201  * This is the main, per-CPU runqueue data structure.
202  *
203  * Locking rule: those places that want to lock multiple runqueues
204  * (such as the load balancing or the thread migration code), lock
205  * acquire operations must be ordered by ascending &runqueue.
206  */
207 struct runqueue {
208         spinlock_t lock;
209
210         /*
211          * nr_running and cpu_load should be in the same cacheline because
212          * remote CPUs use both these fields when doing load calculation.
213          */
214         unsigned long nr_running;
215 #ifdef CONFIG_SMP
216         unsigned long cpu_load;
217 #endif
218         unsigned long long nr_switches;
219         unsigned long expired_timestamp, nr_uninterruptible;
220         unsigned long long timestamp_last_tick;
221         task_t *curr, *idle;
222         struct mm_struct *prev_mm;
223         prio_array_t *active, *expired, arrays[2];
224         int best_expired_prio;
225         atomic_t nr_iowait;
226
227 #ifdef CONFIG_SMP
228         struct sched_domain *sd;
229
230         /* For active balancing */
231         int active_balance;
232         int push_cpu;
233
234         task_t *migration_thread;
235         struct list_head migration_queue;
236 #endif
237         struct list_head hold_queue;
238         int idle_tokens;
239 };
240
241 static DEFINE_PER_CPU(struct runqueue, runqueues);
242
243 #define for_each_domain(cpu, domain) \
244         for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent)
245
246 #define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
247 #define this_rq()               (&__get_cpu_var(runqueues))
248 #define task_rq(p)              cpu_rq(task_cpu(p))
249 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
250
251 /*
252  * Default context-switch locking:
253  */
254 #ifndef prepare_arch_switch
255 # define prepare_arch_switch(rq, next)  do { } while (0)
256 # define finish_arch_switch(rq, next)   spin_unlock_irq(&(rq)->lock)
257 # define task_running(rq, p)            ((rq)->curr == (p))
258 #endif
259
260 /*
261  * task_rq_lock - lock the runqueue a given task resides on and disable
262  * interrupts.  Note the ordering: we can safely lookup the task_rq without
263  * explicitly disabling preemption.
264  */
265 static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
266 {
267         struct runqueue *rq;
268
269 repeat_lock_task:
270         local_irq_save(*flags);
271         rq = task_rq(p);
272         spin_lock(&rq->lock);
273         if (unlikely(rq != task_rq(p))) {
274                 spin_unlock_irqrestore(&rq->lock, *flags);
275                 goto repeat_lock_task;
276         }
277         return rq;
278 }
279
280 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
281 {
282         spin_unlock_irqrestore(&rq->lock, *flags);
283 }
284
285 /*
286  * rq_lock - lock a given runqueue and disable interrupts.
287  */
288 static runqueue_t *this_rq_lock(void)
289 {
290         runqueue_t *rq;
291
292         local_irq_disable();
293         rq = this_rq();
294         spin_lock(&rq->lock);
295
296         return rq;
297 }
298
299 static inline void rq_unlock(runqueue_t *rq)
300 {
301         spin_unlock_irq(&rq->lock);
302 }
303
304 /*
305  * Adding/removing a task to/from a priority array:
306  */
307 static void dequeue_task(struct task_struct *p, prio_array_t *array)
308 {
309         array->nr_active--;
310         list_del(&p->run_list);
311         if (list_empty(array->queue + p->prio))
312                 __clear_bit(p->prio, array->bitmap);
313 }
314
315 static void enqueue_task(struct task_struct *p, prio_array_t *array)
316 {
317         list_add_tail(&p->run_list, array->queue + p->prio);
318         __set_bit(p->prio, array->bitmap);
319         array->nr_active++;
320         p->array = array;
321 }
322
323 /*
324  * Used by the migration code - we pull tasks from the head of the
325  * remote queue so we want these tasks to show up at the head of the
326  * local queue:
327  */
328 static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
329 {
330         list_add(&p->run_list, array->queue + p->prio);
331         __set_bit(p->prio, array->bitmap);
332         array->nr_active++;
333         p->array = array;
334 }
335
336 /*
337  * effective_prio - return the priority that is based on the static
338  * priority but is modified by bonuses/penalties.
339  *
340  * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
341  * into the -5 ... 0 ... +5 bonus/penalty range.
342  *
343  * We use 25% of the full 0...39 priority range so that:
344  *
345  * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
346  * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
347  *
348  * Both properties are important to certain workloads.
349  */
350 static int effective_prio(task_t *p)
351 {
352         int bonus, prio;
353
354         if (rt_task(p))
355                 return p->prio;
356
357         bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
358
359         prio = p->static_prio - bonus;
360         if (__vx_task_flags(p, VXF_SCHED_PRIO, 0))
361                 prio += effective_vavavoom(p, MAX_USER_PRIO);
362
363         if (prio < MAX_RT_PRIO)
364                 prio = MAX_RT_PRIO;
365         if (prio > MAX_PRIO-1)
366                 prio = MAX_PRIO-1;
367         return prio;
368 }
369
370 /*
371  * __activate_task - move a task to the runqueue.
372  */
373 static inline void __activate_task(task_t *p, runqueue_t *rq)
374 {
375         enqueue_task(p, rq->active);
376         rq->nr_running++;
377 }
378
379 /*
380  * __activate_idle_task - move idle task to the _front_ of runqueue.
381  */
382 static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
383 {
384         enqueue_task_head(p, rq->active);
385         rq->nr_running++;
386 }
387
388 static void recalc_task_prio(task_t *p, unsigned long long now)
389 {
390         unsigned long long __sleep_time = now - p->timestamp;
391         unsigned long sleep_time;
392
393         if (__sleep_time > NS_MAX_SLEEP_AVG)
394                 sleep_time = NS_MAX_SLEEP_AVG;
395         else
396                 sleep_time = (unsigned long)__sleep_time;
397
398         if (likely(sleep_time > 0)) {
399                 /*
400                  * User tasks that sleep a long time are categorised as
401                  * idle and will get just interactive status to stay active &
402                  * prevent them suddenly becoming cpu hogs and starving
403                  * other processes.
404                  */
405                 if (p->mm && p->activated != -1 &&
406                         sleep_time > INTERACTIVE_SLEEP(p)) {
407                                 p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
408                                                 AVG_TIMESLICE);
409                                 if (!HIGH_CREDIT(p))
410                                         p->interactive_credit++;
411                 } else {
412                         /*
413                          * The lower the sleep avg a task has the more
414                          * rapidly it will rise with sleep time.
415                          */
416                         sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
417
418                         /*
419                          * Tasks with low interactive_credit are limited to
420                          * one timeslice worth of sleep avg bonus.
421                          */
422                         if (LOW_CREDIT(p) &&
423                             sleep_time > JIFFIES_TO_NS(task_timeslice(p)))
424                                 sleep_time = JIFFIES_TO_NS(task_timeslice(p));
425
426                         /*
427                          * Non high_credit tasks waking from uninterruptible
428                          * sleep are limited in their sleep_avg rise as they
429                          * are likely to be cpu hogs waiting on I/O
430                          */
431                         if (p->activated == -1 && !HIGH_CREDIT(p) && p->mm) {
432                                 if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
433                                         sleep_time = 0;
434                                 else if (p->sleep_avg + sleep_time >=
435                                                 INTERACTIVE_SLEEP(p)) {
436                                         p->sleep_avg = INTERACTIVE_SLEEP(p);
437                                         sleep_time = 0;
438                                 }
439                         }
440
441                         /*
442                          * This code gives a bonus to interactive tasks.
443                          *
444                          * The boost works by updating the 'average sleep time'
445                          * value here, based on ->timestamp. The more time a
446                          * task spends sleeping, the higher the average gets -
447                          * and the higher the priority boost gets as well.
448                          */
449                         p->sleep_avg += sleep_time;
450
451                         if (p->sleep_avg > NS_MAX_SLEEP_AVG) {
452                                 p->sleep_avg = NS_MAX_SLEEP_AVG;
453                                 if (!HIGH_CREDIT(p))
454                                         p->interactive_credit++;
455                         }
456                 }
457         }
458
459         p->prio = effective_prio(p);
460 }
461
462 /*
463  * activate_task - move a task to the runqueue and do priority recalculation
464  *
465  * Update all the scheduling statistics stuff. (sleep average
466  * calculation, priority modifiers, etc.)
467  */
468 static void activate_task(task_t *p, runqueue_t *rq, int local)
469 {
470         unsigned long long now;
471
472         now = sched_clock();
473 #ifdef CONFIG_SMP
474         if (!local) {
475                 /* Compensate for drifting sched_clock */
476                 runqueue_t *this_rq = this_rq();
477                 now = (now - this_rq->timestamp_last_tick)
478                         + rq->timestamp_last_tick;
479         }
480 #endif
481
482         recalc_task_prio(p, now);
483
484         /*
485          * This checks to make sure it's not an uninterruptible task
486          * that is now waking up.
487          */
488         if (!p->activated) {
489                 /*
490                  * Tasks which were woken up by interrupts (ie. hw events)
491                  * are most likely of interactive nature. So we give them
492                  * the credit of extending their sleep time to the period
493                  * of time they spend on the runqueue, waiting for execution
494                  * on a CPU, first time around:
495                  */
496                 if (in_interrupt())
497                         p->activated = 2;
498                 else {
499                         /*
500                          * Normal first-time wakeups get a credit too for
501                          * on-runqueue time, but it will be weighted down:
502                          */
503                         p->activated = 1;
504                 }
505         }
506         p->timestamp = now;
507
508         __activate_task(p, rq);
509 }
510
511 /*
512  * deactivate_task - remove a task from the runqueue.
513  */
514 static void deactivate_task(struct task_struct *p, runqueue_t *rq)
515 {
516         rq->nr_running--;
517         if (p->state == TASK_UNINTERRUPTIBLE)
518                 rq->nr_uninterruptible++;
519         dequeue_task(p, p->array);
520         p->array = NULL;
521 }
522
523 /*
524  * resched_task - mark a task 'to be rescheduled now'.
525  *
526  * On UP this means the setting of the need_resched flag, on SMP it
527  * might also involve a cross-CPU call to trigger the scheduler on
528  * the target CPU.
529  */
530 #ifdef CONFIG_SMP
531 static void resched_task(task_t *p)
532 {
533         int need_resched, nrpolling;
534
535         preempt_disable();
536         /* minimise the chance of sending an interrupt to poll_idle() */
537         nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
538         need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
539         nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
540
541         if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
542                 smp_send_reschedule(task_cpu(p));
543         preempt_enable();
544 }
545 #else
546 static inline void resched_task(task_t *p)
547 {
548         set_tsk_need_resched(p);
549 }
550 #endif
551
552 /**
553  * task_curr - is this task currently executing on a CPU?
554  * @p: the task in question.
555  */
556 inline int task_curr(task_t *p)
557 {
558         return cpu_curr(task_cpu(p)) == p;
559 }
560
561 #ifdef CONFIG_SMP
562 enum request_type {
563         REQ_MOVE_TASK,
564         REQ_SET_DOMAIN,
565 };
566
567 typedef struct {
568         struct list_head list;
569         enum request_type type;
570
571         /* For REQ_MOVE_TASK */
572         task_t *task;
573         int dest_cpu;
574
575         /* For REQ_SET_DOMAIN */
576         struct sched_domain *sd;
577
578         struct completion done;
579 } migration_req_t;
580
581 /*
582  * The task's runqueue lock must be held.
583  * Returns true if you have to wait for migration thread.
584  */
585 static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
586 {
587         runqueue_t *rq = task_rq(p);
588
589         /*
590          * If the task is not on a runqueue (and not running), then
591          * it is sufficient to simply update the task's cpu field.
592          */
593         if (!p->array && !task_running(rq, p)) {
594                 set_task_cpu(p, dest_cpu);
595                 return 0;
596         }
597
598         init_completion(&req->done);
599         req->type = REQ_MOVE_TASK;
600         req->task = p;
601         req->dest_cpu = dest_cpu;
602         list_add(&req->list, &rq->migration_queue);
603         return 1;
604 }
605
606 /*
607  * wait_task_inactive - wait for a thread to unschedule.
608  *
609  * The caller must ensure that the task *will* unschedule sometime soon,
610  * else this function might spin for a *long* time. This function can't
611  * be called with interrupts off, or it may introduce deadlock with
612  * smp_call_function() if an IPI is sent by the same process we are
613  * waiting to become inactive.
614  */
615 void wait_task_inactive(task_t * p)
616 {
617         unsigned long flags;
618         runqueue_t *rq;
619         int preempted;
620
621 repeat:
622         rq = task_rq_lock(p, &flags);
623         /* Must be off runqueue entirely, not preempted. */
624         if (unlikely(p->array)) {
625                 /* If it's preempted, we yield.  It could be a while. */
626                 preempted = !task_running(rq, p);
627                 task_rq_unlock(rq, &flags);
628                 cpu_relax();
629                 if (preempted)
630                         yield();
631                 goto repeat;
632         }
633         task_rq_unlock(rq, &flags);
634 }
635
636 /***
637  * kick_process - kick a running thread to enter/exit the kernel
638  * @p: the to-be-kicked thread
639  *
640  * Cause a process which is running on another CPU to enter
641  * kernel-mode, without any delay. (to get signals handled.)
642  */
643 void kick_process(task_t *p)
644 {
645         int cpu;
646
647         preempt_disable();
648         cpu = task_cpu(p);
649         if ((cpu != smp_processor_id()) && task_curr(p))
650                 smp_send_reschedule(cpu);
651         preempt_enable();
652 }
653
654 EXPORT_SYMBOL_GPL(kick_process);
655
656 /*
657  * Return a low guess at the load of a migration-source cpu.
658  *
659  * We want to under-estimate the load of migration sources, to
660  * balance conservatively.
661  */
662 static inline unsigned long source_load(int cpu)
663 {
664         runqueue_t *rq = cpu_rq(cpu);
665         unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
666
667         return min(rq->cpu_load, load_now);
668 }
669
670 /*
671  * Return a high guess at the load of a migration-target cpu
672  */
673 static inline unsigned long target_load(int cpu)
674 {
675         runqueue_t *rq = cpu_rq(cpu);
676         unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
677
678         return max(rq->cpu_load, load_now);
679 }
680
681 #endif
682
683 /*
684  * wake_idle() is useful especially on SMT architectures to wake a
685  * task onto an idle sibling if we would otherwise wake it onto a
686  * busy sibling.
687  *
688  * Returns the CPU we should wake onto.
689  */
690 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
691 static int wake_idle(int cpu, task_t *p)
692 {
693         cpumask_t tmp;
694         runqueue_t *rq = cpu_rq(cpu);
695         struct sched_domain *sd;
696         int i;
697
698         if (idle_cpu(cpu))
699                 return cpu;
700
701         sd = rq->sd;
702         if (!(sd->flags & SD_WAKE_IDLE))
703                 return cpu;
704
705         cpus_and(tmp, sd->span, cpu_online_map);
706         for_each_cpu_mask(i, tmp) {
707                 if (!cpu_isset(i, p->cpus_allowed))
708                         continue;
709
710                 if (idle_cpu(i))
711                         return i;
712         }
713
714         return cpu;
715 }
716 #else
717 static inline int wake_idle(int cpu, task_t *p)
718 {
719         return cpu;
720 }
721 #endif
722
723 /***
724  * try_to_wake_up - wake up a thread
725  * @p: the to-be-woken-up thread
726  * @state: the mask of task states that can be woken
727  * @sync: do a synchronous wakeup?
728  *
729  * Put it on the run-queue if it's not already there. The "current"
730  * thread is always on the run-queue (except when the actual
731  * re-schedule is in progress), and as such you're allowed to do
732  * the simpler "current->state = TASK_RUNNING" to mark yourself
733  * runnable without the overhead of this.
734  *
735  * returns failure only if the task is already active.
736  */
737 static int try_to_wake_up(task_t * p, unsigned int state, int sync)
738 {
739         int cpu, this_cpu, success = 0;
740         unsigned long flags;
741         long old_state;
742         runqueue_t *rq;
743 #ifdef CONFIG_SMP
744         unsigned long load, this_load;
745         struct sched_domain *sd;
746         int new_cpu;
747 #endif
748
749         rq = task_rq_lock(p, &flags);
750         old_state = p->state;
751         if (!(old_state & state))
752                 goto out;
753
754         if (p->array)
755                 goto out_running;
756
757         cpu = task_cpu(p);
758         this_cpu = smp_processor_id();
759
760 #ifdef CONFIG_SMP
761         if (unlikely(task_running(rq, p)))
762                 goto out_activate;
763
764         new_cpu = cpu;
765
766         if (cpu == this_cpu || unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
767                 goto out_set_cpu;
768
769         load = source_load(cpu);
770         this_load = target_load(this_cpu);
771
772         /*
773          * If sync wakeup then subtract the (maximum possible) effect of
774          * the currently running task from the load of the current CPU:
775          */
776         if (sync)
777                 this_load -= SCHED_LOAD_SCALE;
778
779         /* Don't pull the task off an idle CPU to a busy one */
780         if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
781                 goto out_set_cpu;
782
783         new_cpu = this_cpu; /* Wake to this CPU if we can */
784
785         /*
786          * Scan domains for affine wakeup and passive balancing
787          * possibilities.
788          */
789         for_each_domain(this_cpu, sd) {
790                 unsigned int imbalance;
791                 /*
792                  * Start passive balancing when half the imbalance_pct
793                  * limit is reached.
794                  */
795                 imbalance = sd->imbalance_pct + (sd->imbalance_pct - 100) / 2;
796
797                 if ( ((sd->flags & SD_WAKE_AFFINE) &&
798                                 !task_hot(p, rq->timestamp_last_tick, sd))
799                         || ((sd->flags & SD_WAKE_BALANCE) &&
800                                 imbalance*this_load <= 100*load) ) {
801                         /*
802                          * Now sd has SD_WAKE_AFFINE and p is cache cold in sd
803                          * or sd has SD_WAKE_BALANCE and there is an imbalance
804                          */
805                         if (cpu_isset(cpu, sd->span))
806                                 goto out_set_cpu;
807                 }
808         }
809
810         new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
811 out_set_cpu:
812         new_cpu = wake_idle(new_cpu, p);
813         if (new_cpu != cpu && cpu_isset(new_cpu, p->cpus_allowed)) {
814                 set_task_cpu(p, new_cpu);
815                 task_rq_unlock(rq, &flags);
816                 /* might preempt at this point */
817                 rq = task_rq_lock(p, &flags);
818                 old_state = p->state;
819                 if (!(old_state & state))
820                         goto out;
821                 if (p->array)
822                         goto out_running;
823
824                 this_cpu = smp_processor_id();
825                 cpu = task_cpu(p);
826         }
827
828 out_activate:
829 #endif /* CONFIG_SMP */
830         if (old_state == TASK_UNINTERRUPTIBLE) {
831                 rq->nr_uninterruptible--;
832                 /*
833                  * Tasks on involuntary sleep don't earn
834                  * sleep_avg beyond just interactive state.
835                  */
836                 p->activated = -1;
837         }
838
839         /*
840          * Sync wakeups (i.e. those types of wakeups where the waker
841          * has indicated that it will leave the CPU in short order)
842          * don't trigger a preemption, if the woken up task will run on
843          * this cpu. (in this case the 'I will reschedule' promise of
844          * the waker guarantees that the freshly woken up task is going
845          * to be considered on this CPU.)
846          */
847         activate_task(p, rq, cpu == this_cpu);
848         if (!sync || cpu != this_cpu) {
849                 if (TASK_PREEMPTS_CURR(p, rq))
850                         resched_task(rq->curr);
851         }
852         success = 1;
853
854 out_running:
855         p->state = TASK_RUNNING;
856 out:
857         task_rq_unlock(rq, &flags);
858
859         return success;
860 }
861
862 int fastcall wake_up_process(task_t * p)
863 {
864         return try_to_wake_up(p, TASK_STOPPED |
865                                  TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
866 }
867
868 EXPORT_SYMBOL(wake_up_process);
869
870 int fastcall wake_up_state(task_t *p, unsigned int state)
871 {
872         return try_to_wake_up(p, state, 0);
873 }
874
875 /*
876  * Perform scheduler related setup for a newly forked process p.
877  * p is forked by current.
878  */
879 void fastcall sched_fork(task_t *p)
880 {
881         /*
882          * We mark the process as running here, but have not actually
883          * inserted it onto the runqueue yet. This guarantees that
884          * nobody will actually run it, and a signal or other external
885          * event cannot wake it up and insert it on the runqueue either.
886          */
887         p->state = TASK_RUNNING;
888         INIT_LIST_HEAD(&p->run_list);
889         p->array = NULL;
890         spin_lock_init(&p->switch_lock);
891 #ifdef CONFIG_PREEMPT
892         /*
893          * During context-switch we hold precisely one spinlock, which
894          * schedule_tail drops. (in the common case it's this_rq()->lock,
895          * but it also can be p->switch_lock.) So we compensate with a count
896          * of 1. Also, we want to start with kernel preemption disabled.
897          */
898         p->thread_info->preempt_count = 1;
899 #endif
900         /*
901          * Share the timeslice between parent and child, thus the
902          * total amount of pending timeslices in the system doesn't change,
903          * resulting in more scheduling fairness.
904          */
905         local_irq_disable();
906         p->time_slice = (current->time_slice + 1) >> 1;
907         /*
908          * The remainder of the first timeslice might be recovered by
909          * the parent if the child exits early enough.
910          */
911         p->first_time_slice = 1;
912         current->time_slice >>= 1;
913         p->timestamp = sched_clock();
914         if (!current->time_slice) {
915                 /*
916                  * This case is rare, it happens when the parent has only
917                  * a single jiffy left from its timeslice. Taking the
918                  * runqueue lock is not a problem.
919                  */
920                 current->time_slice = 1;
921                 preempt_disable();
922                 scheduler_tick(0, 0);
923                 local_irq_enable();
924                 preempt_enable();
925         } else
926                 local_irq_enable();
927 }
928
929 /*
930  * wake_up_forked_process - wake up a freshly forked process.
931  *
932  * This function will do some initial scheduler statistics housekeeping
933  * that must be done for every newly created process.
934  */
935 void fastcall wake_up_forked_process(task_t * p)
936 {
937         unsigned long flags;
938         runqueue_t *rq = task_rq_lock(current, &flags);
939
940         BUG_ON(p->state != TASK_RUNNING);
941
942         /*
943          * We decrease the sleep average of forking parents
944          * and children as well, to keep max-interactive tasks
945          * from forking tasks that are max-interactive.
946          */
947         current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
948                 PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
949
950         p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
951                 CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
952
953         p->interactive_credit = 0;
954
955         p->prio = effective_prio(p);
956         set_task_cpu(p, smp_processor_id());
957
958         if (unlikely(!current->array))
959                 __activate_task(p, rq);
960         else {
961                 p->prio = current->prio;
962                 list_add_tail(&p->run_list, &current->run_list);
963                 p->array = current->array;
964                 p->array->nr_active++;
965                 rq->nr_running++;
966         }
967         task_rq_unlock(rq, &flags);
968 }
969
970 /*
971  * Potentially available exiting-child timeslices are
972  * retrieved here - this way the parent does not get
973  * penalized for creating too many threads.
974  *
975  * (this cannot be used to 'generate' timeslices
976  * artificially, because any timeslice recovered here
977  * was given away by the parent in the first place.)
978  */
979 void fastcall sched_exit(task_t * p)
980 {
981         unsigned long flags;
982         runqueue_t *rq;
983
984         local_irq_save(flags);
985         if (p->first_time_slice) {
986                 p->parent->time_slice += p->time_slice;
987                 if (unlikely(p->parent->time_slice > MAX_TIMESLICE))
988                         p->parent->time_slice = MAX_TIMESLICE;
989         }
990         local_irq_restore(flags);
991         /*
992          * If the child was a (relative-) CPU hog then decrease
993          * the sleep_avg of the parent as well.
994          */
995         rq = task_rq_lock(p->parent, &flags);
996         if (p->sleep_avg < p->parent->sleep_avg)
997                 p->parent->sleep_avg = p->parent->sleep_avg /
998                 (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
999                 (EXIT_WEIGHT + 1);
1000         task_rq_unlock(rq, &flags);
1001 }
1002
1003 /**
1004  * finish_task_switch - clean up after a task-switch
1005  * @prev: the thread we just switched away from.
1006  *
1007  * We enter this with the runqueue still locked, and finish_arch_switch()
1008  * will unlock it along with doing any other architecture-specific cleanup
1009  * actions.
1010  *
1011  * Note that we may have delayed dropping an mm in context_switch(). If
1012  * so, we finish that here outside of the runqueue lock.  (Doing it
1013  * with the lock held can cause deadlocks; see schedule() for
1014  * details.)
1015  */
1016 static void finish_task_switch(task_t *prev)
1017 {
1018         runqueue_t *rq = this_rq();
1019         struct mm_struct *mm = rq->prev_mm;
1020         unsigned long prev_task_flags;
1021
1022         rq->prev_mm = NULL;
1023
1024         /*
1025          * A task struct has one reference for the use as "current".
1026          * If a task dies, then it sets TASK_ZOMBIE in tsk->state and calls
1027          * schedule one last time. The schedule call will never return,
1028          * and the scheduled task must drop that reference.
1029          * The test for TASK_ZOMBIE must occur while the runqueue locks are
1030          * still held, otherwise prev could be scheduled on another cpu, die
1031          * there before we look at prev->state, and then the reference would
1032          * be dropped twice.
1033          *              Manfred Spraul <manfred@colorfullife.com>
1034          */
1035         prev_task_flags = prev->flags;
1036         finish_arch_switch(rq, prev);
1037         if (mm)
1038                 mmdrop(mm);
1039         if (unlikely(prev_task_flags & PF_DEAD))
1040                 put_task_struct(prev);
1041 }
1042
1043 /**
1044  * schedule_tail - first thing a freshly forked thread must call.
1045  * @prev: the thread we just switched away from.
1046  */
1047 asmlinkage void schedule_tail(task_t *prev)
1048 {
1049         finish_task_switch(prev);
1050
1051         if (current->set_child_tid)
1052                 put_user(current->pid, current->set_child_tid);
1053 }
1054
1055 /*
1056  * context_switch - switch to the new MM and the new
1057  * thread's register state.
1058  */
1059 static inline
1060 task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
1061 {
1062         struct mm_struct *mm = next->mm;
1063         struct mm_struct *oldmm = prev->active_mm;
1064
1065         if (unlikely(!mm)) {
1066                 next->active_mm = oldmm;
1067                 atomic_inc(&oldmm->mm_count);
1068                 enter_lazy_tlb(oldmm, next);
1069         } else
1070                 switch_mm(oldmm, mm, next);
1071
1072         if (unlikely(!prev->mm)) {
1073                 prev->active_mm = NULL;
1074                 WARN_ON(rq->prev_mm);
1075                 rq->prev_mm = oldmm;
1076         }
1077
1078         /* Here we just switch the register state and the stack. */
1079         switch_to(prev, next, prev);
1080
1081         return prev;
1082 }
1083
1084 /*
1085  * nr_running, nr_uninterruptible and nr_context_switches:
1086  *
1087  * externally visible scheduler statistics: current number of runnable
1088  * threads, current number of uninterruptible-sleeping threads, total
1089  * number of context switches performed since bootup.
1090  */
1091 unsigned long nr_running(void)
1092 {
1093         unsigned long i, sum = 0;
1094
1095         for_each_cpu(i)
1096                 sum += cpu_rq(i)->nr_running;
1097
1098         return sum;
1099 }
1100
1101 unsigned long nr_uninterruptible(void)
1102 {
1103         unsigned long i, sum = 0;
1104
1105         for_each_online_cpu(i)
1106                 sum += cpu_rq(i)->nr_uninterruptible;
1107
1108         return sum;
1109 }
1110
1111 unsigned long long nr_context_switches(void)
1112 {
1113         unsigned long long i, sum = 0;
1114
1115         for_each_online_cpu(i)
1116                 sum += cpu_rq(i)->nr_switches;
1117
1118         return sum;
1119 }
1120
1121 unsigned long nr_iowait(void)
1122 {
1123         unsigned long i, sum = 0;
1124
1125         for_each_online_cpu(i)
1126                 sum += atomic_read(&cpu_rq(i)->nr_iowait);
1127
1128         return sum;
1129 }
1130
1131 /*
1132  * double_rq_lock - safely lock two runqueues
1133  *
1134  * Note this does not disable interrupts like task_rq_lock,
1135  * you need to do so manually before calling.
1136  */
1137 static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
1138 {
1139         if (rq1 == rq2)
1140                 spin_lock(&rq1->lock);
1141         else {
1142                 if (rq1 < rq2) {
1143                         spin_lock(&rq1->lock);
1144                         spin_lock(&rq2->lock);
1145                 } else {
1146                         spin_lock(&rq2->lock);
1147                         spin_lock(&rq1->lock);
1148                 }
1149         }
1150 }
1151
1152 /*
1153  * double_rq_unlock - safely unlock two runqueues
1154  *
1155  * Note this does not restore interrupts like task_rq_unlock,
1156  * you need to do so manually after calling.
1157  */
1158 static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
1159 {
1160         spin_unlock(&rq1->lock);
1161         if (rq1 != rq2)
1162                 spin_unlock(&rq2->lock);
1163 }
1164
1165 enum idle_type
1166 {
1167         IDLE,
1168         NOT_IDLE,
1169         NEWLY_IDLE,
1170 };
1171
1172 #ifdef CONFIG_SMP
1173
1174 /*
1175  * find_idlest_cpu - find the least busy runqueue.
1176  */
1177 static int find_idlest_cpu(struct task_struct *p, int this_cpu,
1178                            struct sched_domain *sd)
1179 {
1180         unsigned long load, min_load, this_load;
1181         int i, min_cpu;
1182         cpumask_t mask;
1183
1184         min_cpu = UINT_MAX;
1185         min_load = ULONG_MAX;
1186
1187         cpus_and(mask, sd->span, cpu_online_map);
1188         cpus_and(mask, mask, p->cpus_allowed);
1189
1190         for_each_cpu_mask(i, mask) {
1191                 load = target_load(i);
1192
1193                 if (load < min_load) {
1194                         min_cpu = i;
1195                         min_load = load;
1196
1197                         /* break out early on an idle CPU: */
1198                         if (!min_load)
1199                                 break;
1200                 }
1201         }
1202
1203         /* add +1 to account for the new task */
1204         this_load = source_load(this_cpu) + SCHED_LOAD_SCALE;
1205
1206         /*
1207          * Would with the addition of the new task to the
1208          * current CPU there be an imbalance between this
1209          * CPU and the idlest CPU?
1210          *
1211          * Use half of the balancing threshold - new-context is
1212          * a good opportunity to balance.
1213          */
1214         if (min_load*(100 + (sd->imbalance_pct-100)/2) < this_load*100)
1215                 return min_cpu;
1216
1217         return this_cpu;
1218 }
1219
1220 /*
1221  * wake_up_forked_thread - wake up a freshly forked thread.
1222  *
1223  * This function will do some initial scheduler statistics housekeeping
1224  * that must be done for every newly created context, and it also does
1225  * runqueue balancing.
1226  */
1227 void fastcall wake_up_forked_thread(task_t * p)
1228 {
1229         unsigned long flags;
1230         int this_cpu = get_cpu(), cpu;
1231         struct sched_domain *tmp, *sd = NULL;
1232         runqueue_t *this_rq = cpu_rq(this_cpu), *rq;
1233
1234         /*
1235          * Find the largest domain that this CPU is part of that
1236          * is willing to balance on clone:
1237          */
1238         for_each_domain(this_cpu, tmp)
1239                 if (tmp->flags & SD_BALANCE_CLONE)
1240                         sd = tmp;
1241         if (sd)
1242                 cpu = find_idlest_cpu(p, this_cpu, sd);
1243         else
1244                 cpu = this_cpu;
1245
1246         local_irq_save(flags);
1247 lock_again:
1248         rq = cpu_rq(cpu);
1249         double_rq_lock(this_rq, rq);
1250
1251         BUG_ON(p->state != TASK_RUNNING);
1252
1253         /*
1254          * We did find_idlest_cpu() unlocked, so in theory
1255          * the mask could have changed - just dont migrate
1256          * in this case:
1257          */
1258         if (unlikely(!cpu_isset(cpu, p->cpus_allowed))) {
1259                 cpu = this_cpu;
1260                 double_rq_unlock(this_rq, rq);
1261                 goto lock_again;
1262         }
1263         /*
1264          * We decrease the sleep average of forking parents
1265          * and children as well, to keep max-interactive tasks
1266          * from forking tasks that are max-interactive.
1267          */
1268         current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
1269                 PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1270
1271         p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
1272                 CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1273
1274         p->interactive_credit = 0;
1275
1276         p->prio = effective_prio(p);
1277         set_task_cpu(p, cpu);
1278
1279         if (cpu == this_cpu) {
1280                 if (unlikely(!current->array))
1281                         __activate_task(p, rq);
1282                 else {
1283                         p->prio = current->prio;
1284                         list_add_tail(&p->run_list, &current->run_list);
1285                         p->array = current->array;
1286                         p->array->nr_active++;
1287                         rq->nr_running++;
1288                 }
1289         } else {
1290                 /* Not the local CPU - must adjust timestamp */
1291                 p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
1292                                         + rq->timestamp_last_tick;
1293                 __activate_task(p, rq);
1294                 if (TASK_PREEMPTS_CURR(p, rq))
1295                         resched_task(rq->curr);
1296         }
1297
1298         double_rq_unlock(this_rq, rq);
1299         local_irq_restore(flags);
1300         put_cpu();
1301 }
1302
1303 /*
1304  * If dest_cpu is allowed for this process, migrate the task to it.
1305  * This is accomplished by forcing the cpu_allowed mask to only
1306  * allow dest_cpu, which will force the cpu onto dest_cpu.  Then
1307  * the cpu_allowed mask is restored.
1308  */
1309 static void sched_migrate_task(task_t *p, int dest_cpu)
1310 {
1311         migration_req_t req;
1312         runqueue_t *rq;
1313         unsigned long flags;
1314
1315         rq = task_rq_lock(p, &flags);
1316         if (!cpu_isset(dest_cpu, p->cpus_allowed)
1317             || unlikely(cpu_is_offline(dest_cpu)))
1318                 goto out;
1319
1320         /* force the process onto the specified CPU */
1321         if (migrate_task(p, dest_cpu, &req)) {
1322                 /* Need to wait for migration thread (might exit: take ref). */
1323                 struct task_struct *mt = rq->migration_thread;
1324                 get_task_struct(mt);
1325                 task_rq_unlock(rq, &flags);
1326                 wake_up_process(mt);
1327                 put_task_struct(mt);
1328                 wait_for_completion(&req.done);
1329                 return;
1330         }
1331 out:
1332         task_rq_unlock(rq, &flags);
1333 }
1334
1335 /*
1336  * sched_balance_exec(): find the highest-level, exec-balance-capable
1337  * domain and try to migrate the task to the least loaded CPU.
1338  *
1339  * execve() is a valuable balancing opportunity, because at this point
1340  * the task has the smallest effective memory and cache footprint.
1341  */
1342 void sched_balance_exec(void)
1343 {
1344         struct sched_domain *tmp, *sd = NULL;
1345         int new_cpu, this_cpu = get_cpu();
1346
1347         /* Prefer the current CPU if there's only this task running */
1348         if (this_rq()->nr_running <= 1)
1349                 goto out;
1350
1351         for_each_domain(this_cpu, tmp)
1352                 if (tmp->flags & SD_BALANCE_EXEC)
1353                         sd = tmp;
1354
1355         if (sd) {
1356                 new_cpu = find_idlest_cpu(current, this_cpu, sd);
1357                 if (new_cpu != this_cpu) {
1358                         put_cpu();
1359                         sched_migrate_task(current, new_cpu);
1360                         return;
1361                 }
1362         }
1363 out:
1364         put_cpu();
1365 }
1366
1367 /*
1368  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1369  */
1370 static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
1371 {
1372         if (unlikely(!spin_trylock(&busiest->lock))) {
1373                 if (busiest < this_rq) {
1374                         spin_unlock(&this_rq->lock);
1375                         spin_lock(&busiest->lock);
1376                         spin_lock(&this_rq->lock);
1377                 } else
1378                         spin_lock(&busiest->lock);
1379         }
1380 }
1381
1382 /*
1383  * pull_task - move a task from a remote runqueue to the local runqueue.
1384  * Both runqueues must be locked.
1385  */
1386 static inline
1387 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
1388                runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
1389 {
1390         dequeue_task(p, src_array);
1391         src_rq->nr_running--;
1392         set_task_cpu(p, this_cpu);
1393         this_rq->nr_running++;
1394         enqueue_task(p, this_array);
1395         p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
1396                                 + this_rq->timestamp_last_tick;
1397         /*
1398          * Note that idle threads have a prio of MAX_PRIO, for this test
1399          * to be always true for them.
1400          */
1401         if (TASK_PREEMPTS_CURR(p, this_rq))
1402                 resched_task(this_rq->curr);
1403 }
1404
1405 /*
1406  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
1407  */
1408 static inline
1409 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
1410                      struct sched_domain *sd, enum idle_type idle)
1411 {
1412         /*
1413          * We do not migrate tasks that are:
1414          * 1) running (obviously), or
1415          * 2) cannot be migrated to this CPU due to cpus_allowed, or
1416          * 3) are cache-hot on their current CPU.
1417          */
1418         if (task_running(rq, p))
1419                 return 0;
1420         if (!cpu_isset(this_cpu, p->cpus_allowed))
1421                 return 0;
1422
1423         /* Aggressive migration if we've failed balancing */
1424         if (idle == NEWLY_IDLE ||
1425                         sd->nr_balance_failed < sd->cache_nice_tries) {
1426                 if (task_hot(p, rq->timestamp_last_tick, sd))
1427                         return 0;
1428         }
1429
1430         return 1;
1431 }
1432
1433 /*
1434  * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
1435  * as part of a balancing operation within "domain". Returns the number of
1436  * tasks moved.
1437  *
1438  * Called with both runqueues locked.
1439  */
1440 static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
1441                       unsigned long max_nr_move, struct sched_domain *sd,
1442                       enum idle_type idle)
1443 {
1444         prio_array_t *array, *dst_array;
1445         struct list_head *head, *curr;
1446         int idx, pulled = 0;
1447         task_t *tmp;
1448
1449         if (max_nr_move <= 0 || busiest->nr_running <= 1)
1450                 goto out;
1451
1452         /*
1453          * We first consider expired tasks. Those will likely not be
1454          * executed in the near future, and they are most likely to
1455          * be cache-cold, thus switching CPUs has the least effect
1456          * on them.
1457          */
1458         if (busiest->expired->nr_active) {
1459                 array = busiest->expired;
1460                 dst_array = this_rq->expired;
1461         } else {
1462                 array = busiest->active;
1463                 dst_array = this_rq->active;
1464         }
1465
1466 new_array:
1467         /* Start searching at priority 0: */
1468         idx = 0;
1469 skip_bitmap:
1470         if (!idx)
1471                 idx = sched_find_first_bit(array->bitmap);
1472         else
1473                 idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
1474         if (idx >= MAX_PRIO) {
1475                 if (array == busiest->expired && busiest->active->nr_active) {
1476                         array = busiest->active;
1477                         dst_array = this_rq->active;
1478                         goto new_array;
1479                 }
1480                 goto out;
1481         }
1482
1483         head = array->queue + idx;
1484         curr = head->prev;
1485 skip_queue:
1486         tmp = list_entry(curr, task_t, run_list);
1487
1488         curr = curr->prev;
1489
1490         if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
1491                 if (curr != head)
1492                         goto skip_queue;
1493                 idx++;
1494                 goto skip_bitmap;
1495         }
1496         pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
1497         pulled++;
1498
1499         /* We only want to steal up to the prescribed number of tasks. */
1500         if (pulled < max_nr_move) {
1501                 if (curr != head)
1502                         goto skip_queue;
1503                 idx++;
1504                 goto skip_bitmap;
1505         }
1506 out:
1507         return pulled;
1508 }
1509
1510 /*
1511  * find_busiest_group finds and returns the busiest CPU group within the
1512  * domain. It calculates and returns the number of tasks which should be
1513  * moved to restore balance via the imbalance parameter.
1514  */
1515 static struct sched_group *
1516 find_busiest_group(struct sched_domain *sd, int this_cpu,
1517                    unsigned long *imbalance, enum idle_type idle)
1518 {
1519         struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
1520         unsigned long max_load, avg_load, total_load, this_load, total_pwr;
1521
1522         max_load = this_load = total_load = total_pwr = 0;
1523
1524         do {
1525                 cpumask_t tmp;
1526                 unsigned long load;
1527                 int local_group;
1528                 int i, nr_cpus = 0;
1529
1530                 local_group = cpu_isset(this_cpu, group->cpumask);
1531
1532                 /* Tally up the load of all CPUs in the group */
1533                 avg_load = 0;
1534                 cpus_and(tmp, group->cpumask, cpu_online_map);
1535                 if (unlikely(cpus_empty(tmp)))
1536                         goto nextgroup;
1537
1538                 for_each_cpu_mask(i, tmp) {
1539                         /* Bias balancing toward cpus of our domain */
1540                         if (local_group)
1541                                 load = target_load(i);
1542                         else
1543                                 load = source_load(i);
1544
1545                         nr_cpus++;
1546                         avg_load += load;
1547                 }
1548
1549                 if (!nr_cpus)
1550                         goto nextgroup;
1551
1552                 total_load += avg_load;
1553                 total_pwr += group->cpu_power;
1554
1555                 /* Adjust by relative CPU power of the group */
1556                 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1557
1558                 if (local_group) {
1559                         this_load = avg_load;
1560                         this = group;
1561                         goto nextgroup;
1562                 } else if (avg_load > max_load) {
1563                         max_load = avg_load;
1564                         busiest = group;
1565                 }
1566 nextgroup:
1567                 group = group->next;
1568         } while (group != sd->groups);
1569
1570         if (!busiest || this_load >= max_load)
1571                 goto out_balanced;
1572
1573         avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
1574
1575         if (this_load >= avg_load ||
1576                         100*max_load <= sd->imbalance_pct*this_load)
1577                 goto out_balanced;
1578
1579         /*
1580          * We're trying to get all the cpus to the average_load, so we don't
1581          * want to push ourselves above the average load, nor do we wish to
1582          * reduce the max loaded cpu below the average load, as either of these
1583          * actions would just result in more rebalancing later, and ping-pong
1584          * tasks around. Thus we look for the minimum possible imbalance.
1585          * Negative imbalances (*we* are more loaded than anyone else) will
1586          * be counted as no imbalance for these purposes -- we can't fix that
1587          * by pulling tasks to us.  Be careful of negative numbers as they'll
1588          * appear as very large values with unsigned longs.
1589          */
1590         *imbalance = min(max_load - avg_load, avg_load - this_load);
1591
1592         /* How much load to actually move to equalise the imbalance */
1593         *imbalance = (*imbalance * min(busiest->cpu_power, this->cpu_power))
1594                                 / SCHED_LOAD_SCALE;
1595
1596         if (*imbalance < SCHED_LOAD_SCALE - 1) {
1597                 unsigned long pwr_now = 0, pwr_move = 0;
1598                 unsigned long tmp;
1599
1600                 if (max_load - this_load >= SCHED_LOAD_SCALE*2) {
1601                         *imbalance = 1;
1602                         return busiest;
1603                 }
1604
1605                 /*
1606                  * OK, we don't have enough imbalance to justify moving tasks,
1607                  * however we may be able to increase total CPU power used by
1608                  * moving them.
1609                  */
1610
1611                 pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load);
1612                 pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load);
1613                 pwr_now /= SCHED_LOAD_SCALE;
1614
1615                 /* Amount of load we'd subtract */
1616                 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power;
1617                 if (max_load > tmp)
1618                         pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE,
1619                                                         max_load - tmp);
1620
1621                 /* Amount of load we'd add */
1622                 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
1623                 if (max_load < tmp)
1624                         tmp = max_load;
1625                 pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp);
1626                 pwr_move /= SCHED_LOAD_SCALE;
1627
1628                 /* Move if we gain another 8th of a CPU worth of throughput */
1629                 if (pwr_move < pwr_now + SCHED_LOAD_SCALE / 8)
1630                         goto out_balanced;
1631
1632                 *imbalance = 1;
1633                 return busiest;
1634         }
1635
1636         /* Get rid of the scaling factor, rounding down as we divide */
1637         *imbalance = (*imbalance + 1) / SCHED_LOAD_SCALE;
1638
1639         return busiest;
1640
1641 out_balanced:
1642         if (busiest && (idle == NEWLY_IDLE ||
1643                         (idle == IDLE && max_load > SCHED_LOAD_SCALE)) ) {
1644                 *imbalance = 1;
1645                 return busiest;
1646         }
1647
1648         *imbalance = 0;
1649         return NULL;
1650 }
1651
1652 /*
1653  * find_busiest_queue - find the busiest runqueue among the cpus in group.
1654  */
1655 static runqueue_t *find_busiest_queue(struct sched_group *group)
1656 {
1657         cpumask_t tmp;
1658         unsigned long load, max_load = 0;
1659         runqueue_t *busiest = NULL;
1660         int i;
1661
1662         cpus_and(tmp, group->cpumask, cpu_online_map);
1663         for_each_cpu_mask(i, tmp) {
1664                 load = source_load(i);
1665
1666                 if (load > max_load) {
1667                         max_load = load;
1668                         busiest = cpu_rq(i);
1669                 }
1670         }
1671
1672         return busiest;
1673 }
1674
1675 /*
1676  * Check this_cpu to ensure it is balanced within domain. Attempt to move
1677  * tasks if there is an imbalance.
1678  *
1679  * Called with this_rq unlocked.
1680  */
1681 static int load_balance(int this_cpu, runqueue_t *this_rq,
1682                         struct sched_domain *sd, enum idle_type idle)
1683 {
1684         struct sched_group *group;
1685         runqueue_t *busiest;
1686         unsigned long imbalance;
1687         int nr_moved;
1688
1689         spin_lock(&this_rq->lock);
1690
1691         group = find_busiest_group(sd, this_cpu, &imbalance, idle);
1692         if (!group)
1693                 goto out_balanced;
1694
1695         busiest = find_busiest_queue(group);
1696         if (!busiest)
1697                 goto out_balanced;
1698         /*
1699          * This should be "impossible", but since load
1700          * balancing is inherently racy and statistical,
1701          * it could happen in theory.
1702          */
1703         if (unlikely(busiest == this_rq)) {
1704                 WARN_ON(1);
1705                 goto out_balanced;
1706         }
1707
1708         nr_moved = 0;
1709         if (busiest->nr_running > 1) {
1710                 /*
1711                  * Attempt to move tasks. If find_busiest_group has found
1712                  * an imbalance but busiest->nr_running <= 1, the group is
1713                  * still unbalanced. nr_moved simply stays zero, so it is
1714                  * correctly treated as an imbalance.
1715                  */
1716                 double_lock_balance(this_rq, busiest);
1717                 nr_moved = move_tasks(this_rq, this_cpu, busiest,
1718                                                 imbalance, sd, idle);
1719                 spin_unlock(&busiest->lock);
1720         }
1721         spin_unlock(&this_rq->lock);
1722
1723         if (!nr_moved) {
1724                 sd->nr_balance_failed++;
1725
1726                 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
1727                         int wake = 0;
1728
1729                         spin_lock(&busiest->lock);
1730                         if (!busiest->active_balance) {
1731                                 busiest->active_balance = 1;
1732                                 busiest->push_cpu = this_cpu;
1733                                 wake = 1;
1734                         }
1735                         spin_unlock(&busiest->lock);
1736                         if (wake)
1737                                 wake_up_process(busiest->migration_thread);
1738
1739                         /*
1740                          * We've kicked active balancing, reset the failure
1741                          * counter.
1742                          */
1743                         sd->nr_balance_failed = sd->cache_nice_tries;
1744                 }
1745         } else
1746                 sd->nr_balance_failed = 0;
1747
1748         /* We were unbalanced, so reset the balancing interval */
1749         sd->balance_interval = sd->min_interval;
1750
1751         return nr_moved;
1752
1753 out_balanced:
1754         spin_unlock(&this_rq->lock);
1755
1756         /* tune up the balancing interval */
1757         if (sd->balance_interval < sd->max_interval)
1758                 sd->balance_interval *= 2;
1759
1760         return 0;
1761 }
1762
1763 /*
1764  * Check this_cpu to ensure it is balanced within domain. Attempt to move
1765  * tasks if there is an imbalance.
1766  *
1767  * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
1768  * this_rq is locked.
1769  */
1770 static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
1771                                 struct sched_domain *sd)
1772 {
1773         struct sched_group *group;
1774         runqueue_t *busiest = NULL;
1775         unsigned long imbalance;
1776         int nr_moved = 0;
1777
1778         group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
1779         if (!group)
1780                 goto out;
1781
1782         busiest = find_busiest_queue(group);
1783         if (!busiest || busiest == this_rq)
1784                 goto out;
1785
1786         /* Attempt to move tasks */
1787         double_lock_balance(this_rq, busiest);
1788
1789         nr_moved = move_tasks(this_rq, this_cpu, busiest,
1790                                         imbalance, sd, NEWLY_IDLE);
1791
1792         spin_unlock(&busiest->lock);
1793
1794 out:
1795         return nr_moved;
1796 }
1797
1798 /*
1799  * idle_balance is called by schedule() if this_cpu is about to become
1800  * idle. Attempts to pull tasks from other CPUs.
1801  */
1802 static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
1803 {
1804         struct sched_domain *sd;
1805
1806         for_each_domain(this_cpu, sd) {
1807                 if (sd->flags & SD_BALANCE_NEWIDLE) {
1808                         if (load_balance_newidle(this_cpu, this_rq, sd)) {
1809                                 /* We've pulled tasks over so stop searching */
1810                                 break;
1811                         }
1812                 }
1813         }
1814 }
1815
1816 /*
1817  * active_load_balance is run by migration threads. It pushes a running
1818  * task off the cpu. It can be required to correctly have at least 1 task
1819  * running on each physical CPU where possible, and not have a physical /
1820  * logical imbalance.
1821  *
1822  * Called with busiest locked.
1823  */
1824 static void active_load_balance(runqueue_t *busiest, int busiest_cpu)
1825 {
1826         struct sched_domain *sd;
1827         struct sched_group *group, *busy_group;
1828         int i;
1829
1830         if (busiest->nr_running <= 1)
1831                 return;
1832
1833         for_each_domain(busiest_cpu, sd)
1834                 if (cpu_isset(busiest->push_cpu, sd->span))
1835                         break;
1836         if (!sd) {
1837                 WARN_ON(1);
1838                 return;
1839         }
1840
1841         group = sd->groups;
1842         while (!cpu_isset(busiest_cpu, group->cpumask))
1843                 group = group->next;
1844         busy_group = group;
1845
1846         group = sd->groups;
1847         do {
1848                 cpumask_t tmp;
1849                 runqueue_t *rq;
1850                 int push_cpu = 0;
1851
1852                 if (group == busy_group)
1853                         goto next_group;
1854
1855                 cpus_and(tmp, group->cpumask, cpu_online_map);
1856                 if (!cpus_weight(tmp))
1857                         goto next_group;
1858
1859                 for_each_cpu_mask(i, tmp) {
1860                         if (!idle_cpu(i))
1861                                 goto next_group;
1862                         push_cpu = i;
1863                 }
1864
1865                 rq = cpu_rq(push_cpu);
1866
1867                 /*
1868                  * This condition is "impossible", but since load
1869                  * balancing is inherently a bit racy and statistical,
1870                  * it can trigger.. Reported by Bjorn Helgaas on a
1871                  * 128-cpu setup.
1872                  */
1873                 if (unlikely(busiest == rq))
1874                         goto next_group;
1875                 double_lock_balance(busiest, rq);
1876                 move_tasks(rq, push_cpu, busiest, 1, sd, IDLE);
1877                 spin_unlock(&rq->lock);
1878 next_group:
1879                 group = group->next;
1880         } while (group != sd->groups);
1881 }
1882
1883 /*
1884  * rebalance_tick will get called every timer tick, on every CPU.
1885  *
1886  * It checks each scheduling domain to see if it is due to be balanced,
1887  * and initiates a balancing operation if so.
1888  *
1889  * Balancing parameters are set up in arch_init_sched_domains.
1890  */
1891
1892 /* Don't have all balancing operations going off at once */
1893 #define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
1894
1895 static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
1896                            enum idle_type idle)
1897 {
1898         unsigned long old_load, this_load;
1899         unsigned long j = jiffies + CPU_OFFSET(this_cpu);
1900         struct sched_domain *sd;
1901
1902         /* Update our load */
1903         old_load = this_rq->cpu_load;
1904         this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
1905         /*
1906          * Round up the averaging division if load is increasing. This
1907          * prevents us from getting stuck on 9 if the load is 10, for
1908          * example.
1909          */
1910         if (this_load > old_load)
1911                 old_load++;
1912         this_rq->cpu_load = (old_load + this_load) / 2;
1913
1914         for_each_domain(this_cpu, sd) {
1915                 unsigned long interval = sd->balance_interval;
1916
1917                 if (idle != IDLE)
1918                         interval *= sd->busy_factor;
1919
1920                 /* scale ms to jiffies */
1921                 interval = msecs_to_jiffies(interval);
1922                 if (unlikely(!interval))
1923                         interval = 1;
1924
1925                 if (j - sd->last_balance >= interval) {
1926                         if (load_balance(this_cpu, this_rq, sd, idle)) {
1927                                 /* We've pulled tasks over so no longer idle */
1928                                 idle = NOT_IDLE;
1929                         }
1930                         sd->last_balance += interval;
1931                 }
1932         }
1933 }
1934 #else
1935 /*
1936  * on UP we do not need to balance between CPUs:
1937  */
1938 static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
1939 {
1940 }
1941 static inline void idle_balance(int cpu, runqueue_t *rq)
1942 {
1943 }
1944 #endif
1945
1946 static inline int wake_priority_sleeper(runqueue_t *rq)
1947 {
1948 #ifdef CONFIG_SCHED_SMT
1949         /*
1950          * If an SMT sibling task has been put to sleep for priority
1951          * reasons reschedule the idle task to see if it can now run.
1952          */
1953         if (rq->nr_running) {
1954                 resched_task(rq->idle);
1955                 return 1;
1956         }
1957 #endif
1958         return 0;
1959 }
1960
1961 DEFINE_PER_CPU(struct kernel_stat, kstat);
1962
1963 EXPORT_PER_CPU_SYMBOL(kstat);
1964
1965 /*
1966  * We place interactive tasks back into the active array, if possible.
1967  *
1968  * To guarantee that this does not starve expired tasks we ignore the
1969  * interactivity of a task if the first expired task had to wait more
1970  * than a 'reasonable' amount of time. This deadline timeout is
1971  * load-dependent, as the frequency of array switched decreases with
1972  * increasing number of running tasks. We also ignore the interactivity
1973  * if a better static_prio task has expired:
1974  */
1975 #define EXPIRED_STARVING(rq) \
1976         ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
1977                 (jiffies - (rq)->expired_timestamp >= \
1978                         STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
1979                         ((rq)->curr->static_prio > (rq)->best_expired_prio))
1980
1981 /*
1982  * This function gets called by the timer code, with HZ frequency.
1983  * We call it with interrupts disabled.
1984  *
1985  * It also gets called by the fork code, when changing the parent's
1986  * timeslices.
1987  */
1988 void scheduler_tick(int user_ticks, int sys_ticks)
1989 {
1990         int cpu = smp_processor_id();
1991         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1992         runqueue_t *rq = this_rq();
1993         task_t *p = current;
1994
1995         rq->timestamp_last_tick = sched_clock();
1996
1997         if (rcu_pending(cpu))
1998                 rcu_check_callbacks(cpu, user_ticks);
1999
2000         /* note: this timer irq context must be accounted for as well */
2001         if (hardirq_count() - HARDIRQ_OFFSET) {
2002                 cpustat->irq += sys_ticks;
2003                 sys_ticks = 0;
2004         } else if (softirq_count()) {
2005                 cpustat->softirq += sys_ticks;
2006                 sys_ticks = 0;
2007         }
2008
2009         if (p == rq->idle) {
2010                 if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
2011                         set_need_resched();     
2012
2013                 if (atomic_read(&rq->nr_iowait) > 0)
2014                         cpustat->iowait += sys_ticks;
2015                 else
2016                         cpustat->idle += sys_ticks;
2017                 if (wake_priority_sleeper(rq))
2018                         goto out;
2019                 rebalance_tick(cpu, rq, IDLE);
2020                 return;
2021         }
2022         if (TASK_NICE(p) > 0)
2023                 cpustat->nice += user_ticks;
2024         else
2025                 cpustat->user += user_ticks;
2026         cpustat->system += sys_ticks;
2027
2028         /* Task might have expired already, but not scheduled off yet */
2029         if (p->array != rq->active) {
2030                 set_tsk_need_resched(p);
2031                 goto out;
2032         }
2033         spin_lock(&rq->lock);
2034         /*
2035          * The task was running during this tick - update the
2036          * time slice counter. Note: we do not update a thread's
2037          * priority until it either goes to sleep or uses up its
2038          * timeslice. This makes it possible for interactive tasks
2039          * to use up their timeslices at their highest priority levels.
2040          */
2041         if (unlikely(rt_task(p))) {
2042                 /*
2043                  * RR tasks need a special form of timeslice management.
2044                  * FIFO tasks have no timeslices.
2045                  */
2046                 if ((p->policy == SCHED_RR) && !--p->time_slice) {
2047                         p->time_slice = task_timeslice(p);
2048                         p->first_time_slice = 0;
2049                         set_tsk_need_resched(p);
2050
2051                         /* put it at the end of the queue: */
2052                         dequeue_task(p, rq->active);
2053                         enqueue_task(p, rq->active);
2054                 }
2055                 goto out_unlock;
2056         }
2057         if (vx_need_resched(p)) {
2058                 dequeue_task(p, rq->active);
2059                 set_tsk_need_resched(p);
2060                 p->prio = effective_prio(p);
2061                 p->time_slice = task_timeslice(p);
2062                 p->first_time_slice = 0;
2063
2064                 if (!rq->expired_timestamp)
2065                         rq->expired_timestamp = jiffies;
2066                 if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
2067                         enqueue_task(p, rq->expired);
2068                         if (p->static_prio < rq->best_expired_prio)
2069                                 rq->best_expired_prio = p->static_prio;
2070                 } else
2071                         enqueue_task(p, rq->active);
2072         } else {
2073                 /*
2074                  * Prevent a too long timeslice allowing a task to monopolize
2075                  * the CPU. We do this by splitting up the timeslice into
2076                  * smaller pieces.
2077                  *
2078                  * Note: this does not mean the task's timeslices expire or
2079                  * get lost in any way, they just might be preempted by
2080                  * another task of equal priority. (one with higher
2081                  * priority would have preempted this task already.) We
2082                  * requeue this task to the end of the list on this priority
2083                  * level, which is in essence a round-robin of tasks with
2084                  * equal priority.
2085                  *
2086                  * This only applies to tasks in the interactive
2087                  * delta range with at least TIMESLICE_GRANULARITY to requeue.
2088                  */
2089                 if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
2090                         p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
2091                         (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
2092                         (p->array == rq->active)) {
2093
2094                         dequeue_task(p, rq->active);
2095                         set_tsk_need_resched(p);
2096                         p->prio = effective_prio(p);
2097                         enqueue_task(p, rq->active);
2098                 }
2099         }
2100 out_unlock:
2101         spin_unlock(&rq->lock);
2102 out:
2103         rebalance_tick(cpu, rq, NOT_IDLE);
2104 }
2105
2106 #ifdef CONFIG_SCHED_SMT
2107 static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq)
2108 {
2109         int i;
2110         struct sched_domain *sd = rq->sd;
2111         cpumask_t sibling_map;
2112
2113         if (!(sd->flags & SD_SHARE_CPUPOWER))
2114                 return;
2115
2116         cpus_and(sibling_map, sd->span, cpu_online_map);
2117         for_each_cpu_mask(i, sibling_map) {
2118                 runqueue_t *smt_rq;
2119
2120                 if (i == cpu)
2121                         continue;
2122
2123                 smt_rq = cpu_rq(i);
2124
2125                 /*
2126                  * If an SMT sibling task is sleeping due to priority
2127                  * reasons wake it up now.
2128                  */
2129                 if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running)
2130                         resched_task(smt_rq->idle);
2131         }
2132 }
2133
2134 static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p)
2135 {
2136         struct sched_domain *sd = rq->sd;
2137         cpumask_t sibling_map;
2138         int ret = 0, i;
2139
2140         if (!(sd->flags & SD_SHARE_CPUPOWER))
2141                 return 0;
2142
2143         cpus_and(sibling_map, sd->span, cpu_online_map);
2144         for_each_cpu_mask(i, sibling_map) {
2145                 runqueue_t *smt_rq;
2146                 task_t *smt_curr;
2147
2148                 if (i == cpu)
2149                         continue;
2150
2151                 smt_rq = cpu_rq(i);
2152                 smt_curr = smt_rq->curr;
2153
2154                 /*
2155                  * If a user task with lower static priority than the
2156                  * running task on the SMT sibling is trying to schedule,
2157                  * delay it till there is proportionately less timeslice
2158                  * left of the sibling task to prevent a lower priority
2159                  * task from using an unfair proportion of the
2160                  * physical cpu's resources. -ck
2161                  */
2162                 if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
2163                         task_timeslice(p) || rt_task(smt_curr)) &&
2164                         p->mm && smt_curr->mm && !rt_task(p))
2165                                 ret = 1;
2166
2167                 /*
2168                  * Reschedule a lower priority task on the SMT sibling,
2169                  * or wake it up if it has been put to sleep for priority
2170                  * reasons.
2171                  */
2172                 if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
2173                         task_timeslice(smt_curr) || rt_task(p)) &&
2174                         smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
2175                         (smt_curr == smt_rq->idle && smt_rq->nr_running))
2176                                 resched_task(smt_curr);
2177         }
2178         return ret;
2179 }
2180 #else
2181 static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq)
2182 {
2183 }
2184
2185 static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p)
2186 {
2187         return 0;
2188 }
2189 #endif
2190
2191 /*
2192  * schedule() is the main scheduler function.
2193  */
2194 asmlinkage void __sched schedule(void)
2195 {
2196         long *switch_count;
2197         task_t *prev, *next;
2198         runqueue_t *rq;
2199         prio_array_t *array;
2200         struct list_head *queue;
2201         unsigned long long now;
2202         unsigned long run_time;
2203 #ifdef  CONFIG_VSERVER_HARDCPU          
2204         struct vx_info *vxi;
2205         int maxidle = -HZ;
2206 #endif
2207         int cpu, idx;
2208
2209         /*
2210          * Test if we are atomic.  Since do_exit() needs to call into
2211          * schedule() atomically, we ignore that path for now.
2212          * Otherwise, whine if we are scheduling when we should not be.
2213          */
2214         if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) {
2215                 if (unlikely(in_atomic())) {
2216                         printk(KERN_ERR "bad: scheduling while atomic!\n");
2217                         dump_stack();
2218                 }
2219         }
2220
2221 need_resched:
2222         preempt_disable();
2223         prev = current;
2224         rq = this_rq();
2225
2226         release_kernel_lock(prev);
2227         now = sched_clock();
2228         if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG))
2229                 run_time = now - prev->timestamp;
2230         else
2231                 run_time = NS_MAX_SLEEP_AVG;
2232
2233         /*
2234          * Tasks with interactive credits get charged less run_time
2235          * at high sleep_avg to delay them losing their interactive
2236          * status
2237          */
2238         if (HIGH_CREDIT(prev))
2239                 run_time /= (CURRENT_BONUS(prev) ? : 1);
2240
2241         spin_lock_irq(&rq->lock);
2242
2243         /*
2244          * if entering off of a kernel preemption go straight
2245          * to picking the next task.
2246          */
2247         switch_count = &prev->nivcsw;
2248         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2249                 switch_count = &prev->nvcsw;
2250                 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
2251                                 unlikely(signal_pending(prev))))
2252                         prev->state = TASK_RUNNING;
2253                 else
2254                         deactivate_task(prev, rq);
2255         }
2256
2257 #ifdef  CONFIG_VSERVER_HARDCPU          
2258         if (!list_empty(&rq->hold_queue)) {
2259                 struct list_head *l, *n;
2260                 int ret;
2261
2262                 vxi = NULL;
2263                 list_for_each_safe(l, n, &rq->hold_queue) {
2264                         next = list_entry(l, task_t, run_list);
2265                         if (vxi == next->vx_info)
2266                                 continue;
2267
2268                         vxi = next->vx_info;
2269                         ret = vx_tokens_recalc(vxi);
2270                         // tokens = vx_tokens_avail(next);
2271
2272                         if (ret > 0) {
2273                                 list_del(&next->run_list);
2274                                 next->state &= ~TASK_ONHOLD;
2275                                 recalc_task_prio(next, now);
2276                                 __activate_task(next, rq);
2277                                 // printk("··· unhold %p\n", next);
2278                                 break;
2279                         }
2280                         if ((ret < 0) && (maxidle < ret))
2281                                 maxidle = ret;
2282                 }       
2283         }
2284         rq->idle_tokens = -maxidle;
2285
2286 pick_next:
2287 #endif
2288         cpu = smp_processor_id();
2289         if (unlikely(!rq->nr_running)) {
2290                 idle_balance(cpu, rq);
2291                 if (!rq->nr_running) {
2292                         next = rq->idle;
2293                         rq->expired_timestamp = 0;
2294                         wake_sleeping_dependent(cpu, rq);
2295                         goto switch_tasks;
2296                 }
2297         }
2298
2299         array = rq->active;
2300         if (unlikely(!array->nr_active)) {
2301                 /*
2302                  * Switch the active and expired arrays.
2303                  */
2304                 rq->active = rq->expired;
2305                 rq->expired = array;
2306                 array = rq->active;
2307                 rq->expired_timestamp = 0;
2308                 rq->best_expired_prio = MAX_PRIO;
2309         }
2310
2311         idx = sched_find_first_bit(array->bitmap);
2312         queue = array->queue + idx;
2313         next = list_entry(queue->next, task_t, run_list);
2314
2315         if (dependent_sleeper(cpu, rq, next)) {
2316                 next = rq->idle;
2317                 goto switch_tasks;
2318         }
2319
2320 #ifdef  CONFIG_VSERVER_HARDCPU          
2321         vxi = next->vx_info;
2322         if (vxi && __vx_flags(vxi->vx_flags,
2323                 VXF_SCHED_PAUSE|VXF_SCHED_HARD, 0)) {
2324                 int ret = vx_tokens_recalc(vxi);
2325
2326                 if (unlikely(ret <= 0)) {
2327                         if (ret && (rq->idle_tokens > -ret))
2328                                 rq->idle_tokens = -ret;
2329                         deactivate_task(next, rq);
2330                         list_add_tail(&next->run_list, &rq->hold_queue);
2331                         next->state |= TASK_ONHOLD;                     
2332                         goto pick_next;
2333                 }
2334         }
2335 #endif
2336
2337         if (!rt_task(next) && next->activated > 0) {
2338                 unsigned long long delta = now - next->timestamp;
2339
2340                 if (next->activated == 1)
2341                         delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
2342
2343                 array = next->array;
2344                 dequeue_task(next, array);
2345                 recalc_task_prio(next, next->timestamp + delta);
2346                 enqueue_task(next, array);
2347         }
2348         next->activated = 0;
2349 switch_tasks:
2350         prefetch(next);
2351         clear_tsk_need_resched(prev);
2352         RCU_qsctr(task_cpu(prev))++;
2353
2354         prev->sleep_avg -= run_time;
2355         if ((long)prev->sleep_avg <= 0) {
2356                 prev->sleep_avg = 0;
2357                 if (!(HIGH_CREDIT(prev) || LOW_CREDIT(prev)))
2358                         prev->interactive_credit--;
2359         }
2360         prev->timestamp = now;
2361
2362         if (likely(prev != next)) {
2363                 next->timestamp = now;
2364                 rq->nr_switches++;
2365                 rq->curr = next;
2366                 ++*switch_count;
2367
2368                 prepare_arch_switch(rq, next);
2369                 prev = context_switch(rq, prev, next);
2370                 barrier();
2371
2372                 finish_task_switch(prev);
2373         } else
2374                 spin_unlock_irq(&rq->lock);
2375
2376         reacquire_kernel_lock(current);
2377         preempt_enable_no_resched();
2378         if (test_thread_flag(TIF_NEED_RESCHED))
2379                 goto need_resched;
2380 }
2381
2382 EXPORT_SYMBOL(schedule);
2383
2384 #ifdef CONFIG_PREEMPT
2385 /*
2386  * this is is the entry point to schedule() from in-kernel preemption
2387  * off of preempt_enable.  Kernel preemptions off return from interrupt
2388  * occur there and call schedule directly.
2389  */
2390 asmlinkage void __sched preempt_schedule(void)
2391 {
2392         struct thread_info *ti = current_thread_info();
2393
2394         /*
2395          * If there is a non-zero preempt_count or interrupts are disabled,
2396          * we do not want to preempt the current task.  Just return..
2397          */
2398         if (unlikely(ti->preempt_count || irqs_disabled()))
2399                 return;
2400
2401 need_resched:
2402         ti->preempt_count = PREEMPT_ACTIVE;
2403         schedule();
2404         ti->preempt_count = 0;
2405
2406         /* we could miss a preemption opportunity between schedule and now */
2407         barrier();
2408         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
2409                 goto need_resched;
2410 }
2411
2412 EXPORT_SYMBOL(preempt_schedule);
2413 #endif /* CONFIG_PREEMPT */
2414
2415 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
2416 {
2417         task_t *p = curr->task;
2418         return try_to_wake_up(p, mode, sync);
2419 }
2420
2421 EXPORT_SYMBOL(default_wake_function);
2422
2423 /*
2424  * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
2425  * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
2426  * number) then we wake all the non-exclusive tasks and one exclusive task.
2427  *
2428  * There are circumstances in which we can try to wake a task which has already
2429  * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
2430  * zero in this (rare) case, and we handle it by continuing to scan the queue.
2431  */
2432 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
2433                              int nr_exclusive, int sync, void *key)
2434 {
2435         struct list_head *tmp, *next;
2436
2437         list_for_each_safe(tmp, next, &q->task_list) {
2438                 wait_queue_t *curr;
2439                 unsigned flags;
2440                 curr = list_entry(tmp, wait_queue_t, task_list);
2441                 flags = curr->flags;
2442                 if (curr->func(curr, mode, sync, key) &&
2443                     (flags & WQ_FLAG_EXCLUSIVE) &&
2444                     !--nr_exclusive)
2445                         break;
2446         }
2447 }
2448
2449 /**
2450  * __wake_up - wake up threads blocked on a waitqueue.
2451  * @q: the waitqueue
2452  * @mode: which threads
2453  * @nr_exclusive: how many wake-one or wake-many threads to wake up
2454  */
2455 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
2456                                 int nr_exclusive, void *key)
2457 {
2458         unsigned long flags;
2459
2460         spin_lock_irqsave(&q->lock, flags);
2461         __wake_up_common(q, mode, nr_exclusive, 0, key);
2462         spin_unlock_irqrestore(&q->lock, flags);
2463 }
2464
2465 EXPORT_SYMBOL(__wake_up);
2466
2467 /*
2468  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
2469  */
2470 void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
2471 {
2472         __wake_up_common(q, mode, 1, 0, NULL);
2473 }
2474
2475 /**
2476  * __wake_up - sync- wake up threads blocked on a waitqueue.
2477  * @q: the waitqueue
2478  * @mode: which threads
2479  * @nr_exclusive: how many wake-one or wake-many threads to wake up
2480  *
2481  * The sync wakeup differs that the waker knows that it will schedule
2482  * away soon, so while the target thread will be woken up, it will not
2483  * be migrated to another CPU - ie. the two threads are 'synchronized'
2484  * with each other. This can prevent needless bouncing between CPUs.
2485  *
2486  * On UP it can prevent extra preemption.
2487  */
2488 void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
2489 {
2490         unsigned long flags;
2491         int sync = 1;
2492
2493         if (unlikely(!q))
2494                 return;
2495
2496         if (unlikely(!nr_exclusive))
2497                 sync = 0;
2498
2499         spin_lock_irqsave(&q->lock, flags);
2500         __wake_up_common(q, mode, nr_exclusive, sync, NULL);
2501         spin_unlock_irqrestore(&q->lock, flags);
2502 }
2503 EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
2504
2505 void fastcall complete(struct completion *x)
2506 {
2507         unsigned long flags;
2508
2509         spin_lock_irqsave(&x->wait.lock, flags);
2510         x->done++;
2511         __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2512                          1, 0, NULL);
2513         spin_unlock_irqrestore(&x->wait.lock, flags);
2514 }
2515 EXPORT_SYMBOL(complete);
2516
2517 void fastcall complete_all(struct completion *x)
2518 {
2519         unsigned long flags;
2520
2521         spin_lock_irqsave(&x->wait.lock, flags);
2522         x->done += UINT_MAX/2;
2523         __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2524                          0, 0, NULL);
2525         spin_unlock_irqrestore(&x->wait.lock, flags);
2526 }
2527 EXPORT_SYMBOL(complete_all);
2528
2529 void fastcall __sched wait_for_completion(struct completion *x)
2530 {
2531         might_sleep();
2532         spin_lock_irq(&x->wait.lock);
2533         if (!x->done) {
2534                 DECLARE_WAITQUEUE(wait, current);
2535
2536                 wait.flags |= WQ_FLAG_EXCLUSIVE;
2537                 __add_wait_queue_tail(&x->wait, &wait);
2538                 do {
2539                         __set_current_state(TASK_UNINTERRUPTIBLE);
2540                         spin_unlock_irq(&x->wait.lock);
2541                         schedule();
2542                         spin_lock_irq(&x->wait.lock);
2543                 } while (!x->done);
2544                 __remove_wait_queue(&x->wait, &wait);
2545         }
2546         x->done--;
2547         spin_unlock_irq(&x->wait.lock);
2548 }
2549 EXPORT_SYMBOL(wait_for_completion);
2550
2551 #define SLEEP_ON_VAR                                    \
2552         unsigned long flags;                            \
2553         wait_queue_t wait;                              \
2554         init_waitqueue_entry(&wait, current);
2555
2556 #define SLEEP_ON_HEAD                                   \
2557         spin_lock_irqsave(&q->lock,flags);              \
2558         __add_wait_queue(q, &wait);                     \
2559         spin_unlock(&q->lock);
2560
2561 #define SLEEP_ON_TAIL                                   \
2562         spin_lock_irq(&q->lock);                        \
2563         __remove_wait_queue(q, &wait);                  \
2564         spin_unlock_irqrestore(&q->lock, flags);
2565
2566 void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
2567 {
2568         SLEEP_ON_VAR
2569
2570         current->state = TASK_INTERRUPTIBLE;
2571
2572         SLEEP_ON_HEAD
2573         schedule();
2574         SLEEP_ON_TAIL
2575 }
2576
2577 EXPORT_SYMBOL(interruptible_sleep_on);
2578
2579 long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
2580 {
2581         SLEEP_ON_VAR
2582
2583         current->state = TASK_INTERRUPTIBLE;
2584
2585         SLEEP_ON_HEAD
2586         timeout = schedule_timeout(timeout);
2587         SLEEP_ON_TAIL
2588
2589         return timeout;
2590 }
2591
2592 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
2593
2594 void fastcall __sched sleep_on(wait_queue_head_t *q)
2595 {
2596         SLEEP_ON_VAR
2597
2598         current->state = TASK_UNINTERRUPTIBLE;
2599
2600         SLEEP_ON_HEAD
2601         schedule();
2602         SLEEP_ON_TAIL
2603 }
2604
2605 EXPORT_SYMBOL(sleep_on);
2606
2607 long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
2608 {
2609         SLEEP_ON_VAR
2610
2611         current->state = TASK_UNINTERRUPTIBLE;
2612
2613         SLEEP_ON_HEAD
2614         timeout = schedule_timeout(timeout);
2615         SLEEP_ON_TAIL
2616
2617         return timeout;
2618 }
2619
2620 EXPORT_SYMBOL(sleep_on_timeout);
2621
2622 void set_user_nice(task_t *p, long nice)
2623 {
2624         unsigned long flags;
2625         prio_array_t *array;
2626         runqueue_t *rq;
2627         int old_prio, new_prio, delta;
2628
2629         if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
2630                 return;
2631         /*
2632          * We have to be careful, if called from sys_setpriority(),
2633          * the task might be in the middle of scheduling on another CPU.
2634          */
2635         rq = task_rq_lock(p, &flags);
2636         /*
2637          * The RT priorities are set via setscheduler(), but we still
2638          * allow the 'normal' nice value to be set - but as expected
2639          * it wont have any effect on scheduling until the task is
2640          * not SCHED_NORMAL:
2641          */
2642         if (rt_task(p)) {
2643                 p->static_prio = NICE_TO_PRIO(nice);
2644                 goto out_unlock;
2645         }
2646         array = p->array;
2647         if (array)
2648                 dequeue_task(p, array);
2649
2650         old_prio = p->prio;
2651         new_prio = NICE_TO_PRIO(nice);
2652         delta = new_prio - old_prio;
2653         p->static_prio = NICE_TO_PRIO(nice);
2654         p->prio += delta;
2655
2656         if (array) {
2657                 enqueue_task(p, array);
2658                 /*
2659                  * If the task increased its priority or is running and
2660                  * lowered its priority, then reschedule its CPU:
2661                  */
2662                 if (delta < 0 || (delta > 0 && task_running(rq, p)))
2663                         resched_task(rq->curr);
2664         }
2665 out_unlock:
2666         task_rq_unlock(rq, &flags);
2667 }
2668
2669 EXPORT_SYMBOL(set_user_nice);
2670
2671 #ifdef __ARCH_WANT_SYS_NICE
2672
2673 /*
2674  * sys_nice - change the priority of the current process.
2675  * @increment: priority increment
2676  *
2677  * sys_setpriority is a more generic, but much slower function that
2678  * does similar things.
2679  */
2680 asmlinkage long sys_nice(int increment)
2681 {
2682         int retval;
2683         long nice;
2684
2685         /*
2686          * Setpriority might change our priority at the same moment.
2687          * We don't have to worry. Conceptually one call occurs first
2688          * and we have a single winner.
2689          */
2690         if (increment < 0) {
2691                 if (!capable(CAP_SYS_NICE))
2692                         return -EPERM;
2693                 if (increment < -40)
2694                         increment = -40;
2695         }
2696         if (increment > 40)
2697                 increment = 40;
2698
2699         nice = PRIO_TO_NICE(current->static_prio) + increment;
2700         if (nice < -20)
2701                 nice = -20;
2702         if (nice > 19)
2703                 nice = 19;
2704
2705         retval = security_task_setnice(current, nice);
2706         if (retval)
2707                 return retval;
2708
2709         set_user_nice(current, nice);
2710         return 0;
2711 }
2712
2713 #endif
2714
2715 /**
2716  * task_prio - return the priority value of a given task.
2717  * @p: the task in question.
2718  *
2719  * This is the priority value as seen by users in /proc.
2720  * RT tasks are offset by -200. Normal tasks are centered
2721  * around 0, value goes from -16 to +15.
2722  */
2723 int task_prio(task_t *p)
2724 {
2725         return p->prio - MAX_RT_PRIO;
2726 }
2727
2728 /**
2729  * task_nice - return the nice value of a given task.
2730  * @p: the task in question.
2731  */
2732 int task_nice(task_t *p)
2733 {
2734         return TASK_NICE(p);
2735 }
2736
2737 EXPORT_SYMBOL(task_nice);
2738
2739 /**
2740  * idle_cpu - is a given cpu idle currently?
2741  * @cpu: the processor in question.
2742  */
2743 int idle_cpu(int cpu)
2744 {
2745         return cpu_curr(cpu) == cpu_rq(cpu)->idle;
2746 }
2747
2748 EXPORT_SYMBOL_GPL(idle_cpu);
2749
2750 /**
2751  * find_process_by_pid - find a process with a matching PID value.
2752  * @pid: the pid in question.
2753  */
2754 static inline task_t *find_process_by_pid(pid_t pid)
2755 {
2756         return pid ? find_task_by_pid(pid) : current;
2757 }
2758
2759 /* Actually do priority change: must hold rq lock. */
2760 static void __setscheduler(struct task_struct *p, int policy, int prio)
2761 {
2762         BUG_ON(p->array);
2763         p->policy = policy;
2764         p->rt_priority = prio;
2765         if (policy != SCHED_NORMAL)
2766                 p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
2767         else
2768                 p->prio = p->static_prio;
2769 }
2770
2771 /*
2772  * setscheduler - change the scheduling policy and/or RT priority of a thread.
2773  */
2774 static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
2775 {
2776         struct sched_param lp;
2777         int retval = -EINVAL;
2778         int oldprio;
2779         prio_array_t *array;
2780         unsigned long flags;
2781         runqueue_t *rq;
2782         task_t *p;
2783
2784         if (!param || pid < 0)
2785                 goto out_nounlock;
2786
2787         retval = -EFAULT;
2788         if (copy_from_user(&lp, param, sizeof(struct sched_param)))
2789                 goto out_nounlock;
2790
2791         /*
2792          * We play safe to avoid deadlocks.
2793          */
2794         read_lock_irq(&tasklist_lock);
2795
2796         p = find_process_by_pid(pid);
2797
2798         retval = -ESRCH;
2799         if (!p)
2800                 goto out_unlock_tasklist;
2801
2802         /*
2803          * To be able to change p->policy safely, the apropriate
2804          * runqueue lock must be held.
2805          */
2806         rq = task_rq_lock(p, &flags);
2807
2808         if (policy < 0)
2809                 policy = p->policy;
2810         else {
2811                 retval = -EINVAL;
2812                 if (policy != SCHED_FIFO && policy != SCHED_RR &&
2813                                 policy != SCHED_NORMAL)
2814                         goto out_unlock;
2815         }
2816
2817         /*
2818          * Valid priorities for SCHED_FIFO and SCHED_RR are
2819          * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
2820          */
2821         retval = -EINVAL;
2822         if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
2823                 goto out_unlock;
2824         if ((policy == SCHED_NORMAL) != (lp.sched_priority == 0))
2825                 goto out_unlock;
2826
2827         retval = -EPERM;
2828         if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
2829             !capable(CAP_SYS_NICE))
2830                 goto out_unlock;
2831         if ((current->euid != p->euid) && (current->euid != p->uid) &&
2832             !capable(CAP_SYS_NICE))
2833                 goto out_unlock;
2834
2835         retval = security_task_setscheduler(p, policy, &lp);
2836         if (retval)
2837                 goto out_unlock;
2838
2839         array = p->array;
2840         if (array)
2841                 deactivate_task(p, task_rq(p));
2842         retval = 0;
2843         oldprio = p->prio;
2844         __setscheduler(p, policy, lp.sched_priority);
2845         if (array) {
2846                 __activate_task(p, task_rq(p));
2847                 /*
2848                  * Reschedule if we are currently running on this runqueue and
2849                  * our priority decreased, or if we are not currently running on
2850                  * this runqueue and our priority is higher than the current's
2851                  */
2852                 if (task_running(rq, p)) {
2853                         if (p->prio > oldprio)
2854                                 resched_task(rq->curr);
2855                 } else if (TASK_PREEMPTS_CURR(p, rq))
2856                         resched_task(rq->curr);
2857         }
2858
2859 out_unlock:
2860         task_rq_unlock(rq, &flags);
2861 out_unlock_tasklist:
2862         read_unlock_irq(&tasklist_lock);
2863
2864 out_nounlock:
2865         return retval;
2866 }
2867
2868 /**
2869  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
2870  * @pid: the pid in question.
2871  * @policy: new policy
2872  * @param: structure containing the new RT priority.
2873  */
2874 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
2875                                        struct sched_param __user *param)
2876 {
2877         return setscheduler(pid, policy, param);
2878 }
2879
2880 /**
2881  * sys_sched_setparam - set/change the RT priority of a thread
2882  * @pid: the pid in question.
2883  * @param: structure containing the new RT priority.
2884  */
2885 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
2886 {
2887         return setscheduler(pid, -1, param);
2888 }
2889
2890 /**
2891  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
2892  * @pid: the pid in question.
2893  */
2894 asmlinkage long sys_sched_getscheduler(pid_t pid)
2895 {
2896         int retval = -EINVAL;
2897         task_t *p;
2898
2899         if (pid < 0)
2900                 goto out_nounlock;
2901
2902         retval = -ESRCH;
2903         read_lock(&tasklist_lock);
2904         p = find_process_by_pid(pid);
2905         if (p) {
2906                 retval = security_task_getscheduler(p);
2907                 if (!retval)
2908                         retval = p->policy;
2909         }
2910         read_unlock(&tasklist_lock);
2911
2912 out_nounlock:
2913         return retval;
2914 }
2915
2916 /**
2917  * sys_sched_getscheduler - get the RT priority of a thread
2918  * @pid: the pid in question.
2919  * @param: structure containing the RT priority.
2920  */
2921 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
2922 {
2923         struct sched_param lp;
2924         int retval = -EINVAL;
2925         task_t *p;
2926
2927         if (!param || pid < 0)
2928                 goto out_nounlock;
2929
2930         read_lock(&tasklist_lock);
2931         p = find_process_by_pid(pid);
2932         retval = -ESRCH;
2933         if (!p)
2934                 goto out_unlock;
2935
2936         retval = security_task_getscheduler(p);
2937         if (retval)
2938                 goto out_unlock;
2939
2940         lp.sched_priority = p->rt_priority;
2941         read_unlock(&tasklist_lock);
2942
2943         /*
2944          * This one might sleep, we cannot do it with a spinlock held ...
2945          */
2946         retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
2947
2948 out_nounlock:
2949         return retval;
2950
2951 out_unlock:
2952         read_unlock(&tasklist_lock);
2953         return retval;
2954 }
2955
2956 /**
2957  * sys_sched_setaffinity - set the cpu affinity of a process
2958  * @pid: pid of the process
2959  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
2960  * @user_mask_ptr: user-space pointer to the new cpu mask
2961  */
2962 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
2963                                       unsigned long __user *user_mask_ptr)
2964 {
2965         cpumask_t new_mask;
2966         int retval;
2967         task_t *p;
2968
2969         if (len < sizeof(new_mask))
2970                 return -EINVAL;
2971
2972         if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
2973                 return -EFAULT;
2974
2975         lock_cpu_hotplug();
2976         read_lock(&tasklist_lock);
2977
2978         p = find_process_by_pid(pid);
2979         if (!p) {
2980                 read_unlock(&tasklist_lock);
2981                 unlock_cpu_hotplug();
2982                 return -ESRCH;
2983         }
2984
2985         /*
2986          * It is not safe to call set_cpus_allowed with the
2987          * tasklist_lock held.  We will bump the task_struct's
2988          * usage count and then drop tasklist_lock.
2989          */
2990         get_task_struct(p);
2991         read_unlock(&tasklist_lock);
2992
2993         retval = -EPERM;
2994         if ((current->euid != p->euid) && (current->euid != p->uid) &&
2995                         !capable(CAP_SYS_NICE))
2996                 goto out_unlock;
2997
2998         retval = set_cpus_allowed(p, new_mask);
2999
3000 out_unlock:
3001         put_task_struct(p);
3002         unlock_cpu_hotplug();
3003         return retval;
3004 }
3005
3006 /**
3007  * sys_sched_getaffinity - get the cpu affinity of a process
3008  * @pid: pid of the process
3009  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3010  * @user_mask_ptr: user-space pointer to hold the current cpu mask
3011  */
3012 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
3013                                       unsigned long __user *user_mask_ptr)
3014 {
3015         unsigned int real_len;
3016         cpumask_t mask;
3017         int retval;
3018         task_t *p;
3019
3020         real_len = sizeof(mask);
3021         if (len < real_len)
3022                 return -EINVAL;
3023
3024         lock_cpu_hotplug();
3025         read_lock(&tasklist_lock);
3026
3027         retval = -ESRCH;
3028         p = find_process_by_pid(pid);
3029         if (!p)
3030                 goto out_unlock;
3031
3032         retval = 0;
3033         cpus_and(mask, p->cpus_allowed, cpu_possible_map);
3034
3035 out_unlock:
3036         read_unlock(&tasklist_lock);
3037         unlock_cpu_hotplug();
3038         if (retval)
3039                 return retval;
3040         if (copy_to_user(user_mask_ptr, &mask, real_len))
3041                 return -EFAULT;
3042         return real_len;
3043 }
3044
3045 /**
3046  * sys_sched_yield - yield the current processor to other threads.
3047  *
3048  * this function yields the current CPU by moving the calling thread
3049  * to the expired array. If there are no other threads running on this
3050  * CPU then this function will return.
3051  */
3052 asmlinkage long sys_sched_yield(void)
3053 {
3054         runqueue_t *rq = this_rq_lock();
3055         prio_array_t *array = current->array;
3056         prio_array_t *target = rq->expired;
3057
3058         /*
3059          * We implement yielding by moving the task into the expired
3060          * queue.
3061          *
3062          * (special rule: RT tasks will just roundrobin in the active
3063          *  array.)
3064          */
3065         if (unlikely(rt_task(current)))
3066                 target = rq->active;
3067
3068         dequeue_task(current, array);
3069         enqueue_task(current, target);
3070
3071         /*
3072          * Since we are going to call schedule() anyway, there's
3073          * no need to preempt or enable interrupts:
3074          */
3075         _raw_spin_unlock(&rq->lock);
3076         preempt_enable_no_resched();
3077
3078         schedule();
3079
3080         return 0;
3081 }
3082
3083 void __sched __cond_resched(void)
3084 {
3085         set_current_state(TASK_RUNNING);
3086         schedule();
3087 }
3088
3089 EXPORT_SYMBOL(__cond_resched);
3090
3091 /**
3092  * yield - yield the current processor to other threads.
3093  *
3094  * this is a shortcut for kernel-space yielding - it marks the
3095  * thread runnable and calls sys_sched_yield().
3096  */
3097 void __sched yield(void)
3098 {
3099         set_current_state(TASK_RUNNING);
3100         sys_sched_yield();
3101 }
3102
3103 EXPORT_SYMBOL(yield);
3104
3105 /*
3106  * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
3107  * that process accounting knows that this is a task in IO wait state.
3108  *
3109  * But don't do that if it is a deliberate, throttling IO wait (this task
3110  * has set its backing_dev_info: the queue against which it should throttle)
3111  */
3112 void __sched io_schedule(void)
3113 {
3114         struct runqueue *rq = this_rq();
3115
3116         atomic_inc(&rq->nr_iowait);
3117         schedule();
3118         atomic_dec(&rq->nr_iowait);
3119 }
3120
3121 EXPORT_SYMBOL(io_schedule);
3122
3123 long __sched io_schedule_timeout(long timeout)
3124 {
3125         struct runqueue *rq = this_rq();
3126         long ret;
3127
3128         atomic_inc(&rq->nr_iowait);
3129         ret = schedule_timeout(timeout);
3130         atomic_dec(&rq->nr_iowait);
3131         return ret;
3132 }
3133
3134 /**
3135  * sys_sched_get_priority_max - return maximum RT priority.
3136  * @policy: scheduling class.
3137  *
3138  * this syscall returns the maximum rt_priority that can be used
3139  * by a given scheduling class.
3140  */
3141 asmlinkage long sys_sched_get_priority_max(int policy)
3142 {
3143         int ret = -EINVAL;
3144
3145         switch (policy) {
3146         case SCHED_FIFO:
3147         case SCHED_RR:
3148                 ret = MAX_USER_RT_PRIO-1;
3149                 break;
3150         case SCHED_NORMAL:
3151                 ret = 0;
3152                 break;
3153         }
3154         return ret;
3155 }
3156
3157 /**
3158  * sys_sched_get_priority_min - return minimum RT priority.
3159  * @policy: scheduling class.
3160  *
3161  * this syscall returns the minimum rt_priority that can be used
3162  * by a given scheduling class.
3163  */
3164 asmlinkage long sys_sched_get_priority_min(int policy)
3165 {
3166         int ret = -EINVAL;
3167
3168         switch (policy) {
3169         case SCHED_FIFO:
3170         case SCHED_RR:
3171                 ret = 1;
3172                 break;
3173         case SCHED_NORMAL:
3174                 ret = 0;
3175         }
3176         return ret;
3177 }
3178
3179 /**
3180  * sys_sched_rr_get_interval - return the default timeslice of a process.
3181  * @pid: pid of the process.
3182  * @interval: userspace pointer to the timeslice value.
3183  *
3184  * this syscall writes the default timeslice value of a given process
3185  * into the user-space timespec buffer. A value of '0' means infinity.
3186  */
3187 asmlinkage
3188 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
3189 {
3190         int retval = -EINVAL;
3191         struct timespec t;
3192         task_t *p;
3193
3194         if (pid < 0)
3195                 goto out_nounlock;
3196
3197         retval = -ESRCH;
3198         read_lock(&tasklist_lock);
3199         p = find_process_by_pid(pid);
3200         if (!p)
3201                 goto out_unlock;
3202
3203         retval = security_task_getscheduler(p);
3204         if (retval)
3205                 goto out_unlock;
3206
3207         jiffies_to_timespec(p->policy & SCHED_FIFO ?
3208                                 0 : task_timeslice(p), &t);
3209         read_unlock(&tasklist_lock);
3210         retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
3211 out_nounlock:
3212         return retval;
3213 out_unlock:
3214         read_unlock(&tasklist_lock);
3215         return retval;
3216 }
3217
3218 static inline struct task_struct *eldest_child(struct task_struct *p)
3219 {
3220         if (list_empty(&p->children)) return NULL;
3221         return list_entry(p->children.next,struct task_struct,sibling);
3222 }
3223
3224 static inline struct task_struct *older_sibling(struct task_struct *p)
3225 {
3226         if (p->sibling.prev==&p->parent->children) return NULL;
3227         return list_entry(p->sibling.prev,struct task_struct,sibling);
3228 }
3229
3230 static inline struct task_struct *younger_sibling(struct task_struct *p)
3231 {
3232         if (p->sibling.next==&p->parent->children) return NULL;
3233         return list_entry(p->sibling.next,struct task_struct,sibling);
3234 }
3235
3236 static void show_task(task_t * p)
3237 {
3238         task_t *relative;
3239         unsigned state;
3240         unsigned long free = 0;
3241         static const char *stat_nam[] = { "R", "S", "D", "T", "Z", "W" };
3242
3243         printk("%-13.13s ", p->comm);
3244         state = p->state ? __ffs(p->state) + 1 : 0;
3245         if (state < ARRAY_SIZE(stat_nam))
3246                 printk(stat_nam[state]);
3247         else
3248                 printk("?");
3249 #if (BITS_PER_LONG == 32)
3250         if (state == TASK_RUNNING)
3251                 printk(" running ");
3252         else
3253                 printk(" %08lX ", thread_saved_pc(p));
3254 #else
3255         if (state == TASK_RUNNING)
3256                 printk("  running task   ");
3257         else
3258                 printk(" %016lx ", thread_saved_pc(p));
3259 #endif
3260 #ifdef CONFIG_DEBUG_STACK_USAGE
3261         {
3262                 unsigned long * n = (unsigned long *) (p->thread_info+1);
3263                 while (!*n)
3264                         n++;
3265                 free = (unsigned long) n - (unsigned long)(p->thread_info+1);
3266         }
3267 #endif
3268         printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
3269         if ((relative = eldest_child(p)))
3270                 printk("%5d ", relative->pid);
3271         else
3272                 printk("      ");
3273         if ((relative = younger_sibling(p)))
3274                 printk("%7d", relative->pid);
3275         else
3276                 printk("       ");
3277         if ((relative = older_sibling(p)))
3278                 printk(" %5d", relative->pid);
3279         else
3280                 printk("      ");
3281         if (!p->mm)
3282                 printk(" (L-TLB)\n");
3283         else
3284                 printk(" (NOTLB)\n");
3285
3286         if (state != TASK_RUNNING)
3287                 show_stack(p, NULL);
3288 }
3289
3290 void show_state(void)
3291 {
3292         task_t *g, *p;
3293
3294 #if (BITS_PER_LONG == 32)
3295         printk("\n"
3296                "                                               sibling\n");
3297         printk("  task             PC      pid father child younger older\n");
3298 #else
3299         printk("\n"
3300                "                                                       sibling\n");
3301         printk("  task                 PC          pid father child younger older\n");
3302 #endif
3303         read_lock(&tasklist_lock);
3304         do_each_thread(g, p) {
3305                 /*
3306                  * reset the NMI-timeout, listing all files on a slow
3307                  * console might take alot of time:
3308                  */
3309                 touch_nmi_watchdog();
3310                 show_task(p);
3311         } while_each_thread(g, p);
3312
3313         read_unlock(&tasklist_lock);
3314 }
3315
3316 void __devinit init_idle(task_t *idle, int cpu)
3317 {
3318         runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
3319         unsigned long flags;
3320
3321         local_irq_save(flags);
3322         double_rq_lock(idle_rq, rq);
3323
3324         idle_rq->curr = idle_rq->idle = idle;
3325         deactivate_task(idle, rq);
3326         idle->array = NULL;
3327         idle->prio = MAX_PRIO;
3328         idle->state = TASK_RUNNING;
3329         set_task_cpu(idle, cpu);
3330         double_rq_unlock(idle_rq, rq);
3331         set_tsk_need_resched(idle);
3332         local_irq_restore(flags);
3333
3334         /* Set the preempt count _outside_ the spinlocks! */
3335 #ifdef CONFIG_PREEMPT
3336         idle->thread_info->preempt_count = (idle->lock_depth >= 0);
3337 #else
3338         idle->thread_info->preempt_count = 0;
3339 #endif
3340 }
3341
3342 /*
3343  * In a system that switches off the HZ timer nohz_cpu_mask
3344  * indicates which cpus entered this state. This is used
3345  * in the rcu update to wait only for active cpus. For system
3346  * which do not switch off the HZ timer nohz_cpu_mask should
3347  * always be CPU_MASK_NONE.
3348  */
3349 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
3350
3351 #ifdef CONFIG_SMP
3352 /*
3353  * This is how migration works:
3354  *
3355  * 1) we queue a migration_req_t structure in the source CPU's
3356  *    runqueue and wake up that CPU's migration thread.
3357  * 2) we down() the locked semaphore => thread blocks.
3358  * 3) migration thread wakes up (implicitly it forces the migrated
3359  *    thread off the CPU)
3360  * 4) it gets the migration request and checks whether the migrated
3361  *    task is still in the wrong runqueue.
3362  * 5) if it's in the wrong runqueue then the migration thread removes
3363  *    it and puts it into the right queue.
3364  * 6) migration thread up()s the semaphore.
3365  * 7) we wake up and the migration is done.
3366  */
3367
3368 /*
3369  * Change a given task's CPU affinity. Migrate the thread to a
3370  * proper CPU and schedule it away if the CPU it's executing on
3371  * is removed from the allowed bitmask.
3372  *
3373  * NOTE: the caller must have a valid reference to the task, the
3374  * task must not exit() & deallocate itself prematurely.  The
3375  * call is not atomic; no spinlocks may be held.
3376  */
3377 int set_cpus_allowed(task_t *p, cpumask_t new_mask)
3378 {
3379         unsigned long flags;
3380         int ret = 0;
3381         migration_req_t req;
3382         runqueue_t *rq;
3383
3384         rq = task_rq_lock(p, &flags);
3385         if (any_online_cpu(new_mask) == NR_CPUS) {
3386                 ret = -EINVAL;
3387                 goto out;
3388         }
3389
3390         p->cpus_allowed = new_mask;
3391         /* Can the task run on the task's current CPU? If so, we're done */
3392         if (cpu_isset(task_cpu(p), new_mask))
3393                 goto out;
3394
3395         if (migrate_task(p, any_online_cpu(new_mask), &req)) {
3396                 /* Need help from migration thread: drop lock and wait. */
3397                 task_rq_unlock(rq, &flags);
3398                 wake_up_process(rq->migration_thread);
3399                 wait_for_completion(&req.done);
3400                 return 0;
3401         }
3402 out:
3403         task_rq_unlock(rq, &flags);
3404         return ret;
3405 }
3406
3407 EXPORT_SYMBOL_GPL(set_cpus_allowed);
3408
3409 /*
3410  * Move (not current) task off this cpu, onto dest cpu.  We're doing
3411  * this because either it can't run here any more (set_cpus_allowed()
3412  * away from this CPU, or CPU going down), or because we're
3413  * attempting to rebalance this task on exec (sched_balance_exec).
3414  *
3415  * So we race with normal scheduler movements, but that's OK, as long
3416  * as the task is no longer on this CPU.
3417  */
3418 static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
3419 {
3420         runqueue_t *rq_dest, *rq_src;
3421
3422         if (unlikely(cpu_is_offline(dest_cpu)))
3423                 return;
3424
3425         rq_src  = cpu_rq(src_cpu);
3426         rq_dest = cpu_rq(dest_cpu);
3427
3428         double_rq_lock(rq_src, rq_dest);
3429         /* Already moved. */
3430         if (task_cpu(p) != src_cpu)
3431                 goto out;
3432         /* Affinity changed (again). */
3433         if (!cpu_isset(dest_cpu, p->cpus_allowed))
3434                 goto out;
3435
3436         set_task_cpu(p, dest_cpu);
3437         if (p->array) {
3438                 /*
3439                  * Sync timestamp with rq_dest's before activating.
3440                  * The same thing could be achieved by doing this step
3441                  * afterwards, and pretending it was a local activate.
3442                  * This way is cleaner and logically correct.
3443                  */
3444                 p->timestamp = p->timestamp - rq_src->timestamp_last_tick
3445                                 + rq_dest->timestamp_last_tick;
3446                 deactivate_task(p, rq_src);
3447                 activate_task(p, rq_dest, 0);
3448                 if (TASK_PREEMPTS_CURR(p, rq_dest))
3449                         resched_task(rq_dest->curr);
3450         }
3451
3452 out:
3453         double_rq_unlock(rq_src, rq_dest);
3454 }
3455
3456 /*
3457  * migration_thread - this is a highprio system thread that performs
3458  * thread migration by bumping thread off CPU then 'pushing' onto
3459  * another runqueue.
3460  */
3461 static int migration_thread(void * data)
3462 {
3463         runqueue_t *rq;
3464         int cpu = (long)data;
3465
3466         rq = cpu_rq(cpu);
3467         BUG_ON(rq->migration_thread != current);
3468
3469         set_current_state(TASK_INTERRUPTIBLE);
3470         while (!kthread_should_stop()) {
3471                 struct list_head *head;
3472                 migration_req_t *req;
3473
3474                 if (current->flags & PF_FREEZE)
3475                         refrigerator(PF_FREEZE);
3476
3477                 spin_lock_irq(&rq->lock);
3478
3479                 if (cpu_is_offline(cpu)) {
3480                         spin_unlock_irq(&rq->lock);
3481                         goto wait_to_die;
3482                 }
3483
3484                 if (rq->active_balance) {
3485                         active_load_balance(rq, cpu);
3486                         rq->active_balance = 0;
3487                 }
3488
3489                 head = &rq->migration_queue;
3490
3491                 if (list_empty(head)) {
3492                         spin_unlock_irq(&rq->lock);
3493                         schedule();
3494                         set_current_state(TASK_INTERRUPTIBLE);
3495                         continue;
3496                 }
3497                 req = list_entry(head->next, migration_req_t, list);
3498                 list_del_init(head->next);
3499
3500                 if (req->type == REQ_MOVE_TASK) {
3501                         spin_unlock(&rq->lock);
3502                         __migrate_task(req->task, smp_processor_id(),
3503                                         req->dest_cpu);
3504                         local_irq_enable();
3505                 } else if (req->type == REQ_SET_DOMAIN) {
3506                         rq->sd = req->sd;
3507                         spin_unlock_irq(&rq->lock);
3508                 } else {
3509                         spin_unlock_irq(&rq->lock);
3510                         WARN_ON(1);
3511                 }
3512
3513                 complete(&req->done);
3514         }
3515         __set_current_state(TASK_RUNNING);
3516         return 0;
3517
3518 wait_to_die:
3519         /* Wait for kthread_stop */
3520         set_current_state(TASK_INTERRUPTIBLE);
3521         while (!kthread_should_stop()) {
3522                 schedule();
3523                 set_current_state(TASK_INTERRUPTIBLE);
3524         }
3525         __set_current_state(TASK_RUNNING);
3526         return 0;
3527 }
3528
3529 #ifdef CONFIG_HOTPLUG_CPU
3530 /* migrate_all_tasks - function to migrate all tasks from the dead cpu.  */
3531 static void migrate_all_tasks(int src_cpu)
3532 {
3533         struct task_struct *tsk, *t;
3534         int dest_cpu;
3535         unsigned int node;
3536
3537         write_lock_irq(&tasklist_lock);
3538
3539         /* watch out for per node tasks, let's stay on this node */
3540         node = cpu_to_node(src_cpu);
3541
3542         do_each_thread(t, tsk) {
3543                 cpumask_t mask;
3544                 if (tsk == current)
3545                         continue;
3546
3547                 if (task_cpu(tsk) != src_cpu)
3548                         continue;
3549
3550                 /* Figure out where this task should go (attempting to
3551                  * keep it on-node), and check if it can be migrated
3552                  * as-is.  NOTE that kernel threads bound to more than
3553                  * one online cpu will be migrated. */
3554                 mask = node_to_cpumask(node);
3555                 cpus_and(mask, mask, tsk->cpus_allowed);
3556                 dest_cpu = any_online_cpu(mask);
3557                 if (dest_cpu == NR_CPUS)
3558                         dest_cpu = any_online_cpu(tsk->cpus_allowed);
3559                 if (dest_cpu == NR_CPUS) {
3560                         cpus_clear(tsk->cpus_allowed);
3561                         cpus_complement(tsk->cpus_allowed);
3562                         dest_cpu = any_online_cpu(tsk->cpus_allowed);
3563
3564                         /* Don't tell them about moving exiting tasks
3565                            or kernel threads (both mm NULL), since
3566                            they never leave kernel. */
3567                         if (tsk->mm && printk_ratelimit())
3568                                 printk(KERN_INFO "process %d (%s) no "
3569                                        "longer affine to cpu%d\n",
3570                                        tsk->pid, tsk->comm, src_cpu);
3571                 }
3572
3573                 __migrate_task(tsk, src_cpu, dest_cpu);
3574         } while_each_thread(t, tsk);
3575
3576         write_unlock_irq(&tasklist_lock);
3577 }
3578
3579 /* Schedules idle task to be the next runnable task on current CPU.
3580  * It does so by boosting its priority to highest possible and adding it to
3581  * the _front_ of runqueue. Used by CPU offline code.
3582  */
3583 void sched_idle_next(void)
3584 {
3585         int cpu = smp_processor_id();
3586         runqueue_t *rq = this_rq();
3587         struct task_struct *p = rq->idle;
3588         unsigned long flags;
3589
3590         /* cpu has to be offline */
3591         BUG_ON(cpu_online(cpu));
3592
3593         /* Strictly not necessary since rest of the CPUs are stopped by now
3594          * and interrupts disabled on current cpu.
3595          */
3596         spin_lock_irqsave(&rq->lock, flags);
3597
3598         __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
3599         /* Add idle task to _front_ of it's priority queue */
3600         __activate_idle_task(p, rq);
3601
3602         spin_unlock_irqrestore(&rq->lock, flags);
3603 }
3604 #endif /* CONFIG_HOTPLUG_CPU */
3605
3606 /*
3607  * migration_call - callback that gets triggered when a CPU is added.
3608  * Here we can start up the necessary migration thread for the new CPU.
3609  */
3610 static int migration_call(struct notifier_block *nfb, unsigned long action,
3611                           void *hcpu)
3612 {
3613         int cpu = (long)hcpu;
3614         struct task_struct *p;
3615         struct runqueue *rq;
3616         unsigned long flags;
3617
3618         switch (action) {
3619         case CPU_UP_PREPARE:
3620                 p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
3621                 if (IS_ERR(p))
3622                         return NOTIFY_BAD;
3623                 kthread_bind(p, cpu);
3624                 /* Must be high prio: stop_machine expects to yield to it. */
3625                 rq = task_rq_lock(p, &flags);
3626                 __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
3627                 task_rq_unlock(rq, &flags);
3628                 cpu_rq(cpu)->migration_thread = p;
3629                 break;
3630         case CPU_ONLINE:
3631                 /* Strictly unneccessary, as first user will wake it. */
3632                 wake_up_process(cpu_rq(cpu)->migration_thread);
3633                 break;
3634 #ifdef CONFIG_HOTPLUG_CPU
3635         case CPU_UP_CANCELED:
3636                 /* Unbind it from offline cpu so it can run.  Fall thru. */
3637                 kthread_bind(cpu_rq(cpu)->migration_thread,smp_processor_id());
3638                 kthread_stop(cpu_rq(cpu)->migration_thread);
3639                 cpu_rq(cpu)->migration_thread = NULL;
3640                 break;
3641         case CPU_DEAD:
3642                 migrate_all_tasks(cpu);
3643                 rq = cpu_rq(cpu);
3644                 kthread_stop(rq->migration_thread);
3645                 rq->migration_thread = NULL;
3646                 /* Idle task back to normal (off runqueue, low prio) */
3647                 rq = task_rq_lock(rq->idle, &flags);
3648                 deactivate_task(rq->idle, rq);
3649                 rq->idle->static_prio = MAX_PRIO;
3650                 __setscheduler(rq->idle, SCHED_NORMAL, 0);
3651                 task_rq_unlock(rq, &flags);
3652                 BUG_ON(rq->nr_running != 0);
3653
3654                 /* No need to migrate the tasks: it was best-effort if
3655                  * they didn't do lock_cpu_hotplug().  Just wake up
3656                  * the requestors. */
3657                 spin_lock_irq(&rq->lock);
3658                 while (!list_empty(&rq->migration_queue)) {
3659                         migration_req_t *req;
3660                         req = list_entry(rq->migration_queue.next,
3661                                          migration_req_t, list);
3662                         BUG_ON(req->type != REQ_MOVE_TASK);
3663                         list_del_init(&req->list);
3664                         complete(&req->done);
3665                 }
3666                 spin_unlock_irq(&rq->lock);
3667                 break;
3668 #endif
3669         }
3670         return NOTIFY_OK;
3671 }
3672
3673 /* Register at highest priority so that task migration (migrate_all_tasks)
3674  * happens before everything else.
3675  */
3676 static struct notifier_block __devinitdata migration_notifier = {
3677         .notifier_call = migration_call,
3678         .priority = 10
3679 };
3680
3681 int __init migration_init(void)
3682 {
3683         void *cpu = (void *)(long)smp_processor_id();
3684         /* Start one for boot CPU. */
3685         migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
3686         migration_call(&migration_notifier, CPU_ONLINE, cpu);
3687         register_cpu_notifier(&migration_notifier);
3688         return 0;
3689 }
3690 #endif
3691
3692 /*
3693  * The 'big kernel lock'
3694  *
3695  * This spinlock is taken and released recursively by lock_kernel()
3696  * and unlock_kernel().  It is transparently dropped and reaquired
3697  * over schedule().  It is used to protect legacy code that hasn't
3698  * been migrated to a proper locking design yet.
3699  *
3700  * Don't use in new code.
3701  *
3702  * Note: spinlock debugging needs this even on !CONFIG_SMP.
3703  */
3704 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
3705 EXPORT_SYMBOL(kernel_flag);
3706
3707 #ifdef CONFIG_SMP
3708 /* Attach the domain 'sd' to 'cpu' as its base domain */
3709 void cpu_attach_domain(struct sched_domain *sd, int cpu)
3710 {
3711         migration_req_t req;
3712         unsigned long flags;
3713         runqueue_t *rq = cpu_rq(cpu);
3714         int local = 1;
3715
3716         lock_cpu_hotplug();
3717
3718         spin_lock_irqsave(&rq->lock, flags);
3719
3720         if (cpu == smp_processor_id() || !cpu_online(cpu)) {
3721                 rq->sd = sd;
3722         } else {
3723                 init_completion(&req.done);
3724                 req.type = REQ_SET_DOMAIN;
3725                 req.sd = sd;
3726                 list_add(&req.list, &rq->migration_queue);
3727                 local = 0;
3728         }
3729
3730         spin_unlock_irqrestore(&rq->lock, flags);
3731
3732         if (!local) {
3733                 wake_up_process(rq->migration_thread);
3734                 wait_for_completion(&req.done);
3735         }
3736
3737         unlock_cpu_hotplug();
3738 }
3739
3740 #ifdef ARCH_HAS_SCHED_DOMAIN
3741 extern void __init arch_init_sched_domains(void);
3742 #else
3743 static struct sched_group sched_group_cpus[NR_CPUS];
3744 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
3745 #ifdef CONFIG_NUMA
3746 static struct sched_group sched_group_nodes[MAX_NUMNODES];
3747 static DEFINE_PER_CPU(struct sched_domain, node_domains);
3748 static void __init arch_init_sched_domains(void)
3749 {
3750         int i;
3751         struct sched_group *first_node = NULL, *last_node = NULL;
3752
3753         /* Set up domains */
3754         for_each_cpu(i) {
3755                 int node = cpu_to_node(i);
3756                 cpumask_t nodemask = node_to_cpumask(node);
3757                 struct sched_domain *node_sd = &per_cpu(node_domains, i);
3758                 struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i);
3759
3760                 *node_sd = SD_NODE_INIT;
3761                 node_sd->span = cpu_possible_map;
3762                 node_sd->groups = &sched_group_nodes[cpu_to_node(i)];
3763
3764                 *cpu_sd = SD_CPU_INIT;
3765                 cpus_and(cpu_sd->span, nodemask, cpu_possible_map);
3766                 cpu_sd->groups = &sched_group_cpus[i];
3767                 cpu_sd->parent = node_sd;
3768         }
3769
3770         /* Set up groups */
3771         for (i = 0; i < MAX_NUMNODES; i++) {
3772                 cpumask_t tmp = node_to_cpumask(i);
3773                 cpumask_t nodemask;
3774                 struct sched_group *first_cpu = NULL, *last_cpu = NULL;
3775                 struct sched_group *node = &sched_group_nodes[i];
3776                 int j;
3777
3778                 cpus_and(nodemask, tmp, cpu_possible_map);
3779
3780                 if (cpus_empty(nodemask))
3781                         continue;
3782
3783                 node->cpumask = nodemask;
3784                 node->cpu_power = SCHED_LOAD_SCALE * cpus_weight(node->cpumask);
3785
3786                 for_each_cpu_mask(j, node->cpumask) {
3787                         struct sched_group *cpu = &sched_group_cpus[j];
3788
3789                         cpus_clear(cpu->cpumask);
3790                         cpu_set(j, cpu->cpumask);
3791                         cpu->cpu_power = SCHED_LOAD_SCALE;
3792
3793                         if (!first_cpu)
3794                                 first_cpu = cpu;
3795                         if (last_cpu)
3796                                 last_cpu->next = cpu;
3797                         last_cpu = cpu;
3798                 }
3799                 last_cpu->next = first_cpu;
3800
3801                 if (!first_node)
3802                         first_node = node;
3803                 if (last_node)
3804                         last_node->next = node;
3805                 last_node = node;
3806         }
3807         last_node->next = first_node;
3808
3809         mb();
3810         for_each_cpu(i) {
3811                 struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i);
3812                 cpu_attach_domain(cpu_sd, i);
3813         }
3814 }
3815
3816 #else /* !CONFIG_NUMA */
3817 static void __init arch_init_sched_domains(void)
3818 {
3819         int i;
3820         struct sched_group *first_cpu = NULL, *last_cpu = NULL;
3821
3822         /* Set up domains */
3823         for_each_cpu(i) {
3824                 struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i);
3825
3826                 *cpu_sd = SD_CPU_INIT;
3827                 cpu_sd->span = cpu_possible_map;
3828                 cpu_sd->groups = &sched_group_cpus[i];
3829         }
3830
3831         /* Set up CPU groups */
3832         for_each_cpu_mask(i, cpu_possible_map) {
3833                 struct sched_group *cpu = &sched_group_cpus[i];
3834
3835                 cpus_clear(cpu->cpumask);
3836                 cpu_set(i, cpu->cpumask);
3837                 cpu->cpu_power = SCHED_LOAD_SCALE;
3838
3839                 if (!first_cpu)
3840                         first_cpu = cpu;
3841                 if (last_cpu)
3842                         last_cpu->next = cpu;
3843                 last_cpu = cpu;
3844         }
3845         last_cpu->next = first_cpu;
3846
3847         mb(); /* domains were modified outside the lock */
3848         for_each_cpu(i) {
3849                 struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i);
3850                 cpu_attach_domain(cpu_sd, i);
3851         }
3852 }
3853
3854 #endif /* CONFIG_NUMA */
3855 #endif /* ARCH_HAS_SCHED_DOMAIN */
3856
3857 #define SCHED_DOMAIN_DEBUG
3858 #ifdef SCHED_DOMAIN_DEBUG
3859 void sched_domain_debug(void)
3860 {
3861         int i;
3862
3863         for_each_cpu(i) {
3864                 runqueue_t *rq = cpu_rq(i);
3865                 struct sched_domain *sd;
3866                 int level = 0;
3867
3868                 sd = rq->sd;
3869
3870                 printk(KERN_DEBUG "CPU%d: %s\n",
3871                                 i, (cpu_online(i) ? " online" : "offline"));
3872
3873                 do {
3874                         int j;
3875                         char str[NR_CPUS];
3876                         struct sched_group *group = sd->groups;
3877                         cpumask_t groupmask, tmp;
3878
3879                         cpumask_scnprintf(str, NR_CPUS, sd->span);
3880                         cpus_clear(groupmask);
3881
3882                         printk(KERN_DEBUG);
3883                         for (j = 0; j < level + 1; j++)
3884                                 printk(" ");
3885                         printk("domain %d: span %s\n", level, str);
3886
3887                         if (!cpu_isset(i, sd->span))
3888                                 printk(KERN_DEBUG "ERROR domain->span does not contain CPU%d\n", i);
3889                         if (!cpu_isset(i, group->cpumask))
3890                                 printk(KERN_DEBUG "ERROR domain->groups does not contain CPU%d\n", i);
3891                         if (!group->cpu_power)
3892                                 printk(KERN_DEBUG "ERROR domain->cpu_power not set\n");
3893
3894                         printk(KERN_DEBUG);
3895                         for (j = 0; j < level + 2; j++)
3896                                 printk(" ");
3897                         printk("groups:");
3898                         do {
3899                                 if (!group) {
3900                                         printk(" ERROR: NULL");
3901                                         break;
3902                                 }
3903
3904                                 if (!cpus_weight(group->cpumask))
3905                                         printk(" ERROR empty group:");
3906
3907                                 cpus_and(tmp, groupmask, group->cpumask);
3908                                 if (cpus_weight(tmp) > 0)
3909                                         printk(" ERROR repeated CPUs:");
3910
3911                                 cpus_or(groupmask, groupmask, group->cpumask);
3912
3913                                 cpumask_scnprintf(str, NR_CPUS, group->cpumask);
3914                                 printk(" %s", str);
3915
3916                                 group = group->next;
3917                         } while (group != sd->groups);
3918                         printk("\n");
3919
3920                         if (!cpus_equal(sd->span, groupmask))
3921                                 printk(KERN_DEBUG "ERROR groups don't span domain->span\n");
3922
3923                         level++;
3924                         sd = sd->parent;
3925
3926                         if (sd) {
3927                                 cpus_and(tmp, groupmask, sd->span);
3928                                 if (!cpus_equal(tmp, groupmask))
3929                                         printk(KERN_DEBUG "ERROR parent span is not a superset of domain->span\n");
3930                         }
3931
3932                 } while (sd);
3933         }
3934 }
3935 #else
3936 #define sched_domain_debug() {}
3937 #endif
3938
3939 void __init sched_init_smp(void)
3940 {
3941         arch_init_sched_domains();
3942         sched_domain_debug();
3943 }
3944 #else
3945 void __init sched_init_smp(void)
3946 {
3947 }
3948 #endif /* CONFIG_SMP */
3949
3950 int in_sched_functions(unsigned long addr)
3951 {
3952         /* Linker adds these: start and end of __sched functions */
3953         extern char __sched_text_start[], __sched_text_end[];
3954         return addr >= (unsigned long)__sched_text_start
3955                 && addr < (unsigned long)__sched_text_end;
3956 }
3957
3958 void __init sched_init(void)
3959 {
3960         runqueue_t *rq;
3961         int i, j, k;
3962
3963 #ifdef CONFIG_SMP
3964         /* Set up an initial dummy domain for early boot */
3965         static struct sched_domain sched_domain_init;
3966         static struct sched_group sched_group_init;
3967         cpumask_t cpu_mask_all = CPU_MASK_ALL;
3968
3969         memset(&sched_domain_init, 0, sizeof(struct sched_domain));
3970         sched_domain_init.span = cpu_mask_all;
3971         sched_domain_init.groups = &sched_group_init;
3972         sched_domain_init.last_balance = jiffies;
3973         sched_domain_init.balance_interval = INT_MAX; /* Don't balance */
3974
3975         memset(&sched_group_init, 0, sizeof(struct sched_group));
3976         sched_group_init.cpumask = cpu_mask_all;
3977         sched_group_init.next = &sched_group_init;
3978         sched_group_init.cpu_power = SCHED_LOAD_SCALE;
3979 #endif
3980
3981         for (i = 0; i < NR_CPUS; i++) {
3982                 prio_array_t *array;
3983
3984                 rq = cpu_rq(i);
3985                 spin_lock_init(&rq->lock);
3986                 rq->active = rq->arrays;
3987                 rq->expired = rq->arrays + 1;
3988                 rq->best_expired_prio = MAX_PRIO;
3989
3990 #ifdef CONFIG_SMP
3991                 rq->sd = &sched_domain_init;
3992                 rq->cpu_load = 0;
3993                 rq->active_balance = 0;
3994                 rq->push_cpu = 0;
3995                 rq->migration_thread = NULL;
3996                 INIT_LIST_HEAD(&rq->migration_queue);
3997 #endif
3998                 INIT_LIST_HEAD(&rq->hold_queue);
3999                 atomic_set(&rq->nr_iowait, 0);
4000
4001                 for (j = 0; j < 2; j++) {
4002                         array = rq->arrays + j;
4003                         for (k = 0; k < MAX_PRIO; k++) {
4004                                 INIT_LIST_HEAD(array->queue + k);
4005                                 __clear_bit(k, array->bitmap);
4006                         }
4007                         // delimiter for bitsearch
4008                         __set_bit(MAX_PRIO, array->bitmap);
4009                 }
4010         }
4011         /*
4012          * We have to do a little magic to get the first
4013          * thread right in SMP mode.
4014          */
4015         rq = this_rq();
4016         rq->curr = current;
4017         rq->idle = current;
4018         set_task_cpu(current, smp_processor_id());
4019         wake_up_forked_process(current);
4020
4021         /*
4022          * The boot idle thread does lazy MMU switching as well:
4023          */
4024         atomic_inc(&init_mm.mm_count);
4025         enter_lazy_tlb(&init_mm, current);
4026 }
4027
4028 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4029 void __might_sleep(char *file, int line)
4030 {
4031 #if defined(in_atomic)
4032         static unsigned long prev_jiffy;        /* ratelimiting */
4033
4034         if ((in_atomic() || irqs_disabled()) &&
4035             system_state == SYSTEM_RUNNING) {
4036                 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
4037                         return;
4038                 prev_jiffy = jiffies;
4039                 printk(KERN_ERR "Debug: sleeping function called from invalid"
4040                                 " context at %s:%d\n", file, line);
4041                 printk("in_atomic():%d, irqs_disabled():%d\n",
4042                         in_atomic(), irqs_disabled());
4043                 dump_stack();
4044         }
4045 #endif
4046 }
4047 EXPORT_SYMBOL(__might_sleep);
4048 #endif
4049
4050
4051 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
4052 /*
4053  * This could be a long-held lock.  If another CPU holds it for a long time,
4054  * and that CPU is not asked to reschedule then *this* CPU will spin on the
4055  * lock for a long time, even if *this* CPU is asked to reschedule.
4056  *
4057  * So what we do here, in the slow (contended) path is to spin on the lock by
4058  * hand while permitting preemption.
4059  *
4060  * Called inside preempt_disable().
4061  */
4062 void __sched __preempt_spin_lock(spinlock_t *lock)
4063 {
4064         if (preempt_count() > 1) {
4065                 _raw_spin_lock(lock);
4066                 return;
4067         }
4068         do {
4069                 preempt_enable();
4070                 while (spin_is_locked(lock))
4071                         cpu_relax();
4072                 preempt_disable();
4073         } while (!_raw_spin_trylock(lock));
4074 }
4075
4076 EXPORT_SYMBOL(__preempt_spin_lock);
4077
4078 void __sched __preempt_write_lock(rwlock_t *lock)
4079 {
4080         if (preempt_count() > 1) {
4081                 _raw_write_lock(lock);
4082                 return;
4083         }
4084
4085         do {
4086                 preempt_enable();
4087                 while (rwlock_is_locked(lock))
4088                         cpu_relax();
4089                 preempt_disable();
4090         } while (!_raw_write_trylock(lock));
4091 }
4092
4093 EXPORT_SYMBOL(__preempt_write_lock);
4094 #endif /* defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) */