linux-2.6.10: kernel/sched.c (linux-2.6.git)
1 /*
2  *  kernel/sched.c
3  *
4  *  Kernel scheduler and related syscalls
5  *
6  *  Copyright (C) 1991-2002  Linus Torvalds
7  *
8  *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9  *              make semaphores SMP safe
10  *  1998-11-19  Implemented schedule_timeout() and related stuff
11  *              by Andrea Arcangeli
12  *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
13  *              hybrid priority-list and round-robin design with
14  *              an array-switch method of distributing timeslices
15  *              and per-CPU runqueues.  Cleanups and useful suggestions
16  *              by Davide Libenzi, preemptible kernel bits by Robert Love.
17  *  2003-09-03  Interactivity tuning by Con Kolivas.
18  *  2004-04-02  Scheduler domains code by Nick Piggin
19  */
20
21 #include <linux/mm.h>
22 #include <linux/module.h>
23 #include <linux/nmi.h>
24 #include <linux/init.h>
25 #include <asm/uaccess.h>
26 #include <linux/highmem.h>
27 #include <linux/smp_lock.h>
28 #include <asm/mmu_context.h>
29 #include <linux/interrupt.h>
30 #include <linux/completion.h>
31 #include <linux/kernel_stat.h>
32 #include <linux/security.h>
33 #include <linux/notifier.h>
34 #include <linux/profile.h>
35 #include <linux/suspend.h>
36 #include <linux/blkdev.h>
37 #include <linux/delay.h>
38 #include <linux/smp.h>
39 #include <linux/timer.h>
40 #include <linux/rcupdate.h>
41 #include <linux/cpu.h>
42 #include <linux/percpu.h>
43 #include <linux/kthread.h>
44 #include <linux/seq_file.h>
45 #include <linux/syscalls.h>
46 #include <linux/times.h>
47 #include <asm/tlb.h>
48
49 #include <asm/unistd.h>
50
51 #ifdef CONFIG_NUMA
52 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
53 #else
54 #define cpu_to_node_mask(cpu) (cpu_online_map)
55 #endif
56
57 /*
58  * Convert user-nice values [ -20 ... 0 ... 19 ]
59  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
60  * and back.
61  */
62 #define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
63 #define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
64 #define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
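/*
 * For illustration, with the usual MAX_RT_PRIO of 100 and MAX_PRIO of 140
 * these map nice -20 to static priority 100, nice 0 to 120 and nice +19
 * to 139 (== MAX_PRIO-1); PRIO_TO_NICE() is the exact inverse.
 */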
65
66 /*
67  * 'User priority' is the nice value converted to something we
68  * can work with better when scaling various scheduler parameters,
69  * it's a [ 0 ... 39 ] range.
70  */
71 #define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
72 #define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
73 #define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
74
75 /*
76  * Some helpers for converting nanosecond timing to jiffy resolution
77  */
78 #define NS_TO_JIFFIES(TIME)     ((TIME) / (1000000000 / HZ))
79 #define JIFFIES_TO_NS(TIME)     ((TIME) * (1000000000 / HZ))
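/*
 * E.g. with HZ == 1000 one jiffy is 1,000,000 ns, so NS_TO_JIFFIES(5000000)
 * is 5 and JIFFIES_TO_NS(100) is 100,000,000.  Both use integer arithmetic,
 * so sub-jiffy remainders are truncated.
 */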
80
81 /*
82  * These are the 'tuning knobs' of the scheduler:
83  *
84  * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
85  * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
86  * Timeslices get refilled after they expire.
87  */
88 #define MIN_TIMESLICE           max(5 * HZ / 1000, 1)
89 #define DEF_TIMESLICE           (100 * HZ / 1000)
90 #define ON_RUNQUEUE_WEIGHT       30
91 #define CHILD_PENALTY            95
92 #define PARENT_PENALTY          100
93 #define EXIT_WEIGHT               3
94 #define PRIO_BONUS_RATIO         25
95 #define MAX_BONUS               (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
96 #define INTERACTIVE_DELTA         2
97 #define MAX_SLEEP_AVG           (DEF_TIMESLICE * MAX_BONUS)
98 #define STARVATION_LIMIT        (MAX_SLEEP_AVG)
99 #define NS_MAX_SLEEP_AVG        (JIFFIES_TO_NS(MAX_SLEEP_AVG))
100 #define CREDIT_LIMIT            100
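/*
 * With HZ == 1000 and the usual 40-level nice range these work out to:
 * MIN_TIMESLICE = 5 jiffies (5ms), DEF_TIMESLICE = 100 jiffies (100ms),
 * MAX_BONUS = 40 * 25 / 100 = 10, MAX_SLEEP_AVG = 100 * 10 = 1000 jiffies
 * (one second) and NS_MAX_SLEEP_AVG = 1,000,000,000 ns.
 */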
101
102 /*
103  * If a task is 'interactive' then we reinsert it in the active
 104  * array after it has expired its current timeslice. (It will not
 105  * continue to run immediately; it will still round-robin with
 106  * other interactive tasks.)
107  *
108  * This part scales the interactivity limit depending on niceness.
109  *
110  * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
111  * Here are a few examples of different nice levels:
112  *
113  *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
114  *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
115  *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
116  *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
117  *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
118  *
 119  * (the X axis represents the possible -5 ... 0 ... +5 dynamic
 120  *  priority range a task can explore; a value of '1' means the
 121  *  task is rated interactive.)
122  *
 123  * I.e. nice +19 tasks can never get 'interactive' enough to be
 124  * reinserted into the active array, and only heavy CPU-hog nice -20
 125  * tasks will be expired. Default nice 0 tasks are somewhere in between;
 126  * it takes some effort for them to get interactive, but it's not
 127  * too hard.
128  */
129
130 #define CURRENT_BONUS(p) \
131         (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
132                 MAX_SLEEP_AVG)
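/*
 * E.g. (HZ == 1000, MAX_BONUS == 10): a task with 500ms of sleep_avg gets
 * CURRENT_BONUS == 5, one at the 1s cap gets the full bonus of 10.
 */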
133
134 #ifdef CONFIG_SMP
135 #define TIMESLICE_GRANULARITY(p)        (MIN_TIMESLICE * \
136                 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
137                         num_online_cpus())
138 #else
139 #define TIMESLICE_GRANULARITY(p)        (MIN_TIMESLICE * \
140                 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
141 #endif
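/*
 * E.g. (MAX_BONUS == 10, MIN_TIMESLICE == 5ms): a fully interactive task
 * (CURRENT_BONUS == 10) gets a granularity of MIN_TIMESLICE * (1 << 0),
 * i.e. 5ms, while one with CURRENT_BONUS == 6 gets 5ms * (1 << 3) = 40ms
 * (both multiplied by num_online_cpus() on SMP).
 */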
142
143 #define SCALE(v1,v1_max,v2_max) \
144         (v1) * (v2_max) / (v1_max)
145
146 #define DELTA(p) \
147         (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
148
149 #define TASK_INTERACTIVE(p) \
150         ((p)->prio <= (p)->static_prio - DELTA(p))
151
152 #define INTERACTIVE_SLEEP(p) \
153         (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
154                 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
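/*
 * Worked example for a nice 0 task (MAX_BONUS == 10, MAX_SLEEP_AVG == 1000
 * jiffies at HZ == 1000): DELTA(p) == 0 + INTERACTIVE_DELTA == 2, so
 * TASK_INTERACTIVE(p) requires prio <= static_prio - 2, i.e. a dynamic
 * bonus of at least 2 (roughly 700ms of sleep_avg), and INTERACTIVE_SLEEP(p)
 * evaluates to 799 jiffies' worth of nanoseconds (just under 800ms).
 */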
155
156 #define HIGH_CREDIT(p) \
157         ((p)->interactive_credit > CREDIT_LIMIT)
158
159 #define LOW_CREDIT(p) \
160         ((p)->interactive_credit < -CREDIT_LIMIT)
161
162 #define TASK_PREEMPTS_CURR(p, rq) \
163         ((p)->prio < (rq)->curr->prio)
164
165 /*
166  * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
167  * to time slice values: [800ms ... 100ms ... 5ms]
168  *
169  * The higher a thread's priority, the bigger timeslices
170  * it gets during one round of execution. But even the lowest
171  * priority thread gets MIN_TIMESLICE worth of execution time.
172  */
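/*
 * E.g. with HZ == 1000 and the usual MAX_PRIO of 140, task_timeslice()
 * below gives nice -20 a timeslice of 800ms, nice -10 600ms, nice 0 100ms
 * and nice +19 the MIN_TIMESLICE floor of 5ms, matching the range above.
 */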
173
174 #define SCALE_PRIO(x, prio) \
175         max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
176
177 static unsigned int task_timeslice(task_t *p)
178 {
179         if (p->static_prio < NICE_TO_PRIO(0))
180                 return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
181         else
182                 return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
183 }
184 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)       \
185                                 < (long long) (sd)->cache_hot_time)
186
187 /*
188  * These are the runqueue data structures:
189  */
190
191 #define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
192
193 typedef struct runqueue runqueue_t;
194
195 struct prio_array {
196         unsigned int nr_active;
197         unsigned long bitmap[BITMAP_SIZE];
198         struct list_head queue[MAX_PRIO];
199 };
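/*
 * With MAX_PRIO == 140, BITMAP_SIZE works out to 5 longs on 32-bit and
 * 3 longs on 64-bit: at least MAX_PRIO+1 bits, one per priority level
 * plus room for a delimiter bit at MAX_PRIO used as a sentinel for the
 * bitmap search.  An enqueued task sits on queue[p->prio] with the
 * matching bitmap bit set, so the highest-priority runnable task can be
 * found in O(1).
 */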
200
201 /*
202  * This is the main, per-CPU runqueue data structure.
203  *
 204  * Locking rule: code that wants to lock multiple runqueues (such as
 205  * the load balancing or the thread migration code) must acquire the
 206  * locks in ascending &runqueue address order.
207  */
208 struct runqueue {
209         spinlock_t lock;
210
211         /*
212          * nr_running and cpu_load should be in the same cacheline because
213          * remote CPUs use both these fields when doing load calculation.
214          */
215         unsigned long nr_running;
216 #ifdef CONFIG_SMP
217         unsigned long cpu_load;
218 #endif
219         unsigned long long nr_switches;
220
221         /*
222          * This is part of a global counter where only the total sum
223          * over all CPUs matters. A task can increase this counter on
224          * one CPU and if it got migrated afterwards it may decrease
225          * it on another CPU. Always updated under the runqueue lock:
226          */
227         unsigned long nr_uninterruptible;
228
229         unsigned long expired_timestamp;
230         unsigned long long timestamp_last_tick;
231         task_t *curr, *idle;
232         struct mm_struct *prev_mm;
233         prio_array_t *active, *expired, arrays[2];
234         int best_expired_prio;
235         atomic_t nr_iowait;
236
237 #ifdef CONFIG_SMP
238         struct sched_domain *sd;
239
240         /* For active balancing */
241         int active_balance;
242         int push_cpu;
243
244         task_t *migration_thread;
245         struct list_head migration_queue;
246 #endif
247
248 #ifdef CONFIG_SCHEDSTATS
249         /* latency stats */
250         struct sched_info rq_sched_info;
251
252         /* sys_sched_yield() stats */
253         unsigned long yld_exp_empty;
254         unsigned long yld_act_empty;
255         unsigned long yld_both_empty;
256         unsigned long yld_cnt;
257
258         /* schedule() stats */
259         unsigned long sched_noswitch;
260         unsigned long sched_switch;
261         unsigned long sched_cnt;
262         unsigned long sched_goidle;
263
264         /* pull_task() stats */
265         unsigned long pt_gained[MAX_IDLE_TYPES];
266         unsigned long pt_lost[MAX_IDLE_TYPES];
267
268         /* active_load_balance() stats */
269         unsigned long alb_cnt;
270         unsigned long alb_lost;
271         unsigned long alb_gained;
272         unsigned long alb_failed;
273
274         /* try_to_wake_up() stats */
275         unsigned long ttwu_cnt;
276         unsigned long ttwu_attempts;
277         unsigned long ttwu_moved;
278
279         /* wake_up_new_task() stats */
280         unsigned long wunt_cnt;
281         unsigned long wunt_moved;
282
283         /* sched_migrate_task() stats */
284         unsigned long smt_cnt;
285
286         /* sched_balance_exec() stats */
287         unsigned long sbe_cnt;
288 #endif
289 };
290
291 static DEFINE_PER_CPU(struct runqueue, runqueues);
292
293 #define for_each_domain(cpu, domain) \
294         for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent)
295
296 #define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
297 #define this_rq()               (&__get_cpu_var(runqueues))
298 #define task_rq(p)              cpu_rq(task_cpu(p))
299 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
300
301 /*
302  * Default context-switch locking:
303  */
304 #ifndef prepare_arch_switch
305 # define prepare_arch_switch(rq, next)  do { } while (0)
306 # define finish_arch_switch(rq, next)   spin_unlock_irq(&(rq)->lock)
307 # define task_running(rq, p)            ((rq)->curr == (p))
308 #endif
309
310 /*
311  * task_rq_lock - lock the runqueue a given task resides on and disable
312  * interrupts.  Note the ordering: we can safely lookup the task_rq without
313  * explicitly disabling preemption.
314  */
315 static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
316         __acquires(rq->lock)
317 {
318         struct runqueue *rq;
319
320 repeat_lock_task:
321         local_irq_save(*flags);
322         rq = task_rq(p);
323         spin_lock(&rq->lock);
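        /*
         * The task may have been migrated to another CPU while we were
         * waiting for the lock; if so, drop this lock and retry against
         * the runqueue it now resides on.
         */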
324         if (unlikely(rq != task_rq(p))) {
325                 spin_unlock_irqrestore(&rq->lock, *flags);
326                 goto repeat_lock_task;
327         }
328         return rq;
329 }
330
331 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
332         __releases(rq->lock)
333 {
334         spin_unlock_irqrestore(&rq->lock, *flags);
335 }
336
337 #ifdef CONFIG_SCHEDSTATS
338 /*
339  * bump this up when changing the output format or the meaning of an existing
340  * format, so that tools can adapt (or abort)
341  */
342 #define SCHEDSTAT_VERSION 10
343
344 static int show_schedstat(struct seq_file *seq, void *v)
345 {
346         int cpu;
347         enum idle_type itype;
348
349         seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
350         seq_printf(seq, "timestamp %lu\n", jiffies);
351         for_each_online_cpu(cpu) {
352                 runqueue_t *rq = cpu_rq(cpu);
353 #ifdef CONFIG_SMP
354                 struct sched_domain *sd;
355                 int dcnt = 0;
356 #endif
357
358                 /* runqueue-specific stats */
359                 seq_printf(seq,
360                     "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu "
361                     "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
362                     cpu, rq->yld_both_empty,
363                     rq->yld_act_empty, rq->yld_exp_empty,
364                     rq->yld_cnt, rq->sched_noswitch,
365                     rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
366                     rq->alb_cnt, rq->alb_gained, rq->alb_lost,
367                     rq->alb_failed,
368                     rq->ttwu_cnt, rq->ttwu_moved, rq->ttwu_attempts,
369                     rq->wunt_cnt, rq->wunt_moved,
370                     rq->smt_cnt, rq->sbe_cnt, rq->rq_sched_info.cpu_time,
371                     rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
372
373                 for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES; itype++)
374                         seq_printf(seq, " %lu %lu", rq->pt_gained[itype],
375                                                     rq->pt_lost[itype]);
376                 seq_printf(seq, "\n");
377
378 #ifdef CONFIG_SMP
379                 /* domain-specific stats */
380                 for_each_domain(cpu, sd) {
381                         char mask_str[NR_CPUS];
382
383                         cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
384                         seq_printf(seq, "domain%d %s", dcnt++, mask_str);
385                         for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
386                                                 itype++) {
387                                 seq_printf(seq, " %lu %lu %lu %lu %lu",
388                                     sd->lb_cnt[itype],
389                                     sd->lb_failed[itype],
390                                     sd->lb_imbalance[itype],
391                                     sd->lb_nobusyq[itype],
392                                     sd->lb_nobusyg[itype]);
393                         }
394                         seq_printf(seq, " %lu %lu %lu %lu\n",
395                             sd->sbe_pushed, sd->sbe_attempts,
396                             sd->ttwu_wake_affine, sd->ttwu_wake_balance);
397                 }
398 #endif
399         }
400         return 0;
401 }
402
403 static int schedstat_open(struct inode *inode, struct file *file)
404 {
405         unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
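        /* roughly one page of seq_file buffer per 32 online CPUs */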
406         char *buf = kmalloc(size, GFP_KERNEL);
407         struct seq_file *m;
408         int res;
409
410         if (!buf)
411                 return -ENOMEM;
412         res = single_open(file, show_schedstat, NULL);
413         if (!res) {
414                 m = file->private_data;
415                 m->buf = buf;
416                 m->size = size;
417         } else
418                 kfree(buf);
419         return res;
420 }
421
422 struct file_operations proc_schedstat_operations = {
423         .open    = schedstat_open,
424         .read    = seq_read,
425         .llseek  = seq_lseek,
426         .release = single_release,
427 };
428
429 # define schedstat_inc(rq, field)       rq->field++;
430 # define schedstat_add(rq, field, amt)  rq->field += amt;
431 #else /* !CONFIG_SCHEDSTATS */
432 # define schedstat_inc(rq, field)       do { } while (0);
433 # define schedstat_add(rq, field, amt)  do { } while (0);
434 #endif
435
436 /*
 437  * this_rq_lock - lock the current CPU's runqueue and disable interrupts.
438  */
439 static runqueue_t *this_rq_lock(void)
440         __acquires(rq->lock)
441 {
442         runqueue_t *rq;
443
444         local_irq_disable();
445         rq = this_rq();
446         spin_lock(&rq->lock);
447
448         return rq;
449 }
450
451 static inline void rq_unlock(runqueue_t *rq)
452         __releases(rq->lock)
453 {
454         spin_unlock_irq(&rq->lock);
455 }
456
457 #ifdef CONFIG_SCHEDSTATS
458 /*
459  * Called when a process is dequeued from the active array and given
460  * the cpu.  We should note that with the exception of interactive
461  * tasks, the expired queue will become the active queue after the active
462  * queue is empty, without explicitly dequeuing and requeuing tasks in the
463  * expired queue.  (Interactive tasks may be requeued directly to the
464  * active queue, thus delaying tasks in the expired queue from running;
465  * see scheduler_tick()).
466  *
467  * This function is only called from sched_info_arrive(), rather than
468  * dequeue_task(). Even though a task may be queued and dequeued multiple
469  * times as it is shuffled about, we're really interested in knowing how
470  * long it was from the *first* time it was queued to the time that it
471  * finally hit a cpu.
472  */
473 static inline void sched_info_dequeued(task_t *t)
474 {
475         t->sched_info.last_queued = 0;
476 }
477
478 /*
479  * Called when a task finally hits the cpu.  We can now calculate how
480  * long it was waiting to run.  We also note when it began so that we
481  * can keep stats on how long its timeslice is.
482  */
483 static inline void sched_info_arrive(task_t *t)
484 {
485         unsigned long now = jiffies, diff = 0;
486         struct runqueue *rq = task_rq(t);
487
488         if (t->sched_info.last_queued)
489                 diff = now - t->sched_info.last_queued;
490         sched_info_dequeued(t);
491         t->sched_info.run_delay += diff;
492         t->sched_info.last_arrival = now;
493         t->sched_info.pcnt++;
494
495         if (!rq)
496                 return;
497
498         rq->rq_sched_info.run_delay += diff;
499         rq->rq_sched_info.pcnt++;
500 }
501
502 /*
503  * Called when a process is queued into either the active or expired
 504  * array.  The time is noted and later used to determine how long the
 505  * task had to wait to reach the cpu.  Since the expired queue will
506  * become the active queue after active queue is empty, without dequeuing
507  * and requeuing any tasks, we are interested in queuing to either. It
508  * is unusual but not impossible for tasks to be dequeued and immediately
509  * requeued in the same or another array: this can happen in sched_yield(),
510  * set_user_nice(), and even load_balance() as it moves tasks from runqueue
511  * to runqueue.
512  *
513  * This function is only called from enqueue_task(), but also only updates
 514  * the timestamp if it is not already set.  It's assumed that
515  * sched_info_dequeued() will clear that stamp when appropriate.
516  */
517 static inline void sched_info_queued(task_t *t)
518 {
519         if (!t->sched_info.last_queued)
520                 t->sched_info.last_queued = jiffies;
521 }
522
523 /*
524  * Called when a process ceases being the active-running process, either
525  * voluntarily or involuntarily.  Now we can calculate how long we ran.
526  */
527 static inline void sched_info_depart(task_t *t)
528 {
529         struct runqueue *rq = task_rq(t);
530         unsigned long diff = jiffies - t->sched_info.last_arrival;
531
532         t->sched_info.cpu_time += diff;
533
534         if (rq)
535                 rq->rq_sched_info.cpu_time += diff;
536 }
537
538 /*
 539  * Called when tasks are switched involuntarily, typically because they
 540  * expired their time slice.  (This may also be called when switching to
 541  * or from the idle task.)  We are only called when prev != next.
542  */
543 static inline void sched_info_switch(task_t *prev, task_t *next)
544 {
545         struct runqueue *rq = task_rq(prev);
546
547         /*
548          * prev now departs the cpu.  It's not interesting to record
549          * stats about how efficient we were at scheduling the idle
550          * process, however.
551          */
552         if (prev != rq->idle)
553                 sched_info_depart(prev);
554
555         if (next != rq->idle)
556                 sched_info_arrive(next);
557 }
558 #else
559 #define sched_info_queued(t)            do { } while (0)
560 #define sched_info_switch(t, next)      do { } while (0)
561 #endif /* CONFIG_SCHEDSTATS */
562
563 /*
564  * Adding/removing a task to/from a priority array:
565  */
566 static void dequeue_task(struct task_struct *p, prio_array_t *array)
567 {
568         array->nr_active--;
569         list_del(&p->run_list);
570         if (list_empty(array->queue + p->prio))
571                 __clear_bit(p->prio, array->bitmap);
572 }
573
574 static void enqueue_task(struct task_struct *p, prio_array_t *array)
575 {
576         sched_info_queued(p);
577         list_add_tail(&p->run_list, array->queue + p->prio);
578         __set_bit(p->prio, array->bitmap);
579         array->nr_active++;
580         p->array = array;
581 }
582
583 /*
584  * Used by the migration code - we pull tasks from the head of the
585  * remote queue so we want these tasks to show up at the head of the
586  * local queue:
587  */
588 static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
589 {
590         list_add(&p->run_list, array->queue + p->prio);
591         __set_bit(p->prio, array->bitmap);
592         array->nr_active++;
593         p->array = array;
594 }
595
596 /*
597  * effective_prio - return the priority that is based on the static
598  * priority but is modified by bonuses/penalties.
599  *
600  * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
601  * into the -5 ... 0 ... +5 bonus/penalty range.
602  *
603  * We use 25% of the full 0...39 priority range so that:
604  *
605  * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
606  * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
607  *
608  * Both properties are important to certain workloads.
609  */
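/*
 * E.g. (HZ == 1000): a nice 0 task (static_prio 120) with 900ms of
 * sleep_avg has CURRENT_BONUS == 9, so it runs at prio 120 - (9 - 5) = 116,
 * while the same task with no recorded sleep drops to 120 - (0 - 5) = 125.
 */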
610 static int effective_prio(task_t *p)
611 {
612         int bonus, prio;
613
614         if (rt_task(p))
615                 return p->prio;
616
617         bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
618
619         prio = p->static_prio - bonus;
620         if (prio < MAX_RT_PRIO)
621                 prio = MAX_RT_PRIO;
622         if (prio > MAX_PRIO-1)
623                 prio = MAX_PRIO-1;
624         return prio;
625 }
626
627 /*
628  * __activate_task - move a task to the runqueue.
629  */
630 static inline void __activate_task(task_t *p, runqueue_t *rq)
631 {
632         enqueue_task(p, rq->active);
633         rq->nr_running++;
634 }
635
636 /*
637  * __activate_idle_task - move idle task to the _front_ of runqueue.
638  */
639 static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
640 {
641         enqueue_task_head(p, rq->active);
642         rq->nr_running++;
643 }
644
645 static void recalc_task_prio(task_t *p, unsigned long long now)
646 {
647         unsigned long long __sleep_time = now - p->timestamp;
648         unsigned long sleep_time;
649
650         if (__sleep_time > NS_MAX_SLEEP_AVG)
651                 sleep_time = NS_MAX_SLEEP_AVG;
652         else
653                 sleep_time = (unsigned long)__sleep_time;
654
655         if (likely(sleep_time > 0)) {
656                 /*
657                  * User tasks that sleep a long time are categorised as
 658                  * idle and are given just enough sleep_avg to keep interactive
 659                  * status, so they stay responsive without suddenly becoming
 660                  * cpu hogs and starving other processes.
661                  */
662                 if (p->mm && p->activated != -1 &&
663                         sleep_time > INTERACTIVE_SLEEP(p)) {
664                                 p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
665                                                 DEF_TIMESLICE);
666                                 if (!HIGH_CREDIT(p))
667                                         p->interactive_credit++;
668                 } else {
669                         /*
670                          * The lower the sleep avg a task has the more
671                          * rapidly it will rise with sleep time.
672                          */
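                        /*
                         * E.g. with MAX_BONUS == 10 a task sitting at
                         * CURRENT_BONUS == 2 has its sleep credited 8x,
                         * while one already at bonus 9 gets it 1:1 (the
                         * '?:' keeps the multiplier at least 1).
                         */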
673                         sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
674
675                         /*
676                          * Tasks with low interactive_credit are limited to
677                          * one timeslice worth of sleep avg bonus.
678                          */
679                         if (LOW_CREDIT(p) &&
680                             sleep_time > JIFFIES_TO_NS(task_timeslice(p)))
681                                 sleep_time = JIFFIES_TO_NS(task_timeslice(p));
682
683                         /*
684                          * Non high_credit tasks waking from uninterruptible
685                          * sleep are limited in their sleep_avg rise as they
686                          * are likely to be cpu hogs waiting on I/O
687                          */
688                         if (p->activated == -1 && !HIGH_CREDIT(p) && p->mm) {
689                                 if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
690                                         sleep_time = 0;
691                                 else if (p->sleep_avg + sleep_time >=
692                                                 INTERACTIVE_SLEEP(p)) {
693                                         p->sleep_avg = INTERACTIVE_SLEEP(p);
694                                         sleep_time = 0;
695                                 }
696                         }
697
698                         /*
699                          * This code gives a bonus to interactive tasks.
700                          *
701                          * The boost works by updating the 'average sleep time'
702                          * value here, based on ->timestamp. The more time a
703                          * task spends sleeping, the higher the average gets -
704                          * and the higher the priority boost gets as well.
705                          */
706                         p->sleep_avg += sleep_time;
707
708                         if (p->sleep_avg > NS_MAX_SLEEP_AVG) {
709                                 p->sleep_avg = NS_MAX_SLEEP_AVG;
710                                 if (!HIGH_CREDIT(p))
711                                         p->interactive_credit++;
712                         }
713                 }
714         }
715
716         p->prio = effective_prio(p);
717 }
718
719 /*
720  * activate_task - move a task to the runqueue and do priority recalculation
721  *
722  * Update all the scheduling statistics stuff. (sleep average
723  * calculation, priority modifiers, etc.)
724  */
725 static void activate_task(task_t *p, runqueue_t *rq, int local)
726 {
727         unsigned long long now;
728
729         now = sched_clock();
730 #ifdef CONFIG_SMP
731         if (!local) {
732                 /* Compensate for drifting sched_clock */
733                 runqueue_t *this_rq = this_rq();
734                 now = (now - this_rq->timestamp_last_tick)
735                         + rq->timestamp_last_tick;
736         }
737 #endif
738
739         recalc_task_prio(p, now);
740
741         /*
 742          * This checks that it's not an uninterruptible task that is now
 743          * waking up (those arrive here with ->activated == -1 already set).
744          */
745         if (!p->activated) {
746                 /*
747                  * Tasks which were woken up by interrupts (ie. hw events)
748                  * are most likely of interactive nature. So we give them
749                  * the credit of extending their sleep time to the period
750                  * of time they spend on the runqueue, waiting for execution
751                  * on a CPU, first time around:
752                  */
753                 if (in_interrupt())
754                         p->activated = 2;
755                 else {
756                         /*
757                          * Normal first-time wakeups get a credit too for
758                          * on-runqueue time, but it will be weighted down:
759                          */
760                         p->activated = 1;
761                 }
762         }
763         p->timestamp = now;
764
765         __activate_task(p, rq);
766 }
767
768 /*
769  * deactivate_task - remove a task from the runqueue.
770  */
771 static void deactivate_task(struct task_struct *p, runqueue_t *rq)
772 {
773         rq->nr_running--;
774         dequeue_task(p, p->array);
775         p->array = NULL;
776 }
777
778 /*
779  * resched_task - mark a task 'to be rescheduled now'.
780  *
781  * On UP this means the setting of the need_resched flag, on SMP it
782  * might also involve a cross-CPU call to trigger the scheduler on
783  * the target CPU.
784  */
785 #ifdef CONFIG_SMP
786 static void resched_task(task_t *p)
787 {
788         int need_resched, nrpolling;
789
790         BUG_ON(!spin_is_locked(&task_rq(p)->lock));
791
792         /* minimise the chance of sending an interrupt to poll_idle() */
793         nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
794         need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
795         nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
796
797         if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
798                 smp_send_reschedule(task_cpu(p));
799 }
800 #else
801 static inline void resched_task(task_t *p)
802 {
803         set_tsk_need_resched(p);
804 }
805 #endif
806
807 /**
808  * task_curr - is this task currently executing on a CPU?
809  * @p: the task in question.
810  */
811 inline int task_curr(const task_t *p)
812 {
813         return cpu_curr(task_cpu(p)) == p;
814 }
815
816 #ifdef CONFIG_SMP
817 enum request_type {
818         REQ_MOVE_TASK,
819         REQ_SET_DOMAIN,
820 };
821
822 typedef struct {
823         struct list_head list;
824         enum request_type type;
825
826         /* For REQ_MOVE_TASK */
827         task_t *task;
828         int dest_cpu;
829
830         /* For REQ_SET_DOMAIN */
831         struct sched_domain *sd;
832
833         struct completion done;
834 } migration_req_t;
835
836 /*
837  * The task's runqueue lock must be held.
838  * Returns true if you have to wait for migration thread.
839  */
840 static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
841 {
842         runqueue_t *rq = task_rq(p);
843
844         /*
845          * If the task is not on a runqueue (and not running), then
846          * it is sufficient to simply update the task's cpu field.
847          */
848         if (!p->array && !task_running(rq, p)) {
849                 set_task_cpu(p, dest_cpu);
850                 return 0;
851         }
852
853         init_completion(&req->done);
854         req->type = REQ_MOVE_TASK;
855         req->task = p;
856         req->dest_cpu = dest_cpu;
857         list_add(&req->list, &rq->migration_queue);
858         return 1;
859 }
860
861 /*
862  * wait_task_inactive - wait for a thread to unschedule.
863  *
864  * The caller must ensure that the task *will* unschedule sometime soon,
865  * else this function might spin for a *long* time. This function can't
866  * be called with interrupts off, or it may introduce deadlock with
867  * smp_call_function() if an IPI is sent by the same process we are
868  * waiting to become inactive.
869  */
870 void wait_task_inactive(task_t * p)
871 {
872         unsigned long flags;
873         runqueue_t *rq;
874         int preempted;
875
876 repeat:
877         rq = task_rq_lock(p, &flags);
878         /* Must be off runqueue entirely, not preempted. */
879         if (unlikely(p->array)) {
880                 /* If it's preempted, we yield.  It could be a while. */
881                 preempted = !task_running(rq, p);
882                 task_rq_unlock(rq, &flags);
883                 cpu_relax();
884                 if (preempted)
885                         yield();
886                 goto repeat;
887         }
888         task_rq_unlock(rq, &flags);
889 }
890
891 /***
892  * kick_process - kick a running thread to enter/exit the kernel
893  * @p: the to-be-kicked thread
894  *
895  * Cause a process which is running on another CPU to enter
896  * kernel-mode, without any delay. (to get signals handled.)
897  */
898 void kick_process(task_t *p)
899 {
900         int cpu;
901
902         preempt_disable();
903         cpu = task_cpu(p);
904         if ((cpu != smp_processor_id()) && task_curr(p))
905                 smp_send_reschedule(cpu);
906         preempt_enable();
907 }
908
909 /*
910  * Return a low guess at the load of a migration-source cpu.
911  *
912  * We want to under-estimate the load of migration sources, to
913  * balance conservatively.
914  */
915 static inline unsigned long source_load(int cpu)
916 {
917         runqueue_t *rq = cpu_rq(cpu);
918         unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
919
920         return min(rq->cpu_load, load_now);
921 }
922
923 /*
924  * Return a high guess at the load of a migration-target cpu
925  */
926 static inline unsigned long target_load(int cpu)
927 {
928         runqueue_t *rq = cpu_rq(cpu);
929         unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
930
931         return max(rq->cpu_load, load_now);
932 }
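/*
 * E.g. a runqueue with two runnable tasks has load_now == 2*SCHED_LOAD_SCALE
 * (256 with the usual scale of 128); source_load() reports the smaller of
 * that and the cpu_load running average, target_load() the larger, so that
 * balancing stays conservative, as the comments above note.
 */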
933
934 #endif
935
936 /*
937  * wake_idle() is useful especially on SMT architectures to wake a
938  * task onto an idle sibling if we would otherwise wake it onto a
939  * busy sibling.
940  *
941  * Returns the CPU we should wake onto.
942  */
943 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
944 static int wake_idle(int cpu, task_t *p)
945 {
946         cpumask_t tmp;
947         runqueue_t *rq = cpu_rq(cpu);
948         struct sched_domain *sd;
949         int i;
950
951         if (idle_cpu(cpu))
952                 return cpu;
953
954         sd = rq->sd;
955         if (!(sd->flags & SD_WAKE_IDLE))
956                 return cpu;
957
958         cpus_and(tmp, sd->span, p->cpus_allowed);
959
960         for_each_cpu_mask(i, tmp) {
961                 if (idle_cpu(i))
962                         return i;
963         }
964
965         return cpu;
966 }
967 #else
968 static inline int wake_idle(int cpu, task_t *p)
969 {
970         return cpu;
971 }
972 #endif
973
974 /***
975  * try_to_wake_up - wake up a thread
976  * @p: the to-be-woken-up thread
977  * @state: the mask of task states that can be woken
978  * @sync: do a synchronous wakeup?
979  *
980  * Put it on the run-queue if it's not already there. The "current"
981  * thread is always on the run-queue (except when the actual
982  * re-schedule is in progress), and as such you're allowed to do
983  * the simpler "current->state = TASK_RUNNING" to mark yourself
984  * runnable without the overhead of this.
985  *
986  * returns failure only if the task is already active.
987  */
988 static int try_to_wake_up(task_t * p, unsigned int state, int sync)
989 {
990         int cpu, this_cpu, success = 0;
991         unsigned long flags;
992         long old_state;
993         runqueue_t *rq;
994 #ifdef CONFIG_SMP
995         unsigned long load, this_load;
996         struct sched_domain *sd;
997         int new_cpu;
998 #endif
999
1000         rq = task_rq_lock(p, &flags);
1001         schedstat_inc(rq, ttwu_cnt);
1002         old_state = p->state;
1003         if (!(old_state & state))
1004                 goto out;
1005
1006         if (p->array)
1007                 goto out_running;
1008
1009         cpu = task_cpu(p);
1010         this_cpu = smp_processor_id();
1011
1012 #ifdef CONFIG_SMP
1013         if (unlikely(task_running(rq, p)))
1014                 goto out_activate;
1015
1016         new_cpu = cpu;
1017
1018         if (cpu == this_cpu || unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
1019                 goto out_set_cpu;
1020
1021         load = source_load(cpu);
1022         this_load = target_load(this_cpu);
1023
1024         /*
1025          * If sync wakeup then subtract the (maximum possible) effect of
1026          * the currently running task from the load of the current CPU:
1027          */
1028         if (sync)
1029                 this_load -= SCHED_LOAD_SCALE;
1030
1031         /* Don't pull the task off an idle CPU to a busy one */
1032         if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
1033                 goto out_set_cpu;
1034
1035         new_cpu = this_cpu; /* Wake to this CPU if we can */
1036
1037         /*
1038          * Scan domains for affine wakeup and passive balancing
1039          * possibilities.
1040          */
1041         for_each_domain(this_cpu, sd) {
1042                 unsigned int imbalance;
1043                 /*
1044                  * Start passive balancing when half the imbalance_pct
1045                  * limit is reached.
1046                  */
1047                 imbalance = sd->imbalance_pct + (sd->imbalance_pct - 100) / 2;
1048
1049                 if ((sd->flags & SD_WAKE_AFFINE) &&
1050                                 !task_hot(p, rq->timestamp_last_tick, sd)) {
1051                         /*
1052                          * This domain has SD_WAKE_AFFINE and p is cache cold
1053                          * in this domain.
1054                          */
1055                         if (cpu_isset(cpu, sd->span)) {
1056                                 schedstat_inc(sd, ttwu_wake_affine);
1057                                 goto out_set_cpu;
1058                         }
1059                 } else if ((sd->flags & SD_WAKE_BALANCE) &&
1060                                 imbalance*this_load <= 100*load) {
1061                         /*
1062                          * This domain has SD_WAKE_BALANCE and there is
1063                          * an imbalance.
1064                          */
1065                         if (cpu_isset(cpu, sd->span)) {
1066                                 schedstat_inc(sd, ttwu_wake_balance);
1067                                 goto out_set_cpu;
1068                         }
1069                 }
1070         }
1071
1072         new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
1073 out_set_cpu:
1074         schedstat_inc(rq, ttwu_attempts);
1075         new_cpu = wake_idle(new_cpu, p);
1076         if (new_cpu != cpu && cpu_isset(new_cpu, p->cpus_allowed)) {
1077                 schedstat_inc(rq, ttwu_moved);
1078                 set_task_cpu(p, new_cpu);
1079                 task_rq_unlock(rq, &flags);
1080                 /* might preempt at this point */
1081                 rq = task_rq_lock(p, &flags);
1082                 old_state = p->state;
1083                 if (!(old_state & state))
1084                         goto out;
1085                 if (p->array)
1086                         goto out_running;
1087
1088                 this_cpu = smp_processor_id();
1089                 cpu = task_cpu(p);
1090         }
1091
1092 out_activate:
1093 #endif /* CONFIG_SMP */
1094         if (old_state == TASK_UNINTERRUPTIBLE) {
1095                 rq->nr_uninterruptible--;
1096                 /*
1097                  * Tasks on involuntary sleep don't earn
1098                  * sleep_avg beyond just interactive state.
1099                  */
1100                 p->activated = -1;
1101         }
1102
1103         /*
1104          * Sync wakeups (i.e. those types of wakeups where the waker
1105          * has indicated that it will leave the CPU in short order)
1106          * don't trigger a preemption, if the woken up task will run on
 1107          * don't trigger a preemption if the woken-up task will run on
 1108          * this cpu. (In this case the 'I will reschedule' promise of
 1109          * the waker guarantees that the freshly woken-up task is going
1110          */
1111         activate_task(p, rq, cpu == this_cpu);
1112         if (!sync || cpu != this_cpu) {
1113                 if (TASK_PREEMPTS_CURR(p, rq))
1114                         resched_task(rq->curr);
1115         }
1116         success = 1;
1117
1118 out_running:
1119         p->state = TASK_RUNNING;
1120 out:
1121         task_rq_unlock(rq, &flags);
1122
1123         return success;
1124 }
1125
1126 int fastcall wake_up_process(task_t * p)
1127 {
1128         return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
1129                                  TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
1130 }
1131
1132 EXPORT_SYMBOL(wake_up_process);
1133
1134 int fastcall wake_up_state(task_t *p, unsigned int state)
1135 {
1136         return try_to_wake_up(p, state, 0);
1137 }
1138
1139 #ifdef CONFIG_SMP
1140 static int find_idlest_cpu(struct task_struct *p, int this_cpu,
1141                            struct sched_domain *sd);
1142 #endif
1143
1144 /*
1145  * Perform scheduler related setup for a newly forked process p.
1146  * p is forked by current.
1147  */
1148 void fastcall sched_fork(task_t *p)
1149 {
1150         /*
1151          * We mark the process as running here, but have not actually
1152          * inserted it onto the runqueue yet. This guarantees that
1153          * nobody will actually run it, and a signal or other external
1154          * event cannot wake it up and insert it on the runqueue either.
1155          */
1156         p->state = TASK_RUNNING;
1157         INIT_LIST_HEAD(&p->run_list);
1158         p->array = NULL;
1159         spin_lock_init(&p->switch_lock);
1160 #ifdef CONFIG_SCHEDSTATS
1161         memset(&p->sched_info, 0, sizeof(p->sched_info));
1162 #endif
1163 #ifdef CONFIG_PREEMPT
1164         /*
1165          * During context-switch we hold precisely one spinlock, which
1166          * schedule_tail drops. (in the common case it's this_rq()->lock,
1167          * but it also can be p->switch_lock.) So we compensate with a count
1168          * of 1. Also, we want to start with kernel preemption disabled.
1169          */
1170         p->thread_info->preempt_count = 1;
1171 #endif
1172         /*
 1173          * Share the timeslice between parent and child, so that the
 1174          * total amount of pending timeslices in the system doesn't change;
 1175          * this results in more scheduling fairness.
1176          */
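        /*
         * E.g. a parent with 7 ticks left keeps 3 and the child gets 4;
         * rounding the child's half up means a parent with a single tick
         * left gives it away entirely (handled just below).
         */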
1177         local_irq_disable();
1178         p->time_slice = (current->time_slice + 1) >> 1;
1179         /*
1180          * The remainder of the first timeslice might be recovered by
1181          * the parent if the child exits early enough.
1182          */
1183         p->first_time_slice = 1;
1184         current->time_slice >>= 1;
1185         p->timestamp = sched_clock();
1186         if (unlikely(!current->time_slice)) {
1187                 /*
1188                  * This case is rare, it happens when the parent has only
1189                  * a single jiffy left from its timeslice. Taking the
1190                  * runqueue lock is not a problem.
1191                  */
1192                 current->time_slice = 1;
1193                 preempt_disable();
1194                 scheduler_tick(0, 0);
1195                 local_irq_enable();
1196                 preempt_enable();
1197         } else
1198                 local_irq_enable();
1199 }
1200
1201 /*
1202  * wake_up_new_task - wake up a newly created task for the first time.
1203  *
1204  * This function will do some initial scheduler statistics housekeeping
1205  * that must be done for every newly created context, then puts the task
1206  * on the runqueue and wakes it.
1207  */
1208 void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
1209 {
1210         unsigned long flags;
1211         int this_cpu, cpu;
1212         runqueue_t *rq, *this_rq;
1213
1214         rq = task_rq_lock(p, &flags);
1215         cpu = task_cpu(p);
1216         this_cpu = smp_processor_id();
1217
1218         BUG_ON(p->state != TASK_RUNNING);
1219
1220         schedstat_inc(rq, wunt_cnt);
1221         /*
1222          * We decrease the sleep average of forking parents
1223          * and children as well, to keep max-interactive tasks
1224          * from forking tasks that are max-interactive. The parent
1225          * (current) is done further down, under its lock.
1226          */
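        /*
         * E.g. (MAX_BONUS == 10, HZ == 1000): a child whose inherited
         * CURRENT_BONUS is 9 ends up with 9*95/100 == 8 bonus points'
         * worth of sleep_avg (800ms); the integer divisions quantise
         * the result to whole bonus steps.
         */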
1227         p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
1228                 CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1229
1230         p->interactive_credit = 0;
1231
1232         p->prio = effective_prio(p);
1233
1234         if (likely(cpu == this_cpu)) {
1235                 if (!(clone_flags & CLONE_VM)) {
1236                         /*
1237                          * The VM isn't cloned, so we're in a good position to
1238                          * do child-runs-first in anticipation of an exec. This
1239                          * usually avoids a lot of COW overhead.
1240                          */
1241                         if (unlikely(!current->array))
1242                                 __activate_task(p, rq);
1243                         else {
1244                                 p->prio = current->prio;
1245                                 list_add_tail(&p->run_list, &current->run_list);
1246                                 p->array = current->array;
1247                                 p->array->nr_active++;
1248                                 rq->nr_running++;
1249                         }
1250                         set_need_resched();
1251                 } else
1252                         /* Run child last */
1253                         __activate_task(p, rq);
1254                 /*
1255                  * We skip the following code due to cpu == this_cpu
1256                  *
1257                  *   task_rq_unlock(rq, &flags);
1258                  *   this_rq = task_rq_lock(current, &flags);
1259                  */
1260                 this_rq = rq;
1261         } else {
1262                 this_rq = cpu_rq(this_cpu);
1263
1264                 /*
1265                  * Not the local CPU - must adjust timestamp. This should
1266                  * get optimised away in the !CONFIG_SMP case.
1267                  */
1268                 p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
1269                                         + rq->timestamp_last_tick;
1270                 __activate_task(p, rq);
1271                 if (TASK_PREEMPTS_CURR(p, rq))
1272                         resched_task(rq->curr);
1273
1274                 schedstat_inc(rq, wunt_moved);
1275                 /*
1276                  * Parent and child are on different CPUs, now get the
1277                  * parent runqueue to update the parent's ->sleep_avg:
1278                  */
1279                 task_rq_unlock(rq, &flags);
1280                 this_rq = task_rq_lock(current, &flags);
1281         }
1282         current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
1283                 PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1284         task_rq_unlock(this_rq, &flags);
1285 }
1286
1287 /*
1288  * Potentially available exiting-child timeslices are
1289  * retrieved here - this way the parent does not get
1290  * penalized for creating too many threads.
1291  *
1292  * (this cannot be used to 'generate' timeslices
1293  * artificially, because any timeslice recovered here
1294  * was given away by the parent in the first place.)
1295  */
1296 void fastcall sched_exit(task_t * p)
1297 {
1298         unsigned long flags;
1299         runqueue_t *rq;
1300
1301         /*
1302          * If the child was a (relative-) CPU hog then decrease
1303          * the sleep_avg of the parent as well.
1304          */
1305         rq = task_rq_lock(p->parent, &flags);
1306         if (p->first_time_slice) {
1307                 p->parent->time_slice += p->time_slice;
1308                 if (unlikely(p->parent->time_slice > task_timeslice(p)))
1309                         p->parent->time_slice = task_timeslice(p);
1310         }
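        /*
         * E.g. a parent at 800ms sleep_avg whose child exits at 400ms is
         * pulled down to 800/4*3 + 400/4 = 700ms, a 3:1 weighting in
         * favour of the parent's own history.
         */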
1311         if (p->sleep_avg < p->parent->sleep_avg)
1312                 p->parent->sleep_avg = p->parent->sleep_avg /
1313                 (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
1314                 (EXIT_WEIGHT + 1);
1315         task_rq_unlock(rq, &flags);
1316 }
1317
1318 /**
1319  * finish_task_switch - clean up after a task-switch
1320  * @prev: the thread we just switched away from.
1321  *
1322  * We enter this with the runqueue still locked, and finish_arch_switch()
1323  * will unlock it along with doing any other architecture-specific cleanup
1324  * actions.
1325  *
1326  * Note that we may have delayed dropping an mm in context_switch(). If
1327  * so, we finish that here outside of the runqueue lock.  (Doing it
1328  * with the lock held can cause deadlocks; see schedule() for
1329  * details.)
1330  */
1331 static void finish_task_switch(task_t *prev)
1332         __releases(rq->lock)
1333 {
1334         runqueue_t *rq = this_rq();
1335         struct mm_struct *mm = rq->prev_mm;
1336         unsigned long prev_task_flags;
1337
1338         rq->prev_mm = NULL;
1339
1340         /*
1341          * A task struct has one reference for the use as "current".
1342          * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
1343          * calls schedule one last time. The schedule call will never return,
1344          * and the scheduled task must drop that reference.
1345          * The test for EXIT_ZOMBIE must occur while the runqueue locks are
1346          * still held, otherwise prev could be scheduled on another cpu, die
1347          * there before we look at prev->state, and then the reference would
1348          * be dropped twice.
1349          *              Manfred Spraul <manfred@colorfullife.com>
1350          */
1351         prev_task_flags = prev->flags;
1352         finish_arch_switch(rq, prev);
1353         if (mm)
1354                 mmdrop(mm);
1355         if (unlikely(prev_task_flags & PF_DEAD))
1356                 put_task_struct(prev);
1357 }
1358
1359 /**
1360  * schedule_tail - first thing a freshly forked thread must call.
1361  * @prev: the thread we just switched away from.
1362  */
1363 asmlinkage void schedule_tail(task_t *prev)
1364         __releases(rq->lock)
1365 {
1366         finish_task_switch(prev);
1367
1368         if (current->set_child_tid)
1369                 put_user(current->pid, current->set_child_tid);
1370 }
1371
1372 /*
1373  * context_switch - switch to the new MM and the new
1374  * thread's register state.
1375  */
1376 static inline
1377 task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
1378 {
1379         struct mm_struct *mm = next->mm;
1380         struct mm_struct *oldmm = prev->active_mm;
1381
1382         if (unlikely(!mm)) {
1383                 next->active_mm = oldmm;
1384                 atomic_inc(&oldmm->mm_count);
1385                 enter_lazy_tlb(oldmm, next);
1386         } else
1387                 switch_mm(oldmm, mm, next);
1388
1389         if (unlikely(!prev->mm)) {
1390                 prev->active_mm = NULL;
1391                 WARN_ON(rq->prev_mm);
1392                 rq->prev_mm = oldmm;
1393         }
1394
1395         /* Here we just switch the register state and the stack. */
1396         switch_to(prev, next, prev);
1397
1398         return prev;
1399 }
1400
1401 /*
1402  * nr_running, nr_uninterruptible and nr_context_switches:
1403  *
1404  * externally visible scheduler statistics: current number of runnable
1405  * threads, current number of uninterruptible-sleeping threads, total
1406  * number of context switches performed since bootup.
1407  */
1408 unsigned long nr_running(void)
1409 {
1410         unsigned long i, sum = 0;
1411
1412         for_each_online_cpu(i)
1413                 sum += cpu_rq(i)->nr_running;
1414
1415         return sum;
1416 }
1417
1418 unsigned long nr_uninterruptible(void)
1419 {
1420         unsigned long i, sum = 0;
1421
1422         for_each_cpu(i)
1423                 sum += cpu_rq(i)->nr_uninterruptible;
1424
1425         /*
1426          * Since we read the counters lockless, it might be slightly
1427          * inaccurate. Do not allow it to go below zero though:
1428          */
1429         if (unlikely((long)sum < 0))
1430                 sum = 0;
1431
1432         return sum;
1433 }
1434
1435 unsigned long long nr_context_switches(void)
1436 {
1437         unsigned long long i, sum = 0;
1438
1439         for_each_cpu(i)
1440                 sum += cpu_rq(i)->nr_switches;
1441
1442         return sum;
1443 }
1444
1445 unsigned long nr_iowait(void)
1446 {
1447         unsigned long i, sum = 0;
1448
1449         for_each_cpu(i)
1450                 sum += atomic_read(&cpu_rq(i)->nr_iowait);
1451
1452         return sum;
1453 }
1454
1455 #ifdef CONFIG_SMP
1456
1457 /*
1458  * double_rq_lock - safely lock two runqueues
1459  *
1460  * Note this does not disable interrupts like task_rq_lock,
1461  * you need to do so manually before calling.
1462  */
1463 static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
1464         __acquires(rq1->lock)
1465         __acquires(rq2->lock)
1466 {
1467         if (rq1 == rq2) {
1468                 spin_lock(&rq1->lock);
1469                 __acquire(rq2->lock);   /* Fake it out ;) */
1470         } else {
1471                 if (rq1 < rq2) {
1472                         spin_lock(&rq1->lock);
1473                         spin_lock(&rq2->lock);
1474                 } else {
1475                         spin_lock(&rq2->lock);
1476                         spin_lock(&rq1->lock);
1477                 }
1478         }
1479 }
1480
1481 /*
1482  * double_rq_unlock - safely unlock two runqueues
1483  *
1484  * Note this does not restore interrupts like task_rq_unlock,
1485  * you need to do so manually after calling.
1486  */
1487 static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
1488         __releases(rq1->lock)
1489         __releases(rq2->lock)
1490 {
1491         spin_unlock(&rq1->lock);
1492         if (rq1 != rq2)
1493                 spin_unlock(&rq2->lock);
1494         else
1495                 __release(rq2->lock);
1496 }
1497
1498 /*
1499  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1500  */
1501 static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
1502         __releases(this_rq->lock)
1503         __acquires(busiest->lock)
1504         __acquires(this_rq->lock)
1505 {
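        /*
         * Lock ordering: if the trylock fails, always take the lower-addressed
         * runqueue lock first (see the locking rule near struct runqueue) to
         * avoid AB-BA deadlock between CPUs balancing against each other.
         */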
1506         if (unlikely(!spin_trylock(&busiest->lock))) {
1507                 if (busiest < this_rq) {
1508                         spin_unlock(&this_rq->lock);
1509                         spin_lock(&busiest->lock);
1510                         spin_lock(&this_rq->lock);
1511                 } else
1512                         spin_lock(&busiest->lock);
1513         }
1514 }
1515
1516 /*
1517  * find_idlest_cpu - find the least busy runqueue.
1518  */
1519 static int find_idlest_cpu(struct task_struct *p, int this_cpu,
1520                            struct sched_domain *sd)
1521 {
1522         unsigned long load, min_load, this_load;
1523         int i, min_cpu;
1524         cpumask_t mask;
1525
1526         min_cpu = UINT_MAX;
1527         min_load = ULONG_MAX;
1528
1529         cpus_and(mask, sd->span, p->cpus_allowed);
1530
1531         for_each_cpu_mask(i, mask) {
1532                 load = target_load(i);
1533
1534                 if (load < min_load) {
1535                         min_cpu = i;
1536                         min_load = load;
1537
1538                         /* break out early on an idle CPU: */
1539                         if (!min_load)
1540                                 break;
1541                 }
1542         }
1543
1544         /* add +1 to account for the new task */
1545         this_load = source_load(this_cpu) + SCHED_LOAD_SCALE;
1546
1547         /*
 1548           * Would adding the new task to the current CPU
 1549           * create an imbalance between this CPU and the
 1550           * idlest CPU?
1551          *
1552          * Use half of the balancing threshold - new-context is
1553          * a good opportunity to balance.
1554          */
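        /*
         * E.g. with an imbalance_pct of 125 this becomes
         * min_load * 112 < this_load * 100, i.e. only migrate if the
         * idlest CPU carries under ~89% of our load (our load already
         * counting the new task).
         */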
1555         if (min_load*(100 + (sd->imbalance_pct-100)/2) < this_load*100)
1556                 return min_cpu;
1557
1558         return this_cpu;
1559 }
1560
1561 /*
1562  * If dest_cpu is allowed for this process, migrate the task to it.
 1563  * This is done by queueing a migration request on the task's runqueue
 1564  * and waking that runqueue's migration thread, which carries out the
 1565  * actual move (see migrate_task() above).
1566  */
1567 static void sched_migrate_task(task_t *p, int dest_cpu)
1568 {
1569         migration_req_t req;
1570         runqueue_t *rq;
1571         unsigned long flags;
1572
1573         rq = task_rq_lock(p, &flags);
1574         if (!cpu_isset(dest_cpu, p->cpus_allowed)
1575             || unlikely(cpu_is_offline(dest_cpu)))
1576                 goto out;
1577
1578         schedstat_inc(rq, smt_cnt);
1579         /* force the process onto the specified CPU */
1580         if (migrate_task(p, dest_cpu, &req)) {
1581                 /* Need to wait for migration thread (might exit: take ref). */
1582                 struct task_struct *mt = rq->migration_thread;
1583                 get_task_struct(mt);
1584                 task_rq_unlock(rq, &flags);
1585                 wake_up_process(mt);
1586                 put_task_struct(mt);
1587                 wait_for_completion(&req.done);
1588                 return;
1589         }
1590 out:
1591         task_rq_unlock(rq, &flags);
1592 }
1593
1594 /*
1595  * sched_exec(): find the highest-level, exec-balance-capable
1596  * domain and try to migrate the task to the least loaded CPU.
1597  *
1598  * execve() is a valuable balancing opportunity, because at this point
1599  * the task has the smallest effective memory and cache footprint.
1600  */
1601 void sched_exec(void)
1602 {
1603         struct sched_domain *tmp, *sd = NULL;
1604         int new_cpu, this_cpu = get_cpu();
1605
1606         schedstat_inc(this_rq(), sbe_cnt);
1607         /* Prefer the current CPU if there's only this task running */
1608         if (this_rq()->nr_running <= 1)
1609                 goto out;
1610
1611         for_each_domain(this_cpu, tmp)
1612                 if (tmp->flags & SD_BALANCE_EXEC)
1613                         sd = tmp;
1614
1615         if (sd) {
1616                 schedstat_inc(sd, sbe_attempts);
1617                 new_cpu = find_idlest_cpu(current, this_cpu, sd);
1618                 if (new_cpu != this_cpu) {
1619                         schedstat_inc(sd, sbe_pushed);
1620                         put_cpu();
1621                         sched_migrate_task(current, new_cpu);
1622                         return;
1623                 }
1624         }
1625 out:
1626         put_cpu();
1627 }
1628
1629 /*
1630  * pull_task - move a task from a remote runqueue to the local runqueue.
1631  * Both runqueues must be locked.
1632  */
1633 static inline
1634 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
1635                runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
1636 {
1637         dequeue_task(p, src_array);
1638         src_rq->nr_running--;
1639         set_task_cpu(p, this_cpu);
1640         this_rq->nr_running++;
1641         enqueue_task(p, this_array);
1642         p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
1643                                 + this_rq->timestamp_last_tick;
1644         /*
1645          * Note that idle threads have a prio of MAX_PRIO, so this test
1646          * is always true when the destination CPU is currently idle.
1647          */
1648         if (TASK_PREEMPTS_CURR(p, this_rq))
1649                 resched_task(this_rq->curr);
1650 }
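/*
 * Note on the timestamp adjustment above (illustrative): sched_clock() values
 * are not comparable across CPUs, so the task's timestamp is rebased from the
 * source runqueue's clock to the destination's.  If, say, the task last ran
 * 3ms before src_rq's last tick, it will also appear to have run 3ms before
 * this_rq's last tick after the move, preserving its sleep/run history for
 * the interactivity calculations.
 */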
1651
1652 /*
1653  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
1654  */
1655 static inline
1656 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
1657                      struct sched_domain *sd, enum idle_type idle)
1658 {
1659         /*
1660          * We do not migrate tasks that are:
1661          * 1) running (obviously), or
1662          * 2) not allowed onto this CPU due to cpus_allowed, or
1663          * 3) cache-hot on their current CPU.
1664          */
1665         if (task_running(rq, p))
1666                 return 0;
1667         if (!cpu_isset(this_cpu, p->cpus_allowed))
1668                 return 0;
1669
1670         /* Aggressive migration if we've failed balancing */
1671         if (idle == NEWLY_IDLE ||
1672                         sd->nr_balance_failed < sd->cache_nice_tries) {
1673                 if (task_hot(p, rq->timestamp_last_tick, sd))
1674                         return 0;
1675         }
1676
1677         return 1;
1678 }
1679
1680 /*
1681  * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
1682  * as part of a balancing operation within "domain". Returns the number of
1683  * tasks moved.
1684  *
1685  * Called with both runqueues locked.
1686  */
1687 static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
1688                       unsigned long max_nr_move, struct sched_domain *sd,
1689                       enum idle_type idle)
1690 {
1691         prio_array_t *array, *dst_array;
1692         struct list_head *head, *curr;
1693         int idx, pulled = 0;
1694         task_t *tmp;
1695
1696         if (max_nr_move <= 0 || busiest->nr_running <= 1)
1697                 goto out;
1698
1699         /*
1700          * We first consider expired tasks. Those will likely not be
1701          * executed in the near future, and they are most likely to
1702          * be cache-cold, thus switching CPUs has the least effect
1703          * on them.
1704          */
1705         if (busiest->expired->nr_active) {
1706                 array = busiest->expired;
1707                 dst_array = this_rq->expired;
1708         } else {
1709                 array = busiest->active;
1710                 dst_array = this_rq->active;
1711         }
1712
1713 new_array:
1714         /* Start searching at priority 0: */
1715         idx = 0;
1716 skip_bitmap:
1717         if (!idx)
1718                 idx = sched_find_first_bit(array->bitmap);
1719         else
1720                 idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
1721         if (idx >= MAX_PRIO) {
1722                 if (array == busiest->expired && busiest->active->nr_active) {
1723                         array = busiest->active;
1724                         dst_array = this_rq->active;
1725                         goto new_array;
1726                 }
1727                 goto out;
1728         }
1729
1730         head = array->queue + idx;
1731         curr = head->prev;
1732 skip_queue:
1733         tmp = list_entry(curr, task_t, run_list);
1734
1735         curr = curr->prev;
1736
1737         if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
1738                 if (curr != head)
1739                         goto skip_queue;
1740                 idx++;
1741                 goto skip_bitmap;
1742         }
1743
1744         /*
1745          * Right now, this is the only place pull_task() is called,
1746          * so we can safely collect pull_task() stats here rather than
1747          * inside pull_task().
1748          */
1749         schedstat_inc(this_rq, pt_gained[idle]);
1750         schedstat_inc(busiest, pt_lost[idle]);
1751
1752         pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
1753         pulled++;
1754
1755         /* We only want to steal up to the prescribed number of tasks. */
1756         if (pulled < max_nr_move) {
1757                 if (curr != head)
1758                         goto skip_queue;
1759                 idx++;
1760                 goto skip_bitmap;
1761         }
1762 out:
1763         return pulled;
1764 }
1765
1766 /*
1767  * find_busiest_group finds and returns the busiest CPU group within the
1768  * domain. It calculates and returns the number of tasks which should be
1769  * moved to restore balance via the imbalance parameter.
1770  */
1771 static struct sched_group *
1772 find_busiest_group(struct sched_domain *sd, int this_cpu,
1773                    unsigned long *imbalance, enum idle_type idle)
1774 {
1775         struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
1776         unsigned long max_load, avg_load, total_load, this_load, total_pwr;
1777
1778         max_load = this_load = total_load = total_pwr = 0;
1779
1780         do {
1781                 unsigned long load;
1782                 int local_group;
1783                 int i, nr_cpus = 0;
1784
1785                 local_group = cpu_isset(this_cpu, group->cpumask);
1786
1787                 /* Tally up the load of all CPUs in the group */
1788                 avg_load = 0;
1789
1790                 for_each_cpu_mask(i, group->cpumask) {
1791                         /* Bias balancing toward cpus of our domain */
1792                         if (local_group)
1793                                 load = target_load(i);
1794                         else
1795                                 load = source_load(i);
1796
1797                         nr_cpus++;
1798                         avg_load += load;
1799                 }
1800
1801                 if (!nr_cpus)
1802                         goto nextgroup;
1803
1804                 total_load += avg_load;
1805                 total_pwr += group->cpu_power;
1806
1807                 /* Adjust by relative CPU power of the group */
1808                 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1809
1810                 if (local_group) {
1811                         this_load = avg_load;
1812                         this = group;
1813                         goto nextgroup;
1814                 } else if (avg_load > max_load) {
1815                         max_load = avg_load;
1816                         busiest = group;
1817                 }
1818 nextgroup:
1819                 group = group->next;
1820         } while (group != sd->groups);
1821
1822         if (!busiest || this_load >= max_load)
1823                 goto out_balanced;
1824
1825         avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
1826
1827         if (this_load >= avg_load ||
1828                         100*max_load <= sd->imbalance_pct*this_load)
1829                 goto out_balanced;
1830
1831         /*
1832          * We're trying to get all the cpus to the average_load, so we don't
1833          * want to push ourselves above the average load, nor do we wish to
1834          * reduce the max loaded cpu below the average load, as either of these
1835          * actions would just result in more rebalancing later, and ping-pong
1836          * tasks around. Thus we look for the minimum possible imbalance.
1837          * Negative imbalances (*we* are more loaded than anyone else) will
1838          * be counted as no imbalance for these purposes -- we can't fix that
1839          * by pulling tasks to us.  Be careful of negative numbers as they'll
1840          * appear as very large values with unsigned longs.
1841          */
1842         *imbalance = min(max_load - avg_load, avg_load - this_load);
1843
1844         /* How much load to actually move to equalise the imbalance */
1845         *imbalance = (*imbalance * min(busiest->cpu_power, this->cpu_power))
1846                                 / SCHED_LOAD_SCALE;
1847
1848         if (*imbalance < SCHED_LOAD_SCALE - 1) {
1849                 unsigned long pwr_now = 0, pwr_move = 0;
1850                 unsigned long tmp;
1851
1852                 if (max_load - this_load >= SCHED_LOAD_SCALE*2) {
1853                         *imbalance = 1;
1854                         return busiest;
1855                 }
1856
1857                 /*
1858                  * OK, we don't have enough imbalance to justify moving tasks,
1859                  * however we may be able to increase total CPU power used by
1860                  * moving them.
1861                  */
1862
1863                 pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load);
1864                 pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load);
1865                 pwr_now /= SCHED_LOAD_SCALE;
1866
1867                 /* Amount of load we'd subtract */
1868                 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power;
1869                 if (max_load > tmp)
1870                         pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE,
1871                                                         max_load - tmp);
1872
1873                 /* Amount of load we'd add */
1874                 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
1875                 if (max_load < tmp)
1876                         tmp = max_load;
1877                 pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp);
1878                 pwr_move /= SCHED_LOAD_SCALE;
1879
1880                 /* Move if we gain another 8th of a CPU worth of throughput */
1881                 if (pwr_move < pwr_now + SCHED_LOAD_SCALE / 8)
1882                         goto out_balanced;
1883
1884                 *imbalance = 1;
1885                 return busiest;
1886         }
1887
1888         /* Get rid of the scaling factor, rounding down as we divide */
1889         *imbalance = (*imbalance + 1) / SCHED_LOAD_SCALE;
1890
1891         return busiest;
1892
1893 out_balanced:
1894         if (busiest && (idle == NEWLY_IDLE ||
1895                         (idle == SCHED_IDLE && max_load > SCHED_LOAD_SCALE)) ) {
1896                 *imbalance = 1;
1897                 return busiest;
1898         }
1899
1900         *imbalance = 0;
1901         return NULL;
1902 }
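/*
 * Worked example (illustrative, assuming SCHED_LOAD_SCALE == 128 and two
 * single-CPU groups, each with cpu_power == 128): if the local CPU carries
 * one task (this_load == 128) and the remote CPU carries three
 * (max_load == 384), then total_load == 512, total_pwr == 256 and
 * avg_load == 256.  The raw imbalance is min(384 - 256, 256 - 128) == 128,
 * unchanged by the cpu_power scaling, and (128 + 1) / 128 == 1, so one task
 * is requested to be moved -- enough to end up with two tasks on each CPU
 * without overshooting the average.
 */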
1903
1904 /*
1905  * find_busiest_queue - find the busiest runqueue among the cpus in group.
1906  */
1907 static runqueue_t *find_busiest_queue(struct sched_group *group)
1908 {
1909         unsigned long load, max_load = 0;
1910         runqueue_t *busiest = NULL;
1911         int i;
1912
1913         for_each_cpu_mask(i, group->cpumask) {
1914                 load = source_load(i);
1915
1916                 if (load > max_load) {
1917                         max_load = load;
1918                         busiest = cpu_rq(i);
1919                 }
1920         }
1921
1922         return busiest;
1923 }
1924
1925 /*
1926  * Check this_cpu to ensure it is balanced within domain. Attempt to move
1927  * tasks if there is an imbalance.
1928  *
1929  * Called with this_rq unlocked.
1930  */
1931 static int load_balance(int this_cpu, runqueue_t *this_rq,
1932                         struct sched_domain *sd, enum idle_type idle)
1933 {
1934         struct sched_group *group;
1935         runqueue_t *busiest;
1936         unsigned long imbalance;
1937         int nr_moved;
1938
1939         spin_lock(&this_rq->lock);
1940         schedstat_inc(sd, lb_cnt[idle]);
1941
1942         group = find_busiest_group(sd, this_cpu, &imbalance, idle);
1943         if (!group) {
1944                 schedstat_inc(sd, lb_nobusyg[idle]);
1945                 goto out_balanced;
1946         }
1947
1948         busiest = find_busiest_queue(group);
1949         if (!busiest) {
1950                 schedstat_inc(sd, lb_nobusyq[idle]);
1951                 goto out_balanced;
1952         }
1953
1954         /*
1955          * This should be "impossible", but since load
1956          * balancing is inherently racy and statistical,
1957          * it could happen in theory.
1958          */
1959         if (unlikely(busiest == this_rq)) {
1960                 WARN_ON(1);
1961                 goto out_balanced;
1962         }
1963
1964         schedstat_add(sd, lb_imbalance[idle], imbalance);
1965
1966         nr_moved = 0;
1967         if (busiest->nr_running > 1) {
1968                 /*
1969                  * Attempt to move tasks. If find_busiest_group has found
1970                  * an imbalance but busiest->nr_running <= 1, the group is
1971                  * still unbalanced. nr_moved simply stays zero, so it is
1972                  * correctly treated as an imbalance.
1973                  */
1974                 double_lock_balance(this_rq, busiest);
1975                 nr_moved = move_tasks(this_rq, this_cpu, busiest,
1976                                                 imbalance, sd, idle);
1977                 spin_unlock(&busiest->lock);
1978         }
1979         spin_unlock(&this_rq->lock);
1980
1981         if (!nr_moved) {
1982                 schedstat_inc(sd, lb_failed[idle]);
1983                 sd->nr_balance_failed++;
1984
1985                 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
1986                         int wake = 0;
1987
1988                         spin_lock(&busiest->lock);
1989                         if (!busiest->active_balance) {
1990                                 busiest->active_balance = 1;
1991                                 busiest->push_cpu = this_cpu;
1992                                 wake = 1;
1993                         }
1994                         spin_unlock(&busiest->lock);
1995                         if (wake)
1996                                 wake_up_process(busiest->migration_thread);
1997
1998                         /*
1999                          * We've kicked active balancing, reset the failure
2000                          * counter.
2001                          */
2002                         sd->nr_balance_failed = sd->cache_nice_tries;
2003                 }
2004
2005                 /*
2006                  * We were unbalanced, but unsuccessful in move_tasks(),
2007                  * so bump the balance_interval to lessen the lock contention.
2008                  */
2009                 if (sd->balance_interval < sd->max_interval)
2010                         sd->balance_interval++;
2011         } else {
2012                 sd->nr_balance_failed = 0;
2013
2014                 /* We were unbalanced, so reset the balancing interval */
2015                 sd->balance_interval = sd->min_interval;
2016         }
2017
2018         return nr_moved;
2019
2020 out_balanced:
2021         spin_unlock(&this_rq->lock);
2022
2023         /* tune up the balancing interval */
2024         if (sd->balance_interval < sd->max_interval)
2025                 sd->balance_interval *= 2;
2026
2027         return 0;
2028 }
2029
2030 /*
2031  * Check this_cpu to ensure it is balanced within domain. Attempt to move
2032  * tasks if there is an imbalance.
2033  *
2034  * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
2035  * this_rq is locked.
2036  */
2037 static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2038                                 struct sched_domain *sd)
2039 {
2040         struct sched_group *group;
2041         runqueue_t *busiest = NULL;
2042         unsigned long imbalance;
2043         int nr_moved = 0;
2044
2045         schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
2046         group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
2047         if (!group) {
2048                 schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
2049                 goto out;
2050         }
2051
2052         busiest = find_busiest_queue(group);
2053         if (!busiest || busiest == this_rq) {
2054                 schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
2055                 goto out;
2056         }
2057
2058         /* Attempt to move tasks */
2059         double_lock_balance(this_rq, busiest);
2060
2061         schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
2062         nr_moved = move_tasks(this_rq, this_cpu, busiest,
2063                                         imbalance, sd, NEWLY_IDLE);
2064         if (!nr_moved)
2065                 schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
2066
2067         spin_unlock(&busiest->lock);
2068
2069 out:
2070         return nr_moved;
2071 }
2072
2073 /*
2074  * idle_balance is called by schedule() if this_cpu is about to become
2075  * idle. Attempts to pull tasks from other CPUs.
2076  */
2077 static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
2078 {
2079         struct sched_domain *sd;
2080
2081         for_each_domain(this_cpu, sd) {
2082                 if (sd->flags & SD_BALANCE_NEWIDLE) {
2083                         if (load_balance_newidle(this_cpu, this_rq, sd)) {
2084                                 /* We've pulled tasks over so stop searching */
2085                                 break;
2086                         }
2087                 }
2088         }
2089 }
2090
2091 #ifdef CONFIG_SCHED_SMT
2092 static int cpu_and_siblings_are_idle(int cpu)
2093 {
2094         int sib;
2095         for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
2096                 if (idle_cpu(sib))
2097                         continue;
2098                 return 0;
2099         }
2100
2101         return 1;
2102 }
2103 #else
2104 #define cpu_and_siblings_are_idle(A) idle_cpu(A)
2105 #endif
2106
2107
2108 /*
2109  * active_load_balance is run by migration threads. It pushes running tasks
2110  * off the busiest CPU onto idle CPUs. It tries to ensure that at least
2111  * one task is running on each physical CPU where possible, and avoids
2112  * physical / logical imbalances.
2113  *
2114  * Called with busiest_rq locked.
2115  */
2116 static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
2117 {
2118         struct sched_domain *sd;
2119         struct sched_group *cpu_group;
2120         cpumask_t visited_cpus;
2121
2122         schedstat_inc(busiest_rq, alb_cnt);
2123         /*
2124          * Search for suitable CPUs to push tasks to in successively higher
2125          * domains with SD_LOAD_BALANCE set.
2126          */
2127         visited_cpus = CPU_MASK_NONE;
2128         for_each_domain(busiest_cpu, sd) {
2129                 if (!(sd->flags & SD_LOAD_BALANCE) || busiest_rq->nr_running <= 1)
2130                         break; /* no more domains to search or no more tasks to move */
2131
2132                 cpu_group = sd->groups;
2133                 do { /* sched_groups should either use list_heads or be merged into the domains structure */
2134                         int cpu, target_cpu = -1;
2135                         runqueue_t *target_rq;
2136
2137                         for_each_cpu_mask(cpu, cpu_group->cpumask) {
2138                                 if (cpu_isset(cpu, visited_cpus) || cpu == busiest_cpu ||
2139                                     !cpu_and_siblings_are_idle(cpu)) {
2140                                         cpu_set(cpu, visited_cpus);
2141                                         continue;
2142                                 }
2143                                 target_cpu = cpu;
2144                                 break;
2145                         }
2146                         if (target_cpu == -1)
2147                                 goto next_group; /* failed to find a suitable target cpu in this domain */
2148
2149                         target_rq = cpu_rq(target_cpu);
2150
2151                         /*
2152                          * This condition is "impossible"; if it occurs we need to fix it
2153                          * Reported by Bjorn Helgaas on a 128-cpu setup.
2154                          */
2155                         BUG_ON(busiest_rq == target_rq);
2156
2157                         /* move a task from busiest_rq to target_rq */
2158                         double_lock_balance(busiest_rq, target_rq);
2159                         if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE)) {
2160                                 schedstat_inc(busiest_rq, alb_lost);
2161                                 schedstat_inc(target_rq, alb_gained);
2162                         } else {
2163                                 schedstat_inc(busiest_rq, alb_failed);
2164                         }
2165                         spin_unlock(&target_rq->lock);
2166 next_group:
2167                         cpu_group = cpu_group->next;
2168                 } while (cpu_group != sd->groups && busiest_rq->nr_running > 1);
2169         }
2170 }
2171
2172 /*
2173  * rebalance_tick will get called every timer tick, on every CPU.
2174  *
2175  * It checks each scheduling domain to see if it is due to be balanced,
2176  * and initiates a balancing operation if so.
2177  *
2178  * Balancing parameters are set up in arch_init_sched_domains.
2179  */
2180
2181 /* Don't have all balancing operations going off at once */
2182 #define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
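/*
 * Example (illustrative, assuming HZ == 1000 and NR_CPUS == 4): CPU 0 gets an
 * offset of 0 jiffies, CPU 1 of 250, CPU 2 of 500 and CPU 3 of 750, so the
 * per-CPU balancing passes are staggered across the second rather than all
 * firing on the same tick.
 */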
2183
2184 static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
2185                            enum idle_type idle)
2186 {
2187         unsigned long old_load, this_load;
2188         unsigned long j = jiffies + CPU_OFFSET(this_cpu);
2189         struct sched_domain *sd;
2190
2191         /* Update our load */
2192         old_load = this_rq->cpu_load;
2193         this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
2194         /*
2195          * Round up the averaging division if load is increasing. This
2196          * prevents us from getting stuck on 9 if the load is 10, for
2197          * example.
2198          */
2199         if (this_load > old_load)
2200                 old_load++;
2201         this_rq->cpu_load = (old_load + this_load) / 2;
2202
2203         for_each_domain(this_cpu, sd) {
2204                 unsigned long interval;
2205
2206                 if (!(sd->flags & SD_LOAD_BALANCE))
2207                         continue;
2208
2209                 interval = sd->balance_interval;
2210                 if (idle != SCHED_IDLE)
2211                         interval *= sd->busy_factor;
2212
2213                 /* scale ms to jiffies */
2214                 interval = msecs_to_jiffies(interval);
2215                 if (unlikely(!interval))
2216                         interval = 1;
2217
2218                 if (j - sd->last_balance >= interval) {
2219                         if (load_balance(this_cpu, this_rq, sd, idle)) {
2220                                 /* We've pulled tasks over so no longer idle */
2221                                 idle = NOT_IDLE;
2222                         }
2223                         sd->last_balance += interval;
2224                 }
2225         }
2226 }
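/*
 * Worked example for the cpu_load averaging at the top of rebalance_tick()
 * (illustrative numbers): with plain integer division, old_load == 9 and
 * this_load == 10 would yield (9 + 10) / 2 == 9 on every tick and never reach
 * 10.  Bumping old_load by one when the load is rising gives
 * (10 + 10) / 2 == 10, so the average can converge to the instantaneous load.
 */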
2227 #else
2228 /*
2229  * on UP we do not need to balance between CPUs:
2230  */
2231 static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
2232 {
2233 }
2234 static inline void idle_balance(int cpu, runqueue_t *rq)
2235 {
2236 }
2237 #endif
2238
2239 static inline int wake_priority_sleeper(runqueue_t *rq)
2240 {
2241         int ret = 0;
2242 #ifdef CONFIG_SCHED_SMT
2243         spin_lock(&rq->lock);
2244         /*
2245          * If an SMT sibling task has been put to sleep for priority
2246          * reasons reschedule the idle task to see if it can now run.
2247          * reasons, reschedule the idle task to see if it can now run.
2248         if (rq->nr_running) {
2249                 resched_task(rq->idle);
2250                 ret = 1;
2251         }
2252         spin_unlock(&rq->lock);
2253 #endif
2254         return ret;
2255 }
2256
2257 DEFINE_PER_CPU(struct kernel_stat, kstat);
2258
2259 EXPORT_PER_CPU_SYMBOL(kstat);
2260
2261 /*
2262  * We place interactive tasks back into the active array, if possible.
2263  *
2264  * To guarantee that this does not starve expired tasks we ignore the
2265  * interactivity of a task if the first expired task had to wait more
2266  * than a 'reasonable' amount of time. This deadline timeout is
2267  * load-dependent, as the frequency of array switches decreases with
2268  * increasing number of running tasks. We also ignore the interactivity
2269  * if a better static_prio task has expired:
2270  */
2271 #define EXPIRED_STARVING(rq) \
2272         ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
2273                 (jiffies - (rq)->expired_timestamp >= \
2274                         STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
2275                         ((rq)->curr->static_prio > (rq)->best_expired_prio))
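/*
 * Worked example (illustrative): with STARVATION_LIMIT == S jiffies and N
 * runnable tasks, the first clause trips once the oldest expired task has
 * waited at least S * N + 1 jiffies; the second trips as soon as the running
 * task has a worse static_prio than the best task already in the expired
 * array.  Either way interactive tasks stop being reinserted into the active
 * array, so the pending array switch -- and the expired tasks' turn to run --
 * cannot be postponed indefinitely.
 */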
2276
2277 /*
2278  * This function gets called by the timer code, with HZ frequency.
2279  * We call it with interrupts disabled.
2280  *
2281  * It also gets called by the fork code, when changing the parent's
2282  * timeslices.
2283  */
2284 void scheduler_tick(int user_ticks, int sys_ticks)
2285 {
2286         int cpu = smp_processor_id();
2287         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2288         runqueue_t *rq = this_rq();
2289         task_t *p = current;
2290
2291         rq->timestamp_last_tick = sched_clock();
2292
2293         if (rcu_pending(cpu))
2294                 rcu_check_callbacks(cpu, user_ticks);
2295
2296         /* note: this timer irq context must be accounted for as well */
2297         if (hardirq_count() - HARDIRQ_OFFSET) {
2298                 cpustat->irq += sys_ticks;
2299                 sys_ticks = 0;
2300         } else if (softirq_count()) {
2301                 cpustat->softirq += sys_ticks;
2302                 sys_ticks = 0;
2303         }
2304
2305         if (p == rq->idle) {
2306                 if (atomic_read(&rq->nr_iowait) > 0)
2307                         cpustat->iowait += sys_ticks;
2308                 else
2309                         cpustat->idle += sys_ticks;
2310                 if (wake_priority_sleeper(rq))
2311                         goto out;
2312                 rebalance_tick(cpu, rq, SCHED_IDLE);
2313                 return;
2314         }
2315         if (TASK_NICE(p) > 0)
2316                 cpustat->nice += user_ticks;
2317         else
2318                 cpustat->user += user_ticks;
2319         cpustat->system += sys_ticks;
2320
2321         /* Task might have expired already, but not scheduled off yet */
2322         if (p->array != rq->active) {
2323                 set_tsk_need_resched(p);
2324                 goto out;
2325         }
2326         spin_lock(&rq->lock);
2327         /*
2328          * The task was running during this tick - update the
2329          * time slice counter. Note: we do not update a thread's
2330          * priority until it either goes to sleep or uses up its
2331          * timeslice. This makes it possible for interactive tasks
2332          * to use up their timeslices at their highest priority levels.
2333          */
2334         if (rt_task(p)) {
2335                 /*
2336                  * RR tasks need a special form of timeslice management.
2337                  * FIFO tasks have no timeslices.
2338                  */
2339                 if ((p->policy == SCHED_RR) && !--p->time_slice) {
2340                         p->time_slice = task_timeslice(p);
2341                         p->first_time_slice = 0;
2342                         set_tsk_need_resched(p);
2343
2344                         /* put it at the end of the queue: */
2345                         dequeue_task(p, rq->active);
2346                         enqueue_task(p, rq->active);
2347                 }
2348                 goto out_unlock;
2349         }
2350         if (!--p->time_slice) {
2351                 dequeue_task(p, rq->active);
2352                 set_tsk_need_resched(p);
2353                 p->prio = effective_prio(p);
2354                 p->time_slice = task_timeslice(p);
2355                 p->first_time_slice = 0;
2356
2357                 if (!rq->expired_timestamp)
2358                         rq->expired_timestamp = jiffies;
2359                 if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
2360                         enqueue_task(p, rq->expired);
2361                         if (p->static_prio < rq->best_expired_prio)
2362                                 rq->best_expired_prio = p->static_prio;
2363                 } else
2364                         enqueue_task(p, rq->active);
2365         } else {
2366                 /*
2367                  * Prevent a too long timeslice allowing a task to monopolize
2368                  * the CPU. We do this by splitting up the timeslice into
2369                  * smaller pieces.
2370                  *
2371                  * Note: this does not mean the task's timeslices expire or
2372                  * get lost in any way, they just might be preempted by
2373                  * another task of equal priority. (one with higher
2374                  * priority would have preempted this task already.) We
2375                  * requeue this task to the end of the list on this priority
2376                  * level, which is in essence a round-robin of tasks with
2377                  * equal priority.
2378                  *
2379                  * This only applies to tasks in the interactive
2380                  * delta range with at least TIMESLICE_GRANULARITY to requeue.
2381                  */
2382                 if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
2383                         p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
2384                         (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
2385                         (p->array == rq->active)) {
2386
2387                         dequeue_task(p, rq->active);
2388                         set_tsk_need_resched(p);
2389                         p->prio = effective_prio(p);
2390                         enqueue_task(p, rq->active);
2391                 }
2392         }
2393 out_unlock:
2394         spin_unlock(&rq->lock);
2395 out:
2396         rebalance_tick(cpu, rq, NOT_IDLE);
2397 }
2398
2399 #ifdef CONFIG_SCHED_SMT
2400 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
2401 {
2402         struct sched_domain *sd = this_rq->sd;
2403         cpumask_t sibling_map;
2404         int i;
2405
2406         if (!(sd->flags & SD_SHARE_CPUPOWER))
2407                 return;
2408
2409         /*
2410          * Unlock the current runqueue because we have to lock in
2411          * CPU order to avoid deadlocks. Caller knows that we might
2412          * unlock. We keep IRQs disabled.
2413          */
2414         spin_unlock(&this_rq->lock);
2415
2416         sibling_map = sd->span;
2417
2418         for_each_cpu_mask(i, sibling_map)
2419                 spin_lock(&cpu_rq(i)->lock);
2420         /*
2421          * We clear this CPU from the mask. This both simplifies the
2422          * inner loop and keeps this_rq locked when we exit:
2423          */
2424         cpu_clear(this_cpu, sibling_map);
2425
2426         for_each_cpu_mask(i, sibling_map) {
2427                 runqueue_t *smt_rq = cpu_rq(i);
2428
2429                 /*
2430                  * If an SMT sibling task is sleeping due to priority
2431                  * reasons wake it up now.
2432                  */
2433                 if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running)
2434                         resched_task(smt_rq->idle);
2435         }
2436
2437         for_each_cpu_mask(i, sibling_map)
2438                 spin_unlock(&cpu_rq(i)->lock);
2439         /*
2440          * We exit with this_cpu's rq still held and IRQs
2441          * still disabled:
2442          */
2443 }
2444
2445 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
2446 {
2447         struct sched_domain *sd = this_rq->sd;
2448         cpumask_t sibling_map;
2449         prio_array_t *array;
2450         int ret = 0, i;
2451         task_t *p;
2452
2453         if (!(sd->flags & SD_SHARE_CPUPOWER))
2454                 return 0;
2455
2456         /*
2457          * The same locking rules and details apply as for
2458          * wake_sleeping_dependent():
2459          */
2460         spin_unlock(&this_rq->lock);
2461         sibling_map = sd->span;
2462         for_each_cpu_mask(i, sibling_map)
2463                 spin_lock(&cpu_rq(i)->lock);
2464         cpu_clear(this_cpu, sibling_map);
2465
2466         /*
2467          * Establish next task to be run - it might have gone away because
2468          * we released the runqueue lock above:
2469          */
2470         if (!this_rq->nr_running)
2471                 goto out_unlock;
2472         array = this_rq->active;
2473         if (!array->nr_active)
2474                 array = this_rq->expired;
2475         BUG_ON(!array->nr_active);
2476
2477         p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
2478                 task_t, run_list);
2479
2480         for_each_cpu_mask(i, sibling_map) {
2481                 runqueue_t *smt_rq = cpu_rq(i);
2482                 task_t *smt_curr = smt_rq->curr;
2483
2484                 /*
2485                  * If a user task with lower static priority than the
2486                  * running task on the SMT sibling is trying to schedule,
2487                  * delay it till there is proportionately less timeslice
2488                  * left of the sibling task to prevent a lower priority
2489                  * task from using an unfair proportion of the
2490                  * physical cpu's resources. -ck
2491                  */
2492                 if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
2493                         task_timeslice(p) || rt_task(smt_curr)) &&
2494                         p->mm && smt_curr->mm && !rt_task(p))
2495                                 ret = 1;
2496
2497                 /*
2498                  * Reschedule a lower priority task on the SMT sibling,
2499                  * or wake it up if it has been put to sleep for priority
2500                  * reasons.
2501                  */
2502                 if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
2503                         task_timeslice(smt_curr) || rt_task(p)) &&
2504                         smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
2505                         (smt_curr == smt_rq->idle && smt_rq->nr_running))
2506                                 resched_task(smt_curr);
2507         }
2508 out_unlock:
2509         for_each_cpu_mask(i, sibling_map)
2510                 spin_unlock(&cpu_rq(i)->lock);
2511         return ret;
2512 }
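/*
 * Worked example for the throttling above (illustrative, assuming
 * sd->per_cpu_gain == 25): a sibling's current task is protected while
 * time_slice * 75 / 100 still exceeds the timeslice of the lower-priority
 * task wanting to run here.  With smt_curr->time_slice == 100 ticks and
 * task_timeslice(p) == 60 ticks, 75 > 60, so p is held back (ret == 1); once
 * smt_curr's remaining slice drops to 80 ticks or less, 80 * 75 / 100 == 60
 * no longer exceeds 60 and p is allowed to run.
 */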
2513 #else
2514 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
2515 {
2516 }
2517
2518 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
2519 {
2520         return 0;
2521 }
2522 #endif
2523
2524 /*
2525  * schedule() is the main scheduler function.
2526  */
2527 asmlinkage void __sched schedule(void)
2528 {
2529         long *switch_count;
2530         task_t *prev, *next;
2531         runqueue_t *rq;
2532         prio_array_t *array;
2533         struct list_head *queue;
2534         unsigned long long now;
2535         unsigned long run_time;
2536         int cpu, idx;
2537
2538         /*
2539          * Test if we are atomic.  Since do_exit() needs to call into
2540          * schedule() atomically, we ignore that path for now.
2541          * Otherwise, whine if we are scheduling when we should not be.
2542          */
2543         if (likely(!(current->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)))) {
2544                 if (unlikely(in_atomic())) {
2545                         printk(KERN_ERR "scheduling while atomic: "
2546                                 "%s/0x%08x/%d\n",
2547                                 current->comm, preempt_count(), current->pid);
2548                         dump_stack();
2549                 }
2550         }
2551         profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2552
2553 need_resched:
2554         preempt_disable();
2555         prev = current;
2556         release_kernel_lock(prev);
2557 need_resched_nonpreemptible:
2558         rq = this_rq();
2559
2560         /*
2561          * The idle thread is not allowed to schedule!
2562          * Remove this check after it has been exercised a bit.
2563          */
2564         if (unlikely(current == rq->idle) && current->state != TASK_RUNNING) {
2565                 printk(KERN_ERR "bad: scheduling from the idle thread!\n");
2566                 dump_stack();
2567         }
2568
2569         schedstat_inc(rq, sched_cnt);
2570         now = sched_clock();
2571         if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG))
2572                 run_time = now - prev->timestamp;
2573         else
2574                 run_time = NS_MAX_SLEEP_AVG;
2575
2576         /*
2577          * Tasks with interactive credits get charged less run_time
2578          * at high sleep_avg, which delays the loss of their
2579          * interactive status.
2580          */
2581         if (HIGH_CREDIT(prev))
2582                 run_time /= (CURRENT_BONUS(prev) ? : 1);
2583
2584         spin_lock_irq(&rq->lock);
2585
2586         if (unlikely(current->flags & PF_DEAD))
2587                 current->state = EXIT_DEAD;
2588         /*
2589          * if entering off of a kernel preemption go straight
2590          * to picking the next task.
2591          */
2592         switch_count = &prev->nivcsw;
2593         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2594                 switch_count = &prev->nvcsw;
2595                 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
2596                                 unlikely(signal_pending(prev))))
2597                         prev->state = TASK_RUNNING;
2598                 else {
2599                         if (prev->state == TASK_UNINTERRUPTIBLE)
2600                                 rq->nr_uninterruptible++;
2601                         deactivate_task(prev, rq);
2602                 }
2603         }
2604
2605         cpu = smp_processor_id();
2606         if (unlikely(!rq->nr_running)) {
2607 go_idle:
2608                 idle_balance(cpu, rq);
2609                 if (!rq->nr_running) {
2610                         next = rq->idle;
2611                         rq->expired_timestamp = 0;
2612                         wake_sleeping_dependent(cpu, rq);
2613                         /*
2614                          * wake_sleeping_dependent() might have released
2615                          * the runqueue, so break out if we got new
2616                          * tasks meanwhile:
2617                          */
2618                         if (!rq->nr_running)
2619                                 goto switch_tasks;
2620                 }
2621         } else {
2622                 if (dependent_sleeper(cpu, rq)) {
2623                         next = rq->idle;
2624                         goto switch_tasks;
2625                 }
2626                 /*
2627                  * dependent_sleeper() releases and reacquires the runqueue
2628                  * lock, hence go into the idle loop if the rq went
2629                  * empty meanwhile:
2630                  */
2631                 if (unlikely(!rq->nr_running))
2632                         goto go_idle;
2633         }
2634
2635         array = rq->active;
2636         if (unlikely(!array->nr_active)) {
2637                 /*
2638                  * Switch the active and expired arrays.
2639                  */
2640                 schedstat_inc(rq, sched_switch);
2641                 rq->active = rq->expired;
2642                 rq->expired = array;
2643                 array = rq->active;
2644                 rq->expired_timestamp = 0;
2645                 rq->best_expired_prio = MAX_PRIO;
2646         } else
2647                 schedstat_inc(rq, sched_noswitch);
2648
2649         idx = sched_find_first_bit(array->bitmap);
2650         queue = array->queue + idx;
2651         next = list_entry(queue->next, task_t, run_list);
2652
2653         if (!rt_task(next) && next->activated > 0) {
2654                 unsigned long long delta = now - next->timestamp;
2655
2656                 if (next->activated == 1)
2657                         delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
2658
2659                 array = next->array;
2660                 dequeue_task(next, array);
2661                 recalc_task_prio(next, next->timestamp + delta);
2662                 enqueue_task(next, array);
2663         }
2664         next->activated = 0;
2665 switch_tasks:
2666         if (next == rq->idle)
2667                 schedstat_inc(rq, sched_goidle);
2668         prefetch(next);
2669         clear_tsk_need_resched(prev);
2670         rcu_qsctr_inc(task_cpu(prev));
2671
2672         prev->sleep_avg -= run_time;
2673         if ((long)prev->sleep_avg <= 0) {
2674                 prev->sleep_avg = 0;
2675                 if (!(HIGH_CREDIT(prev) || LOW_CREDIT(prev)))
2676                         prev->interactive_credit--;
2677         }
2678         prev->timestamp = prev->last_ran = now;
2679
2680         sched_info_switch(prev, next);
2681         if (likely(prev != next)) {
2682                 next->timestamp = now;
2683                 rq->nr_switches++;
2684                 rq->curr = next;
2685                 ++*switch_count;
2686
2687                 prepare_arch_switch(rq, next);
2688                 prev = context_switch(rq, prev, next);
2689                 barrier();
2690
2691                 finish_task_switch(prev);
2692         } else
2693                 spin_unlock_irq(&rq->lock);
2694
2695         prev = current;
2696         if (unlikely(reacquire_kernel_lock(prev) < 0))
2697                 goto need_resched_nonpreemptible;
2698         preempt_enable_no_resched();
2699         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
2700                 goto need_resched;
2701 }
2702
2703 EXPORT_SYMBOL(schedule);
2704
2705 #ifdef CONFIG_PREEMPT
2706 /*
2707  * This is the entry point to schedule() from in-kernel preemption
2708  * off of preempt_enable.  Preemptions triggered on return from interrupt
2709  * are handled in the interrupt-return path and call schedule() directly.
2710  */
2711 asmlinkage void __sched preempt_schedule(void)
2712 {
2713         struct thread_info *ti = current_thread_info();
2714
2715         /*
2716          * If there is a non-zero preempt_count or interrupts are disabled,
2717          * we do not want to preempt the current task.  Just return..
2718          */
2719         if (unlikely(ti->preempt_count || irqs_disabled()))
2720                 return;
2721
2722 need_resched:
2723         ti->preempt_count = PREEMPT_ACTIVE;
2724         schedule();
2725         ti->preempt_count = 0;
2726
2727         /* we could miss a preemption opportunity between schedule and now */
2728         barrier();
2729         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
2730                 goto need_resched;
2731 }
2732
2733 EXPORT_SYMBOL(preempt_schedule);
2734 #endif /* CONFIG_PREEMPT */
2735
2736 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
2737 {
2738         task_t *p = curr->task;
2739         return try_to_wake_up(p, mode, sync);
2740 }
2741
2742 EXPORT_SYMBOL(default_wake_function);
2743
2744 /*
2745  * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
2746  * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
2747  * number) then we wake all the non-exclusive tasks and one exclusive task.
2748  *
2749  * There are circumstances in which we can try to wake a task which has already
2750  * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
2751  * zero in this (rare) case, and we handle it by continuing to scan the queue.
2752  */
2753 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
2754                              int nr_exclusive, int sync, void *key)
2755 {
2756         struct list_head *tmp, *next;
2757
2758         list_for_each_safe(tmp, next, &q->task_list) {
2759                 wait_queue_t *curr;
2760                 unsigned flags;
2761                 curr = list_entry(tmp, wait_queue_t, task_list);
2762                 flags = curr->flags;
2763                 if (curr->func(curr, mode, sync, key) &&
2764                     (flags & WQ_FLAG_EXCLUSIVE) &&
2765                     !--nr_exclusive)
2766                         break;
2767         }
2768 }
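/*
 * Example (illustrative): exclusive waiters are queued at the tail (see the
 * use of __add_wait_queue_tail() in wait_for_completion() below), so with
 * three non-exclusive and two exclusive waiters on the queue, a call such as
 *
 *	__wake_up(q, TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 1, NULL);
 *
 * wakes all three non-exclusive waiters and exactly one exclusive waiter:
 * nr_exclusive is only decremented for waiters that carry WQ_FLAG_EXCLUSIVE
 * and were actually woken.
 */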
2769
2770 /**
2771  * __wake_up - wake up threads blocked on a waitqueue.
2772  * @q: the waitqueue
2773  * @mode: which threads
2774  * @nr_exclusive: how many wake-one or wake-many threads to wake up
2775  */
2776 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
2777                                 int nr_exclusive, void *key)
2778 {
2779         unsigned long flags;
2780
2781         spin_lock_irqsave(&q->lock, flags);
2782         __wake_up_common(q, mode, nr_exclusive, 0, key);
2783         spin_unlock_irqrestore(&q->lock, flags);
2784 }
2785
2786 EXPORT_SYMBOL(__wake_up);
2787
2788 /*
2789  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
2790  */
2791 void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
2792 {
2793         __wake_up_common(q, mode, 1, 0, NULL);
2794 }
2795
2796 /**
2797  * __wake_up_sync - wake up threads blocked on a waitqueue.
2798  * @q: the waitqueue
2799  * @mode: which threads
2800  * @nr_exclusive: how many wake-one or wake-many threads to wake up
2801  *
2802  * The sync wakeup differs in that the waker knows that it will schedule
2803  * away soon, so while the target thread will be woken up, it will not
2804  * be migrated to another CPU - ie. the two threads are 'synchronized'
2805  * with each other. This can prevent needless bouncing between CPUs.
2806  *
2807  * On UP it can prevent extra preemption.
2808  */
2809 void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
2810 {
2811         unsigned long flags;
2812         int sync = 1;
2813
2814         if (unlikely(!q))
2815                 return;
2816
2817         if (unlikely(!nr_exclusive))
2818                 sync = 0;
2819
2820         spin_lock_irqsave(&q->lock, flags);
2821         __wake_up_common(q, mode, nr_exclusive, sync, NULL);
2822         spin_unlock_irqrestore(&q->lock, flags);
2823 }
2824 EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
2825
2826 void fastcall complete(struct completion *x)
2827 {
2828         unsigned long flags;
2829
2830         spin_lock_irqsave(&x->wait.lock, flags);
2831         x->done++;
2832         __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2833                          1, 0, NULL);
2834         spin_unlock_irqrestore(&x->wait.lock, flags);
2835 }
2836 EXPORT_SYMBOL(complete);
2837
2838 void fastcall complete_all(struct completion *x)
2839 {
2840         unsigned long flags;
2841
2842         spin_lock_irqsave(&x->wait.lock, flags);
2843         x->done += UINT_MAX/2;
2844         __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2845                          0, 0, NULL);
2846         spin_unlock_irqrestore(&x->wait.lock, flags);
2847 }
2848 EXPORT_SYMBOL(complete_all);
2849
2850 void fastcall __sched wait_for_completion(struct completion *x)
2851 {
2852         might_sleep();
2853         spin_lock_irq(&x->wait.lock);
2854         if (!x->done) {
2855                 DECLARE_WAITQUEUE(wait, current);
2856
2857                 wait.flags |= WQ_FLAG_EXCLUSIVE;
2858                 __add_wait_queue_tail(&x->wait, &wait);
2859                 do {
2860                         __set_current_state(TASK_UNINTERRUPTIBLE);
2861                         spin_unlock_irq(&x->wait.lock);
2862                         schedule();
2863                         spin_lock_irq(&x->wait.lock);
2864                 } while (!x->done);
2865                 __remove_wait_queue(&x->wait, &wait);
2866         }
2867         x->done--;
2868         spin_unlock_irq(&x->wait.lock);
2869 }
2870 EXPORT_SYMBOL(wait_for_completion);
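/*
 * Usage sketch (illustrative, not part of this file): the usual pairing for
 * the completion API implemented above:
 *
 *	DECLARE_COMPLETION(done);
 *
 *	// waiting side
 *	wait_for_completion(&done);
 *
 *	// signalling side, e.g. another thread or an interrupt handler
 *	complete(&done);
 *
 * Because 'done' is a counter rather than a flag, a complete() that arrives
 * before the matching wait_for_completion() is not lost.
 */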
2871
2872 #define SLEEP_ON_VAR                                    \
2873         unsigned long flags;                            \
2874         wait_queue_t wait;                              \
2875         init_waitqueue_entry(&wait, current);
2876
2877 #define SLEEP_ON_HEAD                                   \
2878         spin_lock_irqsave(&q->lock,flags);              \
2879         __add_wait_queue(q, &wait);                     \
2880         spin_unlock(&q->lock);
2881
2882 #define SLEEP_ON_TAIL                                   \
2883         spin_lock_irq(&q->lock);                        \
2884         __remove_wait_queue(q, &wait);                  \
2885         spin_unlock_irqrestore(&q->lock, flags);
2886
2887 void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
2888 {
2889         SLEEP_ON_VAR
2890
2891         current->state = TASK_INTERRUPTIBLE;
2892
2893         SLEEP_ON_HEAD
2894         schedule();
2895         SLEEP_ON_TAIL
2896 }
2897
2898 EXPORT_SYMBOL(interruptible_sleep_on);
2899
2900 long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
2901 {
2902         SLEEP_ON_VAR
2903
2904         current->state = TASK_INTERRUPTIBLE;
2905
2906         SLEEP_ON_HEAD
2907         timeout = schedule_timeout(timeout);
2908         SLEEP_ON_TAIL
2909
2910         return timeout;
2911 }
2912
2913 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
2914
2915 void fastcall __sched sleep_on(wait_queue_head_t *q)
2916 {
2917         SLEEP_ON_VAR
2918
2919         current->state = TASK_UNINTERRUPTIBLE;
2920
2921         SLEEP_ON_HEAD
2922         schedule();
2923         SLEEP_ON_TAIL
2924 }
2925
2926 EXPORT_SYMBOL(sleep_on);
2927
2928 long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
2929 {
2930         SLEEP_ON_VAR
2931
2932         current->state = TASK_UNINTERRUPTIBLE;
2933
2934         SLEEP_ON_HEAD
2935         timeout = schedule_timeout(timeout);
2936         SLEEP_ON_TAIL
2937
2938         return timeout;
2939 }
2940
2941 EXPORT_SYMBOL(sleep_on_timeout);
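/*
 * Usage sketch (illustrative, not part of this file):
 *
 *	// wait on 'wq' for at most one second
 *	remaining = sleep_on_timeout(&wq, HZ);
 *
 * sleep_on_timeout() returns the jiffies left of the timeout (0 if it
 * expired).  Note that none of the sleep_on() variants re-check a wakeup
 * condition themselves, so a caller that tests a condition before sleeping
 * can miss a wakeup that arrives in between; condition-based helpers such as
 * wait_event_interruptible() close that window.
 */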
2942
2943 void set_user_nice(task_t *p, long nice)
2944 {
2945         unsigned long flags;
2946         prio_array_t *array;
2947         runqueue_t *rq;
2948         int old_prio, new_prio, delta;
2949
2950         if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
2951                 return;
2952         /*
2953          * We have to be careful: if called from sys_setpriority(),
2954          * the task might be in the middle of scheduling on another CPU.
2955          */
2956         rq = task_rq_lock(p, &flags);
2957         /*
2958          * The RT priorities are set via setscheduler(), but we still
2959          * allow the 'normal' nice value to be set - but as expected
2960          * it wont have any effect on scheduling until the task is
2961          * not SCHED_NORMAL:
2962          */
2963         if (rt_task(p)) {
2964                 p->static_prio = NICE_TO_PRIO(nice);
2965                 goto out_unlock;
2966         }
2967         array = p->array;
2968         if (array)
2969                 dequeue_task(p, array);
2970
2971         old_prio = p->prio;
2972         new_prio = NICE_TO_PRIO(nice);
2973         delta = new_prio - old_prio;
2974         p->static_prio = NICE_TO_PRIO(nice);
2975         p->prio += delta;
2976
2977         if (array) {
2978                 enqueue_task(p, array);
2979                 /*
2980                  * If the task increased its priority or is running and
2981                  * lowered its priority, then reschedule its CPU:
2982                  */
2983                 if (delta < 0 || (delta > 0 && task_running(rq, p)))
2984                         resched_task(rq->curr);
2985         }
2986 out_unlock:
2987         task_rq_unlock(rq, &flags);
2988 }
2989
2990 EXPORT_SYMBOL(set_user_nice);
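/*
 * Behaviour example (illustrative): renicing a queued SCHED_NORMAL task from
 * nice 0 to nice +10 gives it a larger priority number (larger numbers mean
 * lower priority), so delta ends up positive.  With delta > 0 the CPU is only
 * kicked via resched_task() if the reniced task is currently running; with
 * delta < 0 (priority raised) the runqueue's current task is always
 * rescheduled, since the reniced task may now be entitled to preempt it.
 */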
2991
2992 #ifdef __ARCH_WANT_SYS_NICE
2993
2994 /*
2995  * sys_nice - change the priority of the current process.
2996  * @increment: priority increment
2997  *
2998  * sys_setpriority is a more generic, but much slower function that
2999  * does similar things.
3000  */
3001 asmlinkage long sys_nice(int increment)
3002 {
3003         int retval;
3004         long nice;
3005
3006         /*
3007          * Setpriority might change our priority at the same moment.
3008          * We don't have to worry. Conceptually one call occurs first
3009          * and we have a single winner.
3010          */
3011         if (increment < 0) {
3012                 if (!capable(CAP_SYS_NICE))
3013                         return -EPERM;
3014                 if (increment < -40)
3015                         increment = -40;
3016         }
3017         if (increment > 40)
3018                 increment = 40;
3019
3020         nice = PRIO_TO_NICE(current->static_prio) + increment;
3021         if (nice < -20)
3022                 nice = -20;
3023         if (nice > 19)
3024                 nice = 19;
3025
3026         retval = security_task_setnice(current, nice);
3027         if (retval)
3028                 return retval;
3029
3030         set_user_nice(current, nice);
3031         return 0;
3032 }
3033
3034 #endif
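/*
 * Example (illustrative): sys_nice() clamps its argument to [-40, 40] and the
 * resulting nice value to [-20, 19], so a task at nice 5 calling sys_nice(30)
 * ends up at nice 19, while sys_nice(-1) from a task without CAP_SYS_NICE
 * fails with -EPERM before any clamping takes place.
 */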
3035
3036 /**
3037  * task_prio - return the priority value of a given task.
3038  * @p: the task in question.
3039  *
3040  * This is the priority value as seen by users in /proc.
3041  * RT tasks are offset by -200. Normal tasks are centered
3042  * around 0, value goes from -16 to +15.
3043  */
3044 int task_prio(const task_t *p)
3045 {
3046         return p->prio - MAX_RT_PRIO;
3047 }
3048
3049 /**
3050  * task_nice - return the nice value of a given task.
3051  * @p: the task in question.
3052  */
3053 int task_nice(const task_t *p)
3054 {
3055         return TASK_NICE(p);
3056 }
3057
3058 /**
3059  * idle_cpu - is a given cpu idle currently?
3060  * @cpu: the processor in question.
3061  */
3062 int idle_cpu(int cpu)
3063 {
3064         return cpu_curr(cpu) == cpu_rq(cpu)->idle;
3065 }
3066
3067 EXPORT_SYMBOL_GPL(idle_cpu);
3068
3069 /**
3070  * find_process_by_pid - find a process with a matching PID value.
3071  * @pid: the pid in question.
3072  */
3073 static inline task_t *find_process_by_pid(pid_t pid)
3074 {
3075         return pid ? find_task_by_pid(pid) : current;
3076 }
3077
3078 /* Actually do priority change: must hold rq lock. */
3079 static void __setscheduler(struct task_struct *p, int policy, int prio)
3080 {
3081         BUG_ON(p->array);
3082         p->policy = policy;
3083         p->rt_priority = prio;
3084         if (policy != SCHED_NORMAL)
3085                 p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
3086         else
3087                 p->prio = p->static_prio;
3088 }
3089
3090 /*
3091  * setscheduler - change the scheduling policy and/or RT priority of a thread.
3092  */
3093 static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3094 {
3095         struct sched_param lp;
3096         int retval = -EINVAL;
3097         int oldprio, oldpolicy = -1;
3098         prio_array_t *array;
3099         unsigned long flags;
3100         runqueue_t *rq;
3101         task_t *p;
3102
3103         if (!param || pid < 0)
3104                 goto out_nounlock;
3105
3106         retval = -EFAULT;
3107         if (copy_from_user(&lp, param, sizeof(struct sched_param)))
3108                 goto out_nounlock;
3109
3110         /*
3111          * We play safe to avoid deadlocks.
3112          */
3113         read_lock_irq(&tasklist_lock);
3114
3115         p = find_process_by_pid(pid);
3116
3117         retval = -ESRCH;
3118         if (!p)
3119                 goto out_unlock;
3120 recheck:
3121         /* double check policy once rq lock held */
3122         if (policy < 0)
3123                 policy = oldpolicy = p->policy;
3124         else {
3125                 retval = -EINVAL;
3126                 if (policy != SCHED_FIFO && policy != SCHED_RR &&
3127                                 policy != SCHED_NORMAL)
3128                         goto out_unlock;
3129         }
3130         /*
3131          * Valid priorities for SCHED_FIFO and SCHED_RR are
3132          * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
3133          */
3134         retval = -EINVAL;
3135         if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
3136                 goto out_unlock;
3137         if ((policy == SCHED_NORMAL) != (lp.sched_priority == 0))
3138                 goto out_unlock;
3139
3140         retval = -EPERM;
3141         if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
3142             !capable(CAP_SYS_NICE))
3143                 goto out_unlock;
3144         if ((current->euid != p->euid) && (current->euid != p->uid) &&
3145             !capable(CAP_SYS_NICE))
3146                 goto out_unlock;
3147
3148         retval = security_task_setscheduler(p, policy, &lp);
3149         if (retval)
3150                 goto out_unlock;
3151         /*
3152          * To be able to change p->policy safely, the appropriate
3153          * runqueue lock must be held.
3154          */
3155         rq = task_rq_lock(p, &flags);
3156         /* recheck policy now with rq lock held */
3157         if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3158                 policy = oldpolicy = -1;
3159                 task_rq_unlock(rq, &flags);
3160                 goto recheck;
3161         }
3162         array = p->array;
3163         if (array)
3164                 deactivate_task(p, task_rq(p));
3165         retval = 0;
3166         oldprio = p->prio;
3167         __setscheduler(p, policy, lp.sched_priority);
3168         if (array) {
3169                 __activate_task(p, task_rq(p));
3170                 /*
3171                  * Reschedule if we are currently running on this runqueue and
3172                  * our priority decreased, or if we are not currently running on
3173                  * this runqueue and our priority is higher than the current's
3174                  */
3175                 if (task_running(rq, p)) {
3176                         if (p->prio > oldprio)
3177                                 resched_task(rq->curr);
3178                 } else if (TASK_PREEMPTS_CURR(p, rq))
3179                         resched_task(rq->curr);
3180         }
3181         task_rq_unlock(rq, &flags);
3182 out_unlock:
3183         read_unlock_irq(&tasklist_lock);
3184 out_nounlock:
3185         return retval;
3186 }
3187
3188 /**
3189  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3190  * @pid: the pid in question.
3191  * @policy: new policy
3192  * @param: structure containing the new RT priority.
3193  */
3194 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
3195                                        struct sched_param __user *param)
3196 {
3197         return setscheduler(pid, policy, param);
3198 }
3199
3200 /**
3201  * sys_sched_setparam - set/change the RT priority of a thread
3202  * @pid: the pid in question.
3203  * @param: structure containing the new RT priority.
3204  */
3205 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
3206 {
3207         return setscheduler(pid, -1, param);
3208 }
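
/*
 * Illustrative userspace sketch (not part of this file): the syscalls above
 * are reached through the sched_setscheduler(2)/sched_setparam(2) wrappers.
 * Making the calling process SCHED_FIFO at priority 50 (which needs
 * CAP_SYS_NICE, as checked in setscheduler() above) looks like:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp = { .sched_priority = 50 };
 *
 *		if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
 *			perror("sched_setscheduler");
 *		printf("policy is now %d\n", sched_getscheduler(0));
 *		return 0;
 *	}
 */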
3209
3210 /**
3211  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3212  * @pid: the pid in question.
3213  */
3214 asmlinkage long sys_sched_getscheduler(pid_t pid)
3215 {
3216         int retval = -EINVAL;
3217         task_t *p;
3218
3219         if (pid < 0)
3220                 goto out_nounlock;
3221
3222         retval = -ESRCH;
3223         read_lock(&tasklist_lock);
3224         p = find_process_by_pid(pid);
3225         if (p) {
3226                 retval = security_task_getscheduler(p);
3227                 if (!retval)
3228                         retval = p->policy;
3229         }
3230         read_unlock(&tasklist_lock);
3231
3232 out_nounlock:
3233         return retval;
3234 }
3235
3236 /**
3237  * sys_sched_getparam - get the RT priority of a thread
3238  * @pid: the pid in question.
3239  * @param: structure containing the RT priority.
3240  */
3241 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
3242 {
3243         struct sched_param lp;
3244         int retval = -EINVAL;
3245         task_t *p;
3246
3247         if (!param || pid < 0)
3248                 goto out_nounlock;
3249
3250         read_lock(&tasklist_lock);
3251         p = find_process_by_pid(pid);
3252         retval = -ESRCH;
3253         if (!p)
3254                 goto out_unlock;
3255
3256         retval = security_task_getscheduler(p);
3257         if (retval)
3258                 goto out_unlock;
3259
3260         lp.sched_priority = p->rt_priority;
3261         read_unlock(&tasklist_lock);
3262
3263         /*
3264          * This one might sleep, we cannot do it with a spinlock held ...
3265          */
3266         retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3267
3268 out_nounlock:
3269         return retval;
3270
3271 out_unlock:
3272         read_unlock(&tasklist_lock);
3273         return retval;
3274 }
3275
3276 long sched_setaffinity(pid_t pid, cpumask_t new_mask)
3277 {
3278         task_t *p;
3279         int retval;
3280
3281         lock_cpu_hotplug();
3282         read_lock(&tasklist_lock);
3283
3284         p = find_process_by_pid(pid);
3285         if (!p) {
3286                 read_unlock(&tasklist_lock);
3287                 unlock_cpu_hotplug();
3288                 return -ESRCH;
3289         }
3290
3291         /*
3292          * It is not safe to call set_cpus_allowed with the
3293          * tasklist_lock held.  We will bump the task_struct's
3294          * usage count and then drop tasklist_lock.
3295          */
3296         get_task_struct(p);
3297         read_unlock(&tasklist_lock);
3298
3299         retval = -EPERM;
3300         if ((current->euid != p->euid) && (current->euid != p->uid) &&
3301                         !capable(CAP_SYS_NICE))
3302                 goto out_unlock;
3303
3304         retval = set_cpus_allowed(p, new_mask);
3305
3306 out_unlock:
3307         put_task_struct(p);
3308         unlock_cpu_hotplug();
3309         return retval;
3310 }
3311
3312 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3313                              cpumask_t *new_mask)
3314 {
3315         if (len < sizeof(cpumask_t)) {
3316                 memset(new_mask, 0, sizeof(cpumask_t));
3317         } else if (len > sizeof(cpumask_t)) {
3318                 len = sizeof(cpumask_t);
3319         }
3320         return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
3321 }
3322
3323 /**
3324  * sys_sched_setaffinity - set the cpu affinity of a process
3325  * @pid: pid of the process
3326  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3327  * @user_mask_ptr: user-space pointer to the new cpu mask
3328  */
3329 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
3330                                       unsigned long __user *user_mask_ptr)
3331 {
3332         cpumask_t new_mask;
3333         int retval;
3334
3335         retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
3336         if (retval)
3337                 return retval;
3338
3339         return sched_setaffinity(pid, new_mask);
3340 }
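
/*
 * Illustrative userspace sketch (not part of this file, glibc wrapper
 * signatures assumed): pinning the calling process to CPU 0 through the
 * syscall above, then reading the mask back:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t mask;
 *
 *		CPU_ZERO(&mask);
 *		CPU_SET(0, &mask);
 *		if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
 *			perror("sched_setaffinity");
 *		if (sched_getaffinity(0, sizeof(mask), &mask) == -1)
 *			perror("sched_getaffinity");
 *		printf("CPU 0 %s in the mask\n",
 *		       CPU_ISSET(0, &mask) ? "is" : "is not");
 *		return 0;
 *	}
 *
 * Note that sys_sched_getaffinity() below returns sizeof(cpumask_t) on
 * success; the library wrapper normally maps that to 0.
 */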
3341
3342 /*
3343  * Represents all CPUs present in the system.
3344  * In systems capable of hotplug, this map could dynamically grow
3345  * as new CPUs are detected in the system via any platform-specific
3346  * method, such as ACPI.
3347  */
3348
3349 cpumask_t cpu_present_map;
3350 EXPORT_SYMBOL(cpu_present_map);
3351
3352 #ifndef CONFIG_SMP
3353 cpumask_t cpu_online_map = CPU_MASK_ALL;
3354 cpumask_t cpu_possible_map = CPU_MASK_ALL;
3355 #endif
3356
3357 long sched_getaffinity(pid_t pid, cpumask_t *mask)
3358 {
3359         int retval;
3360         task_t *p;
3361
3362         lock_cpu_hotplug();
3363         read_lock(&tasklist_lock);
3364
3365         retval = -ESRCH;
3366         p = find_process_by_pid(pid);
3367         if (!p)
3368                 goto out_unlock;
3369
3370         retval = 0;
3371         cpus_and(*mask, p->cpus_allowed, cpu_possible_map);
3372
3373 out_unlock:
3374         read_unlock(&tasklist_lock);
3375         unlock_cpu_hotplug();
3376         if (retval)
3377                 return retval;
3378
3379         return 0;
3380 }
3381
3382 /**
3383  * sys_sched_getaffinity - get the cpu affinity of a process
3384  * @pid: pid of the process
3385  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3386  * @user_mask_ptr: user-space pointer to hold the current cpu mask
3387  */
3388 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
3389                                       unsigned long __user *user_mask_ptr)
3390 {
3391         int ret;
3392         cpumask_t mask;
3393
3394         if (len < sizeof(cpumask_t))
3395                 return -EINVAL;
3396
3397         ret = sched_getaffinity(pid, &mask);
3398         if (ret < 0)
3399                 return ret;
3400
3401         if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
3402                 return -EFAULT;
3403
3404         return sizeof(cpumask_t);
3405 }
3406
3407 /**
3408  * sys_sched_yield - yield the current processor to other threads.
3409  *
3410  * this function yields the current CPU by moving the calling thread
3411  * to the expired array. If there are no other runnable threads on this
3412  * CPU then the calling thread simply continues to run after schedule().
3413  */
3414 asmlinkage long sys_sched_yield(void)
3415 {
3416         runqueue_t *rq = this_rq_lock();
3417         prio_array_t *array = current->array;
3418         prio_array_t *target = rq->expired;
3419
3420         schedstat_inc(rq, yld_cnt);
3421         /*
3422          * We implement yielding by moving the task into the expired
3423          * queue.
3424          *
3425          * (special rule: RT tasks will just roundrobin in the active
3426          *  array.)
3427          */
3428         if (rt_task(current))
3429                 target = rq->active;
3430
3431         if (current->array->nr_active == 1) {
3432                 schedstat_inc(rq, yld_act_empty);
3433                 if (!rq->expired->nr_active)
3434                         schedstat_inc(rq, yld_both_empty);
3435         } else if (!rq->expired->nr_active)
3436                 schedstat_inc(rq, yld_exp_empty);
3437
3438         dequeue_task(current, array);
3439         enqueue_task(current, target);
3440
3441         /*
3442          * Since we are going to call schedule() anyway, there's
3443          * no need to preempt or enable interrupts:
3444          */
3445         __release(rq->lock);
3446         _raw_spin_unlock(&rq->lock);
3447         preempt_enable_no_resched();
3448
3449         schedule();
3450
3451         return 0;
3452 }
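
/*
 * Illustrative userspace sketch (not part of this file): sched_yield(2) is
 * typically used in userspace busy-wait loops to give other runnable
 * threads a chance, relying on the expired-array behaviour described above:
 *
 *	#include <sched.h>
 *
 *	void spin_wait(volatile int *flag)
 *	{
 *		while (!*flag)
 *			sched_yield();
 *	}
 */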
3453
3454 void __sched __cond_resched(void)
3455 {
3456         set_current_state(TASK_RUNNING);
3457         schedule();
3458 }
3459
3460 EXPORT_SYMBOL(__cond_resched);
3461
3462 /**
3463  * yield - yield the current processor to other threads.
3464  *
3465  * this is a shortcut for kernel-space yielding - it marks the
3466  * thread runnable and calls sys_sched_yield().
3467  */
3468 void __sched yield(void)
3469 {
3470         set_current_state(TASK_RUNNING);
3471         sys_sched_yield();
3472 }
3473
3474 EXPORT_SYMBOL(yield);
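
/*
 * A minimal kernel-side sketch (hypothetical helper, not part of this
 * file): long-running kernel loops usually call cond_resched() from
 * <linux/sched.h>, which only enters __cond_resched() above when a
 * reschedule is actually due, rather than calling yield() on every
 * iteration:
 *
 *	static void clear_many_pages(struct page **pages, int nr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nr; i++) {
 *			clear_highpage(pages[i]);
 *			cond_resched();
 *		}
 *	}
 */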
3475
3476 /*
3477  * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
3478  * that process accounting knows that this is a task in IO wait state.
3479  *
3480  * But don't do that if it is a deliberate, throttling IO wait (this task
3481  * has set its backing_dev_info: the queue against which it should throttle)
3482  */
3483 void __sched io_schedule(void)
3484 {
3485         struct runqueue *rq = this_rq();
3486
3487         atomic_inc(&rq->nr_iowait);
3488         schedule();
3489         atomic_dec(&rq->nr_iowait);
3490 }
3491
3492 EXPORT_SYMBOL(io_schedule);
3493
3494 long __sched io_schedule_timeout(long timeout)
3495 {
3496         struct runqueue *rq = this_rq();
3497         long ret;
3498
3499         atomic_inc(&rq->nr_iowait);
3500         ret = schedule_timeout(timeout);
3501         atomic_dec(&rq->nr_iowait);
3502         return ret;
3503 }
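
/*
 * A minimal kernel-side sketch (hypothetical wait loop, not part of this
 * file, helpers from <linux/wait.h>): callers sleep with io_schedule() or
 * io_schedule_timeout() instead of plain schedule() when waiting for disk
 * I/O, so that the sleep is accounted in rq->nr_iowait and shows up as
 * iowait time:
 *
 *	static void wait_for_my_io(wait_queue_head_t *wq, int *done)
 *	{
 *		DEFINE_WAIT(wait);
 *
 *		for (;;) {
 *			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
 *			if (*done)
 *				break;
 *			io_schedule();
 *		}
 *		finish_wait(wq, &wait);
 *	}
 */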
3504
3505 /**
3506  * sys_sched_get_priority_max - return maximum RT priority.
3507  * @policy: scheduling class.
3508  *
3509  * this syscall returns the maximum rt_priority that can be used
3510  * by a given scheduling class.
3511  */
3512 asmlinkage long sys_sched_get_priority_max(int policy)
3513 {
3514         int ret = -EINVAL;
3515
3516         switch (policy) {
3517         case SCHED_FIFO:
3518         case SCHED_RR:
3519                 ret = MAX_USER_RT_PRIO-1;
3520                 break;
3521         case SCHED_NORMAL:
3522                 ret = 0;
3523                 break;
3524         }
3525         return ret;
3526 }
3527
3528 /**
3529  * sys_sched_get_priority_min - return minimum RT priority.
3530  * @policy: scheduling class.
3531  *
3532  * this syscall returns the minimum rt_priority that can be used
3533  * by a given scheduling class.
3534  */
3535 asmlinkage long sys_sched_get_priority_min(int policy)
3536 {
3537         int ret = -EINVAL;
3538
3539         switch (policy) {
3540         case SCHED_FIFO:
3541         case SCHED_RR:
3542                 ret = 1;
3543                 break;
3544         case SCHED_NORMAL:
3545                 ret = 0;
3546         }
3547         return ret;
3548 }
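
/*
 * Illustrative userspace sketch (not part of this file): portable code
 * queries these limits rather than hard-coding the 1..MAX_USER_RT_PRIO-1
 * range:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		printf("SCHED_FIFO priority range: %d..%d\n",
 *		       sched_get_priority_min(SCHED_FIFO),
 *		       sched_get_priority_max(SCHED_FIFO));
 *		return 0;
 *	}
 */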
3549
3550 /**
3551  * sys_sched_rr_get_interval - return the default timeslice of a process.
3552  * @pid: pid of the process.
3553  * @interval: userspace pointer to the timeslice value.
3554  *
3555  * this syscall writes the default timeslice value of a given process
3556  * into the user-space timespec buffer. A value of '0' means infinity.
3557  */
3558 asmlinkage
3559 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
3560 {
3561         int retval = -EINVAL;
3562         struct timespec t;
3563         task_t *p;
3564
3565         if (pid < 0)
3566                 goto out_nounlock;
3567
3568         retval = -ESRCH;
3569         read_lock(&tasklist_lock);
3570         p = find_process_by_pid(pid);
3571         if (!p)
3572                 goto out_unlock;
3573
3574         retval = security_task_getscheduler(p);
3575         if (retval)
3576                 goto out_unlock;
3577
3578         jiffies_to_timespec(p->policy == SCHED_FIFO ?
3579                                 0 : task_timeslice(p), &t);
3580         read_unlock(&tasklist_lock);
3581         retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
3582 out_nounlock:
3583         return retval;
3584 out_unlock:
3585         read_unlock(&tasklist_lock);
3586         return retval;
3587 }
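
/*
 * Illustrative userspace sketch (not part of this file): reading the
 * calling process' timeslice through the syscall above:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (sched_rr_get_interval(0, &ts) == -1) {
 *			perror("sched_rr_get_interval");
 *			return 1;
 *		}
 *		printf("timeslice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */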
3588
3589 static inline struct task_struct *eldest_child(struct task_struct *p)
3590 {
3591         if (list_empty(&p->children)) return NULL;
3592         return list_entry(p->children.next,struct task_struct,sibling);
3593 }
3594
3595 static inline struct task_struct *older_sibling(struct task_struct *p)
3596 {
3597         if (p->sibling.prev==&p->parent->children) return NULL;
3598         return list_entry(p->sibling.prev,struct task_struct,sibling);
3599 }
3600
3601 static inline struct task_struct *younger_sibling(struct task_struct *p)
3602 {
3603         if (p->sibling.next==&p->parent->children) return NULL;
3604         return list_entry(p->sibling.next,struct task_struct,sibling);
3605 }
3606
3607 static void show_task(task_t * p)
3608 {
3609         task_t *relative;
3610         unsigned state;
3611         unsigned long free = 0;
3612         static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
3613
3614         printk("%-13.13s ", p->comm);
3615         state = p->state ? __ffs(p->state) + 1 : 0;
3616         if (state < ARRAY_SIZE(stat_nam))
3617                 printk(stat_nam[state]);
3618         else
3619                 printk("?");
3620 #if (BITS_PER_LONG == 32)
3621         if (state == TASK_RUNNING)
3622                 printk(" running ");
3623         else
3624                 printk(" %08lX ", thread_saved_pc(p));
3625 #else
3626         if (state == TASK_RUNNING)
3627                 printk("  running task   ");
3628         else
3629                 printk(" %016lx ", thread_saved_pc(p));
3630 #endif
3631 #ifdef CONFIG_DEBUG_STACK_USAGE
3632         {
3633                 unsigned long * n = (unsigned long *) (p->thread_info+1);
3634                 while (!*n)
3635                         n++;
3636                 free = (unsigned long) n - (unsigned long)(p->thread_info+1);
3637         }
3638 #endif
3639         printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
3640         if ((relative = eldest_child(p)))
3641                 printk("%5d ", relative->pid);
3642         else
3643                 printk("      ");
3644         if ((relative = younger_sibling(p)))
3645                 printk("%7d", relative->pid);
3646         else
3647                 printk("       ");
3648         if ((relative = older_sibling(p)))
3649                 printk(" %5d", relative->pid);
3650         else
3651                 printk("      ");
3652         if (!p->mm)
3653                 printk(" (L-TLB)\n");
3654         else
3655                 printk(" (NOTLB)\n");
3656
3657         if (state != TASK_RUNNING)
3658                 show_stack(p, NULL);
3659 }
3660
3661 void show_state(void)
3662 {
3663         task_t *g, *p;
3664
3665 #if (BITS_PER_LONG == 32)
3666         printk("\n"
3667                "                                               sibling\n");
3668         printk("  task             PC      pid father child younger older\n");
3669 #else
3670         printk("\n"
3671                "                                                       sibling\n");
3672         printk("  task                 PC          pid father child younger older\n");
3673 #endif
3674         read_lock(&tasklist_lock);
3675         do_each_thread(g, p) {
3676                 /*
3677                  * reset the NMI-timeout, listing all tasks on a slow
3678                  * console might take a lot of time:
3679                  */
3680                 touch_nmi_watchdog();
3681                 show_task(p);
3682         } while_each_thread(g, p);
3683
3684         read_unlock(&tasklist_lock);
3685 }
3686
3687 void __devinit init_idle(task_t *idle, int cpu)
3688 {
3689         runqueue_t *rq = cpu_rq(cpu);
3690         unsigned long flags;
3691
3692         idle->sleep_avg = 0;
3693         idle->interactive_credit = 0;
3694         idle->array = NULL;
3695         idle->prio = MAX_PRIO;
3696         idle->state = TASK_RUNNING;
3697         set_task_cpu(idle, cpu);
3698
3699         spin_lock_irqsave(&rq->lock, flags);
3700         rq->curr = rq->idle = idle;
3701         set_tsk_need_resched(idle);
3702         spin_unlock_irqrestore(&rq->lock, flags);
3703
3704         /* Set the preempt count _outside_ the spinlocks! */
3705 #ifdef CONFIG_PREEMPT
3706         idle->thread_info->preempt_count = (idle->lock_depth >= 0);
3707 #else
3708         idle->thread_info->preempt_count = 0;
3709 #endif
3710 }
3711
3712 /*
3713  * In a system that switches off the HZ timer, nohz_cpu_mask
3714  * indicates which cpus entered this state. This is used
3715  * in the rcu update to wait only for active cpus. For systems
3716  * which do not switch off the HZ timer, nohz_cpu_mask should
3717  * always be CPU_MASK_NONE.
3718  */
3719 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
3720
3721 #ifdef CONFIG_SMP
3722 /*
3723  * This is how migration works:
3724  *
3725  * 1) we queue a migration_req_t structure in the source CPU's
3726  *    runqueue and wake up that CPU's migration thread.
3727  * 2) we wait for the request's completion => thread blocks.
3728  * 3) migration thread wakes up (implicitly it forces the migrated
3729  *    thread off the CPU)
3730  * 4) it gets the migration request and checks whether the migrated
3731  *    task is still in the wrong runqueue.
3732  * 5) if it's in the wrong runqueue then the migration thread removes
3733  *    it and puts it into the right queue.
3734  * 6) migration thread signals completion of the request.
3735  * 7) we wake up and the migration is done.
3736  */
3737
3738 /*
3739  * Change a given task's CPU affinity. Migrate the thread to a
3740  * proper CPU and schedule it away if the CPU it's executing on
3741  * is removed from the allowed bitmask.
3742  *
3743  * NOTE: the caller must have a valid reference to the task, the
3744  * task must not exit() & deallocate itself prematurely.  The
3745  * call is not atomic; no spinlocks may be held.
3746  */
3747 int set_cpus_allowed(task_t *p, cpumask_t new_mask)
3748 {
3749         unsigned long flags;
3750         int ret = 0;
3751         migration_req_t req;
3752         runqueue_t *rq;
3753
3754         rq = task_rq_lock(p, &flags);
3755         if (!cpus_intersects(new_mask, cpu_online_map)) {
3756                 ret = -EINVAL;
3757                 goto out;
3758         }
3759
3760         p->cpus_allowed = new_mask;
3761         /* Can the task run on the task's current CPU? If so, we're done */
3762         if (cpu_isset(task_cpu(p), new_mask))
3763                 goto out;
3764
3765         if (migrate_task(p, any_online_cpu(new_mask), &req)) {
3766                 /* Need help from migration thread: drop lock and wait. */
3767                 task_rq_unlock(rq, &flags);
3768                 wake_up_process(rq->migration_thread);
3769                 wait_for_completion(&req.done);
3770                 tlb_migrate_finish(p->mm);
3771                 return 0;
3772         }
3773 out:
3774         task_rq_unlock(rq, &flags);
3775         return ret;
3776 }
3777
3778 EXPORT_SYMBOL_GPL(set_cpus_allowed);
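
/*
 * A minimal kernel-side sketch (hypothetical thread function, not part of
 * this file): an already running kernel thread can restrict itself to a
 * single CPU with set_cpus_allowed(); the migration machinery above moves
 * it if it currently executes elsewhere.  (Freshly created kthreads are
 * usually bound with kthread_bind() instead, as in migration_call() further
 * down.)
 *
 *	static int my_worker_thread(void *arg)
 *	{
 *		int cpu = (long)arg;
 *
 *		if (set_cpus_allowed(current, cpumask_of_cpu(cpu)) < 0)
 *			printk(KERN_WARNING "cannot bind to CPU %d\n", cpu);
 *
 *		while (!kthread_should_stop()) {
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			schedule();
 *		}
 *		return 0;
 *	}
 */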
3779
3780 /*
3781  * Move (not current) task off this cpu, onto dest cpu.  We're doing
3782  * this because either it can't run here any more (set_cpus_allowed()
3783  * away from this CPU, or CPU going down), or because we're
3784  * attempting to rebalance this task on exec (sched_exec).
3785  *
3786  * So we race with normal scheduler movements, but that's OK, as long
3787  * as the task is no longer on this CPU.
3788  */
3789 static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
3790 {
3791         runqueue_t *rq_dest, *rq_src;
3792
3793         if (unlikely(cpu_is_offline(dest_cpu)))
3794                 return;
3795
3796         rq_src = cpu_rq(src_cpu);
3797         rq_dest = cpu_rq(dest_cpu);
3798
3799         double_rq_lock(rq_src, rq_dest);
3800         /* Already moved. */
3801         if (task_cpu(p) != src_cpu)
3802                 goto out;
3803         /* Affinity changed (again). */
3804         if (!cpu_isset(dest_cpu, p->cpus_allowed))
3805                 goto out;
3806
3807         set_task_cpu(p, dest_cpu);
3808         if (p->array) {
3809                 /*
3810                  * Sync timestamp with rq_dest's before activating.
3811                  * The same thing could be achieved by doing this step
3812                  * afterwards, and pretending it was a local activate.
3813                  * This way is cleaner and logically correct.
3814                  */
3815                 p->timestamp = p->timestamp - rq_src->timestamp_last_tick
3816                                 + rq_dest->timestamp_last_tick;
3817                 deactivate_task(p, rq_src);
3818                 activate_task(p, rq_dest, 0);
3819                 if (TASK_PREEMPTS_CURR(p, rq_dest))
3820                         resched_task(rq_dest->curr);
3821         }
3822
3823 out:
3824         double_rq_unlock(rq_src, rq_dest);
3825 }
3826
3827 /*
3828  * migration_thread - this is a highprio system thread that performs
3829  * thread migration by bumping thread off CPU then 'pushing' onto
3830  * another runqueue.
3831  */
3832 static int migration_thread(void * data)
3833 {
3834         runqueue_t *rq;
3835         int cpu = (long)data;
3836
3837         rq = cpu_rq(cpu);
3838         BUG_ON(rq->migration_thread != current);
3839
3840         set_current_state(TASK_INTERRUPTIBLE);
3841         while (!kthread_should_stop()) {
3842                 struct list_head *head;
3843                 migration_req_t *req;
3844
3845                 if (current->flags & PF_FREEZE)
3846                         refrigerator(PF_FREEZE);
3847
3848                 spin_lock_irq(&rq->lock);
3849
3850                 if (cpu_is_offline(cpu)) {
3851                         spin_unlock_irq(&rq->lock);
3852                         goto wait_to_die;
3853                 }
3854
3855                 if (rq->active_balance) {
3856                         active_load_balance(rq, cpu);
3857                         rq->active_balance = 0;
3858                 }
3859
3860                 head = &rq->migration_queue;
3861
3862                 if (list_empty(head)) {
3863                         spin_unlock_irq(&rq->lock);
3864                         schedule();
3865                         set_current_state(TASK_INTERRUPTIBLE);
3866                         continue;
3867                 }
3868                 req = list_entry(head->next, migration_req_t, list);
3869                 list_del_init(head->next);
3870
3871                 if (req->type == REQ_MOVE_TASK) {
3872                         spin_unlock(&rq->lock);
3873                         __migrate_task(req->task, smp_processor_id(),
3874                                         req->dest_cpu);
3875                         local_irq_enable();
3876                 } else if (req->type == REQ_SET_DOMAIN) {
3877                         rq->sd = req->sd;
3878                         spin_unlock_irq(&rq->lock);
3879                 } else {
3880                         spin_unlock_irq(&rq->lock);
3881                         WARN_ON(1);
3882                 }
3883
3884                 complete(&req->done);
3885         }
3886         __set_current_state(TASK_RUNNING);
3887         return 0;
3888
3889 wait_to_die:
3890         /* Wait for kthread_stop */
3891         set_current_state(TASK_INTERRUPTIBLE);
3892         while (!kthread_should_stop()) {
3893                 schedule();
3894                 set_current_state(TASK_INTERRUPTIBLE);
3895         }
3896         __set_current_state(TASK_RUNNING);
3897         return 0;
3898 }
3899
3900 #ifdef CONFIG_HOTPLUG_CPU
3901 /* Figure out where a task on the dead CPU should go, using force if necessary. */
3902 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
3903 {
3904         int dest_cpu;
3905         cpumask_t mask;
3906
3907         /* On same node? */
3908         mask = node_to_cpumask(cpu_to_node(dead_cpu));
3909         cpus_and(mask, mask, tsk->cpus_allowed);
3910         dest_cpu = any_online_cpu(mask);
3911
3912         /* On any allowed CPU? */
3913         if (dest_cpu == NR_CPUS)
3914                 dest_cpu = any_online_cpu(tsk->cpus_allowed);
3915
3916         /* No more Mr. Nice Guy. */
3917         if (dest_cpu == NR_CPUS) {
3918                 cpus_setall(tsk->cpus_allowed);
3919                 dest_cpu = any_online_cpu(tsk->cpus_allowed);
3920
3921                 /*
3922                  * Don't tell them about moving exiting tasks or
3923                  * kernel threads (both mm NULL), since they never
3924                  * leave kernel.
3925          * leave the kernel.
3926                 if (tsk->mm && printk_ratelimit())
3927                         printk(KERN_INFO "process %d (%s) no "
3928                                "longer affine to cpu%d\n",
3929                                tsk->pid, tsk->comm, dead_cpu);
3930         }
3931         __migrate_task(tsk, dead_cpu, dest_cpu);
3932 }
3933
3934 /*
3935  * While a dead CPU has no uninterruptible tasks queued at this point,
3936  * it might still have a nonzero ->nr_uninterruptible counter, because
3937  * for performance reasons the counter is not strictly tracking tasks to
3938  * their home CPUs. So we just add the counter to another CPU's counter,
3939  * to keep the global sum constant after CPU-down:
3940  */
3941 static void migrate_nr_uninterruptible(runqueue_t *rq_src)
3942 {
3943         runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
3944         unsigned long flags;
3945
3946         local_irq_save(flags);
3947         double_rq_lock(rq_src, rq_dest);
3948         rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
3949         rq_src->nr_uninterruptible = 0;
3950         double_rq_unlock(rq_src, rq_dest);
3951         local_irq_restore(flags);
3952 }
3953
3954 /* Run through task list and migrate tasks from the dead cpu. */
3955 static void migrate_live_tasks(int src_cpu)
3956 {
3957         struct task_struct *tsk, *t;
3958
3959         write_lock_irq(&tasklist_lock);
3960
3961         do_each_thread(t, tsk) {
3962                 if (tsk == current)
3963                         continue;
3964
3965                 if (task_cpu(tsk) == src_cpu)
3966                         move_task_off_dead_cpu(src_cpu, tsk);
3967         } while_each_thread(t, tsk);
3968
3969         write_unlock_irq(&tasklist_lock);
3970 }
3971
3972 /* Schedules idle task to be the next runnable task on current CPU.
3973  * It does so by boosting its priority to highest possible and adding it to
3974  * the _front_ of the runqueue. Used by CPU offline code.
3975  */
3976 void sched_idle_next(void)
3977 {
3978         int cpu = smp_processor_id();
3979         runqueue_t *rq = this_rq();
3980         struct task_struct *p = rq->idle;
3981         unsigned long flags;
3982
3983         /* cpu has to be offline */
3984         BUG_ON(cpu_online(cpu));
3985
3986         /* Strictly not necessary since the rest of the CPUs are stopped by now
3987          * and interrupts disabled on current cpu.
3988          */
3989         spin_lock_irqsave(&rq->lock, flags);
3990
3991         __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
3992         /* Add idle task to the _front_ of its priority queue */
3993         __activate_idle_task(p, rq);
3994
3995         spin_unlock_irqrestore(&rq->lock, flags);
3996 }
3997
3998 static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
3999 {
4000         struct runqueue *rq = cpu_rq(dead_cpu);
4001
4002         /* Must be exiting, otherwise would be on tasklist. */
4003         BUG_ON(tsk->exit_state != EXIT_ZOMBIE && tsk->exit_state != EXIT_DEAD);
4004
4005         /* Cannot have done final schedule yet: would have vanished. */
4006         BUG_ON(tsk->flags & PF_DEAD);
4007
4008         get_task_struct(tsk);
4009
4010         /*
4011          * Drop lock around migration; if someone else moves it,
4012          * that's OK.  No task can be added to this CPU, so iteration is
4013          * fine.
4014          */
4015         spin_unlock_irq(&rq->lock);
4016         move_task_off_dead_cpu(dead_cpu, tsk);
4017         spin_lock_irq(&rq->lock);
4018
4019         put_task_struct(tsk);
4020 }
4021
4022 /* release_task() removes task from tasklist, so we won't find dead tasks. */
4023 static void migrate_dead_tasks(unsigned int dead_cpu)
4024 {
4025         unsigned arr, i;
4026         struct runqueue *rq = cpu_rq(dead_cpu);
4027
4028         for (arr = 0; arr < 2; arr++) {
4029                 for (i = 0; i < MAX_PRIO; i++) {
4030                         struct list_head *list = &rq->arrays[arr].queue[i];
4031                         while (!list_empty(list))
4032                                 migrate_dead(dead_cpu,
4033                                              list_entry(list->next, task_t,
4034                                                         run_list));
4035                 }
4036         }
4037 }
4038 #endif /* CONFIG_HOTPLUG_CPU */
4039
4040 /*
4041  * migration_call - callback that gets triggered when a CPU is added.
4042  * Here we can start up the necessary migration thread for the new CPU.
4043  */
4044 static int migration_call(struct notifier_block *nfb, unsigned long action,
4045                           void *hcpu)
4046 {
4047         int cpu = (long)hcpu;
4048         struct task_struct *p;
4049         struct runqueue *rq;
4050         unsigned long flags;
4051
4052         switch (action) {
4053         case CPU_UP_PREPARE:
4054                 p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
4055                 if (IS_ERR(p))
4056                         return NOTIFY_BAD;
4057                 p->flags |= PF_NOFREEZE;
4058                 kthread_bind(p, cpu);
4059                 /* Must be high prio: stop_machine expects to yield to it. */
4060                 rq = task_rq_lock(p, &flags);
4061                 __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
4062                 task_rq_unlock(rq, &flags);
4063                 cpu_rq(cpu)->migration_thread = p;
4064                 break;
4065         case CPU_ONLINE:
4066                 /* Strictly unnecessary, as the first user will wake it. */
4067                 wake_up_process(cpu_rq(cpu)->migration_thread);
4068                 break;
4069 #ifdef CONFIG_HOTPLUG_CPU
4070         case CPU_UP_CANCELED:
4071                 /* Unbind it from offline cpu so it can run.  Fall thru. */
4072                 kthread_bind(cpu_rq(cpu)->migration_thread,smp_processor_id());
4073                 kthread_stop(cpu_rq(cpu)->migration_thread);
4074                 cpu_rq(cpu)->migration_thread = NULL;
4075                 break;
4076         case CPU_DEAD:
4077                 migrate_live_tasks(cpu);
4078                 rq = cpu_rq(cpu);
4079                 kthread_stop(rq->migration_thread);
4080                 rq->migration_thread = NULL;
4081                 /* Idle task back to normal (off runqueue, low prio) */
4082                 rq = task_rq_lock(rq->idle, &flags);
4083                 deactivate_task(rq->idle, rq);
4084                 rq->idle->static_prio = MAX_PRIO;
4085                 __setscheduler(rq->idle, SCHED_NORMAL, 0);
4086                 migrate_dead_tasks(cpu);
4087                 task_rq_unlock(rq, &flags);
4088                 migrate_nr_uninterruptible(rq);
4089                 BUG_ON(rq->nr_running != 0);
4090
4091                 /* No need to migrate the tasks: it was best-effort if
4092                  * they didn't do lock_cpu_hotplug().  Just wake up
4093                  * the requestors. */
4094                 spin_lock_irq(&rq->lock);
4095                 while (!list_empty(&rq->migration_queue)) {
4096                         migration_req_t *req;
4097                         req = list_entry(rq->migration_queue.next,
4098                                          migration_req_t, list);
4099                         BUG_ON(req->type != REQ_MOVE_TASK);
4100                         list_del_init(&req->list);
4101                         complete(&req->done);
4102                 }
4103                 spin_unlock_irq(&rq->lock);
4104                 break;
4105 #endif
4106         }
4107         return NOTIFY_OK;
4108 }
4109
4110 /* Register at highest priority so that migration of tasks off a dead CPU
4111  * happens before everything else.
4112  */
4113 static struct notifier_block __devinitdata migration_notifier = {
4114         .notifier_call = migration_call,
4115         .priority = 10
4116 };
4117
4118 int __init migration_init(void)
4119 {
4120         void *cpu = (void *)(long)smp_processor_id();
4121         /* Start one for boot CPU. */
4122         migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
4123         migration_call(&migration_notifier, CPU_ONLINE, cpu);
4124         register_cpu_notifier(&migration_notifier);
4125         return 0;
4126 }
4127 #endif
4128
4129 #ifdef CONFIG_SMP
4130 /*
4131  * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
4132  * hold the hotplug lock.
4133  */
4134 void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
4135 {
4136         migration_req_t req;
4137         unsigned long flags;
4138         runqueue_t *rq = cpu_rq(cpu);
4139         int local = 1;
4140
4141         spin_lock_irqsave(&rq->lock, flags);
4142
4143         if (cpu == smp_processor_id() || !cpu_online(cpu)) {
4144                 rq->sd = sd;
4145         } else {
4146                 init_completion(&req.done);
4147                 req.type = REQ_SET_DOMAIN;
4148                 req.sd = sd;
4149                 list_add(&req.list, &rq->migration_queue);
4150                 local = 0;
4151         }
4152
4153         spin_unlock_irqrestore(&rq->lock, flags);
4154
4155         if (!local) {
4156                 wake_up_process(rq->migration_thread);
4157                 wait_for_completion(&req.done);
4158         }
4159 }
4160
4161 /* cpus with isolated domains */
4162 cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
4163
4164 /* Setup the mask of cpus configured for isolated domains */
4165 static int __init isolated_cpu_setup(char *str)
4166 {
4167         int ints[NR_CPUS], i;
4168
4169         str = get_options(str, ARRAY_SIZE(ints), ints);
4170         cpus_clear(cpu_isolated_map);
4171         for (i = 1; i <= ints[0]; i++)
4172                 cpu_set(ints[i], cpu_isolated_map);
4173         return 1;
4174 }
4175
4176 __setup ("isolcpus=", isolated_cpu_setup);
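
/*
 * Example (boot command line, not code): booting with
 *
 *	isolcpus=2,3
 *
 * keeps CPUs 2 and 3 out of cpu_default_map below, so no load-balancing
 * domains are built around them; only work explicitly placed there, e.g.
 * via sched_setaffinity(), will run on those CPUs.
 */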
4177
4178 /*
4179  * init_sched_build_groups takes an array of groups, the cpumask we wish
4180  * to span, and a pointer to a function which identifies what group a CPU
4181  * belongs to. The return value of group_fn must be a valid index into the
4182  * groups[] array, and must be >= 0 and < NR_CPUS (because we keep track
4183  * of the groups covered with a cpumask_t).
4184  *
4185  * init_sched_build_groups will build a circular linked list of the groups
4186  * covered by the given span, and will set each group's ->cpumask correctly,
4187  * and ->cpu_power to 0.
4188  */
4189 void __devinit init_sched_build_groups(struct sched_group groups[],
4190                         cpumask_t span, int (*group_fn)(int cpu))
4191 {
4192         struct sched_group *first = NULL, *last = NULL;
4193         cpumask_t covered = CPU_MASK_NONE;
4194         int i;
4195
4196         for_each_cpu_mask(i, span) {
4197                 int group = group_fn(i);
4198                 struct sched_group *sg = &groups[group];
4199                 int j;
4200
4201                 if (cpu_isset(i, covered))
4202                         continue;
4203
4204                 sg->cpumask = CPU_MASK_NONE;
4205                 sg->cpu_power = 0;
4206
4207                 for_each_cpu_mask(j, span) {
4208                         if (group_fn(j) != group)
4209                                 continue;
4210
4211                         cpu_set(j, covered);
4212                         cpu_set(j, sg->cpumask);
4213                 }
4214                 if (!first)
4215                         first = sg;
4216                 if (last)
4217                         last->next = sg;
4218                 last = sg;
4219         }
4220         last->next = first;
4221 }
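
/*
 * A minimal sketch (hypothetical names, not part of this file) of the
 * calling convention: the group_fn maps every CPU in the span to the index
 * of the group it belongs to, for instance one group per NUMA node:
 *
 *	static struct sched_group my_groups[MAX_NUMNODES];
 *
 *	static int my_cpu_to_group(int cpu)
 *	{
 *		return cpu_to_node(cpu);
 *	}
 *
 *	init_sched_build_groups(my_groups, cpu_online_map, &my_cpu_to_group);
 *
 * The real callers are in arch_init_sched_domains() below (for the generic,
 * non-ARCH_HAS_SCHED_DOMAIN case).
 */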
4222
4223
4224 #ifdef ARCH_HAS_SCHED_DOMAIN
4225 extern void __devinit arch_init_sched_domains(void);
4226 extern void __devinit arch_destroy_sched_domains(void);
4227 #else
4228 #ifdef CONFIG_SCHED_SMT
4229 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
4230 static struct sched_group sched_group_cpus[NR_CPUS];
4231 static int __devinit cpu_to_cpu_group(int cpu)
4232 {
4233         return cpu;
4234 }
4235 #endif
4236
4237 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
4238 static struct sched_group sched_group_phys[NR_CPUS];
4239 static int __devinit cpu_to_phys_group(int cpu)
4240 {
4241 #ifdef CONFIG_SCHED_SMT
4242         return first_cpu(cpu_sibling_map[cpu]);
4243 #else
4244         return cpu;
4245 #endif
4246 }
4247
4248 #ifdef CONFIG_NUMA
4249
4250 static DEFINE_PER_CPU(struct sched_domain, node_domains);
4251 static struct sched_group sched_group_nodes[MAX_NUMNODES];
4252 static int __devinit cpu_to_node_group(int cpu)
4253 {
4254         return cpu_to_node(cpu);
4255 }
4256 #endif
4257
4258 #if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
4259 /*
4260  * The domains setup code relies on siblings not spanning
4261  * multiple nodes. Make sure the architecture has a proper
4262  * siblings map:
4263  */
4264 static void check_sibling_maps(void)
4265 {
4266         int i, j;
4267
4268         for_each_online_cpu(i) {
4269                 for_each_cpu_mask(j, cpu_sibling_map[i]) {
4270                         if (cpu_to_node(i) != cpu_to_node(j)) {
4271                                 printk(KERN_INFO "warning: CPU %d siblings map "
4272                                         "to different node - isolating "
4273                                         "them.\n", i);
4274                                 cpu_sibling_map[i] = cpumask_of_cpu(i);
4275                                 break;
4276                         }
4277                 }
4278         }
4279 }
4280 #endif
4281
4282 /*
4283  * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
4284  */
4285 static void __devinit arch_init_sched_domains(void)
4286 {
4287         int i;
4288         cpumask_t cpu_default_map;
4289
4290 #if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
4291         check_sibling_maps();
4292 #endif
4293         /*
4294          * Setup mask for cpus without special case scheduling requirements.
4295          * For now this just excludes isolated cpus, but could be used to
4296          * exclude other special cases in the future.
4297          */
4298         cpus_complement(cpu_default_map, cpu_isolated_map);
4299         cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
4300
4301         /*
4302          * Set up domains. Isolated domains just stay on the dummy domain.
4303          */
4304         for_each_cpu_mask(i, cpu_default_map) {
4305                 int group;
4306                 struct sched_domain *sd = NULL, *p;
4307                 cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
4308
4309                 cpus_and(nodemask, nodemask, cpu_default_map);
4310
4311 #ifdef CONFIG_NUMA
4312                 sd = &per_cpu(node_domains, i);
4313                 group = cpu_to_node_group(i);
4314                 *sd = SD_NODE_INIT;
4315                 sd->span = cpu_default_map;
4316                 sd->groups = &sched_group_nodes[group];
4317 #endif
4318
4319                 p = sd;
4320                 sd = &per_cpu(phys_domains, i);
4321                 group = cpu_to_phys_group(i);
4322                 *sd = SD_CPU_INIT;
4323                 sd->span = nodemask;
4324                 sd->parent = p;
4325                 sd->groups = &sched_group_phys[group];
4326
4327 #ifdef CONFIG_SCHED_SMT
4328                 p = sd;
4329                 sd = &per_cpu(cpu_domains, i);
4330                 group = cpu_to_cpu_group(i);
4331                 *sd = SD_SIBLING_INIT;
4332                 sd->span = cpu_sibling_map[i];
4333                 cpus_and(sd->span, sd->span, cpu_default_map);
4334                 sd->parent = p;
4335                 sd->groups = &sched_group_cpus[group];
4336 #endif
4337         }
4338
4339 #ifdef CONFIG_SCHED_SMT
4340         /* Set up CPU (sibling) groups */
4341         for_each_online_cpu(i) {
4342                 cpumask_t this_sibling_map = cpu_sibling_map[i];
4343                 cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
4344                 if (i != first_cpu(this_sibling_map))
4345                         continue;
4346
4347                 init_sched_build_groups(sched_group_cpus, this_sibling_map,
4348                                                 &cpu_to_cpu_group);
4349         }
4350 #endif
4351
4352         /* Set up physical groups */
4353         for (i = 0; i < MAX_NUMNODES; i++) {
4354                 cpumask_t nodemask = node_to_cpumask(i);
4355
4356                 cpus_and(nodemask, nodemask, cpu_default_map);
4357                 if (cpus_empty(nodemask))
4358                         continue;
4359
4360                 init_sched_build_groups(sched_group_phys, nodemask,
4361                                                 &cpu_to_phys_group);
4362         }
4363
4364 #ifdef CONFIG_NUMA
4365         /* Set up node groups */
4366         init_sched_build_groups(sched_group_nodes, cpu_default_map,
4367                                         &cpu_to_node_group);
4368 #endif
4369
4370         /* Calculate CPU power for physical packages and nodes */
4371         for_each_cpu_mask(i, cpu_default_map) {
4372                 int power;
4373                 struct sched_domain *sd;
4374 #ifdef CONFIG_SCHED_SMT
4375                 sd = &per_cpu(cpu_domains, i);
4376                 power = SCHED_LOAD_SCALE;
4377                 sd->groups->cpu_power = power;
4378 #endif
4379
4380                 sd = &per_cpu(phys_domains, i);
4381                 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
4382                                 (cpus_weight(sd->groups->cpumask)-1) / 10;
4383                 sd->groups->cpu_power = power;
4384
4385 #ifdef CONFIG_NUMA
4386                 if (i == first_cpu(sd->groups->cpumask)) {
4387                         /* Only add "power" once for each physical package. */
4388                         sd = &per_cpu(node_domains, i);
4389                         sd->groups->cpu_power += power;
4390                 }
4391 #endif
4392         }
4393
4394         /* Attach the domains */
4395         for_each_online_cpu(i) {
4396                 struct sched_domain *sd;
4397 #ifdef CONFIG_SCHED_SMT
4398                 sd = &per_cpu(cpu_domains, i);
4399 #else
4400                 sd = &per_cpu(phys_domains, i);
4401 #endif
4402                 cpu_attach_domain(sd, i);
4403         }
4404 }
4405
4406 #ifdef CONFIG_HOTPLUG_CPU
4407 static void __devinit arch_destroy_sched_domains(void)
4408 {
4409         /* Do nothing: everything is statically allocated. */
4410 }
4411 #endif
4412
4413 #endif /* ARCH_HAS_SCHED_DOMAIN */
4414
4415 #define SCHED_DOMAIN_DEBUG
4416 #ifdef SCHED_DOMAIN_DEBUG
4417 static void sched_domain_debug(void)
4418 {
4419         int i;
4420
4421         for_each_online_cpu(i) {
4422                 runqueue_t *rq = cpu_rq(i);
4423                 struct sched_domain *sd;
4424                 int level = 0;
4425
4426                 sd = rq->sd;
4427
4428                 printk(KERN_DEBUG "CPU%d:\n", i);
4429
4430                 do {
4431                         int j;
4432                         char str[NR_CPUS];
4433                         struct sched_group *group = sd->groups;
4434                         cpumask_t groupmask;
4435
4436                         cpumask_scnprintf(str, NR_CPUS, sd->span);
4437                         cpus_clear(groupmask);
4438
4439                         printk(KERN_DEBUG);
4440                         for (j = 0; j < level + 1; j++)
4441                                 printk(" ");
4442                         printk("domain %d: ", level);
4443
4444                         if (!(sd->flags & SD_LOAD_BALANCE)) {
4445                                 printk("does not load-balance");
4446                                 if (sd->parent)
4447                                         printk(" ERROR !SD_LOAD_BALANCE domain has parent");
4448                                 printk("\n");
4449                                 break;
4450                         }
4451
4452                         printk("span %s\n", str);
4453
4454                         if (!cpu_isset(i, sd->span))
4455                                 printk(KERN_DEBUG "ERROR domain->span does not contain CPU%d\n", i);
4456                         if (!cpu_isset(i, group->cpumask))
4457                                 printk(KERN_DEBUG "ERROR domain->groups does not contain CPU%d\n", i);
4458
4459                         printk(KERN_DEBUG);
4460                         for (j = 0; j < level + 2; j++)
4461                                 printk(" ");
4462                         printk("groups:");
4463                         do {
4464                                 if (!group) {
4465                                         printk(" ERROR: NULL");
4466                                         break;
4467                                 }
4468                                 
4469                                 if (!group->cpu_power)
4470                                         printk(KERN_DEBUG "ERROR group->cpu_power not set\n");
4471
4472                                 if (!cpus_weight(group->cpumask))
4473                                         printk(" ERROR empty group:");
4474
4475                                 if (cpus_intersects(groupmask, group->cpumask))
4476                                         printk(" ERROR repeated CPUs:");
4477
4478                                 cpus_or(groupmask, groupmask, group->cpumask);
4479
4480                                 cpumask_scnprintf(str, NR_CPUS, group->cpumask);
4481                                 printk(" %s", str);
4482
4483                                 group = group->next;
4484                         } while (group != sd->groups);
4485                         printk("\n");
4486
4487                         if (!cpus_equal(sd->span, groupmask))
4488                                 printk(KERN_DEBUG "ERROR groups don't span domain->span\n");
4489
4490                         level++;
4491                         sd = sd->parent;
4492
4493                         if (sd) {
4494                                 if (!cpus_subset(groupmask, sd->span))
4495                                         printk(KERN_DEBUG "ERROR parent span is not a superset of domain->span\n");
4496                         }
4497
4498                 } while (sd);
4499         }
4500 }
4501 #else
4502 #define sched_domain_debug() do { } while (0)
4503 #endif
4504
4505 /*
4506  * Initial dummy domain for early boot and for hotplug cpu. Being static,
4507  * it is initialized to zero, so all balancing flags are cleared which is
4508  * what we want.
4509  */
4510 static struct sched_domain sched_domain_dummy;
4511
4512 #ifdef CONFIG_HOTPLUG_CPU
4513 /*
4514  * Force a reinitialization of the sched domains hierarchy.  The domains
4515  * and groups cannot be updated in place without racing with the balancing
4516  * code, so we temporarily attach all running cpus to a "dummy" domain
4517  * which will prevent rebalancing while the sched domains are recalculated.
4518  */
4519 static int update_sched_domains(struct notifier_block *nfb,
4520                                 unsigned long action, void *hcpu)
4521 {
4522         int i;
4523
4524         switch (action) {
4525         case CPU_UP_PREPARE:
4526         case CPU_DOWN_PREPARE:
4527                 for_each_online_cpu(i)
4528                         cpu_attach_domain(&sched_domain_dummy, i);
4529                 arch_destroy_sched_domains();
4530                 return NOTIFY_OK;
4531
4532         case CPU_UP_CANCELED:
4533         case CPU_DOWN_FAILED:
4534         case CPU_ONLINE:
4535         case CPU_DEAD:
4536                 /*
4537                  * Fall through and re-initialise the domains.
4538                  */
4539                 break;
4540         default:
4541                 return NOTIFY_DONE;
4542         }
4543
4544         /* The hotplug lock is already held by cpu_up/cpu_down */
4545         arch_init_sched_domains();
4546
4547         sched_domain_debug();
4548
4549         return NOTIFY_OK;
4550 }
4551 #endif
4552
4553 void __init sched_init_smp(void)
4554 {
4555         lock_cpu_hotplug();
4556         arch_init_sched_domains();
4557         sched_domain_debug();
4558         unlock_cpu_hotplug();
4559         /* XXX: Theoretical race here - CPU may be hotplugged now */
4560         hotcpu_notifier(update_sched_domains, 0);
4561 }
4562 #else
4563 void __init sched_init_smp(void)
4564 {
4565 }
4566 #endif /* CONFIG_SMP */
4567
4568 int in_sched_functions(unsigned long addr)
4569 {
4570         /* Linker adds these: start and end of __sched functions */
4571         extern char __sched_text_start[], __sched_text_end[];
4572         return in_lock_functions(addr) ||
4573                 (addr >= (unsigned long)__sched_text_start
4574                 && addr < (unsigned long)__sched_text_end);
4575 }
4576
4577 void __init sched_init(void)
4578 {
4579         runqueue_t *rq;
4580         int i, j, k;
4581
4582         for (i = 0; i < NR_CPUS; i++) {
4583                 prio_array_t *array;
4584
4585                 rq = cpu_rq(i);
4586                 spin_lock_init(&rq->lock);
4587                 rq->active = rq->arrays;
4588                 rq->expired = rq->arrays + 1;
4589                 rq->best_expired_prio = MAX_PRIO;
4590
4591 #ifdef CONFIG_SMP
4592                 rq->sd = &sched_domain_dummy;
4593                 rq->cpu_load = 0;
4594                 rq->active_balance = 0;
4595                 rq->push_cpu = 0;
4596                 rq->migration_thread = NULL;
4597                 INIT_LIST_HEAD(&rq->migration_queue);
4598 #endif
4599                 atomic_set(&rq->nr_iowait, 0);
4600
4601                 for (j = 0; j < 2; j++) {
4602                         array = rq->arrays + j;
4603                         for (k = 0; k < MAX_PRIO; k++) {
4604                                 INIT_LIST_HEAD(array->queue + k);
4605                                 __clear_bit(k, array->bitmap);
4606                         }
4607                         /* delimiter for bitsearch */
4608                         __set_bit(MAX_PRIO, array->bitmap);
4609                 }
4610         }
4611
4612         /*
4613          * The boot idle thread does lazy MMU switching as well:
4614          */
4615         atomic_inc(&init_mm.mm_count);
4616         enter_lazy_tlb(&init_mm, current);
4617
4618         /*
4619          * Make us the idle thread. Technically, schedule() should not be
4620          * called from this thread, however somewhere below it might be,
4621          * but because we are the idle thread, we just pick up running again
4622          * when this runqueue becomes "idle".
4623          */
4624         init_idle(current, smp_processor_id());
4625 }
4626
4627 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4628 void __might_sleep(char *file, int line)
4629 {
4630 #if defined(in_atomic)
4631         static unsigned long prev_jiffy;        /* ratelimiting */
4632
4633         if ((in_atomic() || irqs_disabled()) &&
4634             system_state == SYSTEM_RUNNING) {
4635                 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
4636                         return;
4637                 prev_jiffy = jiffies;
4638                 printk(KERN_ERR "Debug: sleeping function called from invalid"
4639                                 " context at %s:%d\n", file, line);
4640                 printk("in_atomic():%d, irqs_disabled():%d\n",
4641                         in_atomic(), irqs_disabled());
4642                 dump_stack();
4643         }
4644 #endif
4645 }
4646 EXPORT_SYMBOL(__might_sleep);
4647 #endif
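
/*
 * A minimal sketch (hypothetical fragment, not part of this file) of the
 * kind of bug __might_sleep() catches: calling something that may sleep,
 * such as a GFP_KERNEL allocation, while atomic, e.g. under a spinlock:
 *
 *	spin_lock(&dev->lock);
 *	buf = kmalloc(128, GFP_KERNEL);    (may sleep -> "sleeping function
 *	spin_unlock(&dev->lock);            called from invalid context")
 *
 * The fix is to allocate before taking the lock or to use GFP_ATOMIC.
 */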
4648
4649 #ifdef CONFIG_MAGIC_SYSRQ
4650 void normalize_rt_tasks(void)
4651 {
4652         struct task_struct *p;
4653         prio_array_t *array;
4654         unsigned long flags;
4655         runqueue_t *rq;
4656
4657         read_lock_irq(&tasklist_lock);
4658         for_each_process (p) {
4659                 if (!rt_task(p))
4660                         continue;
4661
4662                 rq = task_rq_lock(p, &flags);
4663
4664                 array = p->array;
4665                 if (array)
4666                         deactivate_task(p, task_rq(p));
4667                 __setscheduler(p, SCHED_NORMAL, 0);
4668                 if (array) {
4669                         __activate_task(p, task_rq(p));
4670                         resched_task(rq->curr);
4671                 }
4672
4673                 task_rq_unlock(rq, &flags);
4674         }
4675         read_unlock_irq(&tasklist_lock);
4676 }
4677
4678 #endif /* CONFIG_MAGIC_SYSRQ */