fedora core 6 1.2949 + vserver 2.2.0

diff --git a/kernel/sched.c b/kernel/sched.c
index 04a557a..74624e0 100644
 #include <linux/capability.h>
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
+#include <linux/debug_locks.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/syscalls.h>
 #include <linux/times.h>
-#include <linux/acct.h>
+#include <linux/tsacct_kern.h>
+#include <linux/kprobes.h>
+#include <linux/delayacct.h>
 #include <asm/tlb.h>
 
 #include <asm/unistd.h>
-#include <linux/vs_context.h>
-#include <linux/vs_cvirt.h>
 #include <linux/vs_sched.h>
+#include <linux/vs_cvirt.h>
 
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], and back.
  */
 #define SCALE(v1,v1_max,v2_max) \
        (v1) * (v2_max) / (v1_max)
 
 #define DELTA(p) \
-       (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
+       (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
+               INTERACTIVE_DELTA)
 
 #define TASK_INTERACTIVE(p) \
        ((p)->prio <= (p)->static_prio - DELTA(p))
 #define TASK_PREEMPTS_CURR(p, rq) \
        ((p)->prio < (rq)->curr->prio)
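
The rewritten DELTA() shifts TASK_NICE(p) into the non-negative range 0..39 before applying SCALE() and subtracts the constant part of the offset afterwards; because C integer division truncates toward zero, the old form rounded negative nice values differently from positive ones, while the new form rounds both the same way. A standalone comparison of the two, assuming the usual values MAX_BONUS == 10 and INTERACTIVE_DELTA == 2 from the (elided) tunables block of this file:

#include <stdio.h>

#define MAX_BONUS          10   /* assumed */
#define INTERACTIVE_DELTA  2    /* assumed */
#define SCALE(v1, v1_max, v2_max)  ((v1) * (v2_max) / (v1_max))

static int delta_old(int nice)
{
        return SCALE(nice, 40, MAX_BONUS) + INTERACTIVE_DELTA;
}

static int delta_new(int nice)
{
        return SCALE(nice + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40
                + INTERACTIVE_DELTA;
}

int main(void)
{
        int nice;

        /* the two agree for nice >= 0; for negative nice levels that are
         * not a multiple of 4 the old macro came out one higher, because
         * its division truncated toward zero */
        for (nice = -20; nice <= 19; nice++)
                printf("nice %3d: old %2d  new %2d\n",
                       nice, delta_old(nice), delta_new(nice));
        return 0;
}
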
 
+#define SCALE_PRIO(x, prio) \
+       max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
+
+static unsigned int static_prio_timeslice(int static_prio)
+{
+       if (static_prio < NICE_TO_PRIO(0))
+               return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
+       else
+               return SCALE_PRIO(DEF_TIMESLICE, static_prio);
+}
+
 /*
  * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
  * to time slice values: [800ms ... 100ms ... 5ms]
  *
  * The higher a thread's priority, the bigger timeslices
  * it gets during one round of execution. But even the lowest
  * priority thread gets MIN_TIMESLICE worth of execution time.
  */
 
-#define SCALE_PRIO(x, prio) \
-       max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
-
-static unsigned int task_timeslice(task_t *p)
+static inline unsigned int task_timeslice(struct task_struct *p)
 {
-       if (p->static_prio < NICE_TO_PRIO(0))
-               return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
-       else
-               return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+       return static_prio_timeslice(p->static_prio);
 }
-#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)      \
-                               < (long long) (sd)->cache_hot_time)
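
As a cross-check of the timeslice scaling above, the following standalone sketch of the same arithmetic reproduces the [800ms ... 100ms ... 5ms] range documented in the comment. The constants are assumed from this kernel's headers, which are not part of this hunk (MAX_PRIO 140, MAX_USER_PRIO 40, NICE_TO_PRIO(0) == 120, DEF_TIMESLICE 100 ms, MIN_TIMESLICE 5 ms), and it works in milliseconds where the kernel works in jiffies:

#include <stdio.h>

#define MAX_PRIO        140
#define MAX_USER_PRIO   40
#define NICE_TO_PRIO(n) (120 + (n))
#define DEF_TIMESLICE   100     /* ms */
#define MIN_TIMESLICE   5       /* ms */
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

#define SCALE_PRIO(x, prio) \
        MAX((x) * (MAX_PRIO - (prio)) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

static unsigned int static_prio_timeslice(int static_prio)
{
        if (static_prio < NICE_TO_PRIO(0))
                return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
        return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}

int main(void)
{
        /* prints "800 100 5": nice -20, nice 0, nice 19 */
        printf("%u %u %u\n",
               static_prio_timeslice(NICE_TO_PRIO(-20)),
               static_prio_timeslice(NICE_TO_PRIO(0)),
               static_prio_timeslice(NICE_TO_PRIO(19)));
        return 0;
}
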
 
 /*
  * These are the runqueue data structures:
  */
 
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
-
-typedef struct runqueue runqueue_t;
-
 struct prio_array {
        unsigned int nr_active;
-       unsigned long bitmap[BITMAP_SIZE];
+       DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_PRIO];
 };
 
@@ -202,7 +204,7 @@ struct prio_array {
  * (such as the load balancing or the thread migration code), lock
  * acquire operations must be ordered by ascending &runqueue.
  */
-struct runqueue {
+struct rq {
        spinlock_t lock;
 
        /*
@@ -210,6 +212,7 @@ struct runqueue {
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned long nr_running;
+       unsigned long raw_weighted_load;
 #ifdef CONFIG_SMP
        unsigned long cpu_load[3];
 #endif
@@ -224,10 +227,12 @@ struct runqueue {
        unsigned long nr_uninterruptible;
 
        unsigned long expired_timestamp;
-       unsigned long long timestamp_last_tick;
-       task_t *curr, *idle;
+       /* Cached timestamp set by update_cpu_clock() */
+       unsigned long long most_recent_timestamp;
+       struct task_struct *curr, *idle;
+       unsigned long next_balance;
        struct mm_struct *prev_mm;
-       prio_array_t *active, *expired, arrays[2];
+       struct prio_array *active, *expired, arrays[2];
        int best_expired_prio;
        atomic_t nr_iowait;
 
@@ -237,13 +242,19 @@ struct runqueue {
        /* For active balancing */
        int active_balance;
        int push_cpu;
+       int cpu;                /* cpu of this runqueue */
 
-       task_t *migration_thread;
+       struct task_struct *migration_thread;
        struct list_head migration_queue;
-       int cpu;
+#endif
+       unsigned long norm_time;
+       unsigned long idle_time;
+#ifdef CONFIG_VSERVER_IDLETIME
+       int idle_skip;
 #endif
 #ifdef CONFIG_VSERVER_HARDCPU
        struct list_head hold_queue;
+       unsigned long nr_onhold;
        int idle_tokens;
 #endif
 
@@ -266,9 +277,19 @@ struct runqueue {
        unsigned long ttwu_cnt;
        unsigned long ttwu_local;
 #endif
+       struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct runqueue, runqueues);
+static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+
+static inline int cpu_of(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+       return rq->cpu;
+#else
+       return 0;
+#endif
+}
 
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
@@ -277,8 +298,8 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
  * The domain tree of any CPU may only be accessed from within
  * preempt-disabled sections.
  */
-#define for_each_domain(cpu, domain) \
-for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
+#define for_each_domain(cpu, __sd) \
+       for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              (&__get_cpu_var(runqueues))
@@ -293,26 +314,33 @@ for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
 #endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(struct rq *rq, struct task_struct *p)
 {
        return rq->curr == p;
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
 #endif
+       /*
+        * If we are tracking spinlock dependencies then we have to
+        * fix up the runqueue lock - which gets 'carried over' from
+        * prev into current:
+        */
+       spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
        spin_unlock_irq(&rq->lock);
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(struct rq *rq, struct task_struct *p)
 {
 #ifdef CONFIG_SMP
        return p->oncpu;
@@ -321,7 +349,7 @@ static inline int task_running(runqueue_t *rq, task_t *p)
 #endif
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
        /*
@@ -338,7 +366,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
 #endif
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
        /*
@@ -355,15 +383,34 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
+/*
+ * __task_rq_lock - lock the runqueue a given task resides on.
+ * Must be called interrupts disabled.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+repeat_lock_task:
+       rq = task_rq(p);
+       spin_lock(&rq->lock);
+       if (unlikely(rq != task_rq(p))) {
+               spin_unlock(&rq->lock);
+               goto repeat_lock_task;
+       }
+       return rq;
+}
+
 /*
  * task_rq_lock - lock the runqueue a given task resides on and disable
  * interrupts.  Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
  */
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(rq->lock)
 {
-       struct runqueue *rq;
+       struct rq *rq;
 
 repeat_lock_task:
        local_irq_save(*flags);
@@ -376,7 +423,13 @@ repeat_lock_task:
        return rq;
 }
 
-static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
+static inline void __task_rq_unlock(struct rq *rq)
+       __releases(rq->lock)
+{
+       spin_unlock(&rq->lock);
+}
+
+static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
 {
        spin_unlock_irqrestore(&rq->lock, *flags);
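
Both lock helpers have to re-check task_rq(p) after taking the lock, because the task can be migrated to another runqueue between the unlocked lookup and the spin_lock(); the loop retries until the runqueue read under its own lock is still the task's runqueue. Callers follow the pattern sketched below (example_caller() is illustrative only and assumes the surrounding kernel context):

static void example_caller(struct task_struct *p)
{
        unsigned long flags;
        struct rq *rq;

        rq = task_rq_lock(p, &flags);   /* irqs disabled, rq->lock held */
        /* ... p cannot change runqueues while rq->lock is held ... */
        task_rq_unlock(rq, &flags);     /* drops the lock, restores irqs */
}
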
@@ -387,7 +440,7 @@ static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
  * bump this up when changing the output format or the meaning of an existing
  * format, so that tools can adapt (or abort)
  */
-#define SCHEDSTAT_VERSION 12
+#define SCHEDSTAT_VERSION 14
 
 static int show_schedstat(struct seq_file *seq, void *v)
 {
@@ -396,7 +449,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
        seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
        seq_printf(seq, "timestamp %lu\n", jiffies);
        for_each_online_cpu(cpu) {
-               runqueue_t *rq = cpu_rq(cpu);
+               struct rq *rq = cpu_rq(cpu);
 #ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcnt = 0;
@@ -425,7 +478,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
                        seq_printf(seq, "domain%d %s", dcnt++, mask_str);
                        for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
                                        itype++) {
-                               seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
+                               seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
+                                               "%lu",
                                    sd->lb_cnt[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
@@ -435,11 +489,13 @@ static int show_schedstat(struct seq_file *seq, void *v)
                                    sd->lb_nobusyq[itype],
                                    sd->lb_nobusyg[itype]);
                        }
-                       seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+                       seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
+                           " %lu %lu %lu\n",
                            sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
                            sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
                            sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
-                           sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
+                           sd->ttwu_wake_remote, sd->ttwu_move_affine,
+                           sd->ttwu_move_balance);
                }
                preempt_enable();
 #endif
@@ -466,27 +522,54 @@ static int schedstat_open(struct inode *inode, struct file *file)
        return res;
 }
 
-struct file_operations proc_schedstat_operations = {
+const struct file_operations proc_schedstat_operations = {
        .open    = schedstat_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
 };
 
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
+{
+       if (rq) {
+               rq->rq_sched_info.run_delay += delta_jiffies;
+               rq->rq_sched_info.pcnt++;
+       }
+}
+
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
+{
+       if (rq)
+               rq->rq_sched_info.cpu_time += delta_jiffies;
+}
 # define schedstat_inc(rq, field)      do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
 #else /* !CONFIG_SCHEDSTATS */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
+{}
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
+{}
 # define schedstat_inc(rq, field)      do { } while (0)
 # define schedstat_add(rq, field, amt) do { } while (0)
 #endif
 
 /*
- * rq_lock - lock a given runqueue and disable interrupts.
+ * this_rq_lock - lock this runqueue and disable interrupts.
  */
-static inline runqueue_t *this_rq_lock(void)
+static inline struct rq *this_rq_lock(void)
        __acquires(rq->lock)
 {
-       runqueue_t *rq;
+       struct rq *rq;
 
        local_irq_disable();
        rq = this_rq();
@@ -495,7 +578,7 @@ static inline runqueue_t *this_rq_lock(void)
        return rq;
 }
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 /*
  * Called when a process is dequeued from the active array and given
  * the cpu.  We should note that with the exception of interactive
@@ -511,7 +594,7 @@ static inline runqueue_t *this_rq_lock(void)
  * long it was from the *first* time it was queued to the time that it
  * finally hit a cpu.
  */
-static inline void sched_info_dequeued(task_t *t)
+static inline void sched_info_dequeued(struct task_struct *t)
 {
        t->sched_info.last_queued = 0;
 }
@@ -521,23 +604,18 @@ static inline void sched_info_dequeued(task_t *t)
  * long it was waiting to run.  We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static void sched_info_arrive(task_t *t)
+static void sched_info_arrive(struct task_struct *t)
 {
-       unsigned long now = jiffies, diff = 0;
-       struct runqueue *rq = task_rq(t);
+       unsigned long now = jiffies, delta_jiffies = 0;
 
        if (t->sched_info.last_queued)
-               diff = now - t->sched_info.last_queued;
+               delta_jiffies = now - t->sched_info.last_queued;
        sched_info_dequeued(t);
-       t->sched_info.run_delay += diff;
+       t->sched_info.run_delay += delta_jiffies;
        t->sched_info.last_arrival = now;
        t->sched_info.pcnt++;
 
-       if (!rq)
-               return;
-
-       rq->rq_sched_info.run_delay += diff;
-       rq->rq_sched_info.pcnt++;
+       rq_sched_info_arrive(task_rq(t), delta_jiffies);
 }
 
 /*
@@ -555,25 +633,23 @@ static void sched_info_arrive(task_t *t)
  * the timestamp if it is already not set.  It's assumed that
  * sched_info_dequeued() will clear that stamp when appropriate.
  */
-static inline void sched_info_queued(task_t *t)
+static inline void sched_info_queued(struct task_struct *t)
 {
-       if (!t->sched_info.last_queued)
-               t->sched_info.last_queued = jiffies;
+       if (unlikely(sched_info_on()))
+               if (!t->sched_info.last_queued)
+                       t->sched_info.last_queued = jiffies;
 }
 
 /*
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily.  Now we can calculate how long we ran.
  */
-static inline void sched_info_depart(task_t *t)
+static inline void sched_info_depart(struct task_struct *t)
 {
-       struct runqueue *rq = task_rq(t);
-       unsigned long diff = jiffies - t->sched_info.last_arrival;
-
-       t->sched_info.cpu_time += diff;
+       unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
 
-       if (rq)
-               rq->rq_sched_info.cpu_time += diff;
+       t->sched_info.cpu_time += delta_jiffies;
+       rq_sched_info_depart(task_rq(t), delta_jiffies);
 }
 
 /*
@@ -581,9 +657,10 @@ static inline void sched_info_depart(task_t *t)
  * their time slice.  (This may also be called when switching to or from
  * the idle task.)  We are only called when prev != next.
  */
-static inline void sched_info_switch(task_t *prev, task_t *next)
+static inline void
+__sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
-       struct runqueue *rq = task_rq(prev);
+       struct rq *rq = task_rq(prev);
 
        /*
         * prev now departs the cpu.  It's not interesting to record
@@ -596,15 +673,21 @@ static inline void sched_info_switch(task_t *prev, task_t *next)
        if (next != rq->idle)
                sched_info_arrive(next);
 }
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
+{
+       if (unlikely(sched_info_on()))
+               __sched_info_switch(prev, next);
+}
 #else
 #define sched_info_queued(t)           do { } while (0)
 #define sched_info_switch(t, next)     do { } while (0)
-#endif /* CONFIG_SCHEDSTATS */
+#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
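
The sched_info bookkeeping above now feeds both the /proc schedstats output and, through CONFIG_TASK_DELAY_ACCT, per-task delay accounting: last_queued is stamped when a task is put on a runqueue, sched_info_arrive() folds now - last_queued into run_delay, sched_info_depart() folds now - last_arrival into cpu_time, and the new rq_sched_info_*() helpers mirror the same deltas into the per-runqueue counters. A toy userspace model of that flow (the names and the plain integer clock are illustrative, not kernel API):

#include <stdio.h>

struct sched_info { unsigned long last_queued, last_arrival;
                    unsigned long run_delay, cpu_time, pcnt; };

static void queued(struct sched_info *si, unsigned long now)
{
        if (!si->last_queued)
                si->last_queued = now;
}

static void arrive(struct sched_info *si, unsigned long now)
{
        if (si->last_queued)
                si->run_delay += now - si->last_queued;
        si->last_queued = 0;
        si->last_arrival = now;
        si->pcnt++;
}

static void depart(struct sched_info *si, unsigned long now)
{
        si->cpu_time += now - si->last_arrival;
}

int main(void)
{
        struct sched_info si = { 0 };

        queued(&si, 100);       /* put on a runqueue at t=100    */
        arrive(&si, 103);       /* gets the CPU at t=103         */
        depart(&si, 110);       /* gives the CPU up at t=110     */
        /* prints: run_delay=3 cpu_time=7 pcnt=1 */
        printf("run_delay=%lu cpu_time=%lu pcnt=%lu\n",
               si.run_delay, si.cpu_time, si.pcnt);
        return 0;
}
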
 
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
+static void dequeue_task(struct task_struct *p, struct prio_array *array)
 {
        BUG_ON(p->state & TASK_ONHOLD);
        array->nr_active--;
@@ -613,7 +696,7 @@ static void dequeue_task(struct task_struct *p, prio_array_t *array)
                __clear_bit(p->prio, array->bitmap);
 }
 
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
+static void enqueue_task(struct task_struct *p, struct prio_array *array)
 {
        BUG_ON(p->state & TASK_ONHOLD);
        sched_info_queued(p);
@@ -627,13 +710,14 @@ static void enqueue_task(struct task_struct *p, prio_array_t *array)
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static void requeue_task(struct task_struct *p, prio_array_t *array)
+static void requeue_task(struct task_struct *p, struct prio_array *array)
 {
        BUG_ON(p->state & TASK_ONHOLD);
        list_move_tail(&p->run_list, array->queue + p->prio);
 }
 
-static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
+static inline void
+enqueue_task_head(struct task_struct *p, struct prio_array *array)
 {
        BUG_ON(p->state & TASK_ONHOLD);
        list_add(&p->run_list, array->queue + p->prio);
@@ -643,7 +727,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
 }
 
 /*
- * effective_prio - return the priority that is based on the static
+ * __normal_prio - return the priority that is based on the static
  * priority but is modified by bonuses/penalties.
  *
  * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
@@ -656,21 +740,17 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
  *
  * Both properties are important to certain workloads.
  */
-static int effective_prio(task_t *p)
+
+static inline int __normal_prio(struct task_struct *p)
 {
        int bonus, prio;
-       struct vx_info *vxi;
-
-       if (rt_task(p))
-               return p->prio;
 
        bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
 
        prio = p->static_prio - bonus;
 
-       if ((vxi = p->vx_info) &&
-               vx_info_flags(vxi, VXF_SCHED_PRIO, 0))
-               prio += vx_effective_vavavoom(vxi, MAX_USER_PRIO);
+       /* adjust effective priority */
+       prio = vx_adjust_prio(p, prio, MAX_USER_PRIO);
 
        if (prio < MAX_RT_PRIO)
                prio = MAX_RT_PRIO;
@@ -680,68 +760,183 @@ static int effective_prio(task_t *p)
 }
 
 /*
- * __activate_task - move a task to the runqueue.
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+/*
+ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
+ * If static_prio_timeslice() is ever changed to break this assumption then
+ * this code will need modification
  */
-static inline void __activate_task(task_t *p, runqueue_t *rq)
+#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
+#define LOAD_WEIGHT(lp) \
+       (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+       LOAD_WEIGHT(static_prio_timeslice(prio))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+       (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
+
+static void set_load_weight(struct task_struct *p)
+{
+       if (has_rt_policy(p)) {
+#ifdef CONFIG_SMP
+               if (p == task_rq(p)->migration_thread)
+                       /*
+                        * The migration thread does the actual balancing.
+                        * Giving its load any weight will skew balancing
+                        * adversely.
+                        */
+                       p->load_weight = 0;
+               else
+#endif
+                       p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+       } else
+               p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
+
+static inline void
+inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
+{
+       rq->raw_weighted_load += p->load_weight;
+}
+
+static inline void
+dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
+{
+       rq->raw_weighted_load -= p->load_weight;
+}
+
+static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
 {
-       enqueue_task(p, rq->active);
        rq->nr_running++;
+       inc_raw_weighted_load(rq, p);
+}
+
+static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
+{
+       rq->nr_running--;
+       dec_raw_weighted_load(rq, p);
+}
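
The load-weight macros scale a task's timeslice so that a nice-0 task contributes exactly SCHED_LOAD_SCALE to rq->raw_weighted_load, while set_load_weight() gives realtime tasks a weight above any nice level and the migration thread a weight of zero. Worked numbers, assuming SCHED_LOAD_SCALE == 128 and the millisecond timeslices from the earlier sketch (nice -20/0/19 map to 800/100/5 ms):

#include <stdio.h>

#define SCHED_LOAD_SCALE      128UL     /* assumed */
#define TIME_SLICE_NICE_ZERO  100       /* DEF_TIMESLICE, in ms */
#define LOAD_WEIGHT(lp)       ((lp) * SCHED_LOAD_SCALE / TIME_SLICE_NICE_ZERO)

int main(void)
{
        /* nice -20 -> 1024, nice 0 -> 128, nice 19 -> 6 */
        printf("%lu %lu %lu\n",
               LOAD_WEIGHT(800), LOAD_WEIGHT(100), LOAD_WEIGHT(5));
        return 0;
}

With the same assumptions, RTPRIO_TO_LOAD_WEIGHT() starts at the nice -20 weight of 1024 and adds LOAD_WEIGHT(rt_priority) on top, so any realtime task outweighs even a nice -20 task.
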
+
+/*
+ * Calculate the expected normal priority: i.e. priority
+ * without taking RT-inheritance into account. Might be
+ * boosted by interactivity modifiers. Changes upon fork,
+ * setprio syscalls, and whenever the interactivity
+ * estimator recalculates.
+ */
+static inline int normal_prio(struct task_struct *p)
+{
+       int prio;
+
+       if (has_rt_policy(p))
+               prio = MAX_RT_PRIO-1 - p->rt_priority;
+       else
+               prio = __normal_prio(p);
+       return prio;
+}
+
+/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks, or might be boosted by
+ * interactivity modifiers. Will be RT if the task got
+ * RT-boosted. If not then it returns p->normal_prio.
+ */
+static int effective_prio(struct task_struct *p)
+{
+       p->normal_prio = normal_prio(p);
+       /*
+        * If we are RT tasks or we were boosted to RT priority,
+        * keep the priority unchanged. Otherwise, update priority
+        * to the normal priority:
+        */
+       if (!rt_prio(p->prio))
+               return p->normal_prio;
+       return p->prio;
+}
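
normal_prio() is what the priority would be without rt-mutex priority inheritance, and effective_prio() refreshes p->normal_prio while leaving a PI-boosted p->prio untouched, so a boost is only dropped by the rt-mutex code itself. For the non-realtime case, __normal_prio() above reduces to the bonus-and-clamp step modelled here (model_normal_prio() is illustrative; MAX_BONUS == 10 is assumed, and the vserver vx_adjust_prio() adjustment is left out):

#include <stdio.h>

#define MAX_RT_PRIO  100
#define MAX_PRIO     140
#define MAX_BONUS    10         /* assumed */

/* current_bonus is CURRENT_BONUS(p), i.e. sleep_avg scaled to 0..MAX_BONUS */
static int model_normal_prio(int static_prio, int current_bonus)
{
        int prio = static_prio - (current_bonus - MAX_BONUS / 2);

        if (prio < MAX_RT_PRIO)
                prio = MAX_RT_PRIO;
        if (prio > MAX_PRIO - 1)
                prio = MAX_PRIO - 1;
        return prio;
}

int main(void)
{
        /* nice 0 (static_prio 120): fully interactive -> 115, cpu hog -> 125 */
        printf("%d %d\n", model_normal_prio(120, MAX_BONUS),
                          model_normal_prio(120, 0));
        return 0;
}
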
+
+#include "sched_mon.h"
+
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static void __activate_task(struct task_struct *p, struct rq *rq)
+{
+       struct prio_array *target = rq->active;
+
+       if (batch_task(p))
+               target = rq->expired;
+       vxm_activate_task(p, rq);
+       enqueue_task(p, target);
+       inc_nr_running(p, rq);
 }
 
 /*
  * __activate_idle_task - move idle task to the _front_ of runqueue.
  */
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
 {
+       vxm_activate_idle(p, rq);
        enqueue_task_head(p, rq->active);
-       rq->nr_running++;
+       inc_nr_running(p, rq);
 }
 
-static int recalc_task_prio(task_t *p, unsigned long long now)
+/*
+ * Recalculate p->normal_prio and p->prio after having slept,
+ * updating the sleep-average too:
+ */
+static int recalc_task_prio(struct task_struct *p, unsigned long long now)
 {
        /* Caller must always ensure 'now >= p->timestamp' */
-       unsigned long long __sleep_time = now - p->timestamp;
-       unsigned long sleep_time;
+       unsigned long sleep_time = now - p->timestamp;
 
-       if (unlikely(p->policy == SCHED_BATCH))
+       if (batch_task(p))
                sleep_time = 0;
-       else {
-               if (__sleep_time > NS_MAX_SLEEP_AVG)
-                       sleep_time = NS_MAX_SLEEP_AVG;
-               else
-                       sleep_time = (unsigned long)__sleep_time;
-       }
 
        if (likely(sleep_time > 0)) {
                /*
-                * User tasks that sleep a long time are categorised as
-                * idle and will get just interactive status to stay active &
-                * prevent them suddenly becoming cpu hogs and starving
-                * other processes.
+                * This ceiling is set to the lowest priority that would allow
+                * a task to be reinserted into the active array on timeslice
+                * completion.
                 */
-               if (p->mm && p->activated != -1 &&
-                       sleep_time > INTERACTIVE_SLEEP(p)) {
-                               p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
-                                               DEF_TIMESLICE);
-               } else {
+               unsigned long ceiling = INTERACTIVE_SLEEP(p);
+
+               if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
                        /*
-                        * The lower the sleep avg a task has the more
-                        * rapidly it will rise with sleep time.
+                        * Prevents user tasks from achieving best priority
+                        * with one single large enough sleep.
                         */
-                       sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
+                       p->sleep_avg = ceiling;
+                       /*
+                        * Using INTERACTIVE_SLEEP() as a ceiling places a
+                        * nice(0) task 1ms sleep away from promotion, and
+                        * gives it 700ms to round-robin with no chance of
+                        * being demoted.  This is more than generous, so
+                        * mark this sleep as non-interactive to prevent the
+                        * on-runqueue bonus logic from intervening should
+                        * this task not receive cpu immediately.
+                        */
+                       p->sleep_type = SLEEP_NONINTERACTIVE;
+               } else {
                        /*
                         * Tasks waking from uninterruptible sleep are
                         * limited in their sleep_avg rise as they
                         * are likely to be waiting on I/O
                         */
-                       if (p->activated == -1 && p->mm) {
-                               if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
+                       if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
+                               if (p->sleep_avg >= ceiling)
                                        sleep_time = 0;
                                else if (p->sleep_avg + sleep_time >=
-                                               INTERACTIVE_SLEEP(p)) {
-                                       p->sleep_avg = INTERACTIVE_SLEEP(p);
-                                       sleep_time = 0;
+                                        ceiling) {
+                                               p->sleep_avg = ceiling;
+                                               sleep_time = 0;
                                }
                        }
 
@@ -755,9 +950,9 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
                         */
                        p->sleep_avg += sleep_time;
 
-                       if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-                               p->sleep_avg = NS_MAX_SLEEP_AVG;
                }
+               if (p->sleep_avg > NS_MAX_SLEEP_AVG)
+                       p->sleep_avg = NS_MAX_SLEEP_AVG;
        }
 
        return effective_prio(p);
@@ -769,28 +964,41 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
  * Update all the scheduling statistics stuff. (sleep average
  * calculation, priority modifiers, etc.)
  */
-static void activate_task(task_t *p, runqueue_t *rq, int local)
+static void activate_task(struct task_struct *p, struct rq *rq, int local)
 {
        unsigned long long now;
 
+       if (rt_task(p))
+               goto out;
+
        now = sched_clock();
 #ifdef CONFIG_SMP
        if (!local) {
                /* Compensate for drifting sched_clock */
-               runqueue_t *this_rq = this_rq();
-               now = (now - this_rq->timestamp_last_tick)
-                       + rq->timestamp_last_tick;
+               struct rq *this_rq = this_rq();
+               now = (now - this_rq->most_recent_timestamp)
+                       + rq->most_recent_timestamp;
        }
 #endif
 
-       if (!rt_task(p))
-               p->prio = recalc_task_prio(p, now);
+       /*
+        * Sleep time is in units of nanosecs, so shift by 20 to get a
+        * milliseconds-range estimation of the amount of time that the task
+        * spent sleeping:
+        */
+       if (unlikely(prof_on == SLEEP_PROFILING)) {
+               if (p->state == TASK_UNINTERRUPTIBLE)
+                       profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
+                                    (now - p->timestamp) >> 20);
+       }
+
+       p->prio = recalc_task_prio(p, now);
 
        /*
         * This checks to make sure it's not an uninterruptible task
         * that is now waking up.
         */
-       if (!p->activated) {
+       if (p->sleep_type == SLEEP_NORMAL) {
                /*
                 * Tasks which were woken up by interrupts (ie. hw events)
                 * are most likely of interactive nature. So we give them
@@ -799,87 +1007,40 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
                 * on a CPU, first time around:
                 */
                if (in_interrupt())
-                       p->activated = 2;
+                       p->sleep_type = SLEEP_INTERRUPTED;
                else {
                        /*
                         * Normal first-time wakeups get a credit too for
                         * on-runqueue time, but it will be weighted down:
                         */
-                       p->activated = 1;
+                       p->sleep_type = SLEEP_INTERACTIVE;
                }
        }
        p->timestamp = now;
-
+out:
        vx_activate_task(p);
        __activate_task(p, rq);
 }
 
 /*
- * deactivate_task - remove a task from the runqueue.
+ * __deactivate_task - remove a task from the runqueue.
  */
-static void __deactivate_task(struct task_struct *p, runqueue_t *rq)
+static void __deactivate_task(struct task_struct *p, struct rq *rq)
 {
-       rq->nr_running--;
+       dec_nr_running(p, rq);
        dequeue_task(p, p->array);
+       vxm_deactivate_task(p, rq);
        p->array = NULL;
 }
 
 static inline
-void deactivate_task(struct task_struct *p, runqueue_t *rq)
+void deactivate_task(struct task_struct *p, struct rq *rq)
 {
        vx_deactivate_task(p);
        __deactivate_task(p, rq);
 }
 
-
-#ifdef CONFIG_VSERVER_HARDCPU
-/*
- * vx_hold_task - put a task on the hold queue
- */
-static inline
-void vx_hold_task(struct vx_info *vxi,
-       struct task_struct *p, runqueue_t *rq)
-{
-       __deactivate_task(p, rq);
-       p->state |= TASK_ONHOLD;
-       /* a new one on hold */
-       vx_onhold_inc(vxi);
-       list_add_tail(&p->run_list, &rq->hold_queue);
-}
-
-/*
- * vx_unhold_task - put a task back to the runqueue
- */
-static inline
-void vx_unhold_task(struct vx_info *vxi,
-       struct task_struct *p, runqueue_t *rq)
-{
-       list_del(&p->run_list);
-       /* one less waiting */
-       vx_onhold_dec(vxi);
-       p->state &= ~TASK_ONHOLD;
-       enqueue_task(p, rq->expired);
-       rq->nr_running++;
-
-       if (p->static_prio < rq->best_expired_prio)
-               rq->best_expired_prio = p->static_prio;
-}
-#else
-static inline
-void vx_hold_task(struct vx_info *vxi,
-       struct task_struct *p, runqueue_t *rq)
-{
-       return;
-}
-
-static inline
-void vx_unhold_task(struct vx_info *vxi,
-       struct task_struct *p, runqueue_t *rq)
-{
-       return;
-}
-#endif /* CONFIG_VSERVER_HARDCPU */
-
+#include "sched_hard.h"
 
 /*
  * resched_task - mark a task 'to be rescheduled now'.
@@ -889,7 +1050,12 @@ void vx_unhold_task(struct vx_info *vxi,
  * the target CPU.
  */
 #ifdef CONFIG_SMP
-static void resched_task(task_t *p)
+
+#ifndef tsk_is_polling
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#endif
+
+static void resched_task(struct task_struct *p)
 {
        int cpu;
 
@@ -904,13 +1070,13 @@ static void resched_task(task_t *p)
        if (cpu == smp_processor_id())
                return;
 
-       /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+       /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
-       if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+       if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
 }
 #else
-static inline void resched_task(task_t *p)
+static inline void resched_task(struct task_struct *p)
 {
        assert_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
@@ -921,29 +1087,37 @@ static inline void resched_task(task_t *p)
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
  */
-inline int task_curr(const task_t *p)
+inline int task_curr(const struct task_struct *p)
 {
        return cpu_curr(task_cpu(p)) == p;
 }
 
+/* Used instead of source_load when we know the type == 0 */
+unsigned long weighted_cpuload(const int cpu)
+{
+       return cpu_rq(cpu)->raw_weighted_load;
+}
+
 #ifdef CONFIG_SMP
-typedef struct {
+struct migration_req {
        struct list_head list;
 
-       task_t *task;
+       struct task_struct *task;
        int dest_cpu;
 
        struct completion done;
-} migration_req_t;
+};
 
 /*
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
+static int
+migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 {
-       runqueue_t *rq = task_rq(p);
+       struct rq *rq = task_rq(p);
 
+       vxm_migrate_task(p, rq, dest_cpu);
        /*
         * If the task is not on a runqueue (and not running), then
         * it is sufficient to simply update the task's cpu field.
@@ -957,6 +1131,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
        req->task = p;
        req->dest_cpu = dest_cpu;
        list_add(&req->list, &rq->migration_queue);
+
        return 1;
 }
 
@@ -969,10 +1144,10 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(task_t *p)
+void wait_task_inactive(struct task_struct *p)
 {
        unsigned long flags;
-       runqueue_t *rq;
+       struct rq *rq;
        int preempted;
 
 repeat:
@@ -1003,7 +1178,7 @@ repeat:
  * to another CPU then no harm is done and the purpose has been
  * achieved as well.
  */
-void kick_process(task_t *p)
+void kick_process(struct task_struct *p)
 {
        int cpu;
 
@@ -1015,32 +1190,45 @@ void kick_process(task_t *p)
 }
 
 /*
- * Return a low guess at the load of a migration-source cpu.
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
  *
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
 static inline unsigned long source_load(int cpu, int type)
 {
-       runqueue_t *rq = cpu_rq(cpu);
-       unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+       struct rq *rq = cpu_rq(cpu);
+
        if (type == 0)
-               return load_now;
+               return rq->raw_weighted_load;
 
-       return min(rq->cpu_load[type-1], load_now);
+       return min(rq->cpu_load[type-1], rq->raw_weighted_load);
 }
 
 /*
- * Return a high guess at the load of a migration-target cpu
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
  */
 static inline unsigned long target_load(int cpu, int type)
 {
-       runqueue_t *rq = cpu_rq(cpu);
-       unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+       struct rq *rq = cpu_rq(cpu);
+
        if (type == 0)
-               return load_now;
+               return rq->raw_weighted_load;
+
+       return max(rq->cpu_load[type-1], rq->raw_weighted_load);
+}
+
+/*
+ * Return the average load per task on the cpu's run queue
+ */
+static inline unsigned long cpu_avg_load_per_task(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long n = rq->nr_running;
 
-       return max(rq->cpu_load[type-1], load_now);
+       return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
 }
 
 /*
@@ -1099,7 +1287,7 @@ nextgroup:
 }
 
 /*
- * find_idlest_queue - find the idlest runqueue among the cpus in group.
+ * find_idlest_cpu - find the idlest cpu among the cpus in group.
  */
 static int
 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
@@ -1113,7 +1301,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
        cpus_and(tmp, group->cpumask, p->cpus_allowed);
 
        for_each_cpu_mask(i, tmp) {
-               load = source_load(i, 0);
+               load = weighted_cpuload(i);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
                        min_load = load;
@@ -1140,28 +1328,42 @@ static int sched_balance_self(int cpu, int flag)
        struct task_struct *t = current;
        struct sched_domain *tmp, *sd = NULL;
 
-       for_each_domain(cpu, tmp)
+       for_each_domain(cpu, tmp) {
+               /*
+                * If power savings logic is enabled for a domain, stop there.
+                */
+               if (tmp->flags & SD_POWERSAVINGS_BALANCE)
+                       break;
                if (tmp->flags & flag)
                        sd = tmp;
+       }
 
        while (sd) {
                cpumask_t span;
                struct sched_group *group;
-               int new_cpu;
-               int weight;
+               int new_cpu, weight;
+
+               if (!(sd->flags & flag)) {
+                       sd = sd->child;
+                       continue;
+               }
 
                span = sd->span;
                group = find_idlest_group(sd, t, cpu);
-               if (!group)
-                       goto nextlevel;
+               if (!group) {
+                       sd = sd->child;
+                       continue;
+               }
 
                new_cpu = find_idlest_cpu(group, t, cpu);
-               if (new_cpu == -1 || new_cpu == cpu)
-                       goto nextlevel;
+               if (new_cpu == -1 || new_cpu == cpu) {
+                       /* Now try balancing at a lower domain level of cpu */
+                       sd = sd->child;
+                       continue;
+               }
 
-               /* Now try balancing at a lower domain level */
+               /* Now try balancing at a lower domain level of new_cpu */
                cpu = new_cpu;
-nextlevel:
                sd = NULL;
                weight = cpus_weight(span);
                for_each_domain(cpu, tmp) {
@@ -1187,7 +1389,7 @@ nextlevel:
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-static int wake_idle(int cpu, task_t *p)
+static int wake_idle(int cpu, struct task_struct *p)
 {
        cpumask_t tmp;
        struct sched_domain *sd;
@@ -1210,7 +1412,7 @@ static int wake_idle(int cpu, task_t *p)
        return cpu;
 }
 #else
-static inline int wake_idle(int cpu, task_t *p)
+static inline int wake_idle(int cpu, struct task_struct *p)
 {
        return cpu;
 }
@@ -1230,15 +1432,15 @@ static inline int wake_idle(int cpu, task_t *p)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(task_t *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 {
        int cpu, this_cpu, success = 0;
        unsigned long flags;
        long old_state;
-       runqueue_t *rq;
+       struct rq *rq;
 #ifdef CONFIG_SMP
-       unsigned long load, this_load;
        struct sched_domain *sd, *this_sd = NULL;
+       unsigned long load, this_load;
        int new_cpu;
 #endif
 
@@ -1247,7 +1449,7 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 
        /* we need to unhold suspended tasks */
        if (old_state & TASK_ONHOLD) {
-               vx_unhold_task(p->vx_info, p, rq);
+               vx_unhold_task(p, rq);
                old_state = p->state;
        }
        if (!(old_state & state))
@@ -1298,17 +1500,21 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 
                if (this_sd->flags & SD_WAKE_AFFINE) {
                        unsigned long tl = this_load;
+                       unsigned long tl_per_task;
+
+                       tl_per_task = cpu_avg_load_per_task(this_cpu);
+
                        /*
                         * If sync wakeup then subtract the (maximum possible)
                         * effect of the currently running task from the load
                         * of the current CPU:
                         */
                        if (sync)
-                               tl -= SCHED_LOAD_SCALE;
+                               tl -= current->load_weight;
 
                        if ((tl <= load &&
-                               tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) ||
-                               100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) {
+                               tl + target_load(cpu, idx) <= tl_per_task) ||
+                               100*(tl + p->load_weight) <= imbalance*load) {
                                /*
                                 * This domain has SD_WAKE_AFFINE and
                                 * p is cache cold in this domain, and
@@ -1353,29 +1559,24 @@ out_activate:
 #endif /* CONFIG_SMP */
        if (old_state == TASK_UNINTERRUPTIBLE) {
                rq->nr_uninterruptible--;
+               vx_uninterruptible_dec(p);
                /*
                 * Tasks on involuntary sleep don't earn
                 * sleep_avg beyond just interactive state.
                 */
-               p->activated = -1;
-       }
+               p->sleep_type = SLEEP_NONINTERACTIVE;
+       } else
 
        /*
         * Tasks that have marked their sleep as noninteractive get
-        * woken up without updating their sleep average. (i.e. their
-        * sleep is handled in a priority-neutral manner, no priority
-        * boost and no penalty.)
+        * woken up with their sleep average not weighted in an
+        * interactive way.
         */
-       if (old_state & TASK_NONINTERACTIVE) {
-               vx_activate_task(p);
-               __activate_task(p, rq);
-       } else
-               activate_task(p, rq, cpu == this_cpu);
+               if (old_state & TASK_NONINTERACTIVE)
+                       p->sleep_type = SLEEP_NONINTERACTIVE;
 
-       /* this is to get the accounting behind the load update */
-       if (old_state & TASK_UNINTERRUPTIBLE)
-               vx_uninterruptible_dec(p);
 
+       activate_task(p, rq, cpu == this_cpu);
        /*
         * Sync wakeups (i.e. those types of wakeups where the waker
         * has indicated that it will leave the CPU in short order)
@@ -1398,24 +1599,24 @@ out:
        return success;
 }
 
-int fastcall wake_up_process(task_t *p)
+int fastcall wake_up_process(struct task_struct *p)
 {
        return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
                                 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
 }
-
 EXPORT_SYMBOL(wake_up_process);
 
-int fastcall wake_up_state(task_t *p, unsigned int state)
+int fastcall wake_up_state(struct task_struct *p, unsigned int state)
 {
        return try_to_wake_up(p, state, 0);
 }
 
+static void task_running_tick(struct rq *rq, struct task_struct *p, int cpu);
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
  */
-void fastcall sched_fork(task_t *p, int clone_flags)
+void fastcall sched_fork(struct task_struct *p, int clone_flags)
 {
        int cpu = get_cpu();
 
@@ -1431,10 +1632,17 @@ void fastcall sched_fork(task_t *p, int clone_flags)
         * event cannot wake it up and insert it on the runqueue either.
         */
        p->state = TASK_RUNNING;
+
+       /*
+        * Make sure we do not leak PI boosting priority to the child:
+        */
+       p->prio = current->normal_prio;
+
        INIT_LIST_HEAD(&p->run_list);
        p->array = NULL;
-#ifdef CONFIG_SCHEDSTATS
-       memset(&p->sched_info, 0, sizeof(p->sched_info));
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+       if (unlikely(sched_info_on()))
+               memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
        p->oncpu = 0;
@@ -1464,7 +1672,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
                 * runqueue lock is not a problem.
                 */
                current->time_slice = 1;
-               scheduler_tick();
+               task_running_tick(cpu_rq(cpu), current, cpu);
        }
        local_irq_enable();
        put_cpu();
@@ -1477,11 +1685,11 @@ void fastcall sched_fork(task_t *p, int clone_flags)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
+void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
+       struct rq *rq, *this_rq;
        unsigned long flags;
        int this_cpu, cpu;
-       runqueue_t *rq, *this_rq;
 
        rq = task_rq_lock(p, &flags);
        BUG_ON(p->state != TASK_RUNNING);
@@ -1512,10 +1720,11 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
                        else {
                                p->prio = current->prio;
                                BUG_ON(p->state & TASK_ONHOLD);
+                               p->normal_prio = current->normal_prio;
                                list_add_tail(&p->run_list, &current->run_list);
                                p->array = current->array;
                                p->array->nr_active++;
-                               rq->nr_running++;
+                               inc_nr_running(p, rq);
                        }
                        set_need_resched();
                } else
@@ -1535,8 +1744,8 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
                 * Not the local CPU - must adjust timestamp. This should
                 * get optimised away in the !CONFIG_SMP case.
                 */
-               p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
-                                       + rq->timestamp_last_tick;
+               p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
+                                       + rq->most_recent_timestamp;
                __activate_task(p, rq);
                if (TASK_PREEMPTS_CURR(p, rq))
                        resched_task(rq->curr);
@@ -1562,10 +1771,10 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
  * artificially, because any timeslice recovered here
  * was given away by the parent in the first place.)
  */
-void fastcall sched_exit(task_t *p)
+void fastcall sched_exit(struct task_struct *p)
 {
        unsigned long flags;
-       runqueue_t *rq;
+       struct rq *rq;
 
        /*
         * If the child was a (relative-) CPU hog then decrease
@@ -1596,7 +1805,7 @@ void fastcall sched_exit(task_t *p)
  * prepare_task_switch sets up locking and calls architecture specific
  * hooks.
  */
-static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
 {
        prepare_lock_switch(rq, next);
        prepare_arch_switch(next);
@@ -1617,42 +1826,49 @@ static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
        __releases(rq->lock)
 {
        struct mm_struct *mm = rq->prev_mm;
-       unsigned long prev_task_flags;
+       long prev_state;
 
        rq->prev_mm = NULL;
 
        /*
         * A task struct has one reference for the use as "current".
-        * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-        * calls schedule one last time. The schedule call will never return,
-        * and the scheduled task must drop that reference.
-        * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+        * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+        * schedule one last time. The schedule call will never return, and
+        * the scheduled task must drop that reference.
+        * The test for TASK_DEAD must occur while the runqueue locks are
         * still held, otherwise prev could be scheduled on another cpu, die
         * there before we look at prev->state, and then the reference would
         * be dropped twice.
         *              Manfred Spraul <manfred@colorfullife.com>
         */
-       prev_task_flags = prev->flags;
+       prev_state = prev->state;
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
        if (mm)
                mmdrop(mm);
-       if (unlikely(prev_task_flags & PF_DEAD))
+       if (unlikely(prev_state == TASK_DEAD)) {
+               /*
+                * Remove function-return probe instances associated with this
+                * task and put them back on the free list.
+                */
+               kprobe_flush_task(prev);
                put_task_struct(prev);
+       }
 }
 
 /**
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(task_t *prev)
+asmlinkage void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
-       runqueue_t *rq = this_rq();
+       struct rq *rq = this_rq();
+
        finish_task_switch(rq, prev);
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
        /* In this case, finish_task_switch does not reenable preemption */
@@ -1666,24 +1882,34 @@ asmlinkage void schedule_tail(task_t *prev)
  * context_switch - switch to the new MM and the new
  * thread's register state.
  */
-static inline
-task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+static inline struct task_struct *
+context_switch(struct rq *rq, struct task_struct *prev,
+              struct task_struct *next)
 {
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;
 
-       if (unlikely(!mm)) {
+       if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);
 
-       if (unlikely(!prev->mm)) {
+       if (!prev->mm) {
                prev->active_mm = NULL;
                WARN_ON(rq->prev_mm);
                rq->prev_mm = oldmm;
        }
+       /*
+        * Since the runqueue lock will be released by the next
+        * task (which is an invalid locking op but in the case
+        * of the scheduler it's an obvious special-case), so we
+        * do an early lockdep release here:
+        */
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+#endif
 
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
@@ -1712,7 +1938,7 @@ unsigned long nr_uninterruptible(void)
 {
        unsigned long i, sum = 0;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                sum += cpu_rq(i)->nr_uninterruptible;
 
        /*
@@ -1727,9 +1953,10 @@ unsigned long nr_uninterruptible(void)
 
 unsigned long long nr_context_switches(void)
 {
-       unsigned long long i, sum = 0;
+       int i;
+       unsigned long long sum = 0;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                sum += cpu_rq(i)->nr_switches;
 
        return sum;
@@ -1739,32 +1966,54 @@ unsigned long nr_iowait(void)
 {
        unsigned long i, sum = 0;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                sum += atomic_read(&cpu_rq(i)->nr_iowait);
 
        return sum;
 }
 
+unsigned long nr_active(void)
+{
+       unsigned long i, running = 0, uninterruptible = 0;
+
+       for_each_online_cpu(i) {
+               running += cpu_rq(i)->nr_running;
+               uninterruptible += cpu_rq(i)->nr_uninterruptible;
+       }
+
+       if (unlikely((long)uninterruptible < 0))
+               uninterruptible = 0;
+
+       return running + uninterruptible;
+}
+
 #ifdef CONFIG_SMP
 
+/*
+ * Is this task likely cache-hot:
+ */
+static inline int
+task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
+{
+       return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
+}
+
 /*
  * double_rq_lock - safely lock two runqueues
  *
- * We must take them in cpu order to match code in
- * dependent_sleeper and wake_dependent_sleeper.
- *
  * Note this does not disable interrupts like task_rq_lock,
  * you need to do so manually before calling.
  */
-static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
+static void double_rq_lock(struct rq *rq1, struct rq *rq2)
        __acquires(rq1->lock)
        __acquires(rq2->lock)
 {
+       BUG_ON(!irqs_disabled());
        if (rq1 == rq2) {
                spin_lock(&rq1->lock);
                __acquire(rq2->lock);   /* Fake it out ;) */
        } else {
-               if (rq1->cpu < rq2->cpu) {
+               if (rq1 < rq2) {
                        spin_lock(&rq1->lock);
                        spin_lock(&rq2->lock);
                } else {
@@ -1780,7 +2029,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
  * Note this does not restore interrupts like task_rq_unlock,
  * you need to do so manually after calling.
  */
-static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
+static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
 {
@@ -1794,13 +2043,18 @@ static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  */
-static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
+static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(this_rq->lock)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
+       if (unlikely(!irqs_disabled())) {
+               /* printk() doesn't work good under rq->lock */
+               spin_unlock(&this_rq->lock);
+               BUG_ON(1);
+       }
        if (unlikely(!spin_trylock(&busiest->lock))) {
-               if (busiest->cpu < this_rq->cpu) {
+               if (busiest < this_rq) {
                        spin_unlock(&this_rq->lock);
                        spin_lock(&busiest->lock);
                        spin_lock(&this_rq->lock);
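
double_rq_lock() and double_lock_balance() now order the two runqueue locks by the address of the struct rq rather than by CPU number; any one fixed global ordering is enough to rule out ABBA deadlock when two CPUs lock each other's runqueues, and double_lock_balance() only backs off (dropping and re-taking this_rq->lock) when the trylock fails and the ordering would otherwise be violated. The same idiom in a self-contained userspace sketch (lock_pair_ordered() is illustrative):

#include <pthread.h>
#include <stdint.h>

/* Take both locks in one fixed global order (here: by address) so that
 * two threads which each need both locks can never wait on each other.
 */
static void lock_pair_ordered(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a == b) {
                pthread_mutex_lock(a);
        } else if ((uintptr_t)a < (uintptr_t)b) {
                pthread_mutex_lock(a);
                pthread_mutex_lock(b);
        } else {
                pthread_mutex_lock(b);
                pthread_mutex_lock(a);
        }
}
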
@@ -1815,11 +2069,11 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
  * allow dest_cpu, which will force the cpu onto dest_cpu.  Then
  * the cpu_allowed mask is restored.
  */
-static void sched_migrate_task(task_t *p, int dest_cpu)
+static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 {
-       migration_req_t req;
-       runqueue_t *rq;
+       struct migration_req req;
        unsigned long flags;
+       struct rq *rq;
 
        rq = task_rq_lock(p, &flags);
        if (!cpu_isset(dest_cpu, p->cpus_allowed)
@@ -1830,11 +2084,13 @@ static void sched_migrate_task(task_t *p, int dest_cpu)
        if (migrate_task(p, dest_cpu, &req)) {
                /* Need to wait for migration thread (might exit: take ref). */
                struct task_struct *mt = rq->migration_thread;
+
                get_task_struct(mt);
                task_rq_unlock(rq, &flags);
                wake_up_process(mt);
                put_task_struct(mt);
                wait_for_completion(&req.done);
+
                return;
        }
 out:
@@ -1858,17 +2114,17 @@ void sched_exec(void)
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
-static
-void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
-              runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+static void pull_task(struct rq *src_rq, struct prio_array *src_array,
+                     struct task_struct *p, struct rq *this_rq,
+                     struct prio_array *this_array, int this_cpu)
 {
        dequeue_task(p, src_array);
-       src_rq->nr_running--;
+       dec_nr_running(p, src_rq);
        set_task_cpu(p, this_cpu);
-       this_rq->nr_running++;
+       inc_nr_running(p, this_rq);
        enqueue_task(p, this_array);
-       p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
-                               + this_rq->timestamp_last_tick;
+       p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
+                               + this_rq->most_recent_timestamp;
        /*
         * Note that idle threads have a prio of MAX_PRIO, for this test
         * to be always true for them.
@@ -1881,7 +2137,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
-int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
+int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
                     struct sched_domain *sd, enum idle_type idle,
                     int *all_pinned)
 {
@@ -1904,34 +2160,55 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
         * 2) too many balance attempts have failed.
         */
 
-       if (sd->nr_balance_failed > sd->cache_nice_tries)
+       if (sd->nr_balance_failed > sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+               if (task_hot(p, rq->most_recent_timestamp, sd))
+                       schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
                return 1;
+       }
 
-       if (task_hot(p, rq->timestamp_last_tick, sd))
+       if (task_hot(p, rq->most_recent_timestamp, sd))
                return 0;
        return 1;
 }
 
+#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
+
 /*
- * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
- * as part of a balancing operation within "domain". Returns the number of
- * tasks moved.
+ * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
+ * load from busiest to this_rq, as part of a balancing operation within
+ * "domain". Returns the number of tasks moved.
  *
  * Called with both runqueues locked.
  */
-static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
-                     unsigned long max_nr_move, struct sched_domain *sd,
-                     enum idle_type idle, int *all_pinned)
+static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+                     unsigned long max_nr_move, unsigned long max_load_move,
+                     struct sched_domain *sd, enum idle_type idle,
+                     int *all_pinned)
 {
-       prio_array_t *array, *dst_array;
+       int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
+           best_prio_seen, skip_for_load;
+       struct prio_array *array, *dst_array;
        struct list_head *head, *curr;
-       int idx, pulled = 0, pinned = 0;
-       task_t *tmp;
+       struct task_struct *tmp;
+       long rem_load_move;
 
-       if (max_nr_move == 0)
+       if (max_nr_move == 0 || max_load_move == 0)
                goto out;
 
+       rem_load_move = max_load_move;
        pinned = 1;
+       this_best_prio = rq_best_prio(this_rq);
+       best_prio = rq_best_prio(busiest);
+       /*
+        * Enable handling of the case where there is more than one task
+        * with the best priority. If the currently running task is one
+        * of those with prio==best_prio we know it won't be moved
+        * and therefore it's safe to override the skip (based on load) of
+        * any task we find with that prio.
+        */
+       best_prio_seen = best_prio == busiest->curr->prio;
 
        /*
         * We first consider expired tasks. Those will likely not be
@@ -1967,27 +2244,39 @@ skip_bitmap:
        head = array->queue + idx;
        curr = head->prev;
 skip_queue:
-       tmp = list_entry(curr, task_t, run_list);
+       tmp = list_entry(curr, struct task_struct, run_list);
 
        curr = curr->prev;
 
-       if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+       /*
+        * To help distribute high priority tasks across CPUs we don't
+        * skip a task if it will be the highest priority task (i.e. smallest
+        * prio value) on its new queue, regardless of its load weight.
+        */
+       skip_for_load = tmp->load_weight > rem_load_move;
+       if (skip_for_load && idx < this_best_prio)
+               skip_for_load = !best_prio_seen && idx == best_prio;
+       if (skip_for_load ||
+           !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+
+               best_prio_seen |= idx == best_prio;
                if (curr != head)
                        goto skip_queue;
                idx++;
                goto skip_bitmap;
        }
 
-#ifdef CONFIG_SCHEDSTATS
-       if (task_hot(tmp, busiest->timestamp_last_tick, sd))
-               schedstat_inc(sd, lb_hot_gained[idle]);
-#endif
-
        pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
        pulled++;
+       rem_load_move -= tmp->load_weight;
 
-       /* We only want to steal up to the prescribed number of tasks. */
-       if (pulled < max_nr_move) {
+       /*
+        * We only want to steal up to the prescribed number of tasks
+        * and the prescribed amount of weighted load.
+        */
+       if (pulled < max_nr_move && rem_load_move > 0) {
+               if (idx < this_best_prio)
+                       this_best_prio = idx;
                if (curr != head)
                        goto skip_queue;
                idx++;
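move_tasks() now works against two budgets at once: a task count (max_nr_move) and a weighted-load amount (max_load_move) that each pulled task's load_weight is charged against. A simplified sketch of that loop, ignoring the priority arrays and the best-prio override (weights and limits are made-up numbers):

#include <stdio.h>

static int pull_up_to(const unsigned long *weights, int ntasks,
		      unsigned long max_nr_move, long max_load_move)
{
	long rem_load = max_load_move;
	int pulled = 0, i;

	for (i = 0; i < ntasks; i++) {
		if (pulled >= (int)max_nr_move || rem_load <= 0)
			break;
		if ((long)weights[i] > rem_load)
			continue;	/* too heavy for the remaining budget */
		rem_load -= weights[i];
		pulled++;
	}
	return pulled;
}

int main(void)
{
	unsigned long w[] = { 1024, 512, 2048, 128 };

	/* pulls the 1024, 512 and 128 tasks, then both limits are spent */
	printf("pulled %d tasks\n", pull_up_to(w, 4, 3, 1700));
	return 0;
}

In the real code a task heavier than the remaining budget can still be taken when it would become the highest-priority task on the destination queue, as the comment above explains.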
@@ -2008,20 +2297,30 @@ out:
 
 /*
  * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the number of tasks which should be
- * moved to restore balance via the imbalance parameter.
+ * domain. It calculates and returns the amount of weighted load which
+ * should be moved to restore balance via the imbalance parameter.
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
                   unsigned long *imbalance, enum idle_type idle, int *sd_idle,
-                  cpumask_t *cpus)
+                  cpumask_t *cpus, int *balance)
 {
        struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
        unsigned long max_load, avg_load, total_load, this_load, total_pwr;
        unsigned long max_pull;
+       unsigned long busiest_load_per_task, busiest_nr_running;
+       unsigned long this_load_per_task, this_nr_running;
        int load_idx;
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+       int power_savings_balance = 1;
+       unsigned long leader_nr_running = 0, min_load_per_task = 0;
+       unsigned long min_nr_running = ULONG_MAX;
+       struct sched_group *group_min = NULL, *group_leader = NULL;
+#endif
 
        max_load = this_load = total_load = total_pwr = 0;
+       busiest_load_per_task = busiest_nr_running = 0;
+       this_load_per_task = this_nr_running = 0;
        if (idle == NOT_IDLE)
                load_idx = sd->busy_idx;
        else if (idle == NEWLY_IDLE)
@@ -2030,48 +2329,137 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                load_idx = sd->idle_idx;
 
        do {
-               unsigned long load;
+               unsigned long load, group_capacity;
                int local_group;
                int i;
+               unsigned int balance_cpu = -1, first_idle_cpu = 0;
+               unsigned long sum_nr_running, sum_weighted_load;
 
                local_group = cpu_isset(this_cpu, group->cpumask);
 
+               if (local_group)
+                       balance_cpu = first_cpu(group->cpumask);
+
                /* Tally up the load of all CPUs in the group */
-               avg_load = 0;
+               sum_weighted_load = sum_nr_running = avg_load = 0;
 
                for_each_cpu_mask(i, group->cpumask) {
+                       struct rq *rq;
+
                        if (!cpu_isset(i, *cpus))
                                continue;
 
+                       rq = cpu_rq(i);
+
                        if (*sd_idle && !idle_cpu(i))
                                *sd_idle = 0;
 
                        /* Bias balancing toward cpus of our domain */
-                       if (local_group)
+                       if (local_group) {
+                               if (idle_cpu(i) && !first_idle_cpu) {
+                                       first_idle_cpu = 1;
+                                       balance_cpu = i;
+                               }
+
                                load = target_load(i, load_idx);
-                       else
+                       } else
                                load = source_load(i, load_idx);
 
                        avg_load += load;
+                       sum_nr_running += rq->nr_running;
+                       sum_weighted_load += rq->raw_weighted_load;
                }
 
-               total_load += avg_load;
-               total_pwr += group->cpu_power;
-
+               /*
+                * The first idle CPU, or else the first CPU (the busiest) in
+                * this sched group, is eligible for doing load balancing at
+                * this and higher domains.
+                */
+               if (local_group && balance_cpu != this_cpu && balance) {
+                       *balance = 0;
+                       goto ret;
+               }
+
+               total_load += avg_load;
+               total_pwr += group->cpu_power;
+
                /* Adjust by relative CPU power of the group */
                avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
+               group_capacity = group->cpu_power / SCHED_LOAD_SCALE;
+
                if (local_group) {
                        this_load = avg_load;
                        this = group;
-               } else if (avg_load > max_load) {
+                       this_nr_running = sum_nr_running;
+                       this_load_per_task = sum_weighted_load;
+               } else if (avg_load > max_load &&
+                          sum_nr_running > group_capacity) {
                        max_load = avg_load;
                        busiest = group;
+                       busiest_nr_running = sum_nr_running;
+                       busiest_load_per_task = sum_weighted_load;
                }
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+               /*
+                * Busy processors will not participate in power savings
+                * balance.
+                */
+               if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+                       goto group_next;
+
+               /*
+                * If the local group is idle or completely loaded, there is
+                * no need to do power savings balance at this domain.
+                */
+               if (local_group && (this_nr_running >= group_capacity ||
+                                   !this_nr_running))
+                       power_savings_balance = 0;
+
+               /*
+                * If a group is already running at full capacity or idle,
+                * don't include that group in power savings calculations
+                */
+               if (!power_savings_balance || sum_nr_running >= group_capacity
+                   || !sum_nr_running)
+                       goto group_next;
+
+               /*
+                * Calculate the group which has the least non-idle load.
+                * This is the group from where we need to pick up the load
+                * for saving power
+                */
+               if ((sum_nr_running < min_nr_running) ||
+                   (sum_nr_running == min_nr_running &&
+                    first_cpu(group->cpumask) <
+                    first_cpu(group_min->cpumask))) {
+                       group_min = group;
+                       min_nr_running = sum_nr_running;
+                       min_load_per_task = sum_weighted_load /
+                                               sum_nr_running;
+               }
+
+               /*
+                * Find the group which is running close to its capacity but
+                * still has some room to pick up load from other groups and
+                * save more power.
+                */
+               if (sum_nr_running <= group_capacity - 1) {
+                       if (sum_nr_running > leader_nr_running ||
+                           (sum_nr_running == leader_nr_running &&
+                            first_cpu(group->cpumask) >
+                             first_cpu(group_leader->cpumask))) {
+                               group_leader = group;
+                               leader_nr_running = sum_nr_running;
+                       }
+               }
+group_next:
+#endif
                group = group->next;
        } while (group != sd->groups);
 
-       if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
+       if (!busiest || this_load >= max_load || busiest_nr_running == 0)
                goto out_balanced;
 
        avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -2080,6 +2468,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                        100*max_load <= sd->imbalance_pct*this_load)
                goto out_balanced;
 
+       busiest_load_per_task /= busiest_nr_running;
        /*
         * We're trying to get all the cpus to the average_load, so we don't
         * want to push ourselves above the average load, nor do we wish to
@@ -2091,21 +2480,49 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * by pulling tasks to us.  Be careful of negative numbers as they'll
         * appear as very large values with unsigned longs.
         */
+       if (max_load <= busiest_load_per_task)
+               goto out_balanced;
+
+       /*
+        * In the presence of smp nice balancing, certain scenarios can have
+        * max load less than avg load(as we skip the groups at or below
+        * its cpu_power, while calculating max_load..)
+        */
+       if (max_load < avg_load) {
+               *imbalance = 0;
+               goto small_imbalance;
+       }
 
        /* Don't want to pull so many tasks that a group would go idle */
-       max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+       max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
 
        /* How much load to actually move to equalise the imbalance */
        *imbalance = min(max_pull * busiest->cpu_power,
                                (avg_load - this_load) * this->cpu_power)
                        / SCHED_LOAD_SCALE;
 
-       if (*imbalance < SCHED_LOAD_SCALE) {
-               unsigned long pwr_now = 0, pwr_move = 0;
-               unsigned long tmp;
+       /*
+        * If *imbalance is less than the average load per runnable task
+        * there is no guarantee that any tasks will be moved, so we try
+        * bumping its value to force at least one task to be moved.
+        */
+       if (*imbalance < busiest_load_per_task) {
+               unsigned long tmp, pwr_now, pwr_move;
+               unsigned int imbn;
+
+small_imbalance:
+               pwr_move = pwr_now = 0;
+               imbn = 2;
+               if (this_nr_running) {
+                       this_load_per_task /= this_nr_running;
+                       if (busiest_load_per_task > this_load_per_task)
+                               imbn = 1;
+               } else
+                       this_load_per_task = SCHED_LOAD_SCALE;
 
-               if (max_load - this_load >= SCHED_LOAD_SCALE*2) {
-                       *imbalance = 1;
+               if (max_load - this_load >= busiest_load_per_task * imbn) {
+                       *imbalance = busiest_load_per_task;
                        return busiest;
                }
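A worked example of the weighted imbalance computed above, using made-up group loads; 128 stands in for SCHED_LOAD_SCALE and for the cpu_power of an ordinary group (the exact kernel constants differ, but the algebra is the same):

#include <stdio.h>

#define SCALE 128UL	/* stand-in for SCHED_LOAD_SCALE */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long this_load = 100, max_load = 400, avg_load = 250;
	unsigned long busiest_load_per_task = 128;	/* roughly one default task (illustrative) */
	unsigned long this_power = SCALE, busiest_power = SCALE;
	unsigned long max_pull, imbalance;

	/* don't pull so much that the busiest group would go idle */
	max_pull = min_ul(max_load - avg_load,
			  max_load - busiest_load_per_task);

	/* move the smaller of "what busiest can give" and "what we lack" */
	imbalance = min_ul(max_pull * busiest_power,
			   (avg_load - this_load) * this_power) / SCALE;

	printf("imbalance = %lu units of weighted load\n", imbalance);	/* 150 */
	return 0;
}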
 
@@ -2115,39 +2532,50 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                 * moving them.
                 */
 
-               pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load);
-               pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load);
+               pwr_now += busiest->cpu_power *
+                       min(busiest_load_per_task, max_load);
+               pwr_now += this->cpu_power *
+                       min(this_load_per_task, this_load);
                pwr_now /= SCHED_LOAD_SCALE;
 
                /* Amount of load we'd subtract */
-               tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power;
+               tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
+                       busiest->cpu_power;
                if (max_load > tmp)
-                       pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE,
-                                                       max_load - tmp);
+                       pwr_move += busiest->cpu_power *
+                               min(busiest_load_per_task, max_load - tmp);
 
                /* Amount of load we'd add */
-               if (max_load*busiest->cpu_power <
-                               SCHED_LOAD_SCALE*SCHED_LOAD_SCALE)
-                       tmp = max_load*busiest->cpu_power/this->cpu_power;
+               if (max_load * busiest->cpu_power <
+                               busiest_load_per_task * SCHED_LOAD_SCALE)
+                       tmp = max_load * busiest->cpu_power / this->cpu_power;
                else
-                       tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
-               pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp);
+                       tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
+                               this->cpu_power;
+               pwr_move += this->cpu_power *
+                       min(this_load_per_task, this_load + tmp);
                pwr_move /= SCHED_LOAD_SCALE;
 
                /* Move if we gain throughput */
                if (pwr_move <= pwr_now)
                        goto out_balanced;
 
-               *imbalance = 1;
-               return busiest;
+               *imbalance = busiest_load_per_task;
        }
 
-       /* Get rid of the scaling factor, rounding down as we divide */
-       *imbalance = *imbalance / SCHED_LOAD_SCALE;
        return busiest;
 
 out_balanced:
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+       if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+               goto ret;
 
+       if (this == group_leader && group_leader != group_min) {
+               *imbalance = min_load_per_task;
+               return group_min;
+       }
+#endif
+ret:
        *imbalance = 0;
        return NULL;
 }
@@ -2155,22 +2583,27 @@ out_balanced:
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
-static runqueue_t *find_busiest_queue(struct sched_group *group,
-       enum idle_type idle, cpumask_t *cpus)
+static struct rq *
+find_busiest_queue(struct sched_group *group, enum idle_type idle,
+                  unsigned long imbalance, cpumask_t *cpus)
 {
-       unsigned long load, max_load = 0;
-       runqueue_t *busiest = NULL;
+       struct rq *busiest = NULL, *rq;
+       unsigned long max_load = 0;
        int i;
 
        for_each_cpu_mask(i, group->cpumask) {
+
                if (!cpu_isset(i, *cpus))
                        continue;
 
-               load = source_load(i, 0);
+               rq = cpu_rq(i);
 
-               if (load > max_load) {
-                       max_load = load;
-                       busiest = cpu_rq(i);
+               if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
+                       continue;
+
+               if (rq->raw_weighted_load > max_load) {
+                       max_load = rq->raw_weighted_load;
+                       busiest = rq;
                }
        }
 
@@ -2183,37 +2616,51 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
  */
 #define MAX_PINNED_INTERVAL    512
 
+static inline unsigned long minus_1_or_zero(unsigned long n)
+{
+       return n > 0 ? n - 1 : 0;
+}
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
- *
- * Called with this_rq unlocked.
  */
-static int load_balance(int this_cpu, runqueue_t *this_rq,
-                       struct sched_domain *sd, enum idle_type idle)
+static int load_balance(int this_cpu, struct rq *this_rq,
+                       struct sched_domain *sd, enum idle_type idle,
+                       int *balance)
 {
+       int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
        struct sched_group *group;
-       runqueue_t *busiest;
        unsigned long imbalance;
-       int nr_moved, all_pinned = 0;
-       int active_balance = 0;
-       int sd_idle = 0;
+       struct rq *busiest;
        cpumask_t cpus = CPU_MASK_ALL;
+       unsigned long flags;
 
-       if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER)
+       /*
+        * When the power savings policy is enabled for the parent domain, an
+        * idle sibling can pick up load irrespective of the busy siblings. In
+        * this case, let the state of the idle sibling percolate up as IDLE
+        * instead of portraying it as NOT_IDLE.
+        */
+       if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
+           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
                sd_idle = 1;
 
        schedstat_inc(sd, lb_cnt[idle]);
 
 redo:
-       group = find_busiest_group(sd, this_cpu, &imbalance, idle,
-                       &sd_idle, &cpus);
+       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
+                                  &cpus, balance);
+
+       if (*balance == 0)
+               goto out_balanced;
+
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[idle]);
                goto out_balanced;
        }
 
-       busiest = find_busiest_queue(group, idle, &cpus);
+       busiest = find_busiest_queue(group, idle, imbalance, &cpus);
        if (!busiest) {
                schedstat_inc(sd, lb_nobusyq[idle]);
                goto out_balanced;
@@ -2231,14 +2678,17 @@ redo:
                 * still unbalanced. nr_moved simply stays zero, so it is
                 * correctly treated as an imbalance.
                 */
+               local_irq_save(flags);
                double_rq_lock(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
-                                       imbalance, sd, idle, &all_pinned);
+                                     minus_1_or_zero(busiest->nr_running),
+                                     imbalance, sd, idle, &all_pinned);
                double_rq_unlock(this_rq, busiest);
+               local_irq_restore(flags);
 
                /* All tasks on this runqueue were pinned by CPU affinity */
                if (unlikely(all_pinned)) {
-                       cpu_clear(busiest->cpu, cpus);
+                       cpu_clear(cpu_of(busiest), cpus);
                        if (!cpus_empty(cpus))
                                goto redo;
                        goto out_balanced;
@@ -2251,13 +2701,13 @@ redo:
 
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-                       spin_lock(&busiest->lock);
+                       spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the migration_thread, if the curr
                         * task on busiest cpu can't be moved to this_cpu
                         */
                        if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
-                               spin_unlock(&busiest->lock);
+                               spin_unlock_irqrestore(&busiest->lock, flags);
                                all_pinned = 1;
                                goto out_one_pinned;
                        }
@@ -2267,7 +2717,7 @@ redo:
                                busiest->push_cpu = this_cpu;
                                active_balance = 1;
                        }
-                       spin_unlock(&busiest->lock);
+                       spin_unlock_irqrestore(&busiest->lock, flags);
                        if (active_balance)
                                wake_up_process(busiest->migration_thread);
 
@@ -2294,7 +2744,8 @@ redo:
                        sd->balance_interval *= 2;
        }
 
-       if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+       if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
                return -1;
        return nr_moved;
 
@@ -2309,7 +2760,8 @@ out_one_pinned:
                        (sd->balance_interval < sd->max_interval))
                sd->balance_interval *= 2;
 
-       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
                return -1;
        return 0;
 }
@@ -2321,29 +2773,37 @@ out_one_pinned:
  * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
  * this_rq is locked.
  */
-static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
-                               struct sched_domain *sd)
+static int
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 {
        struct sched_group *group;
-       runqueue_t *busiest = NULL;
+       struct rq *busiest = NULL;
        unsigned long imbalance;
        int nr_moved = 0;
        int sd_idle = 0;
        cpumask_t cpus = CPU_MASK_ALL;
 
-       if (sd->flags & SD_SHARE_CPUPOWER)
+       /*
+        * When the power savings policy is enabled for the parent domain, an
+        * idle sibling can pick up load irrespective of the busy siblings. In
+        * this case, let the state of the idle sibling percolate up as IDLE
+        * instead of portraying it as NOT_IDLE.
+        */
+       if (sd->flags & SD_SHARE_CPUPOWER &&
+           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
                sd_idle = 1;
 
        schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
 redo:
        group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
-                       &sd_idle, &cpus);
+                                  &sd_idle, &cpus, NULL);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
                goto out_balanced;
        }
 
-       busiest = find_busiest_queue(group, NEWLY_IDLE, &cpus);
+       busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
+                               &cpus);
        if (!busiest) {
                schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
                goto out_balanced;
@@ -2358,11 +2818,12 @@ redo:
                /* Attempt to move tasks */
                double_lock_balance(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
+                                       minus_1_or_zero(busiest->nr_running),
                                        imbalance, sd, NEWLY_IDLE, NULL);
                spin_unlock(&busiest->lock);
 
                if (!nr_moved) {
-                       cpu_clear(busiest->cpu, cpus);
+                       cpu_clear(cpu_of(busiest), cpus);
                        if (!cpus_empty(cpus))
                                goto redo;
                }
@@ -2370,7 +2831,8 @@ redo:
 
        if (!nr_moved) {
                schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
-               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+                   !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
                        return -1;
        } else
                sd->nr_balance_failed = 0;
@@ -2379,9 +2841,11 @@ redo:
 
 out_balanced:
        schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
-       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
                return -1;
        sd->nr_balance_failed = 0;
+
        return 0;
 }
 
@@ -2389,18 +2853,31 @@ out_balanced:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static void idle_balance(int this_cpu, runqueue_t *this_rq)
+static void idle_balance(int this_cpu, struct rq *this_rq)
 {
        struct sched_domain *sd;
+       int pulled_task = 0;
+       unsigned long next_balance = jiffies + 60 * HZ;
 
        for_each_domain(this_cpu, sd) {
                if (sd->flags & SD_BALANCE_NEWIDLE) {
-                       if (load_balance_newidle(this_cpu, this_rq, sd)) {
-                               /* We've pulled tasks over so stop searching */
+                       /* If we've pulled tasks over, stop searching: */
+                       pulled_task = load_balance_newidle(this_cpu,
+                                                       this_rq, sd);
+                       if (time_after(next_balance,
+                                 sd->last_balance + sd->balance_interval))
+                               next_balance = sd->last_balance
+                                               + sd->balance_interval;
+                       if (pulled_task)
                                break;
-                       }
                }
        }
+       if (!pulled_task)
+               /*
+                * We are going idle. next_balance may be set based on
+                * a busy processor. So reset next_balance.
+                */
+               this_rq->next_balance = next_balance;
 }
 
 /*
@@ -2411,14 +2888,14 @@ static void idle_balance(int this_cpu, runqueue_t *this_rq)
  *
  * Called with busiest_rq locked.
  */
-static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
+static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 {
-       struct sched_domain *sd;
-       runqueue_t *target_rq;
        int target_cpu = busiest_rq->push_cpu;
+       struct sched_domain *sd;
+       struct rq *target_rq;
 
+       /* Is there any task to move? */
        if (busiest_rq->nr_running <= 1)
-               /* no task to move */
                return;
 
        target_rq = cpu_rq(target_cpu);
@@ -2434,50 +2911,38 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
        double_lock_balance(busiest_rq, target_rq);
 
        /* Search for an sd spanning us and the target CPU. */
-       for_each_domain(target_cpu, sd)
+       for_each_domain(target_cpu, sd) {
                if ((sd->flags & SD_LOAD_BALANCE) &&
-                       cpu_isset(busiest_cpu, sd->span))
+                   cpu_isset(busiest_cpu, sd->span))
                                break;
+       }
 
-       if (unlikely(sd == NULL))
-               goto out;
-
-       schedstat_inc(sd, alb_cnt);
+       if (likely(sd)) {
+               schedstat_inc(sd, alb_cnt);
 
-       if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL))
-               schedstat_inc(sd, alb_pushed);
-       else
-               schedstat_inc(sd, alb_failed);
-out:
+               if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
+                              RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
+                              NULL))
+                       schedstat_inc(sd, alb_pushed);
+               else
+                       schedstat_inc(sd, alb_failed);
+       }
        spin_unlock(&target_rq->lock);
 }
 
-/*
- * rebalance_tick will get called every timer tick, on every CPU.
- *
- * It checks each scheduling domain to see if it is due to be balanced,
- * and initiates a balancing operation if so.
- *
- * Balancing parameters are set up in arch_init_sched_domains.
- */
+static void update_load(struct rq *this_rq)
+{
+       unsigned long this_load;
+       int i, scale;
 
-/* Don't have all balancing operations going off at once */
-#define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
+       this_load = this_rq->raw_weighted_load;
 
-static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
-                          enum idle_type idle)
-{
-       unsigned long old_load, this_load;
-       unsigned long j = jiffies + CPU_OFFSET(this_cpu);
-       struct sched_domain *sd;
-       int i;
+       /* Update our load: */
+       for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
+               unsigned long old_load, new_load;
 
-       this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
-       /* Update our load */
-       for (i = 0; i < 3; i++) {
-               unsigned long new_load = this_load;
-               int scale = 1 << i;
                old_load = this_rq->cpu_load[i];
+               new_load = this_load;
                /*
                 * Round up the averaging division if load is increasing. This
                 * prevents us from getting stuck on 9 if the load is 10, for
@@ -2487,10 +2952,34 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
                        new_load += scale-1;
                this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
        }
+}
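update_load() keeps three decayed copies of the runqueue's weighted load with progressively longer memory (scale 1, 2 and 4); rounding up while the load rises keeps the average from sticking just below the new value. A standalone sketch showing the three averages converging on a constant raw load of 10 (values are arbitrary):

#include <stdio.h>

static void decay_load(unsigned long *cpu_load, unsigned long this_load)
{
	int i, scale;

	for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
		unsigned long old_load = cpu_load[i];
		unsigned long new_load = this_load;

		if (new_load > old_load)
			new_load += scale - 1;	/* round up when rising */
		cpu_load[i] = (old_load * (scale - 1) + new_load) / scale;
	}
}

int main(void)
{
	unsigned long cpu_load[3] = { 0, 0, 0 };
	int tick;

	for (tick = 0; tick < 4; tick++) {
		decay_load(cpu_load, 10);
		printf("tick %d: %lu %lu %lu\n", tick,
		       cpu_load[0], cpu_load[1], cpu_load[2]);
	}
	/* prints: 10 5 3, then 10 8 5, 10 9 7, 10 10 8 */
	return 0;
}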
 
-       for_each_domain(this_cpu, sd) {
-               unsigned long interval;
+/*
+ * run_rebalance_domains is triggered when needed from the scheduler tick.
+ *
+ * It checks each scheduling domain to see if it is due to be balanced,
+ * and initiates a balancing operation if so.
+ *
+ * Balancing parameters are set up in arch_init_sched_domains.
+ */
+static DEFINE_SPINLOCK(balancing);
+
+static void run_rebalance_domains(struct softirq_action *h)
+{
+       int this_cpu = smp_processor_id(), balance = 1;
+       struct rq *this_rq = cpu_rq(this_cpu);
+       unsigned long interval;
+       struct sched_domain *sd;
+       /*
+        * We are idle if there are no processes running. This
+        * is valid even if we are the idle process (SMT).
+        */
+       enum idle_type idle = !this_rq->nr_running ?
+                               SCHED_IDLE : NOT_IDLE;
+       /* Earliest time when we have to call run_rebalance_domains again */
+       unsigned long next_balance = jiffies + 60*HZ;
 
+       for_each_domain(this_cpu, sd) {
                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;
 
@@ -2503,8 +2992,13 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
                if (unlikely(!interval))
                        interval = 1;
 
-               if (j - sd->last_balance >= interval) {
-                       if (load_balance(this_cpu, this_rq, sd, idle)) {
+               if (sd->flags & SD_SERIALIZE) {
+                       if (!spin_trylock(&balancing))
+                               goto out;
+               }
+
+               if (time_after_eq(jiffies, sd->last_balance + interval)) {
+                       if (load_balance(this_cpu, this_rq, sd, idle, &balance)) {
                                /*
                                 * We've pulled tasks over so either we're no
                                 * longer idle, or one of our SMT siblings is
@@ -2512,38 +3006,48 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
                                 */
                                idle = NOT_IDLE;
                        }
-                       sd->last_balance += interval;
+                       sd->last_balance = jiffies;
                }
+               if (sd->flags & SD_SERIALIZE)
+                       spin_unlock(&balancing);
+out:
+               if (time_after(next_balance, sd->last_balance + interval))
+                       next_balance = sd->last_balance + interval;
+
+               /*
+                * Stop the load balance at this level. There is another
+                * CPU in our sched group which is doing load balancing more
+                * actively.
+                */
+               if (!balance)
+                       break;
        }
+       this_rq->next_balance = next_balance;
 }
 #else
 /*
  * on UP we do not need to balance between CPUs:
  */
-static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
-{
-}
-static inline void idle_balance(int cpu, runqueue_t *rq)
+static inline void idle_balance(int cpu, struct rq *rq)
 {
 }
 #endif
 
-static inline int wake_priority_sleeper(runqueue_t *rq)
+static inline void wake_priority_sleeper(struct rq *rq)
 {
-       int ret = 0;
 #ifdef CONFIG_SCHED_SMT
+       if (!rq->nr_running)
+               return;
+
        spin_lock(&rq->lock);
        /*
         * If an SMT sibling task has been put to sleep for priority
         * reasons reschedule the idle task to see if it can now run.
         */
-       if (rq->nr_running) {
+       if (rq->nr_running)
                resched_task(rq->idle);
-               ret = 1;
-       }
        spin_unlock(&rq->lock);
 #endif
-       return ret;
 }
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
@@ -2554,25 +3058,26 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * This is called on clock ticks and on context switches.
  * Bank in p->sched_time the ns elapsed since the last tick or switch.
  */
-static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
-                                   unsigned long long now)
+static inline void
+update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
 {
-       unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
-       p->sched_time += now - last;
+       p->sched_time += now - p->last_ran;
+       p->last_ran = rq->most_recent_timestamp = now;
 }
 
 /*
  * Return current->sched_time plus any more ns on the sched_clock
  * that have not yet been banked.
  */
-unsigned long long current_sched_time(const task_t *tsk)
+unsigned long long current_sched_time(const struct task_struct *p)
 {
        unsigned long long ns;
        unsigned long flags;
+
        local_irq_save(flags);
-       ns = max(tsk->timestamp, task_rq(tsk)->timestamp_last_tick);
-       ns = tsk->sched_time + (sched_clock() - ns);
+       ns = p->sched_time + sched_clock() - p->last_ran;
        local_irq_restore(flags);
+
        return ns;
 }
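The simplified accounting above banks the nanoseconds since p->last_ran into p->sched_time on every tick or context switch, so current_sched_time() only needs to add the not-yet-banked remainder. A small sketch of that bookkeeping with invented clock values:

#include <stdio.h>

struct fake_task {
	unsigned long long sched_time;	/* banked ns */
	unsigned long long last_ran;	/* clock value at the last banking */
};

static void bank(struct fake_task *p, unsigned long long now)
{
	p->sched_time += now - p->last_ran;
	p->last_ran = now;
}

static unsigned long long total_sched_time(const struct fake_task *p,
					   unsigned long long clock_now)
{
	return p->sched_time + (clock_now - p->last_ran);
}

int main(void)
{
	struct fake_task t = { 0, 1000 };

	bank(&t, 4000);		/* a tick at 4000ns banks 3000ns */
	printf("%llu\n", total_sched_time(&t, 5500));	/* 4500: 3000 banked + 1500 pending */
	return 0;
}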
 
@@ -2586,11 +3091,16 @@ unsigned long long current_sched_time(const task_t *tsk)
  * increasing number of running tasks. We also ignore the interactivity
  * if a better static_prio task has expired:
  */
-#define EXPIRED_STARVING(rq) \
-       ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-               (jiffies - (rq)->expired_timestamp >= \
-                       STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-                       ((rq)->curr->static_prio > (rq)->best_expired_prio))
+static inline int expired_starving(struct rq *rq)
+{
+       if (rq->curr->static_prio > rq->best_expired_prio)
+               return 1;
+       if (!STARVATION_LIMIT || !rq->expired_timestamp)
+               return 0;
+       if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running)
+               return 1;
+       return 0;
+}
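A standalone restatement of the expired_starving() test with the runqueue fields passed in as parameters; STARVATION_LIMIT is left as an argument because its kernel value is derived from the timeslice tunables (the numbers in main() are invented):

#include <stdio.h>

static int starving(unsigned long starvation_limit,
		    unsigned long expired_timestamp,
		    unsigned long now_jiffies,
		    unsigned long nr_running,
		    int curr_static_prio, int best_expired_prio)
{
	if (curr_static_prio > best_expired_prio)
		return 1;	/* a better static-prio task waits on expired */
	if (!starvation_limit || !expired_timestamp)
		return 0;
	if (now_jiffies - expired_timestamp > starvation_limit * nr_running)
		return 1;	/* the expired array has waited too long */
	return 0;
}

int main(void)
{
	/* 3 runnable tasks, expired array waiting for 5001 jiffies */
	printf("%d\n", starving(1000, 10000, 15001, 3, 120, 120));	/* 1 */
	return 0;
}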
 
 /*
  * Account user cpu time to a process.
@@ -2627,7 +3137,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 {
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
        struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
-       runqueue_t *rq = this_rq();
+       struct rq *rq = this_rq();
        cputime64_t tmp;
 
        p->stime = cputime_add(p->stime, cputime);
@@ -2658,7 +3168,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 {
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
        cputime64_t tmp = cputime_to_cputime64(steal);
-       runqueue_t *rq = this_rq();
+       struct rq *rq = this_rq();
 
        if (p == rq->idle) {
                p->stime = cputime_add(p->stime, steal);
@@ -2670,39 +3180,12 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
                cpustat->steal = cputime64_add(cpustat->steal, tmp);
 }
 
-/*
- * This function gets called by the timer code, with HZ frequency.
- * We call it with interrupts disabled.
- *
- * It also gets called by the fork code, when changing the parent's
- * timeslices.
- */
-void scheduler_tick(void)
+static void task_running_tick(struct rq *rq, struct task_struct *p, int cpu)
 {
-       int cpu = smp_processor_id();
-       runqueue_t *rq = this_rq();
-       task_t *p = current;
-       unsigned long long now = sched_clock();
-
-       update_cpu_clock(p, rq, now);
-
-       rq->timestamp_last_tick = now;
-
-       if (p == rq->idle) {
-               if (wake_priority_sleeper(rq))
-                       goto out;
-#ifdef CONFIG_VSERVER_HARDCPU_IDLE
-               if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
-                       set_need_resched();
-#endif
-               rebalance_tick(cpu, rq, SCHED_IDLE);
-               return;
-       }
-
-       /* Task might have expired already, but not scheduled off yet */
        if (p->array != rq->active) {
+               /* Task has expired but was not scheduled yet */
                set_tsk_need_resched(p);
-               goto out;
+               return;
        }
        spin_lock(&rq->lock);
        /*
@@ -2727,7 +3210,7 @@ void scheduler_tick(void)
                }
                goto out_unlock;
        }
-       if (vx_need_resched(p)) {
+       if (vx_need_resched(p, --p->time_slice, cpu)) {
                dequeue_task(p, rq->active);
                set_tsk_need_resched(p);
                p->prio = effective_prio(p);
@@ -2736,7 +3219,7 @@ void scheduler_tick(void)
 
                if (!rq->expired_timestamp)
                        rq->expired_timestamp = jiffies;
-               if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+               if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
                        enqueue_task(p, rq->expired);
                        if (p->static_prio < rq->best_expired_prio)
                                rq->best_expired_prio = p->static_prio;
@@ -2770,60 +3253,75 @@ void scheduler_tick(void)
        }
 out_unlock:
        spin_unlock(&rq->lock);
-out:
-       rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+void scheduler_tick(void)
+{
+       unsigned long long now = sched_clock();
+       struct task_struct *p = current;
+       int cpu = smp_processor_id();
+       struct rq *rq = cpu_rq(cpu);
+
+       update_cpu_clock(p, rq, now);
+       vxm_sync(now, cpu);
+
+       if (p == rq->idle) {
+               /* Task on the idle queue */
+               wake_priority_sleeper(rq);
+               vx_idle_resched(rq);
+       } else
+               task_running_tick(rq, p, cpu);
+#ifdef CONFIG_SMP
+       update_load(rq);
+       if (time_after_eq(jiffies, rq->next_balance))
+               raise_softirq(SCHED_SOFTIRQ);
+#endif
 }
 
 #ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(runqueue_t *rq)
+static inline void wakeup_busy_runqueue(struct rq *rq)
 {
        /* If an SMT runqueue is sleeping due to priority reasons wake it up */
        if (rq->curr == rq->idle && rq->nr_running)
                resched_task(rq->idle);
 }
 
-static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+/*
+ * Called with interrupt disabled and this_rq's runqueue locked.
+ */
+static void wake_sleeping_dependent(int this_cpu)
 {
        struct sched_domain *tmp, *sd = NULL;
-       cpumask_t sibling_map;
        int i;
 
-       for_each_domain(this_cpu, tmp)
-               if (tmp->flags & SD_SHARE_CPUPOWER)
+       for_each_domain(this_cpu, tmp) {
+               if (tmp->flags & SD_SHARE_CPUPOWER) {
                        sd = tmp;
+                       break;
+               }
+       }
 
        if (!sd)
                return;
 
-       /*
-        * Unlock the current runqueue because we have to lock in
-        * CPU order to avoid deadlocks. Caller knows that we might
-        * unlock. We keep IRQs disabled.
-        */
-       spin_unlock(&this_rq->lock);
-
-       sibling_map = sd->span;
+       for_each_cpu_mask(i, sd->span) {
+               struct rq *smt_rq = cpu_rq(i);
 
-       for_each_cpu_mask(i, sibling_map)
-               spin_lock(&cpu_rq(i)->lock);
-       /*
-        * We clear this CPU from the mask. This both simplifies the
-        * inner loop and keps this_rq locked when we exit:
-        */
-       cpu_clear(this_cpu, sibling_map);
-
-       for_each_cpu_mask(i, sibling_map) {
-               runqueue_t *smt_rq = cpu_rq(i);
+               if (i == this_cpu)
+                       continue;
+               if (unlikely(!spin_trylock(&smt_rq->lock)))
+                       continue;
 
                wakeup_busy_runqueue(smt_rq);
+               spin_unlock(&smt_rq->lock);
        }
-
-       for_each_cpu_mask(i, sibling_map)
-               spin_unlock(&cpu_rq(i)->lock);
-       /*
-        * We exit with this_cpu's rq still held and IRQs
-        * still disabled:
-        */
 }
 
 /*
@@ -2831,57 +3329,53 @@ static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
  * utilize, if another task runs on a sibling. This models the
  * slowdown effect of other tasks running on siblings:
  */
-static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+static inline unsigned long
+smt_slice(struct task_struct *p, struct sched_domain *sd)
 {
        return p->time_slice * (100 - sd->per_cpu_gain) / 100;
 }
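A quick numeric example of smt_slice(): assuming a per_cpu_gain of, say, 25 (a sibling contributes roughly a quarter of a CPU), a task nominally holding a 100ms slice is credited with 75ms of effective CPU time when the dependent-sleeper comparisons below are made:

#include <stdio.h>

static unsigned long smt_slice(unsigned long time_slice,
			       unsigned int per_cpu_gain)
{
	return time_slice * (100 - per_cpu_gain) / 100;
}

int main(void)
{
	printf("%lu ms\n", smt_slice(100, 25));	/* 75 ms */
	return 0;
}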
 
-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+/*
+ * To minimise lock contention and avoid having to drop this_rq's runqueue
+ * lock, we only trylock the sibling runqueues and bypass any runqueue whose
+ * lock we fail to acquire. As we only trylock, the normal locking order does
+ * not need to be obeyed.
+ */
+static int
+dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
 {
        struct sched_domain *tmp, *sd = NULL;
-       cpumask_t sibling_map;
-       prio_array_t *array;
        int ret = 0, i;
-       task_t *p;
 
-       for_each_domain(this_cpu, tmp)
-               if (tmp->flags & SD_SHARE_CPUPOWER)
+       /* kernel/rt threads do not participate in dependent sleeping */
+       if (!p->mm || rt_task(p))
+               return 0;
+
+       for_each_domain(this_cpu, tmp) {
+               if (tmp->flags & SD_SHARE_CPUPOWER) {
                        sd = tmp;
+                       break;
+               }
+       }
 
        if (!sd)
                return 0;
 
-       /*
-        * The same locking rules and details apply as for
-        * wake_sleeping_dependent():
-        */
-       spin_unlock(&this_rq->lock);
-       sibling_map = sd->span;
-       for_each_cpu_mask(i, sibling_map)
-               spin_lock(&cpu_rq(i)->lock);
-       cpu_clear(this_cpu, sibling_map);
+       for_each_cpu_mask(i, sd->span) {
+               struct task_struct *smt_curr;
+               struct rq *smt_rq;
 
-       /*
-        * Establish next task to be run - it might have gone away because
-        * we released the runqueue lock above:
-        */
-       if (!this_rq->nr_running)
-               goto out_unlock;
-       array = this_rq->active;
-       if (!array->nr_active)
-               array = this_rq->expired;
-       BUG_ON(!array->nr_active);
+               if (i == this_cpu)
+                       continue;
 
-       p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
-               task_t, run_list);
+               smt_rq = cpu_rq(i);
+               if (unlikely(!spin_trylock(&smt_rq->lock)))
+                       continue;
 
-       for_each_cpu_mask(i, sibling_map) {
-               runqueue_t *smt_rq = cpu_rq(i);
-               task_t *smt_curr = smt_rq->curr;
+               smt_curr = smt_rq->curr;
 
-               /* Kernel threads do not participate in dependent sleeping */
-               if (!p->mm || !smt_curr->mm || rt_task(p))
-                       goto check_smt_task;
+               if (!smt_curr->mm)
+                       goto unlock;
 
                /*
                 * If a user task with lower static priority than the
@@ -2899,49 +3393,23 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
                        if ((jiffies % DEF_TIMESLICE) >
                                (sd->per_cpu_gain * DEF_TIMESLICE / 100))
                                        ret = 1;
-               } else
+               } else {
                        if (smt_curr->static_prio < p->static_prio &&
                                !TASK_PREEMPTS_CURR(p, smt_rq) &&
                                smt_slice(smt_curr, sd) > task_timeslice(p))
                                        ret = 1;
-
-check_smt_task:
-               if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
-                       rt_task(smt_curr))
-                               continue;
-               if (!p->mm) {
-                       wakeup_busy_runqueue(smt_rq);
-                       continue;
-               }
-
-               /*
-                * Reschedule a lower priority task on the SMT sibling for
-                * it to be put to sleep, or wake it up if it has been put to
-                * sleep for priority reasons to see if it should run now.
-                */
-               if (rt_task(p)) {
-                       if ((jiffies % DEF_TIMESLICE) >
-                               (sd->per_cpu_gain * DEF_TIMESLICE / 100))
-                                       resched_task(smt_curr);
-               } else {
-                       if (TASK_PREEMPTS_CURR(p, smt_rq) &&
-                               smt_slice(p, sd) > task_timeslice(smt_curr))
-                                       resched_task(smt_curr);
-                       else
-                               wakeup_busy_runqueue(smt_rq);
                }
+unlock:
+               spin_unlock(&smt_rq->lock);
        }
-out_unlock:
-       for_each_cpu_mask(i, sibling_map)
-               spin_unlock(&cpu_rq(i)->lock);
        return ret;
 }
 #else
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static inline void wake_sleeping_dependent(int this_cpu)
 {
 }
-
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static inline int
+dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
 {
        return 0;
 }
@@ -2954,12 +3422,14 @@ void fastcall add_preempt_count(int val)
        /*
         * Underflow?
         */
-       BUG_ON((preempt_count() < 0));
+       if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
+               return;
        preempt_count() += val;
        /*
         * Spinlock count overflowing soon?
         */
-       BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
+       DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
+                               PREEMPT_MASK - 10);
 }
 EXPORT_SYMBOL(add_preempt_count);
 
@@ -2968,47 +3438,54 @@ void fastcall sub_preempt_count(int val)
        /*
         * Underflow?
         */
-       BUG_ON(val > preempt_count());
+       if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+               return;
        /*
         * Is the spinlock portion underflowing?
         */
-       BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK));
+       if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
+                       !(preempt_count() & PREEMPT_MASK)))
+               return;
+
        preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
 
 #endif
 
+static inline int interactive_sleep(enum sleep_type sleep_type)
+{
+       return (sleep_type == SLEEP_INTERACTIVE ||
+               sleep_type == SLEEP_INTERRUPTED);
+}
+
 /*
  * schedule() is the main scheduler function.
  */
 asmlinkage void __sched schedule(void)
 {
-       long *switch_count;
-       task_t *prev, *next;
-       runqueue_t *rq;
-       prio_array_t *array;
+       struct task_struct *prev, *next;
+       struct prio_array *array;
        struct list_head *queue;
        unsigned long long now;
        unsigned long run_time;
        int cpu, idx, new_prio;
-       struct vx_info *vxi;
-#ifdef CONFIG_VSERVER_HARDCPU
-       int maxidle = -HZ;
-#endif
+       long *switch_count;
+       struct rq *rq;
 
        /*
         * Test if we are atomic.  Since do_exit() needs to call into
         * schedule() atomically, we ignore that path for now.
         * Otherwise, whine if we are scheduling when we should not be.
         */
-       if (likely(!current->exit_state)) {
-               if (unlikely(in_atomic())) {
-                       printk(KERN_ERR "scheduling while atomic: "
-                               "%s/0x%08x/%d\n",
-                               current->comm, preempt_count(), current->pid);
-                       dump_stack();
-               }
+       if (unlikely(in_atomic() && !current->exit_state)) {
+               printk(KERN_ERR "BUG: scheduling while atomic: "
+                       "%s/0x%08x/%d\n",
+                       current->comm, preempt_count(), current->pid);
+               debug_show_held_locks(current);
+               if (irqs_disabled())
+                       print_irqtrace_events(current);
+               dump_stack();
        }
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
@@ -3045,9 +3522,6 @@ need_resched_nonpreemptible:
 
        spin_lock_irq(&rq->lock);
 
-       if (unlikely(prev->flags & PF_DEAD))
-               prev->state = EXIT_DEAD;
-
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                switch_count = &prev->nvcsw;
@@ -3063,61 +3537,24 @@ need_resched_nonpreemptible:
                }
        }
 
-#ifdef CONFIG_VSERVER_HARDCPU
-       if (!list_empty(&rq->hold_queue)) {
-               struct list_head *l, *n;
-               int ret;
-
-               vxi = NULL;
-               list_for_each_safe(l, n, &rq->hold_queue) {
-                       next = list_entry(l, task_t, run_list);
-                       if (vxi == next->vx_info)
-                               continue;
-
-                       vxi = next->vx_info;
-                       ret = vx_tokens_recalc(vxi);
-
-                       if (ret > 0) {
-                               vx_unhold_task(vxi, next, rq);
-                               break;
-                       }
-                       if ((ret < 0) && (maxidle < ret))
-                               maxidle = ret;
-               }
-       }
-       rq->idle_tokens = -maxidle;
-
+       cpu = smp_processor_id();
+       vx_set_rq_time(rq, jiffies);
+try_unhold:
+       vx_try_unhold(rq, cpu);
 pick_next:
-#endif
 
-       cpu = smp_processor_id();
        if (unlikely(!rq->nr_running)) {
-go_idle:
+               /* can we skip idle time? */
+               if (vx_try_skip(rq, cpu))
+                       goto try_unhold;
+
                idle_balance(cpu, rq);
                if (!rq->nr_running) {
                        next = rq->idle;
                        rq->expired_timestamp = 0;
-                       wake_sleeping_dependent(cpu, rq);
-                       /*
-                        * wake_sleeping_dependent() might have released
-                        * the runqueue, so break out if we got new
-                        * tasks meanwhile:
-                        */
-                       if (!rq->nr_running)
-                               goto switch_tasks;
-               }
-       } else {
-               if (dependent_sleeper(cpu, rq)) {
-                       next = rq->idle;
+                       wake_sleeping_dependent(cpu);
                        goto switch_tasks;
                }
-               /*
-                * dependent_sleeper() releases and reacquires the runqueue
-                * lock, hence go into the idle loop if the rq went
-                * empty meanwhile:
-                */
-               if (unlikely(!rq->nr_running))
-                       goto go_idle;
        }
 
        array = rq->active;
@@ -3135,30 +3572,18 @@ go_idle:
 
        idx = sched_find_first_bit(array->bitmap);
        queue = array->queue + idx;
-       next = list_entry(queue->next, task_t, run_list);
-
-       vxi = next->vx_info;
-#ifdef CONFIG_VSERVER_HARDCPU
-       if (vx_info_flags(vxi, VXF_SCHED_PAUSE|VXF_SCHED_HARD, 0)) {
-               int ret = vx_tokens_recalc(vxi);
-
-               if (unlikely(ret <= 0)) {
-                       if (ret && (rq->idle_tokens > -ret))
-                               rq->idle_tokens = -ret;
-                       vx_hold_task(vxi, next, rq);
-                       goto pick_next;
-               }
-       } else  /* well, looks ugly but not as ugly as the ifdef-ed version */
-#endif
-       if (vx_info_flags(vxi, VXF_SCHED_PRIO, 0))
-               vx_tokens_recalc(vxi);
+       next = list_entry(queue->next, struct task_struct, run_list);
+
+       /* check before we schedule this context */
+       if (!vx_schedule(next, rq, cpu))
+               goto pick_next;
 
-       if (!rt_task(next) && next->activated > 0) {
+       if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
                unsigned long long delta = now - next->timestamp;
                if (unlikely((long long)(now - next->timestamp) < 0))
                        delta = 0;
 
-               if (next->activated == 1)
+               if (next->sleep_type == SLEEP_INTERACTIVE)
                        delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
 
                array = next->array;
@@ -3168,10 +3593,11 @@ go_idle:
                        dequeue_task(next, array);
                        next->prio = new_prio;
                        enqueue_task(next, array);
-               } else
-                       requeue_task(next, array);
+               }
        }
-       next->activated = 0;
+       next->sleep_type = SLEEP_NORMAL;
+       if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next))
+               next = rq->idle;
 switch_tasks:
        if (next == rq->idle)
                schedstat_inc(rq, sched_goidle);
@@ -3189,7 +3615,7 @@ switch_tasks:
 
        sched_info_switch(prev, next);
        if (likely(prev != next)) {
-               next->timestamp = now;
+               next->timestamp = next->last_ran = now;
                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;
@@ -3213,12 +3639,11 @@ switch_tasks:
        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
                goto need_resched;
 }
-
 EXPORT_SYMBOL(schedule);
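
The pick-next path above is the classic O(1) design: a per-runqueue bitmap with one bit per priority level, so the highest-priority non-empty queue is found with a single find-first-bit scan and the head of that list becomes "next". For illustration, a minimal user-space sketch of that lookup (MAX_PRIO of 140 taken from this kernel; ordinary __builtin_ctzl stands in for sched_find_first_bit, and the real array also carries a delimiter bit this sketch omits):

#include <stdio.h>
#include <string.h>

#define MAX_PRIO 140
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define BITMAP_LONGS ((MAX_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct prio_array_sketch {
	unsigned long bitmap[BITMAP_LONGS];	/* bit i set => queue[i] non-empty */
	int queue_len[MAX_PRIO];		/* stand-in for the real list heads */
};

static void enqueue(struct prio_array_sketch *a, int prio)
{
	a->queue_len[prio]++;
	a->bitmap[prio / BITS_PER_LONG] |= 1UL << (prio % BITS_PER_LONG);
}

/* lowest set bit == numerically smallest prio == highest priority */
static int find_first_prio(struct prio_array_sketch *a)
{
	int i;

	for (i = 0; i < BITMAP_LONGS; i++)
		if (a->bitmap[i])
			return i * BITS_PER_LONG + __builtin_ctzl(a->bitmap[i]);
	return MAX_PRIO;	/* empty: the caller would pick the idle task */
}

int main(void)
{
	struct prio_array_sketch a;

	memset(&a, 0, sizeof(a));
	enqueue(&a, 120);	/* a nice-0 task */
	enqueue(&a, 50);	/* an RT task */
	printf("next prio to run: %d\n", find_first_prio(&a));	/* prints 50 */
	return 0;
}
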
 
 #ifdef CONFIG_PREEMPT
 /*
- * this is is the entry point to schedule() from in-kernel preemption
+ * this is the entry point to schedule() from in-kernel preemption
  * off of preempt_enable.  Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
@@ -3233,7 +3658,7 @@ asmlinkage void __sched preempt_schedule(void)
         * If there is a non-zero preempt_count or interrupts are disabled,
         * we do not want to preempt the current task.  Just return..
         */
-       if (unlikely(ti->preempt_count || irqs_disabled()))
+       if (likely(ti->preempt_count || irqs_disabled()))
                return;
 
 need_resched:
@@ -3258,11 +3683,10 @@ need_resched:
        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
                goto need_resched;
 }
-
 EXPORT_SYMBOL(preempt_schedule);
 
 /*
- * this is is the entry point to schedule() from kernel preemption
+ * this is the entry point to schedule() from kernel preemption
  * off of irq context.
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
@@ -3274,7 +3698,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
        struct task_struct *task = current;
        int saved_lock_depth;
 #endif
-       /* Catch callers which need to be fixed*/
+       /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());
 
 need_resched:
@@ -3307,10 +3731,8 @@ need_resched:
 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
                          void *key)
 {
-       task_t *p = curr->private;
-       return try_to_wake_up(p, mode, sync);
+       return try_to_wake_up(curr->private, mode, sync);
 }
-
 EXPORT_SYMBOL(default_wake_function);
 
 /*
@@ -3328,13 +3750,11 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
        struct list_head *tmp, *next;
 
        list_for_each_safe(tmp, next, &q->task_list) {
-               wait_queue_t *curr;
-               unsigned flags;
-               curr = list_entry(tmp, wait_queue_t, task_list);
-               flags = curr->flags;
+               wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
+               unsigned flags = curr->flags;
+
                if (curr->func(curr, mode, sync, key) &&
-                   (flags & WQ_FLAG_EXCLUSIVE) &&
-                   !--nr_exclusive)
+                               (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
 }
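
The loop above encodes the "wake nr_exclusive exclusive waiters, plus every non-exclusive waiter ahead of them" rule: a waiter only counts against nr_exclusive when its wake function succeeds and WQ_FLAG_EXCLUSIVE is set. A small stand-alone sketch of the same counting rule (waiters reduced to a flag and a flag value borrowed from the wait-queue API; not real wait_queue_t entries):

#include <stdio.h>

#define WQ_FLAG_EXCLUSIVE 0x01

struct waiter {
	unsigned int flags;
	int woken;
};

/* mirrors the __wake_up_common termination rule */
static void wake_up_sketch(struct waiter *w, int n, int nr_exclusive)
{
	int i;

	for (i = 0; i < n; i++) {
		w[i].woken = 1;	/* stands in for curr->func() succeeding */
		if ((w[i].flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

int main(void)
{
	struct waiter w[4] = {
		{ 0, 0 },			/* non-exclusive, always woken */
		{ WQ_FLAG_EXCLUSIVE, 0 },
		{ WQ_FLAG_EXCLUSIVE, 0 },	/* not reached with nr_exclusive=1 */
		{ 0, 0 },
	};
	int i;

	wake_up_sketch(w, 4, 1);
	for (i = 0; i < 4; i++)
		printf("waiter %d woken=%d\n", i, w[i].woken);
	return 0;
}
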
@@ -3355,7 +3775,6 @@ void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
        __wake_up_common(q, mode, nr_exclusive, 0, key);
        spin_unlock_irqrestore(&q->lock, flags);
 }
-
 EXPORT_SYMBOL(__wake_up);
 
 /*
@@ -3424,6 +3843,7 @@ EXPORT_SYMBOL(complete_all);
 void fastcall __sched wait_for_completion(struct completion *x)
 {
        might_sleep();
+
        spin_lock_irq(&x->wait.lock);
        if (!x->done) {
                DECLARE_WAITQUEUE(wait, current);
@@ -3558,17 +3978,27 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
        __remove_wait_queue(q, &wait);                  \
        spin_unlock_irqrestore(&q->lock, flags);
 
+#define SLEEP_ON_BKLCHECK                              \
+       if (unlikely(!kernel_locked()) &&               \
+           sleep_on_bkl_warnings < 10) {               \
+               sleep_on_bkl_warnings++;                \
+               WARN_ON(1);                             \
+       }
+
+static int sleep_on_bkl_warnings;
+
 void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
 {
        SLEEP_ON_VAR
 
+       SLEEP_ON_BKLCHECK
+
        current->state = TASK_INTERRUPTIBLE;
 
        SLEEP_ON_HEAD
        schedule();
        SLEEP_ON_TAIL
 }
-
 EXPORT_SYMBOL(interruptible_sleep_on);
 
 long fastcall __sched
@@ -3576,6 +4006,8 @@ interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
        SLEEP_ON_VAR
 
+       SLEEP_ON_BKLCHECK
+
        current->state = TASK_INTERRUPTIBLE;
 
        SLEEP_ON_HEAD
@@ -3584,43 +4016,84 @@ interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 
        return timeout;
 }
-
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
-void fastcall __sched sleep_on(wait_queue_head_t *q)
+long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
        SLEEP_ON_VAR
 
+       SLEEP_ON_BKLCHECK
+
        current->state = TASK_UNINTERRUPTIBLE;
 
        SLEEP_ON_HEAD
-       schedule();
+       timeout = schedule_timeout(timeout);
        SLEEP_ON_TAIL
+
+       return timeout;
 }
 
-EXPORT_SYMBOL(sleep_on);
+EXPORT_SYMBOL(sleep_on_timeout);
 
-long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+#ifdef CONFIG_RT_MUTEXES
+
+/*
+ * rt_mutex_setprio - set the current priority of a task
+ * @p: task
+ * @prio: prio value (kernel-internal form)
+ *
+ * This function changes the 'effective' priority of a task. It does
+ * not touch ->normal_prio like __setscheduler().
+ *
+ * Used by the rt_mutex code to implement priority inheritance logic.
+ */
+void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       SLEEP_ON_VAR
+       struct prio_array *array;
+       unsigned long flags;
+       struct rq *rq;
+       int oldprio;
 
-       current->state = TASK_UNINTERRUPTIBLE;
+       BUG_ON(prio < 0 || prio > MAX_PRIO);
 
-       SLEEP_ON_HEAD
-       timeout = schedule_timeout(timeout);
-       SLEEP_ON_TAIL
+       rq = task_rq_lock(p, &flags);
 
-       return timeout;
+       oldprio = p->prio;
+       array = p->array;
+       if (array)
+               dequeue_task(p, array);
+       p->prio = prio;
+
+       if (array) {
+               /*
+                * If changing to an RT priority then queue it
+                * in the active array!
+                */
+               if (rt_task(p))
+                       array = rq->active;
+               enqueue_task(p, array);
+               /*
+                * Reschedule if we are currently running on this runqueue and
+                * our priority decreased, or if we are not currently running on
+                * this runqueue and our priority is higher than the current's
+                */
+               if (task_running(rq, p)) {
+                       if (p->prio > oldprio)
+                               resched_task(rq->curr);
+               } else if (TASK_PREEMPTS_CURR(p, rq))
+                       resched_task(rq->curr);
+       }
+       task_rq_unlock(rq, &flags);
 }
 
-EXPORT_SYMBOL(sleep_on_timeout);
+#endif
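
rt_mutex_setprio() only overrides the effective ->prio; ->normal_prio keeps the value the task earned on its own, so the boost can be undone once the lock is released. A tiny model of that split, assuming only the usual convention that a numerically lower value means higher priority (this is a sketch, not the rt_mutex code path):

#include <stdio.h>

struct task_sketch {
	int normal_prio;	/* priority the task has by itself */
	int prio;		/* effective priority, possibly boosted */
};

/* boost (or un-boost) in the spirit of rt_mutex_setprio: never weaker
 * than normal_prio */
static void setprio_sketch(struct task_sketch *t, int waiter_prio)
{
	t->prio = waiter_prio < t->normal_prio ? waiter_prio : t->normal_prio;
}

int main(void)
{
	struct task_sketch owner = { .normal_prio = 120, .prio = 120 };

	setprio_sketch(&owner, 50);	/* RT waiter blocks on the owner's lock */
	printf("boosted prio %d (normal %d)\n", owner.prio, owner.normal_prio);
	setprio_sketch(&owner, 140);	/* no waiters left: back to normal_prio */
	printf("restored prio %d\n", owner.prio);
	return 0;
}
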
 
-void set_user_nice(task_t *p, long nice)
+void set_user_nice(struct task_struct *p, long nice)
 {
+       struct prio_array *array;
+       int old_prio, delta;
        unsigned long flags;
-       prio_array_t *array;
-       runqueue_t *rq;
-       int old_prio, new_prio, delta;
+       struct rq *rq;
 
        if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
                return;
@@ -3635,22 +4108,25 @@ void set_user_nice(task_t *p, long nice)
         * it won't have any effect on scheduling until the task is
         * not SCHED_NORMAL/SCHED_BATCH:
         */
-       if (rt_task(p)) {
+       if (has_rt_policy(p)) {
                p->static_prio = NICE_TO_PRIO(nice);
                goto out_unlock;
        }
        array = p->array;
-       if (array)
+       if (array) {
                dequeue_task(p, array);
+               dec_raw_weighted_load(rq, p);
+       }
 
-       old_prio = p->prio;
-       new_prio = NICE_TO_PRIO(nice);
-       delta = new_prio - old_prio;
        p->static_prio = NICE_TO_PRIO(nice);
-       p->prio += delta;
+       set_load_weight(p);
+       old_prio = p->prio;
+       p->prio = effective_prio(p);
+       delta = p->prio - old_prio;
 
        if (array) {
                enqueue_task(p, array);
+               inc_raw_weighted_load(rq, p);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
@@ -3661,7 +4137,6 @@ void set_user_nice(task_t *p, long nice)
 out_unlock:
        task_rq_unlock(rq, &flags);
 }
-
 EXPORT_SYMBOL(set_user_nice);
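
set_user_nice() funnels the nice value through NICE_TO_PRIO() into ->static_prio and then recomputes the effective priority. With this kernel's MAX_RT_PRIO of 100 the mapping is a plain offset; a quick user-space check of the arithmetic (macros copied from the 2.6 definitions):

#include <stdio.h>

#define MAX_RT_PRIO	100
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)

int main(void)
{
	int nice;

	for (nice = -20; nice <= 19; nice += 13)
		printf("nice %3d -> static_prio %d -> nice %d\n",
		       nice, NICE_TO_PRIO(nice), PRIO_TO_NICE(NICE_TO_PRIO(nice)));
	/* nice -20..19 maps onto static_prio 100..139, just past the
	 * RT range 0..99 */
	return 0;
}
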
 
 /*
@@ -3669,10 +4144,11 @@ EXPORT_SYMBOL(set_user_nice);
  * @p: task
  * @nice: nice value
  */
-int can_nice(const task_t *p, const int nice)
+int can_nice(const struct task_struct *p, const int nice)
 {
        /* convert nice value [19,-20] to rlimit style value [1,40] */
        int nice_rlim = 20 - nice;
+
        return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
                capable(CAP_SYS_NICE));
 }
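
can_nice() compares against RLIMIT_NICE, which is stored as 20 - nice so the limit grows as the permitted nice value drops. A user-space probe of the same rule (getrlimit() and RLIMIT_NICE are standard glibc/Linux interfaces; the nice value chosen here is just an example):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	int nice = -5;			/* nice value we would like to set */
	int nice_rlim = 20 - nice;	/* same conversion as can_nice() */

	if (getrlimit(RLIMIT_NICE, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	printf("RLIMIT_NICE cur=%lu, need >= %d for nice %d: %s\n",
	       (unsigned long)rl.rlim_cur, nice_rlim, nice,
	       rl.rlim_cur >= (rlim_t)nice_rlim ? "allowed" : "needs CAP_SYS_NICE");
	return 0;
}
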
@@ -3688,8 +4164,7 @@ int can_nice(const task_t *p, const int nice)
  */
 asmlinkage long sys_nice(int increment)
 {
-       int retval;
-       long nice;
+       long nice, retval;
 
        /*
         * Setpriority might change our priority at the same moment.
@@ -3728,7 +4203,7 @@ asmlinkage long sys_nice(int increment)
  * RT tasks are offset by -200. Normal tasks are centered
  * around 0, value goes from -16 to +15.
  */
-int task_prio(const task_t *p)
+int task_prio(const struct task_struct *p)
 {
        return p->prio - MAX_RT_PRIO;
 }
@@ -3737,7 +4212,7 @@ int task_prio(const task_t *p)
  * task_nice - return the nice value of a given task.
  * @p: the task in question.
  */
-int task_nice(const task_t *p)
+int task_nice(const struct task_struct *p)
 {
        return TASK_NICE(p);
 }
@@ -3756,7 +4231,7 @@ int idle_cpu(int cpu)
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
  */
-task_t *idle_task(int cpu)
+struct task_struct *idle_task(int cpu)
 {
        return cpu_rq(cpu)->idle;
 }
@@ -3765,7 +4240,7 @@ task_t *idle_task(int cpu)
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline struct task_struct *find_process_by_pid(pid_t pid)
 {
        return pid ? find_task_by_pid(pid) : current;
 }
@@ -3774,18 +4249,18 @@ static inline task_t *find_process_by_pid(pid_t pid)
 static void __setscheduler(struct task_struct *p, int policy, int prio)
 {
        BUG_ON(p->array);
+
        p->policy = policy;
        p->rt_priority = prio;
-       if (policy != SCHED_NORMAL && policy != SCHED_BATCH) {
-               p->prio = MAX_RT_PRIO-1 - p->rt_priority;
-       } else {
-               p->prio = p->static_prio;
-               /*
-                * SCHED_BATCH tasks are treated as perpetual CPU hogs:
-                */
-               if (policy == SCHED_BATCH)
-                       p->sleep_avg = 0;
-       }
+       p->normal_prio = normal_prio(p);
+       /* we are holding p->pi_lock already */
+       p->prio = rt_mutex_getprio(p);
+       /*
+        * SCHED_BATCH tasks are treated as perpetual CPU hogs:
+        */
+       if (policy == SCHED_BATCH)
+               p->sleep_avg = 0;
+       set_load_weight(p);
 }
 
 /**
@@ -3794,16 +4269,19 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
  * @p: the task in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
+ *
+ * NOTE: the task may already be dead
  */
 int sched_setscheduler(struct task_struct *p, int policy,
                       struct sched_param *param)
 {
-       int retval;
-       int oldprio, oldpolicy = -1;
-       prio_array_t *array;
+       int retval, oldprio, oldpolicy = -1;
+       struct prio_array *array;
        unsigned long flags;
-       runqueue_t *rq;
+       struct rq *rq;
 
+       /* may grab non-irq protected spin_locks */
+       BUG_ON(in_interrupt());
 recheck:
        /* double check policy once rq lock held */
        if (policy < 0)
@@ -3820,28 +4298,32 @@ recheck:
            (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
            (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
                return -EINVAL;
-       if ((policy == SCHED_NORMAL || policy == SCHED_BATCH)
-                                       != (param->sched_priority == 0))
+       if (is_rt_policy(policy) != (param->sched_priority != 0))
                return -EINVAL;
 
        /*
         * Allow unprivileged RT tasks to decrease priority:
         */
        if (!capable(CAP_SYS_NICE)) {
-               /*
-                * can't change policy, except between SCHED_NORMAL
-                * and SCHED_BATCH:
-                */
-               if (((policy != SCHED_NORMAL && p->policy != SCHED_BATCH) &&
-                       (policy != SCHED_BATCH && p->policy != SCHED_NORMAL)) &&
-                               !p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
-                       return -EPERM;
-               /* can't increase priority */
-               if ((policy != SCHED_NORMAL && policy != SCHED_BATCH) &&
-                   param->sched_priority > p->rt_priority &&
-                   param->sched_priority >
-                               p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
-                       return -EPERM;
+               if (is_rt_policy(policy)) {
+                       unsigned long rlim_rtprio;
+                       unsigned long flags;
+
+                       if (!lock_task_sighand(p, &flags))
+                               return -ESRCH;
+                       rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
+                       unlock_task_sighand(p, &flags);
+
+                       /* can't set/change the rt policy */
+                       if (policy != p->policy && !rlim_rtprio)
+                               return -EPERM;
+
+                       /* can't increase priority */
+                       if (param->sched_priority > p->rt_priority &&
+                           param->sched_priority > rlim_rtprio)
+                               return -EPERM;
+               }
+
                /* can't change other user's priorities */
                if ((current->euid != p->euid) &&
                    (current->euid != p->uid))
@@ -3851,15 +4333,21 @@ recheck:
        retval = security_task_setscheduler(p, policy, param);
        if (retval)
                return retval;
+       /*
+        * make sure no PI-waiters arrive (or leave) while we are
+        * changing the priority of the task:
+        */
+       spin_lock_irqsave(&p->pi_lock, flags);
        /*
         * To be able to change p->policy safely, the appropriate
         * runqueue lock must be held.
         */
-       rq = task_rq_lock(p, &flags);
+       rq = __task_rq_lock(p);
        /* recheck policy now with rq lock held */
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
-               task_rq_unlock(rq, &flags);
+               __task_rq_unlock(rq);
+               spin_unlock_irqrestore(&p->pi_lock, flags);
                goto recheck;
        }
        array = p->array;
@@ -3881,7 +4369,11 @@ recheck:
                } else if (TASK_PREEMPTS_CURR(p, rq))
                        resched_task(rq->curr);
        }
-       task_rq_unlock(rq, &flags);
+       __task_rq_unlock(rq);
+       spin_unlock_irqrestore(&p->pi_lock, flags);
+
+       rt_mutex_adjust_pi(p);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);
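
The policy and priority validation above is what the sched_setscheduler(2) system call ultimately exercises. A minimal user-space caller for reference (it needs CAP_SYS_NICE or a suitable RLIMIT_RTPRIO to succeed, matching the permission checks in the hunk above):

#include <stdio.h>
#include <sched.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* SCHED_FIFO requires a non-zero priority, per the checks above */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("now SCHED_FIFO, rt_priority 10\n");
	return 0;
}
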
@@ -3889,22 +4381,22 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
 static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
-       int retval;
        struct sched_param lparam;
        struct task_struct *p;
+       int retval;
 
        if (!param || pid < 0)
                return -EINVAL;
        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
                return -EFAULT;
-       read_lock_irq(&tasklist_lock);
+
+       rcu_read_lock();
+       retval = -ESRCH;
        p = find_process_by_pid(pid);
-       if (!p) {
-               read_unlock_irq(&tasklist_lock);
-               return -ESRCH;
-       }
-       retval = sched_setscheduler(p, policy, &lparam);
-       read_unlock_irq(&tasklist_lock);
+       if (p != NULL)
+               retval = sched_setscheduler(p, policy, &lparam);
+       rcu_read_unlock();
+
        return retval;
 }
 
@@ -3940,8 +4432,8 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  */
 asmlinkage long sys_sched_getscheduler(pid_t pid)
 {
+       struct task_struct *p;
        int retval = -EINVAL;
-       task_t *p;
 
        if (pid < 0)
                goto out_nounlock;
@@ -3968,8 +4460,8 @@ out_nounlock:
 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 {
        struct sched_param lp;
+       struct task_struct *p;
        int retval = -EINVAL;
-       task_t *p;
 
        if (!param || pid < 0)
                goto out_nounlock;
@@ -4002,9 +4494,9 @@ out_unlock:
 
 long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 {
-       task_t *p;
-       int retval;
        cpumask_t cpus_allowed;
+       struct task_struct *p;
+       int retval;
 
        lock_cpu_hotplug();
        read_lock(&tasklist_lock);
@@ -4029,6 +4521,10 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
                        !capable(CAP_SYS_NICE))
                goto out_unlock;
 
+       retval = security_task_setscheduler(p, 0, NULL);
+       if (retval)
+               goto out_unlock;
+
        cpus_allowed = cpuset_cpus_allowed(p);
        cpus_and(new_mask, new_mask, cpus_allowed);
        retval = set_cpus_allowed(p, new_mask);
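
sched_setaffinity() above is reached via the sched_setaffinity(2) syscall, with the requested mask first intersected with the task's cpuset. A user-space example pinning the caller to CPU 0 (CPU_ZERO/CPU_SET are the glibc cpu_set_t helpers; _GNU_SOURCE is needed for the affinity API):

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);		/* run only on CPU 0 */
	if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("pinned to CPU %d\n", sched_getcpu());
	return 0;
}
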
@@ -4081,13 +4577,16 @@ EXPORT_SYMBOL(cpu_present_map);
 
 #ifndef CONFIG_SMP
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_online_map);
+
 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_possible_map);
 #endif
 
 long sched_getaffinity(pid_t pid, cpumask_t *mask)
 {
+       struct task_struct *p;
        int retval;
-       task_t *p;
 
        lock_cpu_hotplug();
        read_lock(&tasklist_lock);
@@ -4097,7 +4596,10 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
        if (!p)
                goto out_unlock;
 
-       retval = 0;
+       retval = security_task_getscheduler(p);
+       if (retval)
+               goto out_unlock;
+
        cpus_and(*mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
@@ -4143,9 +4645,8 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
  */
 asmlinkage long sys_sched_yield(void)
 {
-       runqueue_t *rq = this_rq_lock();
-       prio_array_t *array = current->array;
-       prio_array_t *target = rq->expired;
+       struct rq *rq = this_rq_lock();
+       struct prio_array *array = current->array, *target = rq->expired;
 
        schedstat_inc(rq, yld_cnt);
        /*
@@ -4179,6 +4680,7 @@ asmlinkage long sys_sched_yield(void)
         * no need to preempt or enable interrupts:
         */
        __release(rq->lock);
+       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
        _raw_spin_unlock(&rq->lock);
        preempt_enable_no_resched();
 
@@ -4187,17 +4689,16 @@ asmlinkage long sys_sched_yield(void)
        return 0;
 }
 
-static inline void __cond_resched(void)
+static void __cond_resched(void)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+       __might_sleep(__FILE__, __LINE__);
+#endif
        /*
         * The BKS might be reacquired before we have dropped
         * PREEMPT_ACTIVE, which could trigger a second
         * cond_resched() call.
         */
-       if (unlikely(preempt_count()))
-               return;
-       if (unlikely(system_state != SYSTEM_RUNNING))
-               return;
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                schedule();
@@ -4207,13 +4708,13 @@ static inline void __cond_resched(void)
 
 int __sched cond_resched(void)
 {
-       if (need_resched()) {
+       if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
+                                       system_state == SYSTEM_RUNNING) {
                __cond_resched();
                return 1;
        }
        return 0;
 }
-
 EXPORT_SYMBOL(cond_resched);
 
 /*
@@ -4234,7 +4735,8 @@ int cond_resched_lock(spinlock_t *lock)
                ret = 1;
                spin_lock(lock);
        }
-       if (need_resched()) {
+       if (need_resched() && system_state == SYSTEM_RUNNING) {
+               spin_release(&lock->dep_map, 1, _THIS_IP_);
                _raw_spin_unlock(lock);
                preempt_enable_no_resched();
                __cond_resched();
@@ -4243,25 +4745,24 @@ int cond_resched_lock(spinlock_t *lock)
        }
        return ret;
 }
-
 EXPORT_SYMBOL(cond_resched_lock);
 
 int __sched cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (need_resched()) {
-               __local_bh_enable();
+       if (need_resched() && system_state == SYSTEM_RUNNING) {
+               raw_local_irq_disable();
+               _local_bh_enable();
+               raw_local_irq_enable();
                __cond_resched();
                local_bh_disable();
                return 1;
        }
        return 0;
 }
-
 EXPORT_SYMBOL(cond_resched_softirq);
 
-
 /**
  * yield - yield the current processor to other threads.
  *
@@ -4273,7 +4774,6 @@ void __sched yield(void)
        set_current_state(TASK_RUNNING);
        sys_sched_yield();
 }
-
 EXPORT_SYMBOL(yield);
 
 /*
@@ -4285,23 +4785,26 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct rq *rq = &__raw_get_cpu_var(runqueues);
 
+       delayacct_blkio_start();
        atomic_inc(&rq->nr_iowait);
        schedule();
        atomic_dec(&rq->nr_iowait);
+       delayacct_blkio_end();
 }
-
 EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct rq *rq = &__raw_get_cpu_var(runqueues);
        long ret;
 
+       delayacct_blkio_start();
        atomic_inc(&rq->nr_iowait);
        ret = schedule_timeout(timeout);
        atomic_dec(&rq->nr_iowait);
+       delayacct_blkio_end();
        return ret;
 }
 
@@ -4363,9 +4866,9 @@ asmlinkage long sys_sched_get_priority_min(int policy)
 asmlinkage
 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
+       struct task_struct *p;
        int retval = -EINVAL;
        struct timespec t;
-       task_t *p;
 
        if (pid < 0)
                goto out_nounlock;
@@ -4380,7 +4883,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
        if (retval)
                goto out_unlock;
 
-       jiffies_to_timespec(p->policy & SCHED_FIFO ?
+       jiffies_to_timespec(p->policy == SCHED_FIFO ?
                                0 : task_timeslice(p), &t);
        read_unlock(&tasklist_lock);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
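
Note the change from '&' to '==': SCHED_FIFO is a plain policy value (1), not a flag bit, so the bitwise test also matched other policies; FIFO tasks report a zero timeslice. From user space the same value is read with sched_rr_get_interval(2):

#include <stdio.h>
#include <sched.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	/* 0 for SCHED_FIFO, the round-robin quantum otherwise */
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
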
@@ -4393,35 +4896,36 @@ out_unlock:
 
 static inline struct task_struct *eldest_child(struct task_struct *p)
 {
-       if (list_empty(&p->children)) return NULL;
+       if (list_empty(&p->children))
+               return NULL;
        return list_entry(p->children.next,struct task_struct,sibling);
 }
 
 static inline struct task_struct *older_sibling(struct task_struct *p)
 {
-       if (p->sibling.prev==&p->parent->children) return NULL;
+       if (p->sibling.prev==&p->parent->children)
+               return NULL;
        return list_entry(p->sibling.prev,struct task_struct,sibling);
 }
 
 static inline struct task_struct *younger_sibling(struct task_struct *p)
 {
-       if (p->sibling.next==&p->parent->children) return NULL;
+       if (p->sibling.next==&p->parent->children)
+               return NULL;
        return list_entry(p->sibling.next,struct task_struct,sibling);
 }
 
-static void show_task(task_t *p)
+static const char stat_nam[] = "RSDTtZX";
+
+static void show_task(struct task_struct *p)
 {
-       task_t *relative;
-       unsigned state;
+       struct task_struct *relative;
        unsigned long free = 0;
-       static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
+       unsigned state;
 
-       printk("%-13.13s ", p->comm);
        state = p->state ? __ffs(p->state) + 1 : 0;
-       if (state < ARRAY_SIZE(stat_nam))
-               printk(stat_nam[state]);
-       else
-               printk("?");
+       printk("%-13.13s %c", p->comm,
+               state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if (BITS_PER_LONG == 32)
        if (state == TASK_RUNNING)
                printk(" running ");
@@ -4463,18 +4967,18 @@ static void show_task(task_t *p)
                show_stack(p, NULL);
 }
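
The single state letter comes from indexing stat_nam with the lowest set bit of p->state (state 0, TASK_RUNNING, maps to 'R'). A stand-alone sketch of that mapping, assuming the classic 2.6 state bit values and using __builtin_ctzl in place of __ffs:

#include <stdio.h>

static const char stat_nam[] = "RSDTtZX";

/* same computation as show_task(): lowest set state bit selects the letter */
static char state_char(unsigned long state)
{
	unsigned int idx = state ? __builtin_ctzl(state) + 1 : 0;

	return idx < sizeof(stat_nam) - 1 ? stat_nam[idx] : '?';
}

int main(void)
{
	/* 0=running, 1=interruptible, 2=uninterruptible, 4=stopped,
	 * 8=traced, 16=zombie, 32=dead -- the 2.6 task state encoding */
	unsigned long states[] = { 0, 1, 2, 4, 8, 16, 32, 64 };
	unsigned int i;

	for (i = 0; i < sizeof(states) / sizeof(states[0]); i++)
		printf("state %2lu -> %c\n", states[i], state_char(states[i]));
	return 0;
}
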
 
-void show_state(void)
+void show_state_filter(unsigned long state_filter)
 {
-       task_t *g, *p;
+       struct task_struct *g, *p;
 
 #if (BITS_PER_LONG == 32)
        printk("\n"
-              "                                               sibling\n");
-       printk("  task             PC      pid father child younger older\n");
+              "                         free                        sibling\n");
+       printk("  task             PC    stack   pid father child younger older\n");
 #else
        printk("\n"
-              "                                                       sibling\n");
-       printk("  task                 PC          pid father child younger older\n");
+              "                                 free                        sibling\n");
+       printk("  task                 PC        stack   pid father child younger older\n");
 #endif
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
@@ -4483,11 +4987,16 @@ void show_state(void)
                 * console might take a lot of time:
                 */
                touch_nmi_watchdog();
-               show_task(p);
+               if (p->state & state_filter)
+                       show_task(p);
        } while_each_thread(g, p);
 
        read_unlock(&tasklist_lock);
-       mutex_debug_show_all_locks();
+       /*
+        * Only show locks if all tasks are dumped:
+        */
+       if (state_filter == -1)
+               debug_show_all_locks();
 }
 
 /**
@@ -4498,15 +5007,15 @@ void show_state(void)
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void __devinit init_idle(task_t *idle, int cpu)
+void __cpuinit init_idle(struct task_struct *idle, int cpu)
 {
-       runqueue_t *rq = cpu_rq(cpu);
+       struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
        idle->timestamp = sched_clock();
        idle->sleep_avg = 0;
        idle->array = NULL;
-       idle->prio = MAX_PRIO;
+       idle->prio = idle->normal_prio = MAX_PRIO;
        idle->state = TASK_RUNNING;
        idle->cpus_allowed = cpumask_of_cpu(cpu);
        set_task_cpu(idle, cpu);
@@ -4539,7 +5048,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
 /*
  * This is how migration works:
  *
- * 1) we queue a migration_req_t structure in the source CPU's
+ * 1) we queue a struct migration_req structure in the source CPU's
  *    runqueue and wake up that CPU's migration thread.
  * 2) we down() the locked semaphore => thread blocks.
  * 3) migration thread wakes up (implicitly it forces the migrated
@@ -4561,12 +5070,12 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
  * task must not exit() & deallocate itself prematurely.  The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
+       struct migration_req req;
        unsigned long flags;
+       struct rq *rq;
        int ret = 0;
-       migration_req_t req;
-       runqueue_t *rq;
 
        rq = task_rq_lock(p, &flags);
        if (!cpus_intersects(new_mask, cpu_online_map)) {
@@ -4589,9 +5098,9 @@ int set_cpus_allowed(task_t *p, cpumask_t new_mask)
        }
 out:
        task_rq_unlock(rq, &flags);
+
        return ret;
 }
-
 EXPORT_SYMBOL_GPL(set_cpus_allowed);
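
set_cpus_allowed() is the requester side of the migration handshake described in the comment above: queue a struct migration_req, wake the migration thread, then sleep on the request's completion. A rough pthread analogue of that pattern, for illustration only (compile with -lpthread; the kernel uses lists, completions, and wake_up_process() rather than condition variables):

#include <stdio.h>
#include <pthread.h>

struct req {
	int done;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static struct req *pending;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;

static void *worker(void *arg)	/* plays the migration thread */
{
	struct req *r;

	pthread_mutex_lock(&queue_lock);
	while (!pending)
		pthread_cond_wait(&queue_cond, &queue_lock);
	r = pending;
	pending = NULL;
	pthread_mutex_unlock(&queue_lock);

	/* ... the actual migration work would happen here ... */

	pthread_mutex_lock(&r->lock);
	r->done = 1;				/* complete(&req->done) */
	pthread_cond_signal(&r->cond);
	pthread_mutex_unlock(&r->lock);
	return NULL;
}

int main(void)
{
	struct req r = { 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);

	pthread_mutex_lock(&queue_lock);
	pending = &r;				/* list_add(&req->list, ...) */
	pthread_cond_signal(&queue_cond);	/* wake_up_process(mt) */
	pthread_mutex_unlock(&queue_lock);

	pthread_mutex_lock(&r.lock);
	while (!r.done)				/* wait_for_completion(&req.done) */
		pthread_cond_wait(&r.cond, &r.lock);
	pthread_mutex_unlock(&r.lock);

	printf("request completed\n");
	pthread_join(tid, NULL);
	return 0;
}
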
 
 /*
@@ -4602,13 +5111,16 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
  *
  * So we race with normal scheduler movements, but that's OK, as long
  * as the task is no longer on this CPU.
+ *
+ * Returns non-zero if task was successfully migrated.
  */
-static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
-       runqueue_t *rq_dest, *rq_src;
+       struct rq *rq_dest, *rq_src;
+       int ret = 0;
 
        if (unlikely(cpu_is_offline(dest_cpu)))
-               return;
+               return ret;
 
        rq_src = cpu_rq(src_cpu);
        rq_dest = cpu_rq(dest_cpu);
@@ -4629,16 +5141,18 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
                 * afterwards, and pretending it was a local activate.
                 * This way is cleaner and logically correct.
                 */
-               p->timestamp = p->timestamp - rq_src->timestamp_last_tick
-                               + rq_dest->timestamp_last_tick;
+               p->timestamp = p->timestamp - rq_src->most_recent_timestamp
+                               + rq_dest->most_recent_timestamp;
                deactivate_task(p, rq_src);
-               activate_task(p, rq_dest, 0);
+               vx_activate_task(p);
+               __activate_task(p, rq_dest);
                if (TASK_PREEMPTS_CURR(p, rq_dest))
                        resched_task(rq_dest->curr);
        }
-
+       ret = 1;
 out:
        double_rq_unlock(rq_src, rq_dest);
+       return ret;
 }
 
 /*
@@ -4648,16 +5162,16 @@ out:
  */
 static int migration_thread(void *data)
 {
-       runqueue_t *rq;
        int cpu = (long)data;
+       struct rq *rq;
 
        rq = cpu_rq(cpu);
        BUG_ON(rq->migration_thread != current);
 
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
+               struct migration_req *req;
                struct list_head *head;
-               migration_req_t *req;
 
                try_to_freeze();
 
@@ -4681,7 +5195,7 @@ static int migration_thread(void *data)
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
                }
-               req = list_entry(head->next, migration_req_t, list);
+               req = list_entry(head->next, struct migration_req, list);
                list_del_init(head->next);
 
                spin_unlock(&rq->lock);
@@ -4705,37 +5219,46 @@ wait_to_die:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/* Figure out where task on dead CPU should go, use force if neccessary. */
-static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
+/*
+ * Figure out where task on dead CPU should go, use force if necessary.
+ * NOTE: interrupts should be disabled by the caller
+ */
+static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
-       int dest_cpu;
+       unsigned long flags;
        cpumask_t mask;
+       struct rq *rq;
+       int dest_cpu;
 
+restart:
        /* On same node? */
        mask = node_to_cpumask(cpu_to_node(dead_cpu));
-       cpus_and(mask, mask, tsk->cpus_allowed);
+       cpus_and(mask, mask, p->cpus_allowed);
        dest_cpu = any_online_cpu(mask);
 
        /* On any allowed CPU? */
        if (dest_cpu == NR_CPUS)
-               dest_cpu = any_online_cpu(tsk->cpus_allowed);
+               dest_cpu = any_online_cpu(p->cpus_allowed);
 
        /* No more Mr. Nice Guy. */
        if (dest_cpu == NR_CPUS) {
-               cpus_setall(tsk->cpus_allowed);
-               dest_cpu = any_online_cpu(tsk->cpus_allowed);
+               rq = task_rq_lock(p, &flags);
+               cpus_setall(p->cpus_allowed);
+               dest_cpu = any_online_cpu(p->cpus_allowed);
+               task_rq_unlock(rq, &flags);
 
                /*
                 * Don't tell them about moving exiting tasks or
                 * kernel threads (both mm NULL), since they never
                 * leave kernel.
                 */
-               if (tsk->mm && printk_ratelimit())
+               if (p->mm && printk_ratelimit())
                        printk(KERN_INFO "process %d (%s) no "
                               "longer affine to cpu%d\n",
-                              tsk->pid, tsk->comm, dead_cpu);
+                              p->pid, p->comm, dead_cpu);
        }
-       __migrate_task(tsk, dead_cpu, dest_cpu);
+       if (!__migrate_task(p, dead_cpu, dest_cpu))
+               goto restart;
 }
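
The destination choice above falls back in three steps: prefer a CPU on the dead CPU's node, then any CPU still permitted by ->cpus_allowed, and only as a last resort widen the mask to every CPU. A bitmask sketch of that cascade (cpumasks reduced to one unsigned long and NR_CPUS to the word width, so this is only the selection logic, not the locking):

#include <stdio.h>

#define NR_CPUS (8 * (int)sizeof(unsigned long))

/* first set bit, or NR_CPUS when the mask is empty (any_online_cpu()) */
static int first_cpu(unsigned long mask)
{
	return mask ? __builtin_ctzl(mask) : NR_CPUS;
}

static int pick_dest_cpu(unsigned long node_mask, unsigned long allowed,
			 unsigned long online)
{
	int dest;

	dest = first_cpu(node_mask & allowed & online);	/* on same node? */
	if (dest == NR_CPUS)
		dest = first_cpu(allowed & online);	/* any allowed CPU? */
	if (dest == NR_CPUS)
		dest = first_cpu(online);		/* no more Mr. Nice Guy */
	return dest;
}

int main(void)
{
	/* dead CPU 1's node holds CPUs 0-1, the task is allowed only on
	 * CPUs 1-2, and only CPUs 0 and 3 remain online */
	printf("dest cpu: %d\n", pick_dest_cpu(0x3, 0x6, 0x9));
	return 0;
}
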
 
 /*
@@ -4745,9 +5268,9 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
  * their home CPUs. So we just add the counter to another CPU's counter,
  * to keep the global sum constant after CPU-down:
  */
-static void migrate_nr_uninterruptible(runqueue_t *rq_src)
+static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-       runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
+       struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
        unsigned long flags;
 
        local_irq_save(flags);
@@ -4761,48 +5284,51 @@ static void migrate_nr_uninterruptible(runqueue_t *rq_src)
 /* Run through task list and migrate tasks from the dead cpu. */
 static void migrate_live_tasks(int src_cpu)
 {
-       struct task_struct *tsk, *t;
+       struct task_struct *p, *t;
 
        write_lock_irq(&tasklist_lock);
 
-       do_each_thread(t, tsk) {
-               if (tsk == current)
+       do_each_thread(t, p) {
+               if (p == current)
                        continue;
 
-               if (task_cpu(tsk) == src_cpu)
-                       move_task_off_dead_cpu(src_cpu, tsk);
-       } while_each_thread(t, tsk);
+               if (task_cpu(p) == src_cpu)
+                       move_task_off_dead_cpu(src_cpu, p);
+       } while_each_thread(t, p);
 
        write_unlock_irq(&tasklist_lock);
 }
 
 /* Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of runqueue. Used by CPU offline code.
+ * the _front_ of the runqueue. Used by CPU offline code.
  */
 void sched_idle_next(void)
 {
-       int cpu = smp_processor_id();
-       runqueue_t *rq = this_rq();
+       int this_cpu = smp_processor_id();
+       struct rq *rq = cpu_rq(this_cpu);
        struct task_struct *p = rq->idle;
        unsigned long flags;
 
        /* cpu has to be offline */
-       BUG_ON(cpu_online(cpu));
+       BUG_ON(cpu_online(this_cpu));
 
-       /* Strictly not necessary since rest of the CPUs are stopped by now
-        * and interrupts disabled on current cpu.
+       /*
+        * Strictly not necessary since rest of the CPUs are stopped by now
+        * and interrupts disabled on the current cpu.
         */
        spin_lock_irqsave(&rq->lock, flags);
 
        __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
-       /* Add idle task to _front_ of it's priority queue */
+
+       /* Add idle task to the _front_ of its priority queue: */
        __activate_idle_task(p, rq);
 
        spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-/* Ensures that the idle task is using init_mm right before its cpu goes
+/*
+ * Ensures that the idle task is using init_mm right before its cpu goes
  * offline.
  */
 void idle_task_exit(void)
@@ -4816,43 +5342,45 @@ void idle_task_exit(void)
        mmdrop(mm);
 }
 
-static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
+/* called under rq->lock with disabled interrupts */
+static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 {
-       struct runqueue *rq = cpu_rq(dead_cpu);
+       struct rq *rq = cpu_rq(dead_cpu);
 
        /* Must be exiting, otherwise would be on tasklist. */
-       BUG_ON(tsk->exit_state != EXIT_ZOMBIE && tsk->exit_state != EXIT_DEAD);
+       BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
 
        /* Cannot have done final schedule yet: would have vanished. */
-       BUG_ON(tsk->flags & PF_DEAD);
+       BUG_ON(p->state == TASK_DEAD);
 
-       get_task_struct(tsk);
+       get_task_struct(p);
 
        /*
         * Drop lock around migration; if someone else moves it,
         * that's OK.  No task can be added to this CPU, so iteration is
         * fine.
+        * NOTE: interrupts should be left disabled  --dev@
         */
-       spin_unlock_irq(&rq->lock);
-       move_task_off_dead_cpu(dead_cpu, tsk);
-       spin_lock_irq(&rq->lock);
+       spin_unlock(&rq->lock);
+       move_task_off_dead_cpu(dead_cpu, p);
+       spin_lock(&rq->lock);
 
-       put_task_struct(tsk);
+       put_task_struct(p);
 }
 
 /* release_task() removes task from tasklist, so we won't find dead tasks. */
 static void migrate_dead_tasks(unsigned int dead_cpu)
 {
-       unsigned arr, i;
-       struct runqueue *rq = cpu_rq(dead_cpu);
+       struct rq *rq = cpu_rq(dead_cpu);
+       unsigned int arr, i;
 
        for (arr = 0; arr < 2; arr++) {
                for (i = 0; i < MAX_PRIO; i++) {
                        struct list_head *list = &rq->arrays[arr].queue[i];
+
                        while (!list_empty(list))
-                               migrate_dead(dead_cpu,
-                                            list_entry(list->next, task_t,
-                                                       run_list));
+                               migrate_dead(dead_cpu, list_entry(list->next,
+                                            struct task_struct, run_list));
                }
        }
 }
@@ -4862,13 +5390,13 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
  * migration_call - callback that gets triggered when a CPU is added.
  * Here we can start up the necessary migration thread for the new CPU.
  */
-static int migration_call(struct notifier_block *nfb, unsigned long action,
-                         void *hcpu)
+static int __cpuinit
+migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
-       int cpu = (long)hcpu;
        struct task_struct *p;
-       struct runqueue *rq;
+       int cpu = (long)hcpu;
        unsigned long flags;
+       struct rq *rq;
 
        switch (action) {
        case CPU_UP_PREPARE:
@@ -4883,18 +5411,23 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
                task_rq_unlock(rq, &flags);
                cpu_rq(cpu)->migration_thread = p;
                break;
+
        case CPU_ONLINE:
                /* Strictly unnecessary, as first user will wake it. */
                wake_up_process(cpu_rq(cpu)->migration_thread);
                break;
+
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+               if (!cpu_rq(cpu)->migration_thread)
+                       break;
                /* Unbind it from offline cpu so it can run.  Fall thru. */
                kthread_bind(cpu_rq(cpu)->migration_thread,
                             any_online_cpu(cpu_online_map));
                kthread_stop(cpu_rq(cpu)->migration_thread);
                cpu_rq(cpu)->migration_thread = NULL;
                break;
+
        case CPU_DEAD:
                migrate_live_tasks(cpu);
                rq = cpu_rq(cpu);
@@ -4915,9 +5448,10 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
                 * the requestors. */
                spin_lock_irq(&rq->lock);
                while (!list_empty(&rq->migration_queue)) {
-                       migration_req_t *req;
+                       struct migration_req *req;
+
                        req = list_entry(rq->migration_queue.next,
-                                        migration_req_t, list);
+                                        struct migration_req, list);
                        list_del_init(&req->list);
                        complete(&req->done);
                }
@@ -4931,7 +5465,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 /* Register at highest priority so that task migration (migrate_all_tasks)
  * happens before everything else.
  */
-static struct notifier_block __devinitdata migration_notifier = {
+static struct notifier_block __cpuinitdata migration_notifier = {
        .notifier_call = migration_call,
        .priority = 10
 };
@@ -4939,10 +5473,14 @@ static struct notifier_block __devinitdata migration_notifier = {
 int __init migration_init(void)
 {
        void *cpu = (void *)(long)smp_processor_id();
-       /* Start one for boot CPU. */
-       migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+       int err;
+
+       /* Start one for the boot CPU: */
+       err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+       BUG_ON(err == NOTIFY_BAD);
        migration_call(&migration_notifier, CPU_ONLINE, cpu);
        register_cpu_notifier(&migration_notifier);
+
        return 0;
 }
 #endif
@@ -4978,16 +5516,19 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
                if (!(sd->flags & SD_LOAD_BALANCE)) {
                        printk("does not load-balance\n");
                        if (sd->parent)
-                               printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
+                               printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+                                               " has parent");
                        break;
                }
 
                printk("span %s\n", str);
 
                if (!cpu_isset(cpu, sd->span))
-                       printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
+                       printk(KERN_ERR "ERROR: domain->span does not contain "
+                                       "CPU%d\n", cpu);
                if (!cpu_isset(cpu, group->cpumask))
-                       printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
+                       printk(KERN_ERR "ERROR: domain->groups does not contain"
+                                       " CPU%d\n", cpu);
 
                printk(KERN_DEBUG);
                for (i = 0; i < level + 2; i++)
@@ -5002,7 +5543,8 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 
                        if (!group->cpu_power) {
                                printk("\n");
-                               printk(KERN_ERR "ERROR: domain->cpu_power not set\n");
+                               printk(KERN_ERR "ERROR: domain->cpu_power not "
+                                               "set\n");
                        }
 
                        if (!cpus_weight(group->cpumask)) {
@@ -5025,20 +5567,22 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
                printk("\n");
 
                if (!cpus_equal(sd->span, groupmask))
-                       printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+                       printk(KERN_ERR "ERROR: groups don't span "
+                                       "domain->span\n");
 
                level++;
                sd = sd->parent;
+               if (!sd)
+                       continue;
 
-               if (sd) {
-                       if (!cpus_subset(groupmask, sd->span))
-                               printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
-               }
+               if (!cpus_subset(groupmask, sd->span))
+                       printk(KERN_ERR "ERROR: parent span is not a superset "
+                               "of domain->span\n");
 
        } while (sd);
 }
 #else
-#define sched_domain_debug(sd, cpu) {}
+# define sched_domain_debug(sd, cpu) do { } while (0)
 #endif
 
 static int sd_degenerate(struct sched_domain *sd)
@@ -5050,7 +5594,9 @@ static int sd_degenerate(struct sched_domain *sd)
        if (sd->flags & (SD_LOAD_BALANCE |
                         SD_BALANCE_NEWIDLE |
                         SD_BALANCE_FORK |
-                        SD_BALANCE_EXEC)) {
+                        SD_BALANCE_EXEC |
+                        SD_SHARE_CPUPOWER |
+                        SD_SHARE_PKG_RESOURCES)) {
                if (sd->groups != sd->groups->next)
                        return 0;
        }
@@ -5064,8 +5610,8 @@ static int sd_degenerate(struct sched_domain *sd)
        return 1;
 }
 
-static int sd_parent_degenerate(struct sched_domain *sd,
-                                               struct sched_domain *parent)
+static int
+sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 {
        unsigned long cflags = sd->flags, pflags = parent->flags;
 
@@ -5084,7 +5630,9 @@ static int sd_parent_degenerate(struct sched_domain *sd,
                pflags &= ~(SD_LOAD_BALANCE |
                                SD_BALANCE_NEWIDLE |
                                SD_BALANCE_FORK |
-                               SD_BALANCE_EXEC);
+                               SD_BALANCE_EXEC |
+                               SD_SHARE_CPUPOWER |
+                               SD_SHARE_PKG_RESOURCES);
        }
        if (~cflags & pflags)
                return 0;
@@ -5098,7 +5646,7 @@ static int sd_parent_degenerate(struct sched_domain *sd,
  */
 static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 {
-       runqueue_t *rq = cpu_rq(cpu);
+       struct rq *rq = cpu_rq(cpu);
        struct sched_domain *tmp;
 
        /* Remove the sched domains which do not contribute to scheduling. */
@@ -5106,12 +5654,18 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
                struct sched_domain *parent = tmp->parent;
                if (!parent)
                        break;
-               if (sd_parent_degenerate(tmp, parent))
+               if (sd_parent_degenerate(tmp, parent)) {
                        tmp->parent = parent->parent;
+                       if (parent->parent)
+                               parent->parent->child = tmp;
+               }
        }
 
-       if (sd && sd_degenerate(sd))
+       if (sd && sd_degenerate(sd)) {
                sd = sd->parent;
+               if (sd)
+                       sd->child = NULL;
+       }
 
        sched_domain_debug(sd, cpu);
 
@@ -5119,7 +5673,7 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 }
 
 /* cpus with isolated domains */
-static cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
@@ -5137,26 +5691,27 @@ static int __init isolated_cpu_setup(char *str)
 __setup ("isolcpus=", isolated_cpu_setup);
 
 /*
- * init_sched_build_groups takes an array of groups, the cpumask we wish
- * to span, and a pointer to a function which identifies what group a CPU
- * belongs to. The return value of group_fn must be a valid index into the
- * groups[] array, and must be >= 0 and < NR_CPUS (due to the fact that we
- * keep track of groups covered with a cpumask_t).
+ * init_sched_build_groups takes the cpumask we wish to span, and a pointer
+ * to a function which identifies what group (and sched group) a CPU
+ * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS
+ * (due to the fact that we keep track of groups covered with a cpumask_t).
  *
  * init_sched_build_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
  */
-static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
-                                   int (*group_fn)(int cpu))
+static void
+init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
+                       int (*group_fn)(int cpu, const cpumask_t *cpu_map,
+                                       struct sched_group **sg))
 {
        struct sched_group *first = NULL, *last = NULL;
        cpumask_t covered = CPU_MASK_NONE;
        int i;
 
        for_each_cpu_mask(i, span) {
-               int group = group_fn(i);
-               struct sched_group *sg = &groups[group];
+               struct sched_group *sg;
+               int group = group_fn(i, cpu_map, &sg);
                int j;
 
                if (cpu_isset(i, covered))
@@ -5166,7 +5721,7 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
                sg->cpu_power = 0;
 
                for_each_cpu_mask(j, span) {
-                       if (group_fn(j) != group)
+                       if (group_fn(j, cpu_map, NULL) != group)
                                continue;
 
                        cpu_set(j, covered);
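
init_sched_build_groups() ends up with the groups chained in a ring: each group's ->next points at the following group and the last points back at the first, which is what the balancing code iterates over. A stripped-down sketch of building and walking such a ring (group contents reduced to an id; cpumask and cpu_power handling omitted):

#include <stdio.h>

struct group {
	int id;
	struct group *next;
};

/* link n groups into the circular list the balancer walks */
static struct group *build_ring(struct group *g, int n)
{
	struct group *first = NULL, *last = NULL;
	int i;

	for (i = 0; i < n; i++) {
		g[i].id = i;
		if (!first)
			first = &g[i];
		if (last)
			last->next = &g[i];
		last = &g[i];
	}
	last->next = first;		/* close the ring */
	return first;
}

int main(void)
{
	struct group groups[3];
	struct group *first = build_ring(groups, 3);
	struct group *sg = first;

	do {
		printf("group %d\n", sg->id);
		sg = sg->next;
	} while (sg != first);
	return 0;
}
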
@@ -5340,8 +5895,9 @@ __setup("max_cache_size=", setup_max_cache_size);
  */
 static void touch_cache(void *__cache, unsigned long __size)
 {
-       unsigned long size = __size/sizeof(long), chunk1 = size/3,
-                       chunk2 = 2*size/3;
+       unsigned long size = __size / sizeof(long);
+       unsigned long chunk1 = size / 3;
+       unsigned long chunk2 = 2 * size / 3;
        unsigned long *cache = __cache;
        int i;
 
@@ -5360,8 +5916,8 @@ static void touch_cache(void *__cache, unsigned long __size)
 /*
  * Measure the cache-cost of one task migration. Returns in units of nsec.
  */
-static unsigned long long measure_one(void *cache, unsigned long size,
-                                     int source, int target)
+static unsigned long long
+measure_one(void *cache, unsigned long size, int source, int target)
 {
        cpumask_t mask, saved_mask;
        unsigned long long t0, t1, t2, t3, cost;
@@ -5450,11 +6006,11 @@ measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
         */
        measure_one(cache, size, cpu1, cpu2);
        for (i = 0; i < ITERATIONS; i++)
-               cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
+               cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2);
 
        measure_one(cache, size, cpu2, cpu1);
        for (i = 0; i < ITERATIONS; i++)
-               cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
+               cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1);
 
        /*
         * (We measure the non-migrating [cached] cost on both
@@ -5464,17 +6020,17 @@ measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
 
        measure_one(cache, size, cpu1, cpu1);
        for (i = 0; i < ITERATIONS; i++)
-               cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
+               cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1);
 
        measure_one(cache, size, cpu2, cpu2);
        for (i = 0; i < ITERATIONS; i++)
-               cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
+               cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2);
 
        /*
         * Get the per-iteration migration cost:
         */
-       do_div(cost1, 2*ITERATIONS);
-       do_div(cost2, 2*ITERATIONS);
+       do_div(cost1, 2 * ITERATIONS);
+       do_div(cost2, 2 * ITERATIONS);
 
        return cost1 - cost2;
 }
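
The measurement works by timing how long it takes to walk a buffer after hopping to another CPU versus staying put; the difference approximates the cache-refill cost of a migration. A rough user-space analogue using sched_setaffinity() and clock_gettime() (no warm-up iterations or averaging, unlike the code above; on older glibc, link with -lrt for clock_gettime):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sched.h>

static void pin_to(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	sched_setaffinity(0, sizeof(mask), &mask);
}

static long long touch_ns(volatile long *buf, size_t n)
{
	struct timespec t0, t1;
	size_t i;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < n; i++)
		buf[i]++;
	clock_gettime(CLOCK_MONOTONIC, &t1);
	return (t1.tv_sec - t0.tv_sec) * 1000000000LL + (t1.tv_nsec - t0.tv_nsec);
}

int main(void)
{
	size_t n = (4 << 20) / sizeof(long);	/* ~4MB working set */
	volatile long *buf = calloc(n, sizeof(long));
	long long warm, cold;

	if (!buf)
		return 1;
	pin_to(0);
	touch_ns(buf, n);		/* populate the cache on CPU 0 */
	warm = touch_ns(buf, n);	/* cached cost */
	pin_to(1);			/* "migrate" to CPU 1 */
	cold = touch_ns(buf, n);	/* cost with a cold cache */
	printf("cached %lld ns, after move %lld ns, delta %lld ns\n",
	       warm, cold, cold - warm);
	free((void *)buf);
	return 0;
}
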
@@ -5512,8 +6068,8 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
         */
        cache = vmalloc(max_size);
        if (!cache) {
-               printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
-               return 1000000; // return 1 msec on very small boxen
+               printk("could not vmalloc %d bytes for cache!\n", 2 * max_size);
+               return 1000000; /* return 1 msec on very small boxen */
        }
 
        while (size <= max_size) {
@@ -5537,7 +6093,8 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
                avg_fluct = (avg_fluct + fluct)/2;
 
                if (migration_debug)
-                       printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
+                       printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): "
+                               "(%8Ld %8Ld)\n",
                                cpu1, cpu2, size,
                                (long)cost / 1000000,
                                ((long)cost / 100000) % 10,
@@ -5632,7 +6189,7 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
                        -1
 #endif
                );
-       if (system_state == SYSTEM_BOOTING) {
+       if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) {
                printk("migration_cost=");
                for (distance = 0; distance <= max_distance; distance++) {
                        if (distance)
@@ -5643,7 +6200,7 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
        }
        j1 = jiffies;
        if (migration_debug)
-               printk("migration: %ld seconds\n", (j1-j0)/HZ);
+               printk("migration: %ld seconds\n", (j1-j0) / HZ);
 
        /*
         * Move back to the original CPU. NUMA-Q gets confused
@@ -5711,9 +6268,9 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
  */
 static cpumask_t sched_domain_node_span(int node)
 {
-       int i;
-       cpumask_t span, nodemask;
        DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
+       cpumask_t span, nodemask;
+       int i;
 
        cpus_clear(span);
        bitmap_zero(used_nodes, MAX_NUMNODES);
@@ -5724,6 +6281,7 @@ static cpumask_t sched_domain_node_span(int node)
 
        for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
                int next_node = find_next_best_node(node, used_nodes);
+
                nodemask = node_to_cpumask(next_node);
                cpus_or(span, span, nodemask);
        }
@@ -5732,28 +6290,75 @@ static cpumask_t sched_domain_node_span(int node)
 }
 #endif
 
+int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
+
 /*
- * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
- * can switch it on easily if needed.
+ * SMT sched-domains:
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
-static struct sched_group sched_group_cpus[NR_CPUS];
-static int cpu_to_cpu_group(int cpu)
+static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
+
+static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
+                           struct sched_group **sg)
 {
+       if (sg)
+               *sg = &per_cpu(sched_group_cpus, cpu);
        return cpu;
 }
 #endif
 
-static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group sched_group_phys[NR_CPUS];
-static int cpu_to_phys_group(int cpu)
+/*
+ * multi-core sched-domains:
+ */
+#ifdef CONFIG_SCHED_MC
+static DEFINE_PER_CPU(struct sched_domain, core_domains);
+static DEFINE_PER_CPU(struct sched_group, sched_group_core);
+#endif
+
+#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
+static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
+                            struct sched_group **sg)
 {
-#ifdef CONFIG_SCHED_SMT
-       return first_cpu(cpu_sibling_map[cpu]);
-#else
+       int group;
+       cpumask_t mask = cpu_sibling_map[cpu];
+       cpus_and(mask, mask, *cpu_map);
+       group = first_cpu(mask);
+       if (sg)
+               *sg = &per_cpu(sched_group_core, group);
+       return group;
+}
+#elif defined(CONFIG_SCHED_MC)
+static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
+                            struct sched_group **sg)
+{
+       if (sg)
+               *sg = &per_cpu(sched_group_core, cpu);
        return cpu;
+}
+#endif
+
+static DEFINE_PER_CPU(struct sched_domain, phys_domains);
+static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
+
+static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
+                            struct sched_group **sg)
+{
+       int group;
+#ifdef CONFIG_SCHED_MC
+       cpumask_t mask = cpu_coregroup_map(cpu);
+       cpus_and(mask, mask, *cpu_map);
+       group = first_cpu(mask);
+#elif defined(CONFIG_SCHED_SMT)
+       cpumask_t mask = cpu_sibling_map[cpu];
+       cpus_and(mask, mask, *cpu_map);
+       group = first_cpu(mask);
+#else
+       group = cpu;
 #endif
+       if (sg)
+               *sg = &per_cpu(sched_group_phys, group);
+       return group;
 }
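
All of the new cpu_to_*_group() helpers follow the same pattern: restrict the relevant topology mask (SMT siblings, the core group, or the node) to the cpu_map currently being built, take the first CPU of the result as the group "leader", and optionally hand back that leader's per-CPU sched_group. A small userspace model of the leader selection, using plain bitmasks and an invented sibling map:

#include <stdio.h>

/* lowest-numbered CPU set in @mask, or -1 if empty */
static int first_cpu(unsigned long mask)
{
        int cpu;

        for (cpu = 0; cpu < 8 * (int)sizeof(mask); cpu++)
                if (mask & (1UL << cpu))
                        return cpu;
        return -1;
}

int main(void)
{
        /* hypothetical 4-CPU box: {0,1} and {2,3} are SMT sibling pairs */
        unsigned long sibling_map[4] = { 0x3, 0x3, 0xc, 0xc };
        unsigned long cpu_map = 0xe;    /* CPU 0 excluded, e.g. isolated */
        int cpu;

        for (cpu = 0; cpu < 4; cpu++) {
                int group;

                if (!(cpu_map & (1UL << cpu)))
                        continue;       /* not part of this domain build */
                group = first_cpu(sibling_map[cpu] & cpu_map);
                printf("cpu %d -> group led by cpu %d\n", cpu, group);
        }
        return 0;
}
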
 
 #ifdef CONFIG_NUMA
@@ -5766,33 +6371,163 @@ static DEFINE_PER_CPU(struct sched_domain, node_domains);
 static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
 
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
-static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS];
+static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
+
+static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
+                                struct sched_group **sg)
+{
+       cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
+       int group;
+
+       cpus_and(nodemask, nodemask, *cpu_map);
+       group = first_cpu(nodemask);
+
+       if (sg)
+               *sg = &per_cpu(sched_group_allnodes, group);
+       return group;
+}
+
+static void init_numa_sched_groups_power(struct sched_group *group_head)
+{
+       struct sched_group *sg = group_head;
+       int j;
+
+       if (!sg)
+               return;
+next_sg:
+       for_each_cpu_mask(j, sg->cpumask) {
+               struct sched_domain *sd;
+
+               sd = &per_cpu(phys_domains, j);
+               if (j != first_cpu(sd->groups->cpumask)) {
+                       /*
+                        * Only add "power" once for each
+                        * physical package.
+                        */
+                       continue;
+               }
+
+               sg->cpu_power += sd->groups->cpu_power;
+       }
+       sg = sg->next;
+       if (sg != group_head)
+               goto next_sg;
+}
+#endif
+
+#ifdef CONFIG_NUMA
+/* Free memory allocated for various sched_group structures */
+static void free_sched_groups(const cpumask_t *cpu_map)
+{
+       int cpu, i;
+
+       for_each_cpu_mask(cpu, *cpu_map) {
+               struct sched_group **sched_group_nodes
+                       = sched_group_nodes_bycpu[cpu];
+
+               if (!sched_group_nodes)
+                       continue;
+
+               for (i = 0; i < MAX_NUMNODES; i++) {
+                       cpumask_t nodemask = node_to_cpumask(i);
+                       struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+                       cpus_and(nodemask, nodemask, *cpu_map);
+                       if (cpus_empty(nodemask))
+                               continue;
 
-static int cpu_to_allnodes_group(int cpu)
+                       if (sg == NULL)
+                               continue;
+                       sg = sg->next;
+next_sg:
+                       oldsg = sg;
+                       sg = sg->next;
+                       kfree(oldsg);
+                       if (oldsg != sched_group_nodes[i])
+                               goto next_sg;
+               }
+               kfree(sched_group_nodes);
+               sched_group_nodes_bycpu[cpu] = NULL;
+       }
+}
+#else
+static void free_sched_groups(const cpumask_t *cpu_map)
 {
-       return cpu_to_node(cpu);
 }
 #endif
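
Both init_numa_sched_groups_power() and free_sched_groups() above walk sched_group rings, which are circular singly linked lists: start at head->next, keep following ->next, and stop once the head itself has been processed. A stand-alone userspace sketch of the freeing walk; struct group and the three-node ring are invented for the example:

#include <stdio.h>
#include <stdlib.h>

struct group {
        int id;
        struct group *next;
};

/* free every node of a circular list, the head last */
static void free_ring(struct group *head)
{
        struct group *sg, *oldsg;

        if (!head)
                return;
        sg = head->next;
        do {
                oldsg = sg;
                sg = sg->next;
                printf("freeing group %d\n", oldsg->id);
                free(oldsg);
        } while (oldsg != head);
}

int main(void)
{
        struct group *head = NULL, *prev = NULL;
        int i;

        for (i = 0; i < 3; i++) {               /* build ring 0 -> 1 -> 2 -> 0 */
                struct group *g = malloc(sizeof(*g));

                if (!g)
                        return 1;
                g->id = i;
                g->next = head ? head : g;      /* keep it circular */
                if (prev)
                        prev->next = g;
                else
                        head = g;
                prev = g;
        }
        free_ring(head);
        return 0;
}
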
 
+/*
+ * Initialize sched groups cpu_power.
+ *
+ * cpu_power indicates the capacity of a sched group, which is used while
+ * distributing the load between the different sched groups in a sched domain.
+ * Typically cpu_power for all the groups in a sched domain will be the same,
+ * unless there are asymmetries in the topology. If there are asymmetries, a
+ * group with more cpu_power will pick up more load than a group with less
+ * cpu_power.
+ *
+ * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
+ * the maximum number of tasks a group can handle in the presence of other idle
+ * or lightly loaded groups in the same sched domain.
+ */
+static void init_sched_groups_power(int cpu, struct sched_domain *sd)
+{
+       struct sched_domain *child;
+       struct sched_group *group;
+
+       WARN_ON(!sd || !sd->groups);
+
+       if (cpu != first_cpu(sd->groups->cpumask))
+               return;
+
+       child = sd->child;
+
+       /*
+        * For the performance policy, if the groups in the child domain share
+        * resources (for example cores sharing parts of the cache hierarchy,
+        * or SMT siblings), set this domain's group cpu_power such that each
+        * group can handle only one task when there are other idle groups in
+        * the same sched domain.
+        */
+       if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
+                      (child->flags &
+                       (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
+               sd->groups->cpu_power = SCHED_LOAD_SCALE;
+               return;
+       }
+
+       sd->groups->cpu_power = 0;
+
+       /*
+        * Add the cpu_power of each child group to this group's cpu_power.
+        */
+       group = child->groups;
+       do {
+               sd->groups->cpu_power += group->cpu_power;
+               group = group->next;
+       } while (group != child->groups);
+}
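
Concretely: under the default performance policy, a domain whose child groups share CPU power or package resources (SMT siblings, cores sharing cache) gets a group power of just SCHED_LOAD_SCALE, i.e. capacity for one task, while with power-savings balancing enabled the parent sums its children's power instead. Below is a rough userspace model of how the levels would come out on a hypothetical 1-package / 2-core / 2-thread box; which flags each level actually carries depends on the arch's SD_*_INIT definitions, so this is only an illustration:

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL         /* 1 << 10, as in kernels of this era */

/*
 * Model of init_sched_groups_power(): a leaf gets SCHED_LOAD_SCALE; a
 * parent whose children share CPU power or package resources also gets
 * SCHED_LOAD_SCALE under the performance policy; otherwise it sums its
 * (here identical) child groups.
 */
static unsigned long group_power(int power_savings, int child_shares,
                                 unsigned long child_power, int nr_children)
{
        if (!nr_children || (!power_savings && child_shares))
                return SCHED_LOAD_SCALE;
        return child_power * nr_children;
}

int main(void)
{
        int policy;

        for (policy = 0; policy <= 1; policy++) {
                unsigned long smt, core, phys;

                smt  = group_power(policy, 0, 0, 0);    /* one SMT thread (leaf) */
                core = group_power(policy, 1, smt, 2);  /* one core = 2 threads  */
                phys = group_power(policy, 1, core, 2); /* one package = 2 cores */

                printf("%s: smt=%lu core=%lu phys=%lu\n",
                       policy ? "power-savings" : "performance",
                       smt, core, phys);
        }
        return 0;
}
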
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
  */
-void build_sched_domains(const cpumask_t *cpu_map)
+static int build_sched_domains(const cpumask_t *cpu_map)
 {
        int i;
+       struct sched_domain *sd;
 #ifdef CONFIG_NUMA
        struct sched_group **sched_group_nodes = NULL;
-       struct sched_group *sched_group_allnodes = NULL;
+       int sd_allnodes = 0;
 
        /*
         * Allocate the per-node list of sched groups
         */
-       sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
-                                          GFP_ATOMIC);
+       sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
+                                          GFP_KERNEL);
        if (!sched_group_nodes) {
                printk(KERN_WARNING "Can not alloc sched group node list\n");
-               return;
+               return -ENOMEM;
        }
        sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
 #endif
@@ -5801,7 +6536,6 @@ void build_sched_domains(const cpumask_t *cpu_map)
         * Set up domains for cpus specified by the cpu_map.
         */
        for_each_cpu_mask(i, *cpu_map) {
-               int group;
                struct sched_domain *sd = NULL, *p;
                cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
 
@@ -5810,25 +6544,12 @@ void build_sched_domains(const cpumask_t *cpu_map)
 #ifdef CONFIG_NUMA
                if (cpus_weight(*cpu_map)
                                > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
-                       if (!sched_group_allnodes) {
-                               sched_group_allnodes
-                                       = kmalloc(sizeof(struct sched_group)
-                                                       * MAX_NUMNODES,
-                                                 GFP_KERNEL);
-                               if (!sched_group_allnodes) {
-                                       printk(KERN_WARNING
-                                       "Can not alloc allnodes sched group\n");
-                                       break;
-                               }
-                               sched_group_allnodes_bycpu[i]
-                                               = sched_group_allnodes;
-                       }
                        sd = &per_cpu(allnodes_domains, i);
                        *sd = SD_ALLNODES_INIT;
                        sd->span = *cpu_map;
-                       group = cpu_to_allnodes_group(i);
-                       sd->groups = &sched_group_allnodes[group];
+                       cpu_to_allnodes_group(i, cpu_map, &sd->groups);
                        p = sd;
+                       sd_allnodes = 1;
                } else
                        p = NULL;
 
@@ -5836,26 +6557,40 @@ void build_sched_domains(const cpumask_t *cpu_map)
                *sd = SD_NODE_INIT;
                sd->span = sched_domain_node_span(cpu_to_node(i));
                sd->parent = p;
+               if (p)
+                       p->child = sd;
                cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
                p = sd;
                sd = &per_cpu(phys_domains, i);
-               group = cpu_to_phys_group(i);
                *sd = SD_CPU_INIT;
                sd->span = nodemask;
                sd->parent = p;
-               sd->groups = &sched_group_phys[group];
+               if (p)
+                       p->child = sd;
+               cpu_to_phys_group(i, cpu_map, &sd->groups);
+
+#ifdef CONFIG_SCHED_MC
+               p = sd;
+               sd = &per_cpu(core_domains, i);
+               *sd = SD_MC_INIT;
+               sd->span = cpu_coregroup_map(i);
+               cpus_and(sd->span, sd->span, *cpu_map);
+               sd->parent = p;
+               p->child = sd;
+               cpu_to_core_group(i, cpu_map, &sd->groups);
+#endif
 
 #ifdef CONFIG_SCHED_SMT
                p = sd;
                sd = &per_cpu(cpu_domains, i);
-               group = cpu_to_cpu_group(i);
                *sd = SD_SIBLING_INIT;
                sd->span = cpu_sibling_map[i];
                cpus_and(sd->span, sd->span, *cpu_map);
                sd->parent = p;
-               sd->groups = &sched_group_cpus[group];
+               p->child = sd;
+               cpu_to_cpu_group(i, cpu_map, &sd->groups);
 #endif
        }
 
@@ -5867,11 +6602,22 @@ void build_sched_domains(const cpumask_t *cpu_map)
                if (i != first_cpu(this_sibling_map))
                        continue;
 
-               init_sched_build_groups(sched_group_cpus, this_sibling_map,
-                                               &cpu_to_cpu_group);
+               init_sched_build_groups(this_sibling_map, cpu_map, &cpu_to_cpu_group);
+       }
+#endif
+
+#ifdef CONFIG_SCHED_MC
+       /* Set up multi-core groups */
+       for_each_cpu_mask(i, *cpu_map) {
+               cpumask_t this_core_map = cpu_coregroup_map(i);
+               cpus_and(this_core_map, this_core_map, *cpu_map);
+               if (i != first_cpu(this_core_map))
+                       continue;
+               init_sched_build_groups(this_core_map, cpu_map, &cpu_to_core_group);
        }
 #endif
 
+
        /* Set up physical groups */
        for (i = 0; i < MAX_NUMNODES; i++) {
                cpumask_t nodemask = node_to_cpumask(i);
@@ -5880,15 +6626,13 @@ void build_sched_domains(const cpumask_t *cpu_map)
                if (cpus_empty(nodemask))
                        continue;
 
-               init_sched_build_groups(sched_group_phys, nodemask,
-                                               &cpu_to_phys_group);
+               init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
        }
 
 #ifdef CONFIG_NUMA
        /* Set up node groups */
-       if (sched_group_allnodes)
-               init_sched_build_groups(sched_group_allnodes, *cpu_map,
-                                       &cpu_to_allnodes_group);
+       if (sd_allnodes)
+               init_sched_build_groups(*cpu_map, cpu_map, &cpu_to_allnodes_group);
 
        for (i = 0; i < MAX_NUMNODES; i++) {
                /* Set up node groups */
@@ -5907,24 +6651,21 @@ void build_sched_domains(const cpumask_t *cpu_map)
                domainspan = sched_domain_node_span(i);
                cpus_and(domainspan, domainspan, *cpu_map);
 
-               sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+               sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
+               if (!sg) {
+                       printk(KERN_WARNING "Can not alloc domain group for "
+                               "node %d\n", i);
+                       goto error;
+               }
                sched_group_nodes[i] = sg;
                for_each_cpu_mask(j, nodemask) {
                        struct sched_domain *sd;
                        sd = &per_cpu(node_domains, j);
                        sd->groups = sg;
-                       if (sd->groups == NULL) {
-                               /* Turn off balancing if we have no groups */
-                               sd->flags = 0;
-                       }
-               }
-               if (!sg) {
-                       printk(KERN_WARNING
-                       "Can not alloc domain group for node %d\n", i);
-                       continue;
                }
                sg->cpu_power = 0;
                sg->cpumask = nodemask;
+               sg->next = sg;
                cpus_or(covered, covered, nodemask);
                prev = sg;
 
@@ -5943,75 +6684,51 @@ void build_sched_domains(const cpumask_t *cpu_map)
                        if (cpus_empty(tmp))
                                continue;
 
-                       sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+                       sg = kmalloc_node(sizeof(struct sched_group),
+                                         GFP_KERNEL, i);
                        if (!sg) {
                                printk(KERN_WARNING
                                "Can not alloc domain group for node %d\n", j);
-                               break;
+                               goto error;
                        }
                        sg->cpu_power = 0;
                        sg->cpumask = tmp;
+                       sg->next = prev->next;
                        cpus_or(covered, covered, tmp);
                        prev->next = sg;
                        prev = sg;
                }
-               prev->next = sched_group_nodes[i];
        }
 #endif
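
Note how the node-group ring above is now kept circular at every step: the first group points at itself and each later group inherits prev->next before prev is updated, so the old closing assignment (prev->next = sched_group_nodes[i]) is no longer needed and the error path always sees a well-formed ring. A tiny stand-alone illustration of that insertion order:

#include <stdio.h>

struct group {
        int id;
        struct group *next;
};

int main(void)
{
        static struct group g[3];
        struct group *head = &g[0], *prev, *walk;
        int i;

        head->id = 0;
        head->next = head;              /* single-entry ring */
        prev = head;

        for (i = 1; i < 3; i++) {
                g[i].id = i;
                g[i].next = prev->next; /* still points back to head */
                prev->next = &g[i];
                prev = &g[i];
        }

        walk = head;
        do {
                printf("%d -> ", walk->id);
                walk = walk->next;
        } while (walk != head);
        printf("(back to %d)\n", head->id);
        return 0;
}
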
 
        /* Calculate CPU power for physical packages and nodes */
-       for_each_cpu_mask(i, *cpu_map) {
-               int power;
-               struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
+       for_each_cpu_mask(i, *cpu_map) {
                sd = &per_cpu(cpu_domains, i);
-               power = SCHED_LOAD_SCALE;
-               sd->groups->cpu_power = power;
+               init_sched_groups_power(i, sd);
+       }
+#endif
+#ifdef CONFIG_SCHED_MC
+       for_each_cpu_mask(i, *cpu_map) {
+               sd = &per_cpu(core_domains, i);
+               init_sched_groups_power(i, sd);
+       }
 #endif
 
+       for_each_cpu_mask(i, *cpu_map) {
                sd = &per_cpu(phys_domains, i);
-               power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                               (cpus_weight(sd->groups->cpumask)-1) / 10;
-               sd->groups->cpu_power = power;
-
-#ifdef CONFIG_NUMA
-               sd = &per_cpu(allnodes_domains, i);
-               if (sd->groups) {
-                       power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                               (cpus_weight(sd->groups->cpumask)-1) / 10;
-                       sd->groups->cpu_power = power;
-               }
-#endif
+               init_sched_groups_power(i, sd);
        }
 
 #ifdef CONFIG_NUMA
-       for (i = 0; i < MAX_NUMNODES; i++) {
-               struct sched_group *sg = sched_group_nodes[i];
-               int j;
+       for (i = 0; i < MAX_NUMNODES; i++)
+               init_numa_sched_groups_power(sched_group_nodes[i]);
 
-               if (sg == NULL)
-                       continue;
-next_sg:
-               for_each_cpu_mask(j, sg->cpumask) {
-                       struct sched_domain *sd;
-                       int power;
-
-                       sd = &per_cpu(phys_domains, j);
-                       if (j != first_cpu(sd->groups->cpumask)) {
-                               /*
-                                * Only add "power" once for each
-                                * physical package.
-                                */
-                               continue;
-                       }
-                       power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                               (cpus_weight(sd->groups->cpumask)-1) / 10;
+       if (sd_allnodes) {
+               struct sched_group *sg;
 
-                       sg->cpu_power += power;
-               }
-               sg = sg->next;
-               if (sg != sched_group_nodes[i])
-                       goto next_sg;
+               cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
+               init_numa_sched_groups_power(sg);
        }
 #endif
 
@@ -6020,6 +6737,8 @@ next_sg:
                struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                sd = &per_cpu(cpu_domains, i);
+#elif defined(CONFIG_SCHED_MC)
+               sd = &per_cpu(core_domains, i);
 #else
                sd = &per_cpu(phys_domains, i);
 #endif
@@ -6029,13 +6748,22 @@ next_sg:
         * Tune cache-hot values:
         */
        calibrate_migration_costs(cpu_map);
+
+       return 0;
+
+#ifdef CONFIG_NUMA
+error:
+       free_sched_groups(cpu_map);
+       return -ENOMEM;
+#endif
 }
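
build_sched_domains() now reports failure instead of silently limping along: allocation failures in the NUMA group setup jump to a single error label that frees whatever was already built (free_sched_groups()) and returns -ENOMEM, and callers such as partition_sched_domains() propagate that error. A generic userspace sketch of the goto-cleanup idiom; the buffers here are stand-ins, not the scheduler's data structures:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int build(size_t n)
{
        int *a = NULL, *b = NULL;

        a = malloc(n * sizeof(*a));
        if (!a)
                goto error;
        b = malloc(n * sizeof(*b));
        if (!b)
                goto error;

        /* ... use a and b ... */
        free(b);
        free(a);
        return 0;

error:
        free(b);        /* free(NULL) is a no-op */
        free(a);
        return -ENOMEM;
}

int main(void)
{
        printf("build() returned %d\n", build(1024));
        return 0;
}
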
 /*
  * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
  */
-static void arch_init_sched_domains(const cpumask_t *cpu_map)
+static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
        cpumask_t cpu_default_map;
+       int err;
 
        /*
         * Setup mask for cpus without special case scheduling requirements.
@@ -6044,51 +6772,14 @@ static void arch_init_sched_domains(const cpumask_t *cpu_map)
         */
        cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
 
-       build_sched_domains(&cpu_default_map);
+       err = build_sched_domains(&cpu_default_map);
+
+       return err;
 }
 
 static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
 {
-#ifdef CONFIG_NUMA
-       int i;
-       int cpu;
-
-       for_each_cpu_mask(cpu, *cpu_map) {
-               struct sched_group *sched_group_allnodes
-                       = sched_group_allnodes_bycpu[cpu];
-               struct sched_group **sched_group_nodes
-                       = sched_group_nodes_bycpu[cpu];
-
-               if (sched_group_allnodes) {
-                       kfree(sched_group_allnodes);
-                       sched_group_allnodes_bycpu[cpu] = NULL;
-               }
-
-               if (!sched_group_nodes)
-                       continue;
-
-               for (i = 0; i < MAX_NUMNODES; i++) {
-                       cpumask_t nodemask = node_to_cpumask(i);
-                       struct sched_group *oldsg, *sg = sched_group_nodes[i];
-
-                       cpus_and(nodemask, nodemask, *cpu_map);
-                       if (cpus_empty(nodemask))
-                               continue;
-
-                       if (sg == NULL)
-                               continue;
-                       sg = sg->next;
-next_sg:
-                       oldsg = sg;
-                       sg = sg->next;
-                       kfree(oldsg);
-                       if (oldsg != sched_group_nodes[i])
-                               goto next_sg;
-               }
-               kfree(sched_group_nodes);
-               sched_group_nodes_bycpu[cpu] = NULL;
-       }
-#endif
+       free_sched_groups(cpu_map);
 }
 
 /*
@@ -6113,9 +6804,10 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
  * correct sched domains
  * Call with hotplug lock held
  */
-void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
+int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 {
        cpumask_t change_map;
+       int err = 0;
 
        cpus_and(*partition1, *partition1, cpu_online_map);
        cpus_and(*partition2, *partition2, cpu_online_map);
@@ -6124,12 +6816,89 @@ void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
        /* Detach sched domains from all of the affected cpus */
        detach_destroy_domains(&change_map);
        if (!cpus_empty(*partition1))
-               build_sched_domains(partition1);
-       if (!cpus_empty(*partition2))
-               build_sched_domains(partition2);
+               err = build_sched_domains(partition1);
+       if (!err && !cpus_empty(*partition2))
+               err = build_sched_domains(partition2);
+
+       return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+int arch_reinit_sched_domains(void)
+{
+       int err;
+
+       lock_cpu_hotplug();
+       detach_destroy_domains(&cpu_online_map);
+       err = arch_init_sched_domains(&cpu_online_map);
+       unlock_cpu_hotplug();
+
+       return err;
+}
+
+static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
+{
+       int ret;
+
+       if (buf[0] != '0' && buf[0] != '1')
+               return -EINVAL;
+
+       if (smt)
+               sched_smt_power_savings = (buf[0] == '1');
+       else
+               sched_mc_power_savings = (buf[0] == '1');
+
+       ret = arch_reinit_sched_domains();
+
+       return ret ? ret : count;
+}
+
+int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+{
+       int err = 0;
+
+#ifdef CONFIG_SCHED_SMT
+       if (smt_capable())
+               err = sysfs_create_file(&cls->kset.kobj,
+                                       &attr_sched_smt_power_savings.attr);
+#endif
+#ifdef CONFIG_SCHED_MC
+       if (!err && mc_capable())
+               err = sysfs_create_file(&cls->kset.kobj,
+                                       &attr_sched_mc_power_savings.attr);
+#endif
+       return err;
+}
+#endif
+
+#ifdef CONFIG_SCHED_MC
+static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
+{
+       return sprintf(page, "%u\n", sched_mc_power_savings);
+}
+static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
+                                           const char *buf, size_t count)
+{
+       return sched_power_savings_store(buf, count, 0);
+}
+SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
+           sched_mc_power_savings_store);
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
+{
+       return sprintf(page, "%u\n", sched_smt_power_savings);
+}
+static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
+                                            const char *buf, size_t count)
+{
+       return sched_power_savings_store(buf, count, 1);
+}
+SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+           sched_smt_power_savings_store);
+#endif
+
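
These attributes expose sched_mc_power_savings and sched_smt_power_savings as writable sysfs files; writing '0' or '1' flips the flag and calls arch_reinit_sched_domains(), which tears down and rebuilds the domain hierarchy under the hotplug lock. A small userspace sketch of toggling the multi-core knob, assuming the architecture registers the attribute on the cpu sysdev class so that it appears as /sys/devices/system/cpu/sched_mc_power_savings, as i386/x86_64 did at the time (writing requires root):

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/devices/system/cpu/sched_mc_power_savings";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        /* sched_power_savings_store() accepts only '0' or '1' */
        if (fputs("1", f) == EOF)
                perror("write");
        fclose(f);
        return 0;
}
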
 /*
  * Force a reinitialization of the sched domains hierarchy.  The domains
  * and groups cannot be updated in place without racing with the balancing
@@ -6162,15 +6931,23 @@ static int update_sched_domains(struct notifier_block *nfb,
 
        return NOTIFY_OK;
 }
-#endif
 
 void __init sched_init_smp(void)
 {
+       cpumask_t non_isolated_cpus;
+
        lock_cpu_hotplug();
        arch_init_sched_domains(&cpu_online_map);
+       cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
+       if (cpus_empty(non_isolated_cpus))
+               cpu_set(smp_processor_id(), non_isolated_cpus);
        unlock_cpu_hotplug();
        /* XXX: Theoretical race here - CPU may be hotplugged now */
        hotcpu_notifier(update_sched_domains, 0);
+
+       /* Move init over to a non-isolated CPU */
+       if (set_cpus_allowed(current, non_isolated_cpus) < 0)
+               BUG();
 }
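
The new code in sched_init_smp() computes the set of non-isolated CPUs and then moves the init task onto it with set_cpus_allowed(), so CPUs isolated via the isolcpus= boot parameter really stay free of it. A rough userspace analogue of that last step, using sched_setaffinity() to pin the calling process, here to CPU 0 only as an arbitrary example:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);      /* allow only CPU 0 */

        if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
                perror("sched_setaffinity");
                return 1;
        }
        printf("now restricted to CPU 0\n");
        return 0;
}
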
 #else
 void __init sched_init_smp(void)
@@ -6182,6 +6959,7 @@ int in_sched_functions(unsigned long addr)
 {
        /* Linker adds these: start and end of __sched functions */
        extern char __sched_text_start[], __sched_text_end[];
+
        return in_lock_functions(addr) ||
                (addr >= (unsigned long)__sched_text_start
                && addr < (unsigned long)__sched_text_end);
@@ -6189,14 +6967,15 @@ int in_sched_functions(unsigned long addr)
 
 void __init sched_init(void)
 {
-       runqueue_t *rq;
        int i, j, k;
 
-       for_each_cpu(i) {
-               prio_array_t *array;
+       for_each_possible_cpu(i) {
+               struct prio_array *array;
+               struct rq *rq;
 
                rq = cpu_rq(i);
                spin_lock_init(&rq->lock);
+               lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                rq->nr_running = 0;
                rq->active = rq->arrays;
                rq->expired = rq->arrays + 1;
@@ -6211,13 +6990,12 @@ void __init sched_init(void)
                rq->cpu = i;
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
-               rq->cpu = i;
 #endif
                atomic_set(&rq->nr_iowait, 0);
 #ifdef CONFIG_VSERVER_HARDCPU
                INIT_LIST_HEAD(&rq->hold_queue);
+               rq->nr_onhold = 0;
 #endif
-
                for (j = 0; j < 2; j++) {
                        array = rq->arrays + j;
                        for (k = 0; k < MAX_PRIO; k++) {
@@ -6229,6 +7007,16 @@ void __init sched_init(void)
                }
        }
 
+       set_load_weight(&init_task);
+
+#ifdef CONFIG_SMP
+       open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+#endif
+
        /*
         * The boot idle thread does lazy MMU switching as well:
         */
@@ -6247,7 +7035,7 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
 void __might_sleep(char *file, int line)
 {
-#if defined(in_atomic)
+#ifdef in_atomic
        static unsigned long prev_jiffy;        /* ratelimiting */
 
        if ((in_atomic() || irqs_disabled()) &&
@@ -6255,10 +7043,13 @@ void __might_sleep(char *file, int line)
                if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                        return;
                prev_jiffy = jiffies;
-               printk(KERN_ERR "Debug: sleeping function called from invalid"
+               printk(KERN_ERR "BUG: sleeping function called from invalid"
                                " context at %s:%d\n", file, line);
                printk("in_atomic():%d, irqs_disabled():%d\n",
                        in_atomic(), irqs_disabled());
+               debug_show_held_locks(current);
+               if (irqs_disabled())
+                       print_irqtrace_events(current);
                dump_stack();
        }
 #endif
@@ -6269,17 +7060,18 @@ EXPORT_SYMBOL(__might_sleep);
 #ifdef CONFIG_MAGIC_SYSRQ
 void normalize_rt_tasks(void)
 {
+       struct prio_array *array;
        struct task_struct *p;
-       prio_array_t *array;
        unsigned long flags;
-       runqueue_t *rq;
+       struct rq *rq;
 
        read_lock_irq(&tasklist_lock);
-       for_each_process (p) {
+       for_each_process(p) {
                if (!rt_task(p))
                        continue;
 
-               rq = task_rq_lock(p, &flags);
+               spin_lock_irqsave(&p->pi_lock, flags);
+               rq = __task_rq_lock(p);
 
                array = p->array;
                if (array)
@@ -6291,7 +7083,8 @@ void normalize_rt_tasks(void)
                        resched_task(rq->curr);
                }
 
-               task_rq_unlock(rq, &flags);
+               __task_rq_unlock(rq);
+               spin_unlock_irqrestore(&p->pi_lock, flags);
        }
        read_unlock_irq(&tasklist_lock);
 }
@@ -6315,7 +7108,7 @@ void normalize_rt_tasks(void)
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
  */
-task_t *curr_task(int cpu)
+struct task_struct *curr_task(int cpu)
 {
        return cpu_curr(cpu);
 }
@@ -6335,7 +7128,7 @@ task_t *curr_task(int cpu)
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
  */
-void set_curr_task(int cpu, task_t *p)
+void set_curr_task(int cpu, struct task_struct *p)
 {
        cpu_curr(cpu) = p;
 }