* tasks if there is an imbalance.
*
* Called with this_rq unlocked.
- *
- * This function is marked noinline to work around a compiler
- * bug with gcc 3.3.3-hammer on x86-64.
*/
-static int noinline load_balance(int this_cpu, runqueue_t *this_rq,
+static int load_balance(int this_cpu, runqueue_t *this_rq,
struct sched_domain *sd, enum idle_type idle)
{
struct sched_group *group;
busiest = find_busiest_queue(group);
if (!busiest)
goto out_balanced;
+ /*
+ * This should be "impossible", but since load
+ * balancing is inherently racy and statistical,
+ * it could happen in theory.
+ */
if (unlikely(busiest == this_rq)) {
WARN_ON(1);
goto out_balanced;
}
rq = cpu_rq(push_cpu);
+
+ /*
+ * This condition is "impossible", but since load
+ * balancing is inherently a bit racy and statistical,
+ * it can trigger. Reported by Bjorn Helgaas on a
+ * 128-cpu setup.
+ */
+ if (unlikely(busiest == rq))
+ goto next_group;
double_lock_balance(busiest, rq);
move_tasks(rq, push_cpu, busiest, 1, sd, IDLE);
spin_unlock(&rq->lock);