X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Fsched.c;h=f1780c9057b9552226d54e7ce17c8f3eaf12274f;hb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;hp=305f948526018692107f9a40658263ba9ed1f8a5;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git diff --git a/kernel/sched.c b/kernel/sched.c index 305f94852..f1780c905 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -40,8 +41,12 @@ #include #include #include +#include +#include #include #include +#include +#include #include #include @@ -69,8 +74,6 @@ #define USER_PRIO(p) ((p)-MAX_RT_PRIO) #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) -#define AVG_TIMESLICE (MIN_TIMESLICE + ((MAX_TIMESLICE - MIN_TIMESLICE) *\ - (MAX_PRIO-1-NICE_TO_PRIO(0))/(MAX_USER_PRIO - 1))) /* * Some helpers for converting nanosecond timing to jiffy resolution @@ -81,12 +84,12 @@ /* * These are the 'tuning knobs' of the scheduler: * - * Minimum timeslice is 10 msecs, default timeslice is 100 msecs, - * maximum timeslice is 200 msecs. Timeslices get refilled after - * they expire. + * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger), + * default timeslice is 100 msecs, maximum timeslice is 800 msecs. + * Timeslices get refilled after they expire. */ -#define MIN_TIMESLICE ( 10 * HZ / 1000) -#define MAX_TIMESLICE (200 * HZ / 1000) +#define MIN_TIMESLICE max(5 * HZ / 1000, 1) +#define DEF_TIMESLICE (100 * HZ / 1000) #define ON_RUNQUEUE_WEIGHT 30 #define CHILD_PENALTY 95 #define PARENT_PENALTY 100 @@ -94,7 +97,7 @@ #define PRIO_BONUS_RATIO 25 #define MAX_BONUS (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100) #define INTERACTIVE_DELTA 2 -#define MAX_SLEEP_AVG (AVG_TIMESLICE * MAX_BONUS) +#define MAX_SLEEP_AVG (DEF_TIMESLICE * MAX_BONUS) #define STARVATION_LIMIT (MAX_SLEEP_AVG) #define NS_MAX_SLEEP_AVG (JIFFIES_TO_NS(MAX_SLEEP_AVG)) #define CREDIT_LIMIT 100 @@ -163,26 +166,36 @@ ((p)->prio < (rq)->curr->prio) /* - * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ] - * to time slice values. + * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ] + * to time slice values: [800ms ... 100ms ... 5ms] * * The higher a thread's priority, the bigger timeslices * it gets during one round of execution. But even the lowest * priority thread gets MIN_TIMESLICE worth of execution time. - * - * task_timeslice() is the interface that is used by the scheduler. 
*/ -#define BASE_TIMESLICE(p) (MIN_TIMESLICE + \ - ((MAX_TIMESLICE - MIN_TIMESLICE) * \ - (MAX_PRIO-1 - (p)->static_prio) / (MAX_USER_PRIO-1))) +#define SCALE_PRIO(x, prio) \ + max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE) static unsigned int task_timeslice(task_t *p) { - return BASE_TIMESLICE(p); + if (p->static_prio < NICE_TO_PRIO(0)) + return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio); + else + return SCALE_PRIO(DEF_TIMESLICE, p->static_prio); } +#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \ + < (long long) (sd)->cache_hot_time) + +enum idle_type +{ + IDLE, + NOT_IDLE, + NEWLY_IDLE, + MAX_IDLE_TYPES +}; -#define task_hot(p, now, sd) ((now) - (p)->timestamp < (sd)->cache_hot_time) +struct sched_domain; /* * These are the runqueue data structures: @@ -235,12 +248,190 @@ struct runqueue { task_t *migration_thread; struct list_head migration_queue; #endif +#ifdef CONFIG_VSERVER_HARDCPU struct list_head hold_queue; int idle_tokens; +#endif + +#ifdef CONFIG_SCHEDSTATS + /* latency stats */ + struct sched_info rq_sched_info; + + /* sys_sched_yield() stats */ + unsigned long yld_exp_empty; + unsigned long yld_act_empty; + unsigned long yld_both_empty; + unsigned long yld_cnt; + + /* schedule() stats */ + unsigned long sched_noswitch; + unsigned long sched_switch; + unsigned long sched_cnt; + unsigned long sched_goidle; + + /* pull_task() stats */ + unsigned long pt_gained[MAX_IDLE_TYPES]; + unsigned long pt_lost[MAX_IDLE_TYPES]; + + /* active_load_balance() stats */ + unsigned long alb_cnt; + unsigned long alb_lost; + unsigned long alb_gained; + unsigned long alb_failed; + + /* try_to_wake_up() stats */ + unsigned long ttwu_cnt; + unsigned long ttwu_attempts; + unsigned long ttwu_moved; + + /* wake_up_new_task() stats */ + unsigned long wunt_cnt; + unsigned long wunt_moved; + + /* sched_migrate_task() stats */ + unsigned long smt_cnt; + + /* sched_balance_exec() stats */ + unsigned long sbe_cnt; +#endif }; static DEFINE_PER_CPU(struct runqueue, runqueues); +/* + * sched-domains (multiprocessor balancing) declarations: + */ +#ifdef CONFIG_SMP +#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */ + +#define SD_BALANCE_NEWIDLE 1 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 2 /* Balance on exec */ +#define SD_WAKE_IDLE 4 /* Wake to idle CPU on task wakeup */ +#define SD_WAKE_AFFINE 8 /* Wake task to waking CPU */ +#define SD_WAKE_BALANCE 16 /* Perform balancing at task wakeup */ +#define SD_SHARE_CPUPOWER 32 /* Domain members share cpu power */ + +struct sched_group { + struct sched_group *next; /* Must be a circular list */ + cpumask_t cpumask; + + /* + * CPU power of this group, SCHED_LOAD_SCALE being max power for a + * single CPU. This should be read only (except for setup). Although + * it will need to be written to at cpu hot(un)plug time, perhaps the + * cpucontrol semaphore will provide enough exclusion? 
+ */ + unsigned long cpu_power; +}; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + cpumask_t span; /* span of all CPUs in this domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int per_cpu_gain; /* CPU % gained by adding domain cpus */ + int flags; /* See SD_* */ + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. */ + unsigned int nr_balance_failed; /* initialise to 0 */ + +#ifdef CONFIG_SCHEDSTATS + /* load_balance() stats */ + unsigned long lb_cnt[MAX_IDLE_TYPES]; + unsigned long lb_failed[MAX_IDLE_TYPES]; + unsigned long lb_imbalance[MAX_IDLE_TYPES]; + unsigned long lb_nobusyg[MAX_IDLE_TYPES]; + unsigned long lb_nobusyq[MAX_IDLE_TYPES]; + + /* sched_balance_exec() stats */ + unsigned long sbe_attempts; + unsigned long sbe_pushed; + + /* try_to_wake_up() stats */ + unsigned long ttwu_wake_affine; + unsigned long ttwu_wake_balance; +#endif +}; + +#ifndef ARCH_HAS_SCHED_TUNE +#ifdef CONFIG_SCHED_SMT +#define ARCH_HAS_SCHED_WAKE_IDLE +/* Common values for SMT siblings */ +#define SD_SIBLING_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 1, \ + .max_interval = 2, \ + .busy_factor = 8, \ + .imbalance_pct = 110, \ + .cache_hot_time = 0, \ + .cache_nice_tries = 0, \ + .per_cpu_gain = 25, \ + .flags = SD_BALANCE_NEWIDLE \ + | SD_BALANCE_EXEC \ + | SD_WAKE_AFFINE \ + | SD_WAKE_IDLE \ + | SD_SHARE_CPUPOWER, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} +#endif + +/* Common values for CPUs */ +#define SD_CPU_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_hot_time = cache_decay_ticks*1000000 ? 
: (5*1000000/2),\ + .cache_nice_tries = 1, \ + .per_cpu_gain = 100, \ + .flags = SD_BALANCE_NEWIDLE \ + | SD_BALANCE_EXEC \ + | SD_WAKE_AFFINE \ + | SD_WAKE_BALANCE, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} + +/* Arch can override this macro in processor.h */ +#if defined(CONFIG_NUMA) && !defined(SD_NODE_INIT) +#define SD_NODE_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 8, \ + .max_interval = 32, \ + .busy_factor = 32, \ + .imbalance_pct = 125, \ + .cache_hot_time = (10*1000000), \ + .cache_nice_tries = 1, \ + .per_cpu_gain = 100, \ + .flags = SD_BALANCE_EXEC \ + | SD_WAKE_BALANCE, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} +#endif +#endif /* ARCH_HAS_SCHED_TUNE */ +#endif + + #define for_each_domain(cpu, domain) \ for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent) @@ -283,6 +474,104 @@ static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) spin_unlock_irqrestore(&rq->lock, *flags); } +#ifdef CONFIG_SCHEDSTATS +/* + * bump this up when changing the output format or the meaning of an existing + * format, so that tools can adapt (or abort) + */ +#define SCHEDSTAT_VERSION 10 + +static int show_schedstat(struct seq_file *seq, void *v) +{ + int cpu; + enum idle_type itype; + + seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); + seq_printf(seq, "timestamp %lu\n", jiffies); + for_each_online_cpu(cpu) { + runqueue_t *rq = cpu_rq(cpu); +#ifdef CONFIG_SMP + struct sched_domain *sd; + int dcnt = 0; +#endif + + /* runqueue-specific stats */ + seq_printf(seq, + "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu " + "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu", + cpu, rq->yld_both_empty, + rq->yld_act_empty, rq->yld_exp_empty, + rq->yld_cnt, rq->sched_noswitch, + rq->sched_switch, rq->sched_cnt, rq->sched_goidle, + rq->alb_cnt, rq->alb_gained, rq->alb_lost, + rq->alb_failed, + rq->ttwu_cnt, rq->ttwu_moved, rq->ttwu_attempts, + rq->wunt_cnt, rq->wunt_moved, + rq->smt_cnt, rq->sbe_cnt, rq->rq_sched_info.cpu_time, + rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt); + + for (itype = IDLE; itype < MAX_IDLE_TYPES; itype++) + seq_printf(seq, " %lu %lu", rq->pt_gained[itype], + rq->pt_lost[itype]); + seq_printf(seq, "\n"); + +#ifdef CONFIG_SMP + /* domain-specific stats */ + for_each_domain(cpu, sd) { + char mask_str[NR_CPUS]; + + cpumask_scnprintf(mask_str, NR_CPUS, sd->span); + seq_printf(seq, "domain%d %s", dcnt++, mask_str); + for (itype = IDLE; itype < MAX_IDLE_TYPES; itype++) { + seq_printf(seq, " %lu %lu %lu %lu %lu", + sd->lb_cnt[itype], + sd->lb_failed[itype], + sd->lb_imbalance[itype], + sd->lb_nobusyq[itype], + sd->lb_nobusyg[itype]); + } + seq_printf(seq, " %lu %lu %lu %lu\n", + sd->sbe_pushed, sd->sbe_attempts, + sd->ttwu_wake_affine, sd->ttwu_wake_balance); + } +#endif + } + return 0; +} + +static int schedstat_open(struct inode *inode, struct file *file) +{ + unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32); + char *buf = kmalloc(size, GFP_KERNEL); + struct seq_file *m; + int res; + + if (!buf) + return -ENOMEM; + res = single_open(file, show_schedstat, NULL); + if (!res) { + m = file->private_data; + m->buf = buf; + m->size = size; + } else + kfree(buf); + return res; +} + +struct file_operations proc_schedstat_operations = { + .open = schedstat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +# define schedstat_inc(rq, field) rq->field++; +# define 
schedstat_add(rq, field, amt) rq->field += amt; +#else /* !CONFIG_SCHEDSTATS */ +# define schedstat_inc(rq, field) do { } while (0); +# define schedstat_add(rq, field, amt) do { } while (0); +#endif + /* * rq_lock - lock a given runqueue and disable interrupts. */ @@ -302,6 +591,112 @@ static inline void rq_unlock(runqueue_t *rq) spin_unlock_irq(&rq->lock); } +#ifdef CONFIG_SCHEDSTATS +/* + * Called when a process is dequeued from the active array and given + * the cpu. We should note that with the exception of interactive + * tasks, the expired queue will become the active queue after the active + * queue is empty, without explicitly dequeuing and requeuing tasks in the + * expired queue. (Interactive tasks may be requeued directly to the + * active queue, thus delaying tasks in the expired queue from running; + * see scheduler_tick()). + * + * This function is only called from sched_info_arrive(), rather than + * dequeue_task(). Even though a task may be queued and dequeued multiple + * times as it is shuffled about, we're really interested in knowing how + * long it was from the *first* time it was queued to the time that it + * finally hit a cpu. + */ +static inline void sched_info_dequeued(task_t *t) +{ + t->sched_info.last_queued = 0; +} + +/* + * Called when a task finally hits the cpu. We can now calculate how + * long it was waiting to run. We also note when it began so that we + * can keep stats on how long its timeslice is. + */ +static inline void sched_info_arrive(task_t *t) +{ + unsigned long now = jiffies, diff = 0; + struct runqueue *rq = task_rq(t); + + if (t->sched_info.last_queued) + diff = now - t->sched_info.last_queued; + sched_info_dequeued(t); + t->sched_info.run_delay += diff; + t->sched_info.last_arrival = now; + t->sched_info.pcnt++; + + if (!rq) + return; + + rq->rq_sched_info.run_delay += diff; + rq->rq_sched_info.pcnt++; +} + +/* + * Called when a process is queued into either the active or expired + * array. The time is noted and later used to determine how long we + * had to wait for us to reach the cpu. Since the expired queue will + * become the active queue after active queue is empty, without dequeuing + * and requeuing any tasks, we are interested in queuing to either. It + * is unusual but not impossible for tasks to be dequeued and immediately + * requeued in the same or another array: this can happen in sched_yield(), + * set_user_nice(), and even load_balance() as it moves tasks from runqueue + * to runqueue. + * + * This function is only called from enqueue_task(), but also only updates + * the timestamp if it is already not set. It's assumed that + * sched_info_dequeued() will clear that stamp when appropriate. + */ +static inline void sched_info_queued(task_t *t) +{ + if (!t->sched_info.last_queued) + t->sched_info.last_queued = jiffies; +} + +/* + * Called when a process ceases being the active-running process, either + * voluntarily or involuntarily. Now we can calculate how long we ran. + */ +static inline void sched_info_depart(task_t *t) +{ + struct runqueue *rq = task_rq(t); + unsigned long diff = jiffies - t->sched_info.last_arrival; + + t->sched_info.cpu_time += diff; + + if (rq) + rq->rq_sched_info.cpu_time += diff; +} + +/* + * Called when tasks are switched involuntarily due, typically, to expiring + * their time slice. (This may also be called when switching to or from + * the idle task.) We are only called when prev != next. 
+ */ +static inline void sched_info_switch(task_t *prev, task_t *next) +{ + struct runqueue *rq = task_rq(prev); + + /* + * prev now departs the cpu. It's not interesting to record + * stats about how efficient we were at scheduling the idle + * process, however. + */ + if (prev != rq->idle) + sched_info_depart(prev); + + if (next != rq->idle) + sched_info_arrive(next); +} +#else +#define sched_info_queued(t) do { } while (0) +#define sched_info_switch(t, next) do { } while (0) +#endif /* CONFIG_SCHEDSTATS */ + /* * Adding/removing a task to/from a priority array: */ @@ -315,6 +710,7 @@ static void dequeue_task(struct task_struct *p, prio_array_t *array) static void enqueue_task(struct task_struct *p, prio_array_t *array) { + sched_info_queued(p); list_add_tail(&p->run_list, array->queue + p->prio); __set_bit(p->prio, array->bitmap); array->nr_active++; @@ -358,7 +754,7 @@ static int effective_prio(task_t *p) bonus = CURRENT_BONUS(p) - MAX_BONUS / 2; prio = p->static_prio - bonus; - if (__vx_task_flags(p, VXF_SCHED_PRIO, 0)) + if (task_vx_flags(p, VXF_SCHED_PRIO, 0)) prio += effective_vavavoom(p, MAX_USER_PRIO); if (prio < MAX_RT_PRIO) @@ -406,7 +802,7 @@ static void recalc_task_prio(task_t *p, unsigned long long now) if (p->mm && p->activated != -1 && sleep_time > INTERACTIVE_SLEEP(p)) { p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG - - AVG_TIMESLICE); + DEF_TIMESLICE); if (!HIGH_CREDIT(p)) p->interactive_credit++; } else { @@ -506,21 +902,29 @@ static void activate_task(task_t *p, runqueue_t *rq, int local) } p->timestamp = now; + vx_activate_task(p); __activate_task(p, rq); } /* * deactivate_task - remove a task from the runqueue. */ -static void deactivate_task(struct task_struct *p, runqueue_t *rq) +static void __deactivate_task(struct task_struct *p, runqueue_t *rq) { rq->nr_running--; if (p->state == TASK_UNINTERRUPTIBLE) rq->nr_uninterruptible++; dequeue_task(p, p->array); + p->array = NULL; } +static void deactivate_task(struct task_struct *p, runqueue_t *rq) +{ + __deactivate_task(p, rq); + vx_deactivate_task(p); +} + /* * resched_task - mark a task 'to be rescheduled now'. * @@ -533,7 +937,8 @@ static void resched_task(task_t *p) { int need_resched, nrpolling; - preempt_disable(); + BUG_ON(!spin_is_locked(&task_rq(p)->lock)); + /* minimise the chance of sending an interrupt to poll_idle() */ nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG); need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED); @@ -541,7 +946,6 @@ static void resched_task(task_t *p) if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id())) smp_send_reschedule(task_cpu(p)); - preempt_enable(); } #else static inline void resched_task(task_t *p) @@ -747,6 +1151,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync) #endif rq = task_rq_lock(p, &flags); + schedstat_inc(rq, ttwu_cnt); old_state = p->state; if (!(old_state & state)) goto out; @@ -794,23 +1199,35 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync) */ imbalance = sd->imbalance_pct + (sd->imbalance_pct - 100) / 2; - if ( ((sd->flags & SD_WAKE_AFFINE) && - !task_hot(p, rq->timestamp_last_tick, sd)) - || ((sd->flags & SD_WAKE_BALANCE) && - imbalance*this_load <= 100*load) ) { + if ((sd->flags & SD_WAKE_AFFINE) && + !task_hot(p, rq->timestamp_last_tick, sd)) { + /* + * This domain has SD_WAKE_AFFINE and p is cache cold + * in this domain. 
+ */ + if (cpu_isset(cpu, sd->span)) { + schedstat_inc(sd, ttwu_wake_affine); + goto out_set_cpu; + } + } else if ((sd->flags & SD_WAKE_BALANCE) && + imbalance*this_load <= 100*load) { /* - * Now sd has SD_WAKE_AFFINE and p is cache cold in sd - * or sd has SD_WAKE_BALANCE and there is an imbalance + * This domain has SD_WAKE_BALANCE and there is + * an imbalance. */ - if (cpu_isset(cpu, sd->span)) + if (cpu_isset(cpu, sd->span)) { + schedstat_inc(sd, ttwu_wake_balance); goto out_set_cpu; + } } } new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */ out_set_cpu: + schedstat_inc(rq, ttwu_attempts); new_cpu = wake_idle(new_cpu, p); if (new_cpu != cpu && cpu_isset(new_cpu, p->cpus_allowed)) { + schedstat_inc(rq, ttwu_moved); set_task_cpu(p, new_cpu); task_rq_unlock(rq, &flags); /* might preempt at this point */ @@ -861,7 +1278,7 @@ out: int fastcall wake_up_process(task_t * p) { - return try_to_wake_up(p, TASK_STOPPED | + return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); } @@ -872,6 +1289,11 @@ int fastcall wake_up_state(task_t *p, unsigned int state) return try_to_wake_up(p, state, 0); } +#ifdef CONFIG_SMP +static int find_idlest_cpu(struct task_struct *p, int this_cpu, + struct sched_domain *sd); +#endif + /* * Perform scheduler related setup for a newly forked process p. * p is forked by current. @@ -888,6 +1310,9 @@ void fastcall sched_fork(task_t *p) INIT_LIST_HEAD(&p->run_list); p->array = NULL; spin_lock_init(&p->switch_lock); +#ifdef CONFIG_SCHEDSTATS + memset(&p->sched_info, 0, sizeof(p->sched_info)); +#endif #ifdef CONFIG_PREEMPT /* * During context-switch we hold precisely one spinlock, which @@ -911,7 +1336,7 @@ void fastcall sched_fork(task_t *p) p->first_time_slice = 1; current->time_slice >>= 1; p->timestamp = sched_clock(); - if (!current->time_slice) { + if (unlikely(!current->time_slice)) { /* * This case is rare, it happens when the parent has only * a single jiffy left from its timeslice. Taking the @@ -927,44 +1352,90 @@ void fastcall sched_fork(task_t *p) } /* - * wake_up_forked_process - wake up a freshly forked process. + * wake_up_new_task - wake up a newly created task for the first time. * * This function will do some initial scheduler statistics housekeeping - * that must be done for every newly created process. + * that must be done for every newly created context, then puts the task + * on the runqueue and wakes it. */ -void fastcall wake_up_forked_process(task_t * p) +void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) { unsigned long flags; - runqueue_t *rq = task_rq_lock(current, &flags); + int this_cpu, cpu; + runqueue_t *rq, *this_rq; + + rq = task_rq_lock(p, &flags); + cpu = task_cpu(p); + this_cpu = smp_processor_id(); BUG_ON(p->state != TASK_RUNNING); + schedstat_inc(rq, wunt_cnt); /* * We decrease the sleep average of forking parents * and children as well, to keep max-interactive tasks - * from forking tasks that are max-interactive. + * from forking tasks that are max-interactive. The parent + * (current) is done further down, under its lock. 
*/ - current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) * - PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS); - p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) * CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS); p->interactive_credit = 0; p->prio = effective_prio(p); - set_task_cpu(p, smp_processor_id()); - if (unlikely(!current->array)) + vx_activate_task(p); + if (likely(cpu == this_cpu)) { + if (!(clone_flags & CLONE_VM)) { + /* + * The VM isn't cloned, so we're in a good position to + * do child-runs-first in anticipation of an exec. This + * usually avoids a lot of COW overhead. + */ + if (unlikely(!current->array)) + __activate_task(p, rq); + else { + p->prio = current->prio; + list_add_tail(&p->run_list, ¤t->run_list); + p->array = current->array; + p->array->nr_active++; + rq->nr_running++; + } + set_need_resched(); + } else + /* Run child last */ + __activate_task(p, rq); + /* + * We skip the following code due to cpu == this_cpu + * + * task_rq_unlock(rq, &flags); + * this_rq = task_rq_lock(current, &flags); + */ + this_rq = rq; + } else { + this_rq = cpu_rq(this_cpu); + + /* + * Not the local CPU - must adjust timestamp. This should + * get optimised away in the !CONFIG_SMP case. + */ + p->timestamp = (p->timestamp - this_rq->timestamp_last_tick) + + rq->timestamp_last_tick; __activate_task(p, rq); - else { - p->prio = current->prio; - list_add_tail(&p->run_list, ¤t->run_list); - p->array = current->array; - p->array->nr_active++; - rq->nr_running++; + if (TASK_PREEMPTS_CURR(p, rq)) + resched_task(rq->curr); + + schedstat_inc(rq, wunt_moved); + /* + * Parent and child are on different CPUs, now get the + * parent runqueue to update the parent's ->sleep_avg: + */ + task_rq_unlock(rq, &flags); + this_rq = task_rq_lock(current, &flags); } - task_rq_unlock(rq, &flags); + current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) * + PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS); + task_rq_unlock(this_rq, &flags); } /* @@ -981,18 +1452,16 @@ void fastcall sched_exit(task_t * p) unsigned long flags; runqueue_t *rq; - local_irq_save(flags); - if (p->first_time_slice) { - p->parent->time_slice += p->time_slice; - if (unlikely(p->parent->time_slice > MAX_TIMESLICE)) - p->parent->time_slice = MAX_TIMESLICE; - } - local_irq_restore(flags); /* * If the child was a (relative-) CPU hog then decrease * the sleep_avg of the parent as well. */ rq = task_rq_lock(p->parent, &flags); + if (p->first_time_slice) { + p->parent->time_slice += p->time_slice; + if (unlikely(p->parent->time_slice > task_timeslice(p))) + p->parent->time_slice = task_timeslice(p); + } if (p->sleep_avg < p->parent->sleep_avg) p->parent->sleep_avg = p->parent->sleep_avg / (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg / @@ -1092,7 +1561,7 @@ unsigned long nr_running(void) { unsigned long i, sum = 0; - for_each_cpu(i) + for_each_online_cpu(i) sum += cpu_rq(i)->nr_running; return sum; @@ -1128,6 +1597,8 @@ unsigned long nr_iowait(void) return sum; } +#ifdef CONFIG_SMP + /* * double_rq_lock - safely lock two runqueues * @@ -1162,14 +1633,20 @@ static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) spin_unlock(&rq2->lock); } -enum idle_type +/* + * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 
+ */ +static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) { - IDLE, - NOT_IDLE, - NEWLY_IDLE, -}; - -#ifdef CONFIG_SMP + if (unlikely(!spin_trylock(&busiest->lock))) { + if (busiest < this_rq) { + spin_unlock(&this_rq->lock); + spin_lock(&busiest->lock); + spin_lock(&this_rq->lock); + } else + spin_lock(&busiest->lock); + } +} /* * find_idlest_cpu - find the least busy runqueue. @@ -1217,89 +1694,6 @@ static int find_idlest_cpu(struct task_struct *p, int this_cpu, return this_cpu; } -/* - * wake_up_forked_thread - wake up a freshly forked thread. - * - * This function will do some initial scheduler statistics housekeeping - * that must be done for every newly created context, and it also does - * runqueue balancing. - */ -void fastcall wake_up_forked_thread(task_t * p) -{ - unsigned long flags; - int this_cpu = get_cpu(), cpu; - struct sched_domain *tmp, *sd = NULL; - runqueue_t *this_rq = cpu_rq(this_cpu), *rq; - - /* - * Find the largest domain that this CPU is part of that - * is willing to balance on clone: - */ - for_each_domain(this_cpu, tmp) - if (tmp->flags & SD_BALANCE_CLONE) - sd = tmp; - if (sd) - cpu = find_idlest_cpu(p, this_cpu, sd); - else - cpu = this_cpu; - - local_irq_save(flags); -lock_again: - rq = cpu_rq(cpu); - double_rq_lock(this_rq, rq); - - BUG_ON(p->state != TASK_RUNNING); - - /* - * We did find_idlest_cpu() unlocked, so in theory - * the mask could have changed - just dont migrate - * in this case: - */ - if (unlikely(!cpu_isset(cpu, p->cpus_allowed))) { - cpu = this_cpu; - double_rq_unlock(this_rq, rq); - goto lock_again; - } - /* - * We decrease the sleep average of forking parents - * and children as well, to keep max-interactive tasks - * from forking tasks that are max-interactive. - */ - current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) * - PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS); - - p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) * - CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS); - - p->interactive_credit = 0; - - p->prio = effective_prio(p); - set_task_cpu(p, cpu); - - if (cpu == this_cpu) { - if (unlikely(!current->array)) - __activate_task(p, rq); - else { - p->prio = current->prio; - list_add_tail(&p->run_list, ¤t->run_list); - p->array = current->array; - p->array->nr_active++; - rq->nr_running++; - } - } else { - /* Not the local CPU - must adjust timestamp */ - p->timestamp = (p->timestamp - this_rq->timestamp_last_tick) - + rq->timestamp_last_tick; - __activate_task(p, rq); - if (TASK_PREEMPTS_CURR(p, rq)) - resched_task(rq->curr); - } - - double_rq_unlock(this_rq, rq); - local_irq_restore(flags); - put_cpu(); -} - /* * If dest_cpu is allowed for this process, migrate the task to it. * This is accomplished by forcing the cpu_allowed mask to only @@ -1317,6 +1711,7 @@ static void sched_migrate_task(task_t *p, int dest_cpu) || unlikely(cpu_is_offline(dest_cpu))) goto out; + schedstat_inc(rq, smt_cnt); /* force the process onto the specified CPU */ if (migrate_task(p, dest_cpu, &req)) { /* Need to wait for migration thread (might exit: take ref). */ @@ -1333,17 +1728,18 @@ out: } /* - * sched_balance_exec(): find the highest-level, exec-balance-capable + * sched_exec(): find the highest-level, exec-balance-capable * domain and try to migrate the task to the least loaded CPU. * * execve() is a valuable balancing opportunity, because at this point * the task has the smallest effective memory and cache footprint. 
*/ -void sched_balance_exec(void) +void sched_exec(void) { struct sched_domain *tmp, *sd = NULL; int new_cpu, this_cpu = get_cpu(); + schedstat_inc(this_rq(), sbe_cnt); /* Prefer the current CPU if there's only this task running */ if (this_rq()->nr_running <= 1) goto out; @@ -1353,8 +1749,10 @@ void sched_balance_exec(void) sd = tmp; if (sd) { + schedstat_inc(sd, sbe_attempts); new_cpu = find_idlest_cpu(current, this_cpu, sd); if (new_cpu != this_cpu) { + schedstat_inc(sd, sbe_pushed); put_cpu(); sched_migrate_task(current, new_cpu); return; @@ -1364,21 +1762,6 @@ out: put_cpu(); } -/* - * double_lock_balance - lock the busiest runqueue, this_rq is locked already. - */ -static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) -{ - if (unlikely(!spin_trylock(&busiest->lock))) { - if (busiest < this_rq) { - spin_unlock(&this_rq->lock); - spin_lock(&busiest->lock); - spin_lock(&this_rq->lock); - } else - spin_lock(&busiest->lock); - } -} - /* * pull_task - move a task from a remote runqueue to the local runqueue. * Both runqueues must be locked. @@ -1493,6 +1876,15 @@ skip_queue: idx++; goto skip_bitmap; } + + /* + * Right now, this is the only place pull_task() is called, + * so we can safely collect pull_task() stats here rather than + * inside pull_task(). + */ + schedstat_inc(this_rq, pt_gained[idle]); + schedstat_inc(busiest, pt_lost[idle]); + pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); pulled++; @@ -1687,14 +2079,20 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, int nr_moved; spin_lock(&this_rq->lock); + schedstat_inc(sd, lb_cnt[idle]); group = find_busiest_group(sd, this_cpu, &imbalance, idle); - if (!group) + if (!group) { + schedstat_inc(sd, lb_nobusyg[idle]); goto out_balanced; + } busiest = find_busiest_queue(group); - if (!busiest) + if (!busiest) { + schedstat_inc(sd, lb_nobusyq[idle]); goto out_balanced; + } + /* * This should be "impossible", but since load * balancing is inherently racy and statistical, @@ -1705,6 +2103,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, goto out_balanced; } + schedstat_add(sd, lb_imbalance[idle], imbalance); + nr_moved = 0; if (busiest->nr_running > 1) { /* @@ -1721,6 +2121,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, spin_unlock(&this_rq->lock); if (!nr_moved) { + schedstat_inc(sd, lb_failed[idle]); sd->nr_balance_failed++; if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { @@ -1775,19 +2176,27 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, unsigned long imbalance; int nr_moved = 0; + schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE); - if (!group) + if (!group) { + schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]); goto out; + } busiest = find_busiest_queue(group); - if (!busiest || busiest == this_rq) + if (!busiest || busiest == this_rq) { + schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); goto out; + } /* Attempt to move tasks */ double_lock_balance(this_rq, busiest); + schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance); nr_moved = move_tasks(this_rq, this_cpu, busiest, imbalance, sd, NEWLY_IDLE); + if (!nr_moved) + schedstat_inc(sd, lb_failed[NEWLY_IDLE]); spin_unlock(&busiest->lock); @@ -1827,40 +2236,39 @@ static void active_load_balance(runqueue_t *busiest, int busiest_cpu) struct sched_group *group, *busy_group; int i; + schedstat_inc(busiest, alb_cnt); if (busiest->nr_running <= 1) return; for_each_domain(busiest_cpu, sd) if (cpu_isset(busiest->push_cpu, sd->span)) break; 
- if (!sd) { - WARN_ON(1); + if (!sd) return; - } - group = sd->groups; + group = sd->groups; while (!cpu_isset(busiest_cpu, group->cpumask)) - group = group->next; - busy_group = group; + group = group->next; + busy_group = group; - group = sd->groups; - do { + group = sd->groups; + do { cpumask_t tmp; runqueue_t *rq; int push_cpu = 0; - if (group == busy_group) - goto next_group; + if (group == busy_group) + goto next_group; cpus_and(tmp, group->cpumask, cpu_online_map); if (!cpus_weight(tmp)) goto next_group; - for_each_cpu_mask(i, tmp) { + for_each_cpu_mask(i, tmp) { if (!idle_cpu(i)) goto next_group; - push_cpu = i; - } + push_cpu = i; + } rq = cpu_rq(push_cpu); @@ -1873,7 +2281,12 @@ static void active_load_balance(runqueue_t *busiest, int busiest_cpu) if (unlikely(busiest == rq)) goto next_group; double_lock_balance(busiest, rq); - move_tasks(rq, push_cpu, busiest, 1, sd, IDLE); + if (move_tasks(rq, push_cpu, busiest, 1, sd, IDLE)) { + schedstat_inc(busiest, alb_lost); + schedstat_inc(rq, alb_gained); + } else { + schedstat_inc(busiest, alb_failed); + } spin_unlock(&rq->lock); next_group: group = group->next; @@ -1945,17 +2358,20 @@ static inline void idle_balance(int cpu, runqueue_t *rq) static inline int wake_priority_sleeper(runqueue_t *rq) { + int ret = 0; #ifdef CONFIG_SCHED_SMT + spin_lock(&rq->lock); /* * If an SMT sibling task has been put to sleep for priority * reasons reschedule the idle task to see if it can now run. */ if (rq->nr_running) { resched_task(rq->idle); - return 1; + ret = 1; } + spin_unlock(&rq->lock); #endif - return 0; + return ret; } DEFINE_PER_CPU(struct kernel_stat, kstat); @@ -1991,12 +2407,18 @@ void scheduler_tick(int user_ticks, int sys_ticks) struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; runqueue_t *rq = this_rq(); task_t *p = current; + struct vx_info *vxi = p->vx_info; rq->timestamp_last_tick = sched_clock(); if (rcu_pending(cpu)) rcu_check_callbacks(cpu, user_ticks); + if (vxi) { + vxi->sched.cpu[cpu].user_ticks += user_ticks; + vxi->sched.cpu[cpu].sys_ticks += sys_ticks; + } + /* note: this timer irq context must be accounted for as well */ if (hardirq_count() - HARDIRQ_OFFSET) { cpustat->irq += sys_ticks; @@ -2007,15 +2429,20 @@ void scheduler_tick(int user_ticks, int sys_ticks) } if (p == rq->idle) { - if (!--rq->idle_tokens && !list_empty(&rq->hold_queue)) - set_need_resched(); - if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait += sys_ticks; + // vx_cpustat_acc(vxi, iowait, cpu, cpustat, sys_ticks); else cpustat->idle += sys_ticks; + // vx_cpustat_acc(vxi, idle, cpu, cpustat, sys_ticks); + if (wake_priority_sleeper(rq)) goto out; + +#ifdef CONFIG_VSERVER_HARDCPU_IDLE + if (!--rq->idle_tokens && !list_empty(&rq->hold_queue)) + set_need_resched(); +#endif rebalance_tick(cpu, rq, IDLE); return; } @@ -2038,7 +2465,7 @@ void scheduler_tick(int user_ticks, int sys_ticks) * timeslice. This makes it possible for interactive tasks * to use up their timeslices at their highest priority levels. */ - if (unlikely(rt_task(p))) { + if (rt_task(p)) { /* * RR tasks need a special form of timeslice management. * FIFO tasks have no timeslices. 
@@ -2104,23 +2531,34 @@ out: } #ifdef CONFIG_SCHED_SMT -static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq) +static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) { - int i; - struct sched_domain *sd = rq->sd; + struct sched_domain *sd = this_rq->sd; cpumask_t sibling_map; + int i; if (!(sd->flags & SD_SHARE_CPUPOWER)) return; + /* + * Unlock the current runqueue because we have to lock in + * CPU order to avoid deadlocks. Caller knows that we might + * unlock. We keep IRQs disabled. + */ + spin_unlock(&this_rq->lock); + cpus_and(sibling_map, sd->span, cpu_online_map); - for_each_cpu_mask(i, sibling_map) { - runqueue_t *smt_rq; - if (i == cpu) - continue; + for_each_cpu_mask(i, sibling_map) + spin_lock(&cpu_rq(i)->lock); + /* + * We clear this CPU from the mask. This both simplifies the + * inner loop and keps this_rq locked when we exit: + */ + cpu_clear(this_cpu, sibling_map); - smt_rq = cpu_rq(i); + for_each_cpu_mask(i, sibling_map) { + runqueue_t *smt_rq = cpu_rq(i); /* * If an SMT sibling task is sleeping due to priority @@ -2129,27 +2567,53 @@ static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq) if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running) resched_task(smt_rq->idle); } + + for_each_cpu_mask(i, sibling_map) + spin_unlock(&cpu_rq(i)->lock); + /* + * We exit with this_cpu's rq still held and IRQs + * still disabled: + */ } -static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p) +static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) { - struct sched_domain *sd = rq->sd; + struct sched_domain *sd = this_rq->sd; cpumask_t sibling_map; + prio_array_t *array; int ret = 0, i; + task_t *p; if (!(sd->flags & SD_SHARE_CPUPOWER)) return 0; + /* + * The same locking rules and details apply as for + * wake_sleeping_dependent(): + */ + spin_unlock(&this_rq->lock); cpus_and(sibling_map, sd->span, cpu_online_map); - for_each_cpu_mask(i, sibling_map) { - runqueue_t *smt_rq; - task_t *smt_curr; + for_each_cpu_mask(i, sibling_map) + spin_lock(&cpu_rq(i)->lock); + cpu_clear(this_cpu, sibling_map); - if (i == cpu) - continue; + /* + * Establish next task to be run - it might have gone away because + * we released the runqueue lock above: + */ + if (!this_rq->nr_running) + goto out_unlock; + array = this_rq->active; + if (!array->nr_active) + array = this_rq->expired; + BUG_ON(!array->nr_active); + + p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next, + task_t, run_list); - smt_rq = cpu_rq(i); - smt_curr = smt_rq->curr; + for_each_cpu_mask(i, sibling_map) { + runqueue_t *smt_rq = cpu_rq(i); + task_t *smt_curr = smt_rq->curr; /* * If a user task with lower static priority than the @@ -2175,14 +2639,17 @@ static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p) (smt_curr == smt_rq->idle && smt_rq->nr_running)) resched_task(smt_curr); } +out_unlock: + for_each_cpu_mask(i, sibling_map) + spin_unlock(&cpu_rq(i)->lock); return ret; } #else -static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq) +static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) { } -static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p) +static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) { return 0; } @@ -2200,7 +2667,7 @@ asmlinkage void __sched schedule(void) struct list_head *queue; unsigned long long now; unsigned long run_time; -#ifdef CONFIG_VSERVER_HARDCPU +#ifdef CONFIG_VSERVER_HARDCPU struct vx_info *vxi; int maxidle = -HZ; 
#endif @@ -2223,7 +2690,17 @@ need_resched: prev = current; rq = this_rq(); + /* + * The idle thread is not allowed to schedule! + * Remove this check after it has been exercised a bit. + */ + if (unlikely(current == rq->idle) && current->state != TASK_RUNNING) { + printk(KERN_ERR "bad: scheduling from the idle thread!\n"); + dump_stack(); + } + release_kernel_lock(prev); + schedstat_inc(rq, sched_cnt); now = sched_clock(); if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG)) run_time = now - prev->timestamp; @@ -2254,7 +2731,7 @@ need_resched: deactivate_task(prev, rq); } -#ifdef CONFIG_VSERVER_HARDCPU +#ifdef CONFIG_VSERVER_HARDCPU if (!list_empty(&rq->hold_queue)) { struct list_head *l, *n; int ret; @@ -2272,28 +2749,56 @@ need_resched: if (ret > 0) { list_del(&next->run_list); next->state &= ~TASK_ONHOLD; - recalc_task_prio(next, now); - __activate_task(next, rq); - // printk("··· unhold %p\n", next); + // one less waiting + vx_onhold_dec(vxi); + array = rq->expired; + next->prio = MAX_PRIO-1; + enqueue_task(next, array); + rq->nr_running++; + if (next->static_prio < rq->best_expired_prio) + rq->best_expired_prio = next->static_prio; + + // printk("··· %8lu unhold %p [%d]\n", jiffies, next, next->prio); break; } if ((ret < 0) && (maxidle < ret)) maxidle = ret; - } + } } rq->idle_tokens = -maxidle; pick_next: #endif + cpu = smp_processor_id(); if (unlikely(!rq->nr_running)) { +go_idle: idle_balance(cpu, rq); if (!rq->nr_running) { next = rq->idle; rq->expired_timestamp = 0; wake_sleeping_dependent(cpu, rq); + /* + * wake_sleeping_dependent() might have released + * the runqueue, so break out if we got new + * tasks meanwhile: + */ + if (!rq->nr_running) + goto switch_tasks; + } + } else { + if (dependent_sleeper(cpu, rq)) { + schedstat_inc(rq, sched_goidle); + next = rq->idle; goto switch_tasks; } + /* + * dependent_sleeper() releases and reacquires the runqueue + * lock, hence go into the idle loop if the rq went + * empty meanwhile: + */ + if (unlikely(!rq->nr_running)) + goto go_idle; } array = rq->active; @@ -2301,34 +2806,34 @@ pick_next: /* * Switch the active and expired arrays. 
*/ + schedstat_inc(rq, sched_switch); rq->active = rq->expired; rq->expired = array; array = rq->active; rq->expired_timestamp = 0; rq->best_expired_prio = MAX_PRIO; - } + } else + schedstat_inc(rq, sched_noswitch); idx = sched_find_first_bit(array->bitmap); queue = array->queue + idx; next = list_entry(queue->next, task_t, run_list); - if (dependent_sleeper(cpu, rq, next)) { - next = rq->idle; - goto switch_tasks; - } - -#ifdef CONFIG_VSERVER_HARDCPU +#ifdef CONFIG_VSERVER_HARDCPU vxi = next->vx_info; - if (vxi && __vx_flags(vxi->vx_flags, - VXF_SCHED_PAUSE|VXF_SCHED_HARD, 0)) { + if (vx_info_flags(vxi, VXF_SCHED_PAUSE|VXF_SCHED_HARD, 0)) { int ret = vx_tokens_recalc(vxi); if (unlikely(ret <= 0)) { if (ret && (rq->idle_tokens > -ret)) rq->idle_tokens = -ret; - deactivate_task(next, rq); + __deactivate_task(next, rq); + recalc_task_prio(next, now); + // a new one on hold + vx_onhold_inc(vxi); + next->state |= TASK_ONHOLD; list_add_tail(&next->run_list, &rq->hold_queue); - next->state |= TASK_ONHOLD; + //printk("··· %8lu hold %p [%d]\n", jiffies, next, next->prio); goto pick_next; } } @@ -2349,7 +2854,7 @@ pick_next: switch_tasks: prefetch(next); clear_tsk_need_resched(prev); - RCU_qsctr(task_cpu(prev))++; + rcu_qsctr_inc(task_cpu(prev)); prev->sleep_avg -= run_time; if ((long)prev->sleep_avg <= 0) { @@ -2357,8 +2862,9 @@ switch_tasks: if (!(HIGH_CREDIT(prev) || LOW_CREDIT(prev))) prev->interactive_credit--; } - prev->timestamp = now; + prev->timestamp = prev->last_ran = now; + sched_info_switch(prev, next); if (likely(prev != next)) { next->timestamp = now; rq->nr_switches++; @@ -2375,7 +2881,7 @@ switch_tasks: reacquire_kernel_lock(current); preempt_enable_no_resched(); - if (test_thread_flag(TIF_NEED_RESCHED)) + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } @@ -2688,6 +3194,8 @@ asmlinkage long sys_nice(int increment) * and we have a single winner. 
*/ if (increment < 0) { + if (vx_flags(VXF_IGNEG_NICE, 0)) + return 0; if (!capable(CAP_SYS_NICE)) return -EPERM; if (increment < -40) @@ -2813,6 +3321,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param) policy != SCHED_NORMAL) goto out_unlock; } + profile_hit(SCHED_PROFILING, __builtin_return_address(0)); /* * Valid priorities for SCHED_FIFO and SCHED_RR are @@ -2843,6 +3352,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param) oldprio = p->prio; __setscheduler(p, policy, lp.sched_priority); if (array) { + vx_activate_task(p); __activate_task(p, task_rq(p)); /* * Reschedule if we are currently running on this runqueue and @@ -2953,24 +3463,10 @@ out_unlock: return retval; } -/** - * sys_sched_setaffinity - set the cpu affinity of a process - * @pid: pid of the process - * @len: length in bytes of the bitmask pointed to by user_mask_ptr - * @user_mask_ptr: user-space pointer to the new cpu mask - */ -asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, - unsigned long __user *user_mask_ptr) +long sched_setaffinity(pid_t pid, cpumask_t new_mask) { - cpumask_t new_mask; - int retval; task_t *p; - - if (len < sizeof(new_mask)) - return -EINVAL; - - if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) - return -EFAULT; + int retval; lock_cpu_hotplug(); read_lock(&tasklist_lock); @@ -3003,6 +3499,36 @@ out_unlock: return retval; } +static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, + cpumask_t *new_mask) +{ + if (len < sizeof(cpumask_t)) { + memset(new_mask, 0, sizeof(cpumask_t)); + } else if (len > sizeof(cpumask_t)) { + len = sizeof(cpumask_t); + } + return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; +} + +/** + * sys_sched_setaffinity - set the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to the new cpu mask + */ +asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr) +{ + cpumask_t new_mask; + int retval; + + retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); + if (retval) + return retval; + + return sched_setaffinity(pid, new_mask); +} + /* * Represents all cpu's present in the system * In systems capable of hotplug, this map could dynamically grow @@ -3018,24 +3544,11 @@ cpumask_t cpu_online_map = CPU_MASK_ALL; cpumask_t cpu_possible_map = CPU_MASK_ALL; #endif -/** - * sys_sched_getaffinity - get the cpu affinity of a process - * @pid: pid of the process - * @len: length in bytes of the bitmask pointed to by user_mask_ptr - * @user_mask_ptr: user-space pointer to hold the current cpu mask - */ -asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, - unsigned long __user *user_mask_ptr) +long sched_getaffinity(pid_t pid, cpumask_t *mask) { - unsigned int real_len; - cpumask_t mask; int retval; task_t *p; - real_len = sizeof(mask); - if (len < real_len) - return -EINVAL; - lock_cpu_hotplug(); read_lock(&tasklist_lock); @@ -3045,16 +3558,40 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, goto out_unlock; retval = 0; - cpus_and(mask, p->cpus_allowed, cpu_possible_map); + cpus_and(*mask, p->cpus_allowed, cpu_possible_map); out_unlock: read_unlock(&tasklist_lock); unlock_cpu_hotplug(); if (retval) return retval; - if (copy_to_user(user_mask_ptr, &mask, real_len)) + + return 0; +} + +/** + * sys_sched_getaffinity - get the cpu affinity of a process + * 
@pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to hold the current cpu mask + */ +asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr) +{ + int ret; + cpumask_t mask; + + if (len < sizeof(cpumask_t)) + return -EINVAL; + + ret = sched_getaffinity(pid, &mask); + if (ret < 0) + return ret; + + if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) return -EFAULT; - return real_len; + + return sizeof(cpumask_t); } /** @@ -3070,6 +3607,7 @@ asmlinkage long sys_sched_yield(void) prio_array_t *array = current->array; prio_array_t *target = rq->expired; + schedstat_inc(rq, yld_cnt); /* * We implement yielding by moving the task into the expired * queue. @@ -3077,9 +3615,16 @@ asmlinkage long sys_sched_yield(void) * (special rule: RT tasks will just roundrobin in the active * array.) */ - if (unlikely(rt_task(current))) + if (rt_task(current)) target = rq->active; + if (current->array->nr_active == 1) { + schedstat_inc(rq, yld_act_empty); + if (!rq->expired->nr_active) + schedstat_inc(rq, yld_both_empty); + } else if (!rq->expired->nr_active) + schedstat_inc(rq, yld_exp_empty); + dequeue_task(current, array); enqueue_task(current, target); @@ -3253,7 +3798,7 @@ static void show_task(task_t * p) task_t *relative; unsigned state; unsigned long free = 0; - static const char *stat_nam[] = { "R", "S", "D", "T", "Z", "W" }; + static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; printk("%-13.13s ", p->comm); state = p->state ? __ffs(p->state) + 1 : 0; @@ -3330,21 +3875,20 @@ void show_state(void) void __devinit init_idle(task_t *idle, int cpu) { - runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle)); + runqueue_t *rq = cpu_rq(cpu); unsigned long flags; - local_irq_save(flags); - double_rq_lock(idle_rq, rq); - - idle_rq->curr = idle_rq->idle = idle; - deactivate_task(idle, rq); + idle->sleep_avg = 0; + idle->interactive_credit = 0; idle->array = NULL; idle->prio = MAX_PRIO; idle->state = TASK_RUNNING; set_task_cpu(idle, cpu); - double_rq_unlock(idle_rq, rq); + + spin_lock_irqsave(&rq->lock, flags); + rq->curr = rq->idle = idle; set_tsk_need_resched(idle); - local_irq_restore(flags); + spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ #ifdef CONFIG_PREEMPT @@ -3426,7 +3970,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed); * Move (not current) task off this cpu, onto dest cpu. We're doing * this because either it can't run here any more (set_cpus_allowed() * away from this CPU, or CPU going down), or because we're - * attempting to rebalance this task on exec (sched_balance_exec). + * attempting to rebalance this task on exec (sched_exec). * * So we race with normal scheduler movements, but that's OK, as long * as the task is no longer on this CPU. @@ -3438,7 +3982,7 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) if (unlikely(cpu_is_offline(dest_cpu))) return; - rq_src = cpu_rq(src_cpu); + rq_src = cpu_rq(src_cpu); rq_dest = cpu_rq(dest_cpu); double_rq_lock(rq_src, rq_dest); @@ -3543,49 +4087,52 @@ wait_to_die: } #ifdef CONFIG_HOTPLUG_CPU -/* migrate_all_tasks - function to migrate all tasks from the dead cpu. */ -static void migrate_all_tasks(int src_cpu) +/* Figure out where task on dead CPU should go, use force if neccessary. 
*/ +static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) { - struct task_struct *tsk, *t; int dest_cpu; - unsigned int node; + cpumask_t mask; - write_lock_irq(&tasklist_lock); + /* On same node? */ + mask = node_to_cpumask(cpu_to_node(dead_cpu)); + cpus_and(mask, mask, tsk->cpus_allowed); + dest_cpu = any_online_cpu(mask); - /* watch out for per node tasks, let's stay on this node */ - node = cpu_to_node(src_cpu); + /* On any allowed CPU? */ + if (dest_cpu == NR_CPUS) + dest_cpu = any_online_cpu(tsk->cpus_allowed); + + /* No more Mr. Nice Guy. */ + if (dest_cpu == NR_CPUS) { + cpus_setall(tsk->cpus_allowed); + dest_cpu = any_online_cpu(tsk->cpus_allowed); + + /* + * Don't tell them about moving exiting tasks or + * kernel threads (both mm NULL), since they never + * leave kernel. + */ + if (tsk->mm && printk_ratelimit()) + printk(KERN_INFO "process %d (%s) no " + "longer affine to cpu%d\n", + tsk->pid, tsk->comm, dead_cpu); + } + __migrate_task(tsk, dead_cpu, dest_cpu); +} + +/* Run through task list and migrate tasks from the dead cpu. */ +static void migrate_live_tasks(int src_cpu) +{ + struct task_struct *tsk, *t; + + write_lock_irq(&tasklist_lock); do_each_thread(t, tsk) { - cpumask_t mask; if (tsk == current) continue; - if (task_cpu(tsk) != src_cpu) - continue; - - /* Figure out where this task should go (attempting to - * keep it on-node), and check if it can be migrated - * as-is. NOTE that kernel threads bound to more than - * one online cpu will be migrated. */ - mask = node_to_cpumask(node); - cpus_and(mask, mask, tsk->cpus_allowed); - dest_cpu = any_online_cpu(mask); - if (dest_cpu == NR_CPUS) - dest_cpu = any_online_cpu(tsk->cpus_allowed); - if (dest_cpu == NR_CPUS) { - cpus_setall(tsk->cpus_allowed); - dest_cpu = any_online_cpu(tsk->cpus_allowed); - - /* Don't tell them about moving exiting tasks - or kernel threads (both mm NULL), since - they never leave kernel. */ - if (tsk->mm && printk_ratelimit()) - printk(KERN_INFO "process %d (%s) no " - "longer affine to cpu%d\n", - tsk->pid, tsk->comm, src_cpu); - } - - __migrate_task(tsk, src_cpu, dest_cpu); + if (task_cpu(tsk) == src_cpu) + move_task_off_dead_cpu(src_cpu, tsk); } while_each_thread(t, tsk); write_unlock_irq(&tasklist_lock); @@ -3616,6 +4163,47 @@ void sched_idle_next(void) spin_unlock_irqrestore(&rq->lock, flags); } + +static void migrate_dead(unsigned int dead_cpu, task_t *tsk) +{ + struct runqueue *rq = cpu_rq(dead_cpu); + + /* Must be exiting, otherwise would be on tasklist. */ + BUG_ON(tsk->state != TASK_ZOMBIE && tsk->state != TASK_DEAD); + + /* Cannot have done final schedule yet: would have vanished. */ + BUG_ON(tsk->flags & PF_DEAD); + + get_task_struct(tsk); + + /* + * Drop lock around migration; if someone else moves it, + * that's OK. No task can be added to this CPU, so iteration is + * fine. + */ + spin_unlock_irq(&rq->lock); + move_task_off_dead_cpu(dead_cpu, tsk); + spin_lock_irq(&rq->lock); + + put_task_struct(tsk); +} + +/* release_task() removes task from tasklist, so we won't find dead tasks. 
*/ +static void migrate_dead_tasks(unsigned int dead_cpu) +{ + unsigned arr, i; + struct runqueue *rq = cpu_rq(dead_cpu); + + for (arr = 0; arr < 2; arr++) { + for (i = 0; i < MAX_PRIO; i++) { + struct list_head *list = &rq->arrays[arr].queue[i]; + while (!list_empty(list)) + migrate_dead(dead_cpu, + list_entry(list->next, task_t, + run_list)); + } + } +} #endif /* CONFIG_HOTPLUG_CPU */ /* @@ -3655,7 +4243,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, cpu_rq(cpu)->migration_thread = NULL; break; case CPU_DEAD: - migrate_all_tasks(cpu); + migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); rq->migration_thread = NULL; @@ -3664,8 +4252,9 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, deactivate_task(rq->idle, rq); rq->idle->static_prio = MAX_PRIO; __setscheduler(rq->idle, SCHED_NORMAL, 0); + migrate_dead_tasks(cpu); task_rq_unlock(rq, &flags); - BUG_ON(rq->nr_running != 0); + BUG_ON(rq->nr_running != 0); /* No need to migrate the tasks: it was best-effort if * they didn't do lock_cpu_hotplug(). Just wake up @@ -3680,7 +4269,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, complete(&req->done); } spin_unlock_irq(&rq->lock); - break; + break; #endif } return NOTIFY_OK; @@ -3722,7 +4311,7 @@ EXPORT_SYMBOL(kernel_flag); #ifdef CONFIG_SMP /* Attach the domain 'sd' to 'cpu' as its base domain */ -void cpu_attach_domain(struct sched_domain *sd, int cpu) +static void cpu_attach_domain(struct sched_domain *sd, int cpu) { migration_req_t req; unsigned long flags; @@ -3753,124 +4342,343 @@ void cpu_attach_domain(struct sched_domain *sd, int cpu) unlock_cpu_hotplug(); } -#ifdef ARCH_HAS_SCHED_DOMAIN -extern void __init arch_init_sched_domains(void); -#else -static struct sched_group sched_group_cpus[NR_CPUS]; +/* + * To enable disjoint top-level NUMA domains, define SD_NODES_PER_DOMAIN + * in arch code. That defines the number of nearby nodes in a node's top + * level scheduling domain. + */ +#if defined(CONFIG_NUMA) && defined(SD_NODES_PER_DOMAIN) +/** + * find_next_best_node - find the next node to include in a sched_domain + * @node: node whose sched_domain we're building + * @used_nodes: nodes already in the sched_domain + * + * Find the next node to include in a given scheduling domain. Simply + * finds the closest node not already in the @used_nodes map. + * + * Should use nodemask_t. + */ +static int __init find_next_best_node(int node, unsigned long *used_nodes) +{ + int i, n, val, min_val, best_node = 0; + + min_val = INT_MAX; + + for (i = 0; i < numnodes; i++) { + /* Start at @node */ + n = (node + i) % numnodes; + + /* Skip already used nodes */ + if (test_bit(n, used_nodes)) + continue; + + /* Simple min distance search */ + val = node_distance(node, i); + + if (val < min_val) { + min_val = val; + best_node = n; + } + } + + set_bit(best_node, used_nodes); + return best_node; +} + +/** + * sched_domain_node_span - get a cpumask for a node's sched_domain + * @node: node whose cpumask we're constructing + * @size: number of nodes to include in this span + * + * Given a node, construct a good cpumask for its sched_domain to span. It + * should be one that prevents unnecessary balancing, but also spreads tasks + * out optimally. 
+ */ +cpumask_t __init sched_domain_node_span(int node) +{ + int i; + cpumask_t span; + DECLARE_BITMAP(used_nodes, MAX_NUMNODES); + + cpus_clear(span); + bitmap_zero(used_nodes, MAX_NUMNODES); + + for (i = 0; i < SD_NODES_PER_DOMAIN; i++) { + int next_node = find_next_best_node(node, used_nodes); + cpumask_t nodemask; + + nodemask = node_to_cpumask(next_node); + cpus_or(span, span, nodemask); + } + + return span; +} +#else /* CONFIG_NUMA && SD_NODES_PER_DOMAIN */ +cpumask_t __init sched_domain_node_span(int node) +{ + return cpu_possible_map; +} +#endif /* CONFIG_NUMA && SD_NODES_PER_DOMAIN */ + +#ifdef CONFIG_SCHED_SMT static DEFINE_PER_CPU(struct sched_domain, cpu_domains); +static struct sched_group sched_group_cpus[NR_CPUS]; +__init static int cpu_to_cpu_group(int cpu) +{ + return cpu; +} +#endif + +static DEFINE_PER_CPU(struct sched_domain, phys_domains); +static struct sched_group sched_group_phys[NR_CPUS]; +__init static int cpu_to_phys_group(int cpu) +{ +#ifdef CONFIG_SCHED_SMT + return first_cpu(cpu_sibling_map[cpu]); +#else + return cpu; +#endif +} + #ifdef CONFIG_NUMA -static struct sched_group sched_group_nodes[MAX_NUMNODES]; + static DEFINE_PER_CPU(struct sched_domain, node_domains); -static void __init arch_init_sched_domains(void) +static struct sched_group sched_group_nodes[MAX_NUMNODES]; +__init static int cpu_to_node_group(int cpu) { - int i; - struct sched_group *first_node = NULL, *last_node = NULL; + return cpu_to_node(cpu); +} +#endif - /* Set up domains */ - for_each_cpu(i) { - int node = cpu_to_node(i); - cpumask_t nodemask = node_to_cpumask(node); - struct sched_domain *node_sd = &per_cpu(node_domains, i); - struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); - - *node_sd = SD_NODE_INIT; - node_sd->span = cpu_possible_map; - node_sd->groups = &sched_group_nodes[cpu_to_node(i)]; - - *cpu_sd = SD_CPU_INIT; - cpus_and(cpu_sd->span, nodemask, cpu_possible_map); - cpu_sd->groups = &sched_group_cpus[i]; - cpu_sd->parent = node_sd; - } +/* Groups for isolated scheduling domains */ +static struct sched_group sched_group_isolated[NR_CPUS]; - /* Set up groups */ - for (i = 0; i < MAX_NUMNODES; i++) { - cpumask_t tmp = node_to_cpumask(i); - cpumask_t nodemask; - struct sched_group *first_cpu = NULL, *last_cpu = NULL; - struct sched_group *node = &sched_group_nodes[i]; - int j; +/* cpus with isolated domains */ +cpumask_t __initdata cpu_isolated_map = CPU_MASK_NONE; - cpus_and(nodemask, tmp, cpu_possible_map); +__init static int cpu_to_isolated_group(int cpu) +{ + return cpu; +} - if (cpus_empty(nodemask)) - continue; +/* Setup the mask of cpus configured for isolated domains */ +static int __init isolated_cpu_setup(char *str) +{ + int ints[NR_CPUS], i; - node->cpumask = nodemask; - node->cpu_power = SCHED_LOAD_SCALE * cpus_weight(node->cpumask); + str = get_options(str, ARRAY_SIZE(ints), ints); + cpus_clear(cpu_isolated_map); + for (i = 1; i <= ints[0]; i++) + cpu_set(ints[i], cpu_isolated_map); + return 1; +} - for_each_cpu_mask(j, node->cpumask) { - struct sched_group *cpu = &sched_group_cpus[j]; +__setup ("isolcpus=", isolated_cpu_setup); - cpus_clear(cpu->cpumask); - cpu_set(j, cpu->cpumask); - cpu->cpu_power = SCHED_LOAD_SCALE; +/* + * init_sched_build_groups takes an array of groups, the cpumask we wish + * to span, and a pointer to a function which identifies what group a CPU + * belongs to. 
The return value of group_fn must be a valid index into the + * groups[] array, and must be >= 0 and < NR_CPUS (due to the fact that we + * keep track of groups covered with a cpumask_t). + * + * init_sched_build_groups will build a circular linked list of the groups + * covered by the given span, and will set each group's ->cpumask correctly, + * and ->cpu_power to 0. + */ +__init static void init_sched_build_groups(struct sched_group groups[], + cpumask_t span, int (*group_fn)(int cpu)) +{ + struct sched_group *first = NULL, *last = NULL; + cpumask_t covered = CPU_MASK_NONE; + int i; - if (!first_cpu) - first_cpu = cpu; - if (last_cpu) - last_cpu->next = cpu; - last_cpu = cpu; - } - last_cpu->next = first_cpu; + for_each_cpu_mask(i, span) { + int group = group_fn(i); + struct sched_group *sg = &groups[group]; + int j; - if (!first_node) - first_node = node; - if (last_node) - last_node->next = node; - last_node = node; - } - last_node->next = first_node; + if (cpu_isset(i, covered)) + continue; - mb(); - for_each_cpu(i) { - struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); - cpu_attach_domain(cpu_sd, i); + sg->cpumask = CPU_MASK_NONE; + sg->cpu_power = 0; + + for_each_cpu_mask(j, span) { + if (group_fn(j) != group) + continue; + + cpu_set(j, covered); + cpu_set(j, sg->cpumask); + } + if (!first) + first = sg; + if (last) + last->next = sg; + last = sg; } + last->next = first; } -#else /* !CONFIG_NUMA */ -static void __init arch_init_sched_domains(void) +__init static void arch_init_sched_domains(void) { int i; - struct sched_group *first_cpu = NULL, *last_cpu = NULL; + cpumask_t cpu_default_map; + + /* + * Setup mask for cpus without special case scheduling requirements. + * For now this just excludes isolated cpus, but could be used to + * exclude other special cases in the future. + */ + cpus_complement(cpu_default_map, cpu_isolated_map); + cpus_and(cpu_default_map, cpu_default_map, cpu_possible_map); /* Set up domains */ for_each_cpu(i) { - struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + int group; + struct sched_domain *sd = NULL, *p; + cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); + + cpus_and(nodemask, nodemask, cpu_default_map); + + /* + * Set up isolated domains. + * Unlike those of other cpus, the domains and groups are + * single level, and span a single cpu. 
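/*
 * Illustrative sketch (not part of the patch): the grouping pass done by
 * init_sched_build_groups() above, modelled in user-space C.  Every CPU
 * in the span is mapped to a group index by group_fn(); CPUs mapping to
 * the same index share one group, and the groups are chained into a
 * circular list.  "struct group", NCPUS, the bitmask-as-unsigned-int and
 * the sibling-pair group_fn() are simplified stand-ins for
 * struct sched_group, NR_CPUS, cpumask_t and e.g. cpu_to_cpu_group();
 * cpu_power handling is omitted.
 */
#include <stdio.h>

#define NCPUS 4

struct group {
	unsigned int cpumask;		/* bit i set => cpu i is a member */
	struct group *next;		/* circular list, as in the kernel */
};

/* Example policy: CPUs 0/1 and 2/3 are "siblings" sharing a group. */
static int group_fn(int cpu)
{
	return cpu / 2;
}

static void build_groups(struct group groups[], unsigned int span)
{
	struct group *first = NULL, *last = NULL;
	unsigned int covered = 0;
	int i, j;

	for (i = 0; i < NCPUS; i++) {
		struct group *sg = &groups[group_fn(i)];

		if (!(span & (1u << i)) || (covered & (1u << i)))
			continue;

		sg->cpumask = 0;
		for (j = 0; j < NCPUS; j++) {
			if (!(span & (1u << j)) || group_fn(j) != group_fn(i))
				continue;
			covered |= 1u << j;
			sg->cpumask |= 1u << j;
		}
		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	if (last)
		last->next = first;	/* close the circle */
}

int main(void)
{
	struct group groups[NCPUS] = { { 0 } };
	int i;

	build_groups(groups, 0xf);	/* span = all four CPUs */
	for (i = 0; i < NCPUS / 2; i++)
		printf("group %d: mask 0x%x\n", i, groups[i].cpumask);
	return 0;
}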
+ */ + if (cpu_isset(i, cpu_isolated_map)) { +#ifdef CONFIG_SCHED_SMT + sd = &per_cpu(cpu_domains, i); +#else + sd = &per_cpu(phys_domains, i); +#endif + group = cpu_to_isolated_group(i); + *sd = SD_CPU_INIT; + cpu_set(i, sd->span); + sd->balance_interval = INT_MAX; /* Don't balance */ + sd->flags = 0; /* Avoid WAKE_ */ + sd->groups = &sched_group_isolated[group]; + printk(KERN_INFO "Setting up cpu %d isolated.\n", i); + /* Single level, so continue with next cpu */ + continue; + } + +#ifdef CONFIG_NUMA + sd = &per_cpu(node_domains, i); + group = cpu_to_node_group(i); + *sd = SD_NODE_INIT; + /* FIXME: should be multilevel, in arch code */ + sd->span = sched_domain_node_span(i); + cpus_and(sd->span, sd->span, cpu_default_map); + sd->groups = &sched_group_nodes[group]; +#endif - *cpu_sd = SD_CPU_INIT; - cpu_sd->span = cpu_possible_map; - cpu_sd->groups = &sched_group_cpus[i]; + p = sd; + sd = &per_cpu(phys_domains, i); + group = cpu_to_phys_group(i); + *sd = SD_CPU_INIT; +#ifdef CONFIG_NUMA + sd->span = nodemask; +#else + sd->span = cpu_possible_map; +#endif + sd->parent = p; + sd->groups = &sched_group_phys[group]; + +#ifdef CONFIG_SCHED_SMT + p = sd; + sd = &per_cpu(cpu_domains, i); + group = cpu_to_cpu_group(i); + *sd = SD_SIBLING_INIT; + sd->span = cpu_sibling_map[i]; + cpus_and(sd->span, sd->span, cpu_default_map); + sd->parent = p; + sd->groups = &sched_group_cpus[group]; +#endif } - /* Set up CPU groups */ - for_each_cpu_mask(i, cpu_possible_map) { - struct sched_group *cpu = &sched_group_cpus[i]; +#ifdef CONFIG_SCHED_SMT + /* Set up CPU (sibling) groups */ + for_each_cpu(i) { + cpumask_t this_sibling_map = cpu_sibling_map[i]; + cpus_and(this_sibling_map, this_sibling_map, cpu_default_map); + if (i != first_cpu(this_sibling_map)) + continue; + + init_sched_build_groups(sched_group_cpus, this_sibling_map, + &cpu_to_cpu_group); + } +#endif + + /* Set up isolated groups */ + for_each_cpu_mask(i, cpu_isolated_map) { + cpumask_t mask; + cpus_clear(mask); + cpu_set(i, mask); + init_sched_build_groups(sched_group_isolated, mask, + &cpu_to_isolated_group); + } + +#ifdef CONFIG_NUMA + /* Set up physical groups */ + for (i = 0; i < MAX_NUMNODES; i++) { + cpumask_t nodemask = node_to_cpumask(i); + + cpus_and(nodemask, nodemask, cpu_default_map); + if (cpus_empty(nodemask)) + continue; + + init_sched_build_groups(sched_group_phys, nodemask, + &cpu_to_phys_group); + } +#else + init_sched_build_groups(sched_group_phys, cpu_possible_map, + &cpu_to_phys_group); +#endif + +#ifdef CONFIG_NUMA + /* Set up node groups */ + init_sched_build_groups(sched_group_nodes, cpu_default_map, + &cpu_to_node_group); +#endif + + /* Calculate CPU power for physical packages and nodes */ + for_each_cpu_mask(i, cpu_default_map) { + int power; + struct sched_domain *sd; +#ifdef CONFIG_SCHED_SMT + sd = &per_cpu(cpu_domains, i); + power = SCHED_LOAD_SCALE; + sd->groups->cpu_power = power; +#endif - cpus_clear(cpu->cpumask); - cpu_set(i, cpu->cpumask); - cpu->cpu_power = SCHED_LOAD_SCALE; + sd = &per_cpu(phys_domains, i); + power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * + (cpus_weight(sd->groups->cpumask)-1) / 10; + sd->groups->cpu_power = power; - if (!first_cpu) - first_cpu = cpu; - if (last_cpu) - last_cpu->next = cpu; - last_cpu = cpu; +#ifdef CONFIG_NUMA + if (i == first_cpu(sd->groups->cpumask)) { + /* Only add "power" once for each physical package. 
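/*
 * Illustrative sketch (not part of the patch): the per-CPU domain
 * hierarchy that arch_init_sched_domains() builds above, reduced to the
 * parent links.  An ordinary CPU gets an SMT sibling domain as its base,
 * with the physical-package domain as parent and the NUMA node domain
 * above that; a CPU on the isolcpus= list instead gets a single-level
 * domain whose balance_interval is pushed out so it is effectively never
 * balanced.  "struct domain", the interval values and cpu 3 being
 * isolated are arbitrary placeholders, not the SD_*_INIT settings.
 */
#include <stdio.h>
#include <limits.h>

struct domain {
	const char *name;
	int balance_interval;		/* ms between balance attempts */
	struct domain *parent;
};

static struct domain *build_hierarchy(int isolated, struct domain levels[3])
{
	if (isolated) {
		/* Single level, never balanced: mirrors the isolcpus= path */
		levels[0] = (struct domain){ "isolated", INT_MAX, NULL };
		return &levels[0];
	}
	levels[2] = (struct domain){ "node", 64, NULL };
	levels[1] = (struct domain){ "phys", 16, &levels[2] };
	levels[0] = (struct domain){ "smt",   2, &levels[1] };
	return &levels[0];		/* base domain attached to the cpu */
}

int main(void)
{
	struct domain levels[3];
	struct domain *sd;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		int isolated = (cpu == 3);	/* pretend "isolcpus=3" */

		printf("cpu%d:", cpu);
		for (sd = build_hierarchy(isolated, levels); sd;
		     sd = sd->parent)
			printf(" %s(%d)", sd->name, sd->balance_interval);
		printf("\n");
	}
	return 0;
}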
*/ + sd = &per_cpu(node_domains, i); + sd->groups->cpu_power += power; + } +#endif } - last_cpu->next = first_cpu; - mb(); /* domains were modified outside the lock */ + /* Attach the domains */ for_each_cpu(i) { - struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); - cpu_attach_domain(cpu_sd, i); + struct sched_domain *sd; +#ifdef CONFIG_SCHED_SMT + sd = &per_cpu(cpu_domains, i); +#else + sd = &per_cpu(phys_domains, i); +#endif + cpu_attach_domain(sd, i); } } -#endif /* CONFIG_NUMA */ -#endif /* ARCH_HAS_SCHED_DOMAIN */ - -#define SCHED_DOMAIN_DEBUG +#undef SCHED_DOMAIN_DEBUG #ifdef SCHED_DOMAIN_DEBUG void sched_domain_debug(void) { @@ -3965,8 +4773,9 @@ int in_sched_functions(unsigned long addr) { /* Linker adds these: start and end of __sched functions */ extern char __sched_text_start[], __sched_text_end[]; - return addr >= (unsigned long)__sched_text_start - && addr < (unsigned long)__sched_text_end; + return in_lock_functions(addr) || + (addr >= (unsigned long)__sched_text_start + && addr < (unsigned long)__sched_text_end); } void __init sched_init(void) @@ -4009,7 +4818,9 @@ void __init sched_init(void) rq->migration_thread = NULL; INIT_LIST_HEAD(&rq->migration_queue); #endif +#ifdef CONFIG_VSERVER_HARDCPU INIT_LIST_HEAD(&rq->hold_queue); +#endif atomic_set(&rq->nr_iowait, 0); for (j = 0; j < 2; j++) { @@ -4022,21 +4833,20 @@ void __init sched_init(void) __set_bit(MAX_PRIO, array->bitmap); } } - /* - * We have to do a little magic to get the first - * thread right in SMP mode. - */ - rq = this_rq(); - rq->curr = current; - rq->idle = current; - set_task_cpu(current, smp_processor_id()); - wake_up_forked_process(current); /* * The boot idle thread does lazy MMU switching as well: */ atomic_inc(&init_mm.mm_count); enter_lazy_tlb(&init_mm, current); + + /* + * Make us the idle thread. Technically, schedule() should not be + * called from this thread, however somewhere below it might be, + * but because we are the idle thread, we just pick up running again + * when this runqueue becomes "idle". + */ + init_idle(current, smp_processor_id()); } #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP @@ -4060,49 +4870,3 @@ void __might_sleep(char *file, int line) } EXPORT_SYMBOL(__might_sleep); #endif - - -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) -/* - * This could be a long-held lock. If another CPU holds it for a long time, - * and that CPU is not asked to reschedule then *this* CPU will spin on the - * lock for a long time, even if *this* CPU is asked to reschedule. - * - * So what we do here, in the slow (contended) path is to spin on the lock by - * hand while permitting preemption. - * - * Called inside preempt_disable(). - */ -void __sched __preempt_spin_lock(spinlock_t *lock) -{ - if (preempt_count() > 1) { - _raw_spin_lock(lock); - return; - } - do { - preempt_enable(); - while (spin_is_locked(lock)) - cpu_relax(); - preempt_disable(); - } while (!_raw_spin_trylock(lock)); -} - -EXPORT_SYMBOL(__preempt_spin_lock); - -void __sched __preempt_write_lock(rwlock_t *lock) -{ - if (preempt_count() > 1) { - _raw_write_lock(lock); - return; - } - - do { - preempt_enable(); - while (rwlock_is_locked(lock)) - cpu_relax(); - preempt_disable(); - } while (!_raw_write_trylock(lock)); -} - -EXPORT_SYMBOL(__preempt_write_lock); -#endif /* defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) */
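/*
 * Illustrative sketch (not part of the patch): the cpu_power arithmetic
 * used above for physical packages and NUMA nodes.  Each SMT sibling
 * group gets SCHED_LOAD_SCALE; a physical package gets SCHED_LOAD_SCALE
 * plus 10% extra per additional sibling, reflecting that an SMT sibling
 * is far from a full CPU; a node accumulates the power of each physical
 * package exactly once.  The two-package, two-sibling layout below is a
 * made-up example to show the numbers (140 per package, 280 per node).
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

static unsigned long phys_power(int siblings)
{
	return SCHED_LOAD_SCALE +
	       SCHED_LOAD_SCALE * (siblings - 1) / 10;
}

int main(void)
{
	int packages_per_node = 2, siblings_per_package = 2;
	unsigned long node_power = 0;
	int p;

	printf("sibling group power: %lu\n", SCHED_LOAD_SCALE);
	printf("package power (%d siblings): %lu\n",
	       siblings_per_package, phys_power(siblings_per_package));

	/* "Only add power once for each physical package" */
	for (p = 0; p < packages_per_node; p++)
		node_power += phys_power(siblings_per_package);
	printf("node power (%d packages): %lu\n",
	       packages_per_node, node_power);

	return 0;
}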