#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+/*
+ * NOTE(review): this is true when p's runqueue is the current CPU's
+ * runqueue, i.e. p is queued on this CPU -- not necessarily that p is
+ * the task currently executing (that would be task_rq(p)->curr == p).
+ * Confirm the intended semantics with the delay-accounting callers.
+ */
+#define task_is_running(p) (this_rq() == task_rq(p))
+
/*
* Default context-switch locking:
*/
clear_tsk_need_resched(prev);
rcu_qsctr_inc(task_cpu(prev));
+ add_delay_ts(prev, runcpu_total, prev->timestamp, now);
prev->sleep_avg -= run_time;
if ((long)prev->sleep_avg <= 0) {
prev->sleep_avg = 0;
sched_info_switch(prev, next);
if (likely(prev != next)) {
+ add_delay_ts(next, waitcpu_total, next->timestamp, now);
+ inc_delay(next, runs);
next->timestamp = now;
rq->nr_switches++;
rq->curr = next;
{
struct runqueue *rq = this_rq();
+ def_delay_var(dstart);
+ start_delay_set(dstart, PF_IOWAIT);
atomic_inc(&rq->nr_iowait);
schedule();
atomic_dec(&rq->nr_iowait);
+ add_io_delay(dstart);
}
EXPORT_SYMBOL(io_schedule);
{
struct runqueue *rq = this_rq();
long ret;
+ def_delay_var(dstart);
+ start_delay_set(dstart,PF_IOWAIT);
atomic_inc(&rq->nr_iowait);
ret = schedule_timeout(timeout);
atomic_dec(&rq->nr_iowait);
+ add_io_delay(dstart);
return ret;
}
}
#endif /* CONFIG_MAGIC_SYSRQ */
+
+#ifdef CONFIG_DELAY_ACCT
+/*
+ * task_running_sys - exported wrapper around the task_is_running() macro,
+ * letting modules query whether @p is on the current CPU's runqueue.
+ * Built only under CONFIG_DELAY_ACCT; exported GPL-only.
+ */
+int task_running_sys(struct task_struct *p)
+{
+ return task_is_running(p);
+}
+EXPORT_SYMBOL_GPL(task_running_sys);
+#endif
+