From: Sapan Bhatia Date: Tue, 2 Feb 2010 00:36:20 +0000 (+0000) Subject: Ok, scheduler changes this time applied on top of Caglar's fixes. X-Git-Tag: linux-2.6-27-4~7 X-Git-Url: http://git.onelab.eu/?p=linux-2.6.git;a=commitdiff_plain;h=1c2c917a7c14d7a5f5e17904ab563573717da0d6 Ok, scheduler changes this time applied on top of Caglar's fixes. x --- diff --git a/linux-2.6-591-chopstix-intern.patch b/linux-2.6-591-chopstix-intern.patch index 433d55818..c2fceef8d 100644 --- a/linux-2.6-591-chopstix-intern.patch +++ b/linux-2.6-591-chopstix-intern.patch @@ -1,8 +1,7 @@ -Index: linux-2.6.27.y/arch/Kconfig -=================================================================== ---- linux-2.6.27.y.orig/arch/Kconfig -+++ linux-2.6.27.y/arch/Kconfig -@@ -13,9 +13,18 @@ config OPROFILE +diff -Nurb linux-2.6.27-590/arch/Kconfig linux-2.6.27-591/arch/Kconfig +--- linux-2.6.27-590/arch/Kconfig 2010-02-01 19:42:05.000000000 -0500 ++++ linux-2.6.27-591/arch/Kconfig 2010-02-01 19:42:30.000000000 -0500 +@@ -13,9 +13,18 @@ If unsure, say N. @@ -21,10 +20,9 @@ Index: linux-2.6.27.y/arch/Kconfig config KPROBES bool "Kprobes" depends on KALLSYMS && MODULES -Index: linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c -=================================================================== ---- linux-2.6.27.y.orig/arch/x86/kernel/asm-offsets_32.c -+++ linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c +diff -Nurb linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c +--- linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c 2008-10-09 18:13:53.000000000 -0400 ++++ linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c 2010-02-01 19:42:30.000000000 -0500 @@ -9,6 +9,7 @@ #include #include @@ -54,7 +52,7 @@ Index: linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c void foo(void) { OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax); -@@ -50,6 +62,16 @@ void foo(void) +@@ -50,6 +62,16 @@ OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); BLANK(); @@ -71,11 +69,10 @@ Index: linux-2.6.27.y/arch/x86/kernel/asm-offsets_32.c OFFSET(TI_task, thread_info, task); OFFSET(TI_exec_domain, thread_info, exec_domain); OFFSET(TI_flags, thread_info, flags); -Index: linux-2.6.27.y/arch/x86/kernel/entry_32.S -=================================================================== ---- linux-2.6.27.y.orig/arch/x86/kernel/entry_32.S -+++ linux-2.6.27.y/arch/x86/kernel/entry_32.S -@@ -426,6 +426,33 @@ ENTRY(system_call) +diff -Nurb linux-2.6.27-590/arch/x86/kernel/entry_32.S linux-2.6.27-591/arch/x86/kernel/entry_32.S +--- linux-2.6.27-590/arch/x86/kernel/entry_32.S 2008-10-09 18:13:53.000000000 -0400 ++++ linux-2.6.27-591/arch/x86/kernel/entry_32.S 2010-02-01 19:42:30.000000000 -0500 +@@ -426,6 +426,33 @@ cmpl $(nr_syscalls), %eax jae syscall_badsys syscall_call: @@ -109,11 +106,10 @@ Index: linux-2.6.27.y/arch/x86/kernel/entry_32.S call *sys_call_table(,%eax,4) movl %eax,PT_EAX(%esp) # store the return value syscall_exit: -Index: linux-2.6.27.y/arch/x86/mm/fault.c -=================================================================== ---- linux-2.6.27.y.orig/arch/x86/mm/fault.c -+++ linux-2.6.27.y/arch/x86/mm/fault.c -@@ -79,6 +79,15 @@ static inline int notify_page_fault(stru +diff -Nurb linux-2.6.27-590/arch/x86/mm/fault.c linux-2.6.27-591/arch/x86/mm/fault.c +--- linux-2.6.27-590/arch/x86/mm/fault.c 2010-02-01 19:42:05.000000000 -0500 ++++ linux-2.6.27-591/arch/x86/mm/fault.c 2010-02-01 19:42:30.000000000 -0500 +@@ -79,6 +79,15 @@ #endif } @@ -129,10 +125,9 @@ Index: linux-2.6.27.y/arch/x86/mm/fault.c /* * X86_32 * 
Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. -Index: linux-2.6.27.y/drivers/oprofile/cpu_buffer.c -=================================================================== ---- linux-2.6.27.y.orig/drivers/oprofile/cpu_buffer.c -+++ linux-2.6.27.y/drivers/oprofile/cpu_buffer.c +diff -Nurb linux-2.6.27-590/drivers/oprofile/cpu_buffer.c linux-2.6.27-591/drivers/oprofile/cpu_buffer.c +--- linux-2.6.27-590/drivers/oprofile/cpu_buffer.c 2008-10-09 18:13:53.000000000 -0400 ++++ linux-2.6.27-591/drivers/oprofile/cpu_buffer.c 2010-02-01 19:42:30.000000000 -0500 @@ -21,6 +21,7 @@ #include #include @@ -141,7 +136,7 @@ Index: linux-2.6.27.y/drivers/oprofile/cpu_buffer.c #include "event_buffer.h" #include "cpu_buffer.h" -@@ -147,6 +148,17 @@ static void increment_head(struct oprofi +@@ -147,6 +148,17 @@ b->head_pos = 0; } @@ -159,7 +154,7 @@ Index: linux-2.6.27.y/drivers/oprofile/cpu_buffer.c static inline void add_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc, unsigned long event) -@@ -155,6 +167,7 @@ add_sample(struct oprofile_cpu_buffer * +@@ -155,6 +167,7 @@ entry->eip = pc; entry->event = event; increment_head(cpu_buf); @@ -167,7 +162,7 @@ Index: linux-2.6.27.y/drivers/oprofile/cpu_buffer.c } static inline void -@@ -250,8 +263,28 @@ void oprofile_add_sample(struct pt_regs +@@ -250,8 +263,28 @@ { int is_kernel = !user_mode(regs); unsigned long pc = profile_pc(regs); @@ -196,10 +191,9 @@ Index: linux-2.6.27.y/drivers/oprofile/cpu_buffer.c } void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) -Index: linux-2.6.27.y/fs/bio.c -=================================================================== ---- linux-2.6.27.y.orig/fs/bio.c -+++ linux-2.6.27.y/fs/bio.c +diff -Nurb linux-2.6.27-590/fs/bio.c linux-2.6.27-591/fs/bio.c +--- linux-2.6.27-590/fs/bio.c 2008-10-09 18:13:53.000000000 -0400 ++++ linux-2.6.27-591/fs/bio.c 2010-02-01 19:42:30.000000000 -0500 @@ -27,6 +27,7 @@ #include #include @@ -208,7 +202,7 @@ Index: linux-2.6.27.y/fs/bio.c static struct kmem_cache *bio_slab __read_mostly; -@@ -44,6 +45,7 @@ static struct biovec_slab bvec_slabs[BIO +@@ -44,6 +45,7 @@ }; #undef BV @@ -216,7 +210,7 @@ Index: linux-2.6.27.y/fs/bio.c /* * fs_bio_set is the bio_set containing bio and iovec memory pools used by * IO code that does not need private memory pools. 
-@@ -1171,6 +1173,14 @@ void bio_check_pages_dirty(struct bio *b +@@ -1171,6 +1173,14 @@ } } @@ -231,7 +225,7 @@ Index: linux-2.6.27.y/fs/bio.c /** * bio_endio - end I/O on a bio * @bio: bio -@@ -1192,6 +1202,24 @@ void bio_endio(struct bio *bio, int erro +@@ -1192,6 +1202,24 @@ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) error = -EIO; @@ -256,10 +250,9 @@ Index: linux-2.6.27.y/fs/bio.c if (bio->bi_end_io) bio->bi_end_io(bio, error); } -Index: linux-2.6.27.y/fs/exec.c -=================================================================== ---- linux-2.6.27.y.orig/fs/exec.c -+++ linux-2.6.27.y/fs/exec.c +diff -Nurb linux-2.6.27-590/fs/exec.c linux-2.6.27-591/fs/exec.c +--- linux-2.6.27-590/fs/exec.c 2010-02-01 19:42:07.000000000 -0500 ++++ linux-2.6.27-591/fs/exec.c 2010-02-01 19:42:31.000000000 -0500 @@ -27,6 +27,7 @@ #include #include @@ -268,7 +261,7 @@ Index: linux-2.6.27.y/fs/exec.c #include #include #include -@@ -698,6 +699,13 @@ struct file *open_exec(const char *name) +@@ -698,6 +699,13 @@ goto out; } @@ -282,10 +275,9 @@ Index: linux-2.6.27.y/fs/exec.c return file; out_path_put: -Index: linux-2.6.27.y/include/linux/arrays.h -=================================================================== ---- /dev/null -+++ linux-2.6.27.y/include/linux/arrays.h +diff -Nurb linux-2.6.27-590/include/linux/arrays.h linux-2.6.27-591/include/linux/arrays.h +--- linux-2.6.27-590/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.27-591/include/linux/arrays.h 2010-02-01 19:42:31.000000000 -0500 @@ -0,0 +1,36 @@ +#ifndef __ARRAYS_H__ +#define __ARRAYS_H__ @@ -323,10 +315,24 @@ Index: linux-2.6.27.y/include/linux/arrays.h + struct task_struct *task; +}; +#endif -Index: linux-2.6.27.y/include/linux/sched.h.rej -=================================================================== ---- /dev/null -+++ linux-2.6.27.y/include/linux/sched.h.rej +diff -Nurb linux-2.6.27-590/include/linux/sched.h linux-2.6.27-591/include/linux/sched.h +--- linux-2.6.27-590/include/linux/sched.h 2010-02-01 19:42:07.000000000 -0500 ++++ linux-2.6.27-591/include/linux/sched.h 2010-02-01 19:47:30.000000000 -0500 +@@ -1133,6 +1133,11 @@ + cputime_t utime, stime, utimescaled, stimescaled; + cputime_t gtime; + cputime_t prev_utime, prev_stime; ++ ++ #ifdef CONFIG_CHOPSTIX ++ unsigned long last_interrupted, last_ran_j; ++ #endif ++ + unsigned long nvcsw, nivcsw; /* context switch counts */ + struct timespec start_time; /* monotonic time */ + struct timespec real_start_time; /* boot based time */ +diff -Nurb linux-2.6.27-590/include/linux/sched.h.rej linux-2.6.27-591/include/linux/sched.h.rej +--- linux-2.6.27-590/include/linux/sched.h.rej 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.27-591/include/linux/sched.h.rej 2010-02-01 19:42:31.000000000 -0500 @@ -0,0 +1,19 @@ +*************** +*** 850,855 **** @@ -347,10 +353,9 @@ Index: linux-2.6.27.y/include/linux/sched.h.rej + unsigned long long sched_time; /* sched_clock time spent running */ + enum sleep_type sleep_type; + -Index: linux-2.6.27.y/kernel/sched.c -=================================================================== ---- linux-2.6.27.y.orig/kernel/sched.c -+++ linux-2.6.27.y/kernel/sched.c +diff -Nurb linux-2.6.27-590/kernel/sched.c linux-2.6.27-591/kernel/sched.c +--- linux-2.6.27-590/kernel/sched.c 2010-02-01 19:42:07.000000000 -0500 ++++ linux-2.6.27-591/kernel/sched.c 2010-02-01 19:47:30.000000000 -0500 @@ -10,7 +10,7 @@ * 1998-11-19 Implemented schedule_timeout() and related stuff * by Andrea Arcangeli @@ -377,7 +382,18 @@ Index: 
linux-2.6.27.y/kernel/sched.c /* * Convert user-nice values [ -20 ... 0 ... 19 ] * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], -@@ -4436,6 +4440,29 @@ pick_next_task(struct rq *rq, struct tas +@@ -2368,6 +2372,10 @@ + INIT_HLIST_HEAD(&p->preempt_notifiers); + #endif + ++#ifdef CONFIG_CHOPSTIX ++ p->last_ran_j = jiffies; ++ p->last_interrupted = INTERRUPTIBLE; ++#endif + /* + * We mark the process as running here, but have not actually + * inserted it onto the runqueue yet. This guarantees that +@@ -4428,6 +4436,29 @@ } } @@ -407,7 +423,69 @@ Index: linux-2.6.27.y/kernel/sched.c /* * schedule() is the main scheduler function. */ -@@ -5382,6 +5409,7 @@ long sched_setaffinity(pid_t pid, const +@@ -4482,6 +4513,61 @@ + next = pick_next_task(rq, prev); + + if (likely(prev != next)) { ++ ++#ifdef CONFIG_CHOPSTIX ++ /* Run only if the Chopstix module so decrees it */ ++ if (rec_event) { ++ unsigned long diff; ++ int sampling_reason; ++ prev->last_ran_j = jiffies; ++ if (next->last_interrupted!=INTERRUPTIBLE) { ++ if (next->last_interrupted!=RUNNING) { ++ diff = (jiffies-next->last_interrupted); ++ sampling_reason = 0;/* BLOCKING */ ++ } ++ else { ++ diff = jiffies-next->last_ran_j; ++ sampling_reason = 1;/* PREEMPTION */ ++ } ++ ++ if (diff >= HZ/10) { ++ struct event_spec { ++ unsigned long pc; ++ unsigned long dcookie; ++ unsigned int count; ++ unsigned int reason; ++ }; ++ ++ struct event event; ++ struct event_spec espec; ++ struct pt_regs *regs; ++ regs = task_pt_regs(current); ++ ++ espec.reason = sampling_reason; ++ event.event_data=&espec; ++ event.task=next; ++ espec.pc=regs->ip; ++ event.event_type=2; ++ /* index in the event array currently set up */ ++ /* make sure the counters are loaded in the order we want them to show up*/ ++ (*rec_event)(&event, diff); ++ } ++ } ++ /* next has been elected to run */ ++ next->last_interrupted=0; ++ ++ /* An uninterruptible process just yielded. Record the current jiffy */ ++ if (prev->state & TASK_UNINTERRUPTIBLE) { ++ prev->last_interrupted=jiffies; ++ } ++ /* An interruptible process just yielded, or it got preempted. ++ * Mark it as interruptible */ ++ else if (prev->state & TASK_INTERRUPTIBLE) { ++ prev->last_interrupted=INTERRUPTIBLE; ++ } ++ } ++#endif ++ + sched_info_switch(prev, next); + + rq->nr_switches++; +@@ -5369,6 +5455,7 @@ get_task_struct(p); read_unlock(&tasklist_lock); @@ -415,326 +493,12692 @@ Index: linux-2.6.27.y/kernel/sched.c retval = -EPERM; if ((current->euid != p->euid) && (current->euid != p->uid) && !capable(CAP_SYS_NICE)) -Index: linux-2.6.27.y/kernel/sched.c.rej -=================================================================== ---- /dev/null -+++ linux-2.6.27.y/kernel/sched.c.rej -@@ -0,0 +1,258 @@ -+*************** -+*** 23,28 **** -+ #include -+ #include -+ #include -+ #include -+ #include -+ #include -+--- 23,29 ---- -+ #include -+ #include -+ #include -++ #include -+ #include -+ #include -+ #include -+*************** -+*** 451,456 **** -+ -+ repeat_lock_task: -+ rq = task_rq(p); -+ spin_lock(&rq->lock); -+ if (unlikely(rq != task_rq(p))) { -+ spin_unlock(&rq->lock); -+--- 455,461 ---- -+ -+ repeat_lock_task: -+ rq = task_rq(p); -++ -+ spin_lock(&rq->lock); -+ if (unlikely(rq != task_rq(p))) { -+ spin_unlock(&rq->lock); -+*************** -+*** 1761,1766 **** -+ * event cannot wake it up and insert it on the runqueue either. 
-+ */ -+ p->state = TASK_RUNNING; -+ -+ /* -+ * Make sure we do not leak PI boosting priority to the child: -+--- 1766,1786 ---- -+ * event cannot wake it up and insert it on the runqueue either. -+ */ -+ p->state = TASK_RUNNING; -++ #ifdef CONFIG_CHOPSTIX -++ /* The jiffy of last interruption */ -++ if (p->state & TASK_UNINTERRUPTIBLE) { -++ p->last_interrupted=jiffies; -++ } -++ else -++ if (p->state & TASK_INTERRUPTIBLE) { -++ p->last_interrupted=INTERRUPTIBLE; -++ } -++ else -++ p->last_interrupted=RUNNING; -++ -++ /* The jiffy of last execution */ -++ p->last_ran_j=jiffies; -++ #endif -+ -+ /* -+ * Make sure we do not leak PI boosting priority to the child: -+*************** -+*** 3628,3633 **** -+ -+ #endif -+ -+ static inline int interactive_sleep(enum sleep_type sleep_type) -+ { -+ return (sleep_type == SLEEP_INTERACTIVE || -+--- 3648,3654 ---- -+ -+ #endif -+ -++ -+ static inline int interactive_sleep(enum sleep_type sleep_type) -+ { -+ return (sleep_type == SLEEP_INTERACTIVE || -+*************** -+*** 3637,3652 **** -+ /* -+ * schedule() is the main scheduler function. -+ */ -+ asmlinkage void __sched schedule(void) -+ { -+ struct task_struct *prev, *next; -+ struct prio_array *array; -+ struct list_head *queue; -+ unsigned long long now; -+- unsigned long run_time; -+ int cpu, idx, new_prio; -+ long *switch_count; -+ struct rq *rq; -+ -+ /* -+ * Test if we are atomic. Since do_exit() needs to call into -+--- 3658,3685 ---- -+ /* -+ * schedule() is the main scheduler function. -+ */ -++ -++ #ifdef CONFIG_CHOPSTIX -++ extern void (*rec_event)(void *,unsigned int); -++ struct event_spec { -++ unsigned long pc; -++ unsigned long dcookie; -++ unsigned int count; -++ unsigned int reason; -++ }; -++ #endif -++ -+ asmlinkage void __sched schedule(void) -+ { -+ struct task_struct *prev, *next; -+ struct prio_array *array; -+ struct list_head *queue; -+ unsigned long long now; -++ unsigned long run_time, diff; -+ int cpu, idx, new_prio; -+ long *switch_count; -+ struct rq *rq; -++ int sampling_reason; -+ -+ /* -+ * Test if we are atomic. Since do_exit() needs to call into -+*************** -+*** 3700,3705 **** -+ switch_count = &prev->nivcsw; -+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { -+ switch_count = &prev->nvcsw; -+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) && -+ unlikely(signal_pending(prev)))) -+ prev->state = TASK_RUNNING; -+--- 3733,3739 ---- -+ switch_count = &prev->nivcsw; -+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { -+ switch_count = &prev->nvcsw; -++ -+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) && -+ unlikely(signal_pending(prev)))) -+ prev->state = TASK_RUNNING; -+*************** -+*** 3709,3714 **** -+ vx_uninterruptible_inc(prev); -+ } -+ deactivate_task(prev, rq); -+ } -+ } -+ -+--- 3743,3759 ---- -+ vx_uninterruptible_inc(prev); -+ } -+ deactivate_task(prev, rq); -++ #ifdef CONFIG_CHOPSTIX -++ /* An uninterruptible process just yielded. Record the current jiffie */ -++ if (prev->state & TASK_UNINTERRUPTIBLE) { -++ prev->last_interrupted=jiffies; -++ } -++ /* An interruptible process just yielded, or it got preempted. 
-++ * Mark it as interruptible */ -++ else if (prev->state & TASK_INTERRUPTIBLE) { -++ prev->last_interrupted=INTERRUPTIBLE; -++ } -++ #endif -+ } -+ } -+ -+*************** -+*** 3785,3790 **** -+ prev->sleep_avg = 0; -+ prev->timestamp = prev->last_ran = now; -+ -+ sched_info_switch(prev, next); -+ if (likely(prev != next)) { -+ next->timestamp = next->last_ran = now; -+--- 3830,3869 ---- -+ prev->sleep_avg = 0; -+ prev->timestamp = prev->last_ran = now; -+ -++ #ifdef CONFIG_CHOPSTIX -++ /* Run only if the Chopstix module so decrees it */ -++ if (rec_event) { -++ prev->last_ran_j = jiffies; -++ if (next->last_interrupted!=INTERRUPTIBLE) { -++ if (next->last_interrupted!=RUNNING) { -++ diff = (jiffies-next->last_interrupted); -++ sampling_reason = 0;/* BLOCKING */ -++ } -++ else { -++ diff = jiffies-next->last_ran_j; -++ sampling_reason = 1;/* PREEMPTION */ -++ } -++ -++ if (diff >= HZ/10) { -++ struct event event; -++ struct event_spec espec; -++ struct pt_regs *regs; -++ regs = task_pt_regs(current); -++ -++ espec.reason = sampling_reason; -++ event.event_data=&espec; -++ event.task=next; -++ espec.pc=regs->eip; -++ event.event_type=2; -++ /* index in the event array currently set up */ -++ /* make sure the counters are loaded in the order we want them to show up*/ -++ (*rec_event)(&event, diff); -++ } -++ } -++ /* next has been elected to run */ -++ next->last_interrupted=0; -++ } -++ #endif -+ sched_info_switch(prev, next); -+ if (likely(prev != next)) { -+ next->timestamp = next->last_ran = now; -+*************** -+*** 5737,5742 **** -+ jiffies_to_timespec(p->policy == SCHED_FIFO ? -+ 0 : task_timeslice(p), &t); -+ read_unlock(&tasklist_lock); -+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; -+ out_nounlock: -+ return retval; -+--- 5817,5823 ---- -+ jiffies_to_timespec(p->policy == SCHED_FIFO ? -+ 0 : task_timeslice(p), &t); -+ read_unlock(&tasklist_lock); -++ -+ retval = copy_to_user(interval, &t, sizeof(t)) ? 
-EFAULT : 0; -+ out_nounlock: -+ return retval; -+*************** -+*** 7980,7982 **** -+ } -+ -+ #endif -+--- 8061,8080 ---- -+ } -+ -+ #endif -++ -++ #ifdef CONFIG_CHOPSTIX -++ void (*rec_event)(void *,unsigned int) = NULL; -++ -++ /* To support safe calling from asm */ -++ asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) { -++ struct pt_regs *regs; -++ struct event_spec *es = event_signature_in->event_data; -++ regs = task_pt_regs(current); -++ event_signature_in->task=current; -++ es->pc=regs->eip; -++ event_signature_in->count=1; -++ (*rec_event)(event_signature_in, count); -++ } -++ EXPORT_SYMBOL(rec_event); -++ EXPORT_SYMBOL(in_sched_functions); -++ #endif -Index: linux-2.6.27.y/mm/memory.c -=================================================================== ---- linux-2.6.27.y.orig/mm/memory.c -+++ linux-2.6.27.y/mm/memory.c -@@ -61,6 +61,7 @@ - - #include - #include +diff -Nurb linux-2.6.27-590/kernel/sched.c.orig linux-2.6.27-591/kernel/sched.c.orig +--- linux-2.6.27-590/kernel/sched.c.orig 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.27-591/kernel/sched.c.orig 2010-02-01 19:43:07.000000000 -0500 +@@ -0,0 +1,9326 @@ ++/* ++ * kernel/sched.c ++ * ++ * Kernel scheduler and related syscalls ++ * ++ * Copyright (C) 1991-2002 Linus Torvalds ++ * ++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and ++ * make semaphores SMP safe ++ * 1998-11-19 Implemented schedule_timeout() and related stuff ++ * by Andrea Arcangeli ++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: ++ * hybrid priority-list and round-robin deventn with ++ * an array-switch method of distributing timeslices ++ * and per-CPU runqueues. Cleanups and useful suggestions ++ * by Davide Libenzi, preemptible kernel bits by Robert Love. ++ * 2003-09-03 Interactivity tuning by Con Kolivas. ++ * 2004-04-02 Scheduler domains code by Nick Piggin ++ * 2007-04-15 Work begun on replacing all interactivity tuning with a ++ * fair scheduling design by Con Kolivas. ++ * 2007-05-05 Load balancing (smp-nice) and other improvements ++ * by Peter Williams ++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith ++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri ++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, ++ * Thomas Gleixner, Mike Kravetz ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include +#include - - #include "internal.h" - -@@ -2753,6 +2754,15 @@ out: - return ret; - } - -+extern void (*rec_event)(void *,unsigned int); -+struct event_spec { -+ unsigned long pc; -+ unsigned long dcookie; -+ unsigned count; -+ unsigned char reason; ++ ++#include ++#include ++ ++#include "sched_cpupri.h" ++ ++#define INTERRUPTIBLE -1 ++#define RUNNING 0 ++ ++/* ++ * Convert user-nice values [ -20 ... 0 ... 19 ] ++ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], ++ * and back. 
++ */ ++#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) ++#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) ++#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) ++ ++/* ++ * 'User priority' is the nice value converted to something we ++ * can work with better when scaling various scheduler parameters, ++ * it's a [ 0 ... 39 ] range. ++ */ ++#define USER_PRIO(p) ((p)-MAX_RT_PRIO) ++#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) ++#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) ++ ++/* ++ * Helpers for converting nanosecond timing to jiffy resolution ++ */ ++#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) ++ ++#define NICE_0_LOAD SCHED_LOAD_SCALE ++#define NICE_0_SHIFT SCHED_LOAD_SHIFT ++ ++/* ++ * These are the 'tuning knobs' of the scheduler: ++ * ++ * default timeslice is 100 msecs (used only for SCHED_RR tasks). ++ * Timeslices get refilled after they expire. ++ */ ++#define DEF_TIMESLICE (100 * HZ / 1000) ++ ++/* ++ * single value that denotes runtime == period, ie unlimited time. ++ */ ++#define RUNTIME_INF ((u64)~0ULL) ++ ++#ifdef CONFIG_SMP ++/* ++ * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) ++ * Since cpu_power is a 'constant', we can use a reciprocal divide. ++ */ ++static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load) ++{ ++ return reciprocal_divide(load, sg->reciprocal_cpu_power); ++} ++ ++/* ++ * Each time a sched group cpu_power is changed, ++ * we must compute its reciprocal value ++ */ ++static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val) ++{ ++ sg->__cpu_power += val; ++ sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power); ++} ++#endif ++ ++static inline int rt_policy(int policy) ++{ ++ if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR)) ++ return 1; ++ return 0; ++} ++ ++static inline int task_has_rt_policy(struct task_struct *p) ++{ ++ return rt_policy(p->policy); ++} ++ ++/* ++ * This is the priority-queue data structure of the RT scheduling class: ++ */ ++struct rt_prio_array { ++ DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ ++ struct list_head queue[MAX_RT_PRIO]; +}; + ++struct rt_bandwidth { ++ /* nests inside the rq lock: */ ++ spinlock_t rt_runtime_lock; ++ ktime_t rt_period; ++ u64 rt_runtime; ++ struct hrtimer rt_period_timer; ++}; + - /* - * By the time we get here, we already hold the mm semaphore - */ -@@ -2782,6 +2792,24 @@ int handle_mm_fault(struct mm_struct *mm - if (!pte) - return VM_FAULT_OOM; - -+#ifdef CONFIG_CHOPSTIX -+ if (rec_event) { -+ struct event event; -+ struct event_spec espec; -+ struct pt_regs *regs; -+ unsigned int pc; -+ regs = task_pt_regs(current); -+ pc = regs->ip & (unsigned int) ~4095; ++static struct rt_bandwidth def_rt_bandwidth; + -+ espec.reason = 0; /* alloc */ -+ event.event_data=&espec; -+ event.task = current; -+ espec.pc=pc; -+ event.event_type=5; -+ (*rec_event)(&event, 1); ++static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); ++ ++static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) ++{ ++ struct rt_bandwidth *rt_b = ++ container_of(timer, struct rt_bandwidth, rt_period_timer); ++ ktime_t now; ++ int overrun; ++ int idle = 0; ++ ++ for (;;) { ++ now = hrtimer_cb_get_time(timer); ++ overrun = hrtimer_forward(timer, now, rt_b->rt_period); ++ ++ if (!overrun) ++ break; ++ ++ idle = do_sched_rt_period_timer(rt_b, overrun); ++ } ++ ++ return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; ++} ++ ++static ++void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) ++{ ++ rt_b->rt_period = ns_to_ktime(period); ++ rt_b->rt_runtime = runtime; ++ ++ spin_lock_init(&rt_b->rt_runtime_lock); ++ ++ hrtimer_init(&rt_b->rt_period_timer, ++ CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rt_b->rt_period_timer.function = sched_rt_period_timer; ++ rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; ++} ++ ++static void start_rt_bandwidth(struct rt_bandwidth *rt_b) ++{ ++ ktime_t now; ++ ++ if (rt_b->rt_runtime == RUNTIME_INF) ++ return; ++ ++ if (hrtimer_active(&rt_b->rt_period_timer)) ++ return; ++ ++ spin_lock(&rt_b->rt_runtime_lock); ++ for (;;) { ++ if (hrtimer_active(&rt_b->rt_period_timer)) ++ break; ++ ++ now = hrtimer_cb_get_time(&rt_b->rt_period_timer); ++ hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); ++ hrtimer_start(&rt_b->rt_period_timer, ++ rt_b->rt_period_timer.expires, ++ HRTIMER_MODE_ABS); + } ++ spin_unlock(&rt_b->rt_runtime_lock); ++} ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) ++{ ++ hrtimer_cancel(&rt_b->rt_period_timer); ++} +#endif + - return handle_pte_fault(mm, vma, address, pte, pmd, write_access); - } - -Index: linux-2.6.27.y/mm/slab.c -=================================================================== ---- linux-2.6.27.y.orig/mm/slab.c -+++ linux-2.6.27.y/mm/slab.c ++/* ++ * sched_domains_mutex serializes calls to arch_init_sched_domains, ++ * detach_destroy_domains and partition_sched_domains. ++ */ ++static DEFINE_MUTEX(sched_domains_mutex); ++ ++#ifdef CONFIG_GROUP_SCHED ++ ++#include ++ ++struct cfs_rq; ++ ++static LIST_HEAD(task_groups); ++ ++/* task group related information */ ++struct task_group { ++#ifdef CONFIG_CGROUP_SCHED ++ struct cgroup_subsys_state css; ++#endif ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ /* schedulable entities of this group on each cpu */ ++ struct sched_entity **se; ++ /* runqueue "owned" by this group on each cpu */ ++ struct cfs_rq **cfs_rq; ++ unsigned long shares; ++#endif ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++ struct sched_rt_entity **rt_se; ++ struct rt_rq **rt_rq; ++ ++ struct rt_bandwidth rt_bandwidth; ++#endif ++ ++ struct rcu_head rcu; ++ struct list_head list; ++ ++ struct task_group *parent; ++ struct list_head siblings; ++ struct list_head children; ++}; ++ ++#ifdef CONFIG_USER_SCHED ++ ++/* ++ * Root task group. ++ * Every UID task group (including init_task_group aka UID-0) will ++ * be a child to this group. ++ */ ++struct task_group root_task_group; ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++/* Default task group's sched entity on each cpu */ ++static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); ++/* Default task group's cfs_rq on each cpu */ ++static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; ++#endif /* CONFIG_FAIR_GROUP_SCHED */ ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); ++static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; ++#endif /* CONFIG_RT_GROUP_SCHED */ ++#else /* !CONFIG_FAIR_GROUP_SCHED */ ++#define root_task_group init_task_group ++#endif /* CONFIG_FAIR_GROUP_SCHED */ ++ ++/* task_group_lock serializes add/remove of task groups and also changes to ++ * a task group's cpu shares. 
++ */ ++static DEFINE_SPINLOCK(task_group_lock); ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++#ifdef CONFIG_USER_SCHED ++# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) ++#else /* !CONFIG_USER_SCHED */ ++# define INIT_TASK_GROUP_LOAD NICE_0_LOAD ++#endif /* CONFIG_USER_SCHED */ ++ ++/* ++ * A weight of 0 or 1 can cause arithmetics problems. ++ * A weight of a cfs_rq is the sum of weights of which entities ++ * are queued on this cfs_rq, so a weight of a entity should not be ++ * too large, so as the shares value of a task group. ++ * (The default weight is 1024 - so there's no practical ++ * limitation from this.) ++ */ ++#define MIN_SHARES 2 ++#define MAX_SHARES (1UL << 18) ++ ++static int init_task_group_load = INIT_TASK_GROUP_LOAD; ++#endif ++ ++/* Default task group. ++ * Every task in system belong to this group at bootup. ++ */ ++struct task_group init_task_group; ++ ++/* return group to which a task belongs */ ++static inline struct task_group *task_group(struct task_struct *p) ++{ ++ struct task_group *tg; ++ ++#ifdef CONFIG_USER_SCHED ++ tg = p->user->tg; ++#elif defined(CONFIG_CGROUP_SCHED) ++ tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), ++ struct task_group, css); ++#else ++ tg = &init_task_group; ++#endif ++ return tg; ++} ++ ++/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ ++static inline void set_task_rq(struct task_struct *p, unsigned int cpu) ++{ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; ++ p->se.parent = task_group(p)->se[cpu]; ++#endif ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++ p->rt.rt_rq = task_group(p)->rt_rq[cpu]; ++ p->rt.parent = task_group(p)->rt_se[cpu]; ++#endif ++} ++ ++#else ++ ++static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } ++static inline struct task_group *task_group(struct task_struct *p) ++{ ++ return NULL; ++} ++ ++#endif /* CONFIG_GROUP_SCHED */ ++ ++/* CFS-related fields in a runqueue */ ++struct cfs_rq { ++ struct load_weight load; ++ unsigned long nr_running; ++ ++ u64 exec_clock; ++ u64 min_vruntime; ++ u64 pair_start; ++ ++ struct rb_root tasks_timeline; ++ struct rb_node *rb_leftmost; ++ ++ struct list_head tasks; ++ struct list_head *balance_iterator; ++ ++ /* ++ * 'curr' points to currently running entity on this cfs_rq. ++ * It is set to NULL otherwise (i.e when none are currently running). ++ */ ++ struct sched_entity *curr, *next; ++ ++ unsigned long nr_spread_over; ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ ++ ++ /* ++ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in ++ * a hierarchy). Non-leaf lrqs hold other higher schedulable entities ++ * (like users, containers etc.) ++ * ++ * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This ++ * list is used during load balance. ++ */ ++ struct list_head leaf_cfs_rq_list; ++ struct task_group *tg; /* group that "owns" this runqueue */ ++ ++#ifdef CONFIG_SMP ++ /* ++ * the part of load.weight contributed by tasks ++ */ ++ unsigned long task_weight; ++ ++ /* ++ * h_load = weight * f(tg) ++ * ++ * Where f(tg) is the recursive weight fraction assigned to ++ * this group. 
++ */ ++ unsigned long h_load; ++ ++ /* ++ * this cpu's part of tg->shares ++ */ ++ unsigned long shares; ++ ++ /* ++ * load.weight at the time we set shares ++ */ ++ unsigned long rq_weight; ++#endif ++#endif ++}; ++ ++/* Real-Time classes' related field in a runqueue: */ ++struct rt_rq { ++ struct rt_prio_array active; ++ unsigned long rt_nr_running; ++#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED ++ int highest_prio; /* highest queued rt task prio */ ++#endif ++#ifdef CONFIG_SMP ++ unsigned long rt_nr_migratory; ++ int overloaded; ++#endif ++ int rt_throttled; ++ u64 rt_time; ++ u64 rt_runtime; ++ /* Nests inside the rq lock: */ ++ spinlock_t rt_runtime_lock; ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++ unsigned long rt_nr_boosted; ++ ++ struct rq *rq; ++ struct list_head leaf_rt_rq_list; ++ struct task_group *tg; ++ struct sched_rt_entity *rt_se; ++#endif ++}; ++ ++#ifdef CONFIG_SMP ++ ++/* ++ * We add the notion of a root-domain which will be used to define per-domain ++ * variables. Each exclusive cpuset essentially defines an island domain by ++ * fully partitioning the member cpus from any other cpuset. Whenever a new ++ * exclusive cpuset is created, we also create and attach a new root-domain ++ * object. ++ * ++ */ ++struct root_domain { ++ atomic_t refcount; ++ cpumask_t span; ++ cpumask_t online; ++ ++ /* ++ * The "RT overload" flag: it gets set if a CPU has more than ++ * one runnable RT task. ++ */ ++ cpumask_t rto_mask; ++ atomic_t rto_count; ++#ifdef CONFIG_SMP ++ struct cpupri cpupri; ++#endif ++}; ++ ++/* ++ * By default the system creates a single root-domain with all cpus as ++ * members (mimicking the global state we have today). ++ */ ++static struct root_domain def_root_domain; ++ ++#endif ++ unsigned long norm_time; ++ unsigned long idle_time; ++#ifdef CONFIG_VSERVER_IDLETIME ++ int idle_skip; ++#endif ++#ifdef CONFIG_VSERVER_HARDCPU ++ struct list_head hold_queue; ++ unsigned long nr_onhold; ++ int idle_tokens; ++#endif ++ ++/* ++ * This is the main, per-CPU runqueue data structure. ++ * ++ * Locking rule: those places that want to lock multiple runqueues ++ * (such as the load balancing or the thread migration code), lock ++ * acquire operations must be ordered by ascending &runqueue. ++ */ ++struct rq { ++ /* runqueue lock: */ ++ spinlock_t lock; ++ ++ /* ++ * nr_running and cpu_load should be in the same cacheline because ++ * remote CPUs use both these fields when doing load calculation. ++ */ ++ unsigned long nr_running; ++ #define CPU_LOAD_IDX_MAX 5 ++ unsigned long cpu_load[CPU_LOAD_IDX_MAX]; ++ unsigned char idle_at_tick; ++#ifdef CONFIG_NO_HZ ++ unsigned long last_tick_seen; ++ unsigned char in_nohz_recently; ++#endif ++ /* capture load from *all* tasks on this cpu: */ ++ struct load_weight load; ++ unsigned long nr_load_updates; ++ u64 nr_switches; ++ ++ struct cfs_rq cfs; ++ struct rt_rq rt; ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ /* list of leaf cfs_rq on this cpu: */ ++ struct list_head leaf_cfs_rq_list; ++#endif ++#ifdef CONFIG_RT_GROUP_SCHED ++ struct list_head leaf_rt_rq_list; ++#endif ++ ++ /* ++ * This is part of a global counter where only the total sum ++ * over all CPUs matters. A task can increase this counter on ++ * one CPU and if it got migrated afterwards it may decrease ++ * it on another CPU. 
Always updated under the runqueue lock: ++ */ ++ unsigned long nr_uninterruptible; ++ ++ struct task_struct *curr, *idle; ++ unsigned long next_balance; ++ struct mm_struct *prev_mm; ++ ++ u64 clock; ++ ++ atomic_t nr_iowait; ++ ++#ifdef CONFIG_SMP ++ struct root_domain *rd; ++ struct sched_domain *sd; ++ ++ /* For active balancing */ ++ int active_balance; ++ int push_cpu; ++ /* cpu of this runqueue: */ ++ int cpu; ++ int online; ++ ++ unsigned long avg_load_per_task; ++ ++ struct task_struct *migration_thread; ++ struct list_head migration_queue; ++#endif ++ ++#ifdef CONFIG_SCHED_HRTICK ++#ifdef CONFIG_SMP ++ int hrtick_csd_pending; ++ struct call_single_data hrtick_csd; ++#endif ++ struct hrtimer hrtick_timer; ++#endif ++ ++#ifdef CONFIG_SCHEDSTATS ++ /* latency stats */ ++ struct sched_info rq_sched_info; ++ ++ /* sys_sched_yield() stats */ ++ unsigned int yld_exp_empty; ++ unsigned int yld_act_empty; ++ unsigned int yld_both_empty; ++ unsigned int yld_count; ++ ++ /* schedule() stats */ ++ unsigned int sched_switch; ++ unsigned int sched_count; ++ unsigned int sched_goidle; ++ ++ /* try_to_wake_up() stats */ ++ unsigned int ttwu_count; ++ unsigned int ttwu_local; ++ ++ /* BKL stats */ ++ unsigned int bkl_count; ++#endif ++}; ++ ++static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++ ++static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) ++{ ++ rq->curr->sched_class->check_preempt_curr(rq, p); ++} ++ ++static inline int cpu_of(struct rq *rq) ++{ ++#ifdef CONFIG_SMP ++ return rq->cpu; ++#else ++ return 0; ++#endif ++} ++ ++/* ++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition. ++ * See detach_destroy_domains: synchronize_sched for details. ++ * ++ * The domain tree of any CPU may only be accessed from within ++ * preempt-disabled sections. ++ */ ++#define for_each_domain(cpu, __sd) \ ++ for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) ++ ++#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) ++#define this_rq() (&__get_cpu_var(runqueues)) ++#define task_rq(p) cpu_rq(task_cpu(p)) ++#define cpu_curr(cpu) (cpu_rq(cpu)->curr) ++ ++static inline void update_rq_clock(struct rq *rq) ++{ ++ rq->clock = sched_clock_cpu(cpu_of(rq)); ++} ++ ++/* ++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off: ++ */ ++#ifdef CONFIG_SCHED_DEBUG ++# define const_debug __read_mostly ++#else ++# define const_debug static const ++#endif ++ ++/** ++ * runqueue_is_locked ++ * ++ * Returns true if the current cpu runqueue is locked. ++ * This interface allows printk to be called with the runqueue lock ++ * held and know whether or not it is OK to wake up the klogd. 
++ */ ++int runqueue_is_locked(void) ++{ ++ int cpu = get_cpu(); ++ struct rq *rq = cpu_rq(cpu); ++ int ret; ++ ++ ret = spin_is_locked(&rq->lock); ++ put_cpu(); ++ return ret; ++} ++ ++/* ++ * Debugging: various feature bits ++ */ ++ ++#define SCHED_FEAT(name, enabled) \ ++ __SCHED_FEAT_##name , ++ ++enum { ++#include "sched_features.h" ++}; ++ ++#undef SCHED_FEAT ++ ++#define SCHED_FEAT(name, enabled) \ ++ (1UL << __SCHED_FEAT_##name) * enabled | ++ ++const_debug unsigned int sysctl_sched_features = ++#include "sched_features.h" ++ 0; ++ ++#undef SCHED_FEAT ++ ++#ifdef CONFIG_SCHED_DEBUG ++#define SCHED_FEAT(name, enabled) \ ++ #name , ++ ++static __read_mostly char *sched_feat_names[] = { ++#include "sched_features.h" ++ NULL ++}; ++ ++#undef SCHED_FEAT ++ ++static int sched_feat_open(struct inode *inode, struct file *filp) ++{ ++ filp->private_data = inode->i_private; ++ return 0; ++} ++ ++static ssize_t ++sched_feat_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char *buf; ++ int r = 0; ++ int len = 0; ++ int i; ++ ++ for (i = 0; sched_feat_names[i]; i++) { ++ len += strlen(sched_feat_names[i]); ++ len += 4; ++ } ++ ++ buf = kmalloc(len + 2, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ for (i = 0; sched_feat_names[i]; i++) { ++ if (sysctl_sched_features & (1UL << i)) ++ r += sprintf(buf + r, "%s ", sched_feat_names[i]); ++ else ++ r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]); ++ } ++ ++ r += sprintf(buf + r, "\n"); ++ WARN_ON(r >= len + 2); ++ ++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++ ++ kfree(buf); ++ ++ return r; ++} ++ ++static ssize_t ++sched_feat_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ char *cmp = buf; ++ int neg = 0; ++ int i; ++ ++ if (cnt > 63) ++ cnt = 63; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = 0; ++ ++ if (strncmp(buf, "NO_", 3) == 0) { ++ neg = 1; ++ cmp += 3; ++ } ++ ++ for (i = 0; sched_feat_names[i]; i++) { ++ int len = strlen(sched_feat_names[i]); ++ ++ if (strncmp(cmp, sched_feat_names[i], len) == 0) { ++ if (neg) ++ sysctl_sched_features &= ~(1UL << i); ++ else ++ sysctl_sched_features |= (1UL << i); ++ break; ++ } ++ } ++ ++ if (!sched_feat_names[i]) ++ return -EINVAL; ++ ++ filp->f_pos += cnt; ++ ++ return cnt; ++} ++ ++static struct file_operations sched_feat_fops = { ++ .open = sched_feat_open, ++ .read = sched_feat_read, ++ .write = sched_feat_write, ++}; ++ ++static __init int sched_init_debug(void) ++{ ++ debugfs_create_file("sched_features", 0644, NULL, NULL, ++ &sched_feat_fops); ++ ++ return 0; ++} ++late_initcall(sched_init_debug); ++ ++#endif ++ ++#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) ++ ++/* ++ * Number of tasks to iterate in a single balance run. ++ * Limited because this is done with IRQs disabled. ++ */ ++const_debug unsigned int sysctl_sched_nr_migrate = 32; ++ ++/* ++ * ratelimit for updating the group shares. ++ * default: 0.25ms ++ */ ++unsigned int sysctl_sched_shares_ratelimit = 250000; ++ ++/* ++ * period over which we measure -rt task cpu usage in us. ++ * default: 1s ++ */ ++unsigned int sysctl_sched_rt_period = 1000000; ++ ++static __read_mostly int scheduler_running; ++ ++/* ++ * part of the period that we allow rt tasks to run in us. 
++ * default: 0.95s ++ */ ++int sysctl_sched_rt_runtime = 950000; ++ ++static inline u64 global_rt_period(void) ++{ ++ return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; ++} ++ ++static inline u64 global_rt_runtime(void) ++{ ++ if (sysctl_sched_rt_runtime < 0) ++ return RUNTIME_INF; ++ ++ return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; ++} ++ ++#ifndef prepare_arch_switch ++# define prepare_arch_switch(next) do { } while (0) ++#endif ++#ifndef finish_arch_switch ++# define finish_arch_switch(prev) do { } while (0) ++#endif ++ ++static inline int task_current(struct rq *rq, struct task_struct *p) ++{ ++ return rq->curr == p; ++} ++ ++#ifndef __ARCH_WANT_UNLOCKED_CTXSW ++static inline int task_running(struct rq *rq, struct task_struct *p) ++{ ++ return task_current(rq, p); ++} ++ ++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++} ++ ++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) ++{ ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* this is a valid case when another task releases the spinlock */ ++ rq->lock.owner = current; ++#endif ++ /* ++ * If we are tracking spinlock dependencies then we have to ++ * fix up the runqueue lock - which gets 'carried over' from ++ * prev into current: ++ */ ++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); ++ ++ spin_unlock_irq(&rq->lock); ++} ++ ++#else /* __ARCH_WANT_UNLOCKED_CTXSW */ ++static inline int task_running(struct rq *rq, struct task_struct *p) ++{ ++#ifdef CONFIG_SMP ++ return p->oncpu; ++#else ++ return task_current(rq, p); ++#endif ++} ++ ++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * We can optimise this out completely for !SMP, because the ++ * SMP rebalancing from interrupt is the only thing that cares ++ * here. ++ */ ++ next->oncpu = 1; ++#endif ++#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW ++ spin_unlock_irq(&rq->lock); ++#else ++ spin_unlock(&rq->lock); ++#endif ++} ++ ++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * After ->oncpu is cleared, the task can be moved to a different CPU. ++ * We must ensure this doesn't happen until the switch is completely ++ * finished. ++ */ ++ smp_wmb(); ++ prev->oncpu = 0; ++#endif ++#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW ++ local_irq_enable(); ++#endif ++} ++#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ ++ ++/* ++ * __task_rq_lock - lock the runqueue a given task resides on. ++ * Must be called interrupts disabled. ++ */ ++static inline struct rq *__task_rq_lock(struct task_struct *p) ++ __acquires(rq->lock) ++{ ++ for (;;) { ++ struct rq *rq = task_rq(p); ++ spin_lock(&rq->lock); ++ if (likely(rq == task_rq(p))) ++ return rq; ++ spin_unlock(&rq->lock); ++ } ++} ++ ++/* ++ * task_rq_lock - lock the runqueue a given task resides on and disable ++ * interrupts. Note the ordering: we can safely lookup the task_rq without ++ * explicitly disabling preemption. 
++ */ ++static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ for (;;) { ++ local_irq_save(*flags); ++ rq = task_rq(p); ++ spin_lock(&rq->lock); ++ if (likely(rq == task_rq(p))) ++ return rq; ++ spin_unlock_irqrestore(&rq->lock, *flags); ++ } ++} ++ ++static void __task_rq_unlock(struct rq *rq) ++ __releases(rq->lock) ++{ ++ spin_unlock(&rq->lock); ++} ++ ++static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) ++ __releases(rq->lock) ++{ ++ spin_unlock_irqrestore(&rq->lock, *flags); ++} ++ ++/* ++ * this_rq_lock - lock this runqueue and disable interrupts. ++ */ ++static struct rq *this_rq_lock(void) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ local_irq_disable(); ++ rq = this_rq(); ++ spin_lock(&rq->lock); ++ ++ return rq; ++} ++ ++#ifdef CONFIG_SCHED_HRTICK ++/* ++ * Use HR-timers to deliver accurate preemption points. ++ * ++ * Its all a bit involved since we cannot program an hrt while holding the ++ * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a ++ * reschedule event. ++ * ++ * When we get rescheduled we reprogram the hrtick_timer outside of the ++ * rq->lock. ++ */ ++ ++/* ++ * Use hrtick when: ++ * - enabled by features ++ * - hrtimer is actually high res ++ */ ++static inline int hrtick_enabled(struct rq *rq) ++{ ++ if (!sched_feat(HRTICK)) ++ return 0; ++ if (!cpu_active(cpu_of(rq))) ++ return 0; ++ return hrtimer_is_hres_active(&rq->hrtick_timer); ++} ++ ++static void hrtick_clear(struct rq *rq) ++{ ++ if (hrtimer_active(&rq->hrtick_timer)) ++ hrtimer_cancel(&rq->hrtick_timer); ++} ++ ++/* ++ * High-resolution timer tick. ++ * Runs from hardirq context with interrupts disabled. ++ */ ++static enum hrtimer_restart hrtick(struct hrtimer *timer) ++{ ++ struct rq *rq = container_of(timer, struct rq, hrtick_timer); ++ ++ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); ++ ++ spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ rq->curr->sched_class->task_tick(rq, rq->curr, 1); ++ spin_unlock(&rq->lock); ++ ++ return HRTIMER_NORESTART; ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * called from hardirq (IPI) context ++ */ ++static void __hrtick_start(void *arg) ++{ ++ struct rq *rq = arg; ++ ++ spin_lock(&rq->lock); ++ hrtimer_restart(&rq->hrtick_timer); ++ rq->hrtick_csd_pending = 0; ++ spin_unlock(&rq->lock); ++} ++ ++/* ++ * Called to set the hrtick timer state. ++ * ++ * called with rq->lock held and irqs disabled ++ */ ++static void hrtick_start(struct rq *rq, u64 delay) ++{ ++ struct hrtimer *timer = &rq->hrtick_timer; ++ ktime_t time = ktime_add_ns(timer->base->get_time(), delay); ++ ++ timer->expires = time; ++ ++ if (rq == this_rq()) { ++ hrtimer_restart(timer); ++ } else if (!rq->hrtick_csd_pending) { ++ __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd); ++ rq->hrtick_csd_pending = 1; ++ } ++} ++ ++static int ++hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) ++{ ++ int cpu = (int)(long)hcpu; ++ ++ switch (action) { ++ case CPU_UP_CANCELED: ++ case CPU_UP_CANCELED_FROZEN: ++ case CPU_DOWN_PREPARE: ++ case CPU_DOWN_PREPARE_FROZEN: ++ case CPU_DEAD: ++ case CPU_DEAD_FROZEN: ++ hrtick_clear(cpu_rq(cpu)); ++ return NOTIFY_OK; ++ } ++ ++ return NOTIFY_DONE; ++} ++ ++static __init void init_hrtick(void) ++{ ++ hotcpu_notifier(hotplug_hrtick, 0); ++} ++#else ++/* ++ * Called to set the hrtick timer state. 
++ * ++ * called with rq->lock held and irqs disabled ++ */ ++static void hrtick_start(struct rq *rq, u64 delay) ++{ ++ hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); ++} ++ ++static void init_hrtick(void) ++{ ++} ++#endif /* CONFIG_SMP */ ++ ++static void init_rq_hrtick(struct rq *rq) ++{ ++#ifdef CONFIG_SMP ++ rq->hrtick_csd_pending = 0; ++ ++ rq->hrtick_csd.flags = 0; ++ rq->hrtick_csd.func = __hrtick_start; ++ rq->hrtick_csd.info = rq; ++#endif ++ ++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rq->hrtick_timer.function = hrtick; ++ rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; ++} ++#else ++static inline void hrtick_clear(struct rq *rq) ++{ ++} ++ ++static inline void init_rq_hrtick(struct rq *rq) ++{ ++} ++ ++static inline void init_hrtick(void) ++{ ++} ++#endif ++ ++/* ++ * resched_task - mark a task 'to be rescheduled now'. ++ * ++ * On UP this means the setting of the need_resched flag, on SMP it ++ * might also involve a cross-CPU call to trigger the scheduler on ++ * the target CPU. ++ */ ++#ifdef CONFIG_SMP ++ ++#ifndef tsk_is_polling ++#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) ++#endif ++ ++static void resched_task(struct task_struct *p) ++{ ++ int cpu; ++ ++ assert_spin_locked(&task_rq(p)->lock); ++ ++ if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) ++ return; ++ ++ set_tsk_thread_flag(p, TIF_NEED_RESCHED); ++ ++ cpu = task_cpu(p); ++ if (cpu == smp_processor_id()) ++ return; ++ ++ /* NEED_RESCHED must be visible before we test polling */ ++ smp_mb(); ++ if (!tsk_is_polling(p)) ++ smp_send_reschedule(cpu); ++} ++ ++static void resched_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ if (!spin_trylock_irqsave(&rq->lock, flags)) ++ return; ++ resched_task(cpu_curr(cpu)); ++ spin_unlock_irqrestore(&rq->lock, flags); ++} ++ ++#ifdef CONFIG_NO_HZ ++/* ++ * When add_timer_on() enqueues a timer into the timer wheel of an ++ * idle CPU then this timer might expire before the next timer event ++ * which is scheduled to wake up that CPU. In case of a completely ++ * idle system the next event might even be infinite time into the ++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and ++ * leaves the inner idle loop so the newly added timer is taken into ++ * account when the CPU goes back to idle and evaluates the timer ++ * wheel for the next timer event. ++ */ ++void wake_up_idle_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (cpu == smp_processor_id()) ++ return; ++ ++ /* ++ * This is safe, as this function is called with the timer ++ * wheel base lock of (cpu) held. When the CPU is on the way ++ * to idle and has not yet set rq->curr to idle then it will ++ * be serialized on the timer wheel base lock and take the new ++ * timer into account automatically. ++ */ ++ if (rq->curr != rq->idle) ++ return; ++ ++ /* ++ * We can set TIF_RESCHED on the idle task of the other CPU ++ * lockless. 
The worst case is that the other CPU runs the ++ * idle task through an additional NOOP schedule() ++ */ ++ set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED); ++ ++ /* NEED_RESCHED must be visible before we test polling */ ++ smp_mb(); ++ if (!tsk_is_polling(rq->idle)) ++ smp_send_reschedule(cpu); ++} ++#endif /* CONFIG_NO_HZ */ ++ ++#else /* !CONFIG_SMP */ ++static void resched_task(struct task_struct *p) ++{ ++ assert_spin_locked(&task_rq(p)->lock); ++ set_tsk_need_resched(p); ++} ++#endif /* CONFIG_SMP */ ++ ++#if BITS_PER_LONG == 32 ++# define WMULT_CONST (~0UL) ++#else ++# define WMULT_CONST (1UL << 32) ++#endif ++ ++#define WMULT_SHIFT 32 ++ ++/* ++ * Shift right and round: ++ */ ++#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) ++ ++/* ++ * delta *= weight / lw ++ */ ++static unsigned long ++calc_delta_mine(unsigned long delta_exec, unsigned long weight, ++ struct load_weight *lw) ++{ ++ u64 tmp; ++ ++ if (!lw->inv_weight) { ++ if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) ++ lw->inv_weight = 1; ++ else ++ lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2) ++ / (lw->weight+1); ++ } ++ ++ tmp = (u64)delta_exec * weight; ++ /* ++ * Check whether we'd overflow the 64-bit multiplication: ++ */ ++ if (unlikely(tmp > WMULT_CONST)) ++ tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, ++ WMULT_SHIFT/2); ++ else ++ tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); ++ ++ return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); ++} ++ ++static inline void update_load_add(struct load_weight *lw, unsigned long inc) ++{ ++ lw->weight += inc; ++ lw->inv_weight = 0; ++} ++ ++static inline void update_load_sub(struct load_weight *lw, unsigned long dec) ++{ ++ lw->weight -= dec; ++ lw->inv_weight = 0; ++} ++ ++/* ++ * To aid in avoiding the subversion of "niceness" due to uneven distribution ++ * of tasks with abnormal "nice" values across CPUs the contribution that ++ * each task makes to its run queue's load is weighted according to its ++ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a ++ * scaled version of the new time slice allocation that they receive on time ++ * slice expiry etc. ++ */ ++ ++#define WEIGHT_IDLEPRIO 2 ++#define WMULT_IDLEPRIO (1 << 31) ++ ++/* ++ * Nice levels are multiplicative, with a gentle 10% change for every ++ * nice level changed. I.e. when a CPU-bound task goes from nice 0 to ++ * nice 1, it will get ~10% less CPU time than another CPU-bound task ++ * that remained on nice 0. ++ * ++ * The "10% effect" is relative and cumulative: from _any_ nice level, ++ * if you go up 1 level, it's -10% CPU usage, if you go down 1 level ++ * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. ++ * If a task goes up by ~10% and another task goes down by ~10% then ++ * the relative distance between them is ~25%.) ++ */ ++static const int prio_to_weight[40] = { ++ /* -20 */ 88761, 71755, 56483, 46273, 36291, ++ /* -15 */ 29154, 23254, 18705, 14949, 11916, ++ /* -10 */ 9548, 7620, 6100, 4904, 3906, ++ /* -5 */ 3121, 2501, 1991, 1586, 1277, ++ /* 0 */ 1024, 820, 655, 526, 423, ++ /* 5 */ 335, 272, 215, 172, 137, ++ /* 10 */ 110, 87, 70, 56, 45, ++ /* 15 */ 36, 29, 23, 18, 15, ++}; ++ ++/* ++ * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated. 
++ * ++ * In cases where the weight does not change often, we can use the ++ * precalculated inverse to speed up arithmetics by turning divisions ++ * into multiplications: ++ */ ++static const u32 prio_to_wmult[40] = { ++ /* -20 */ 48388, 59856, 76040, 92818, 118348, ++ /* -15 */ 147320, 184698, 229616, 287308, 360437, ++ /* -10 */ 449829, 563644, 704093, 875809, 1099582, ++ /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, ++ /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, ++ /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, ++ /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, ++ /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, ++}; ++ ++static void activate_task(struct rq *rq, struct task_struct *p, int wakeup); ++ ++/* ++ * runqueue iterator, to support SMP load-balancing between different ++ * scheduling classes, without having to expose their internal data ++ * structures to the load-balancing proper: ++ */ ++struct rq_iterator { ++ void *arg; ++ struct task_struct *(*start)(void *); ++ struct task_struct *(*next)(void *); ++}; ++ ++#ifdef CONFIG_SMP ++static unsigned long ++balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, ++ unsigned long max_load_move, struct sched_domain *sd, ++ enum cpu_idle_type idle, int *all_pinned, ++ int *this_best_prio, struct rq_iterator *iterator); ++ ++static int ++iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, ++ struct sched_domain *sd, enum cpu_idle_type idle, ++ struct rq_iterator *iterator); ++#endif ++ ++#ifdef CONFIG_CGROUP_CPUACCT ++static void cpuacct_charge(struct task_struct *tsk, u64 cputime); ++#else ++static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} ++#endif ++ ++static inline void inc_cpu_load(struct rq *rq, unsigned long load) ++{ ++ update_load_add(&rq->load, load); ++} ++ ++static inline void dec_cpu_load(struct rq *rq, unsigned long load) ++{ ++ update_load_sub(&rq->load, load); ++} ++ ++#ifdef CONFIG_SMP ++static unsigned long source_load(int cpu, int type); ++static unsigned long target_load(int cpu, int type); ++static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); ++ ++static unsigned long cpu_avg_load_per_task(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (rq->nr_running) ++ rq->avg_load_per_task = rq->load.weight / rq->nr_running; ++ ++ return rq->avg_load_per_task; ++} ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ ++typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *); ++ ++/* ++ * Iterate the full tree, calling @down when first entering a node and @up when ++ * leaving it for the final time. ++ */ ++static void ++walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd) ++{ ++ struct task_group *parent, *child; ++ ++ rcu_read_lock(); ++ parent = &root_task_group; ++down: ++ (*down)(parent, cpu, sd); ++ list_for_each_entry_rcu(child, &parent->children, siblings) { ++ parent = child; ++ goto down; ++ ++up: ++ continue; ++ } ++ (*up)(parent, cpu, sd); ++ ++ child = parent; ++ parent = parent->parent; ++ if (parent) ++ goto up; ++ rcu_read_unlock(); ++} ++ ++static void __set_se_shares(struct sched_entity *se, unsigned long shares); ++ ++/* ++ * Calculate and set the cpu's group shares. 
++ */ ++static void ++__update_group_shares_cpu(struct task_group *tg, int cpu, ++ unsigned long sd_shares, unsigned long sd_rq_weight) ++{ ++ int boost = 0; ++ unsigned long shares; ++ unsigned long rq_weight; ++ ++ if (!tg->se[cpu]) ++ return; ++ ++ rq_weight = tg->cfs_rq[cpu]->load.weight; ++ ++ /* ++ * If there are currently no tasks on the cpu pretend there is one of ++ * average load so that when a new task gets to run here it will not ++ * get delayed by group starvation. ++ */ ++ if (!rq_weight) { ++ boost = 1; ++ rq_weight = NICE_0_LOAD; ++ } ++ ++ if (unlikely(rq_weight > sd_rq_weight)) ++ rq_weight = sd_rq_weight; ++ ++ /* ++ * \Sum shares * rq_weight ++ * shares = ----------------------- ++ * \Sum rq_weight ++ * ++ */ ++ shares = (sd_shares * rq_weight) / (sd_rq_weight + 1); ++ ++ /* ++ * record the actual number of shares, not the boosted amount. ++ */ ++ tg->cfs_rq[cpu]->shares = boost ? 0 : shares; ++ tg->cfs_rq[cpu]->rq_weight = rq_weight; ++ ++ if (shares < MIN_SHARES) ++ shares = MIN_SHARES; ++ else if (shares > MAX_SHARES) ++ shares = MAX_SHARES; ++ ++ __set_se_shares(tg->se[cpu], shares); ++} ++ ++/* ++ * Re-compute the task group their per cpu shares over the given domain. ++ * This needs to be done in a bottom-up fashion because the rq weight of a ++ * parent group depends on the shares of its child groups. ++ */ ++static void ++tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd) ++{ ++ unsigned long rq_weight = 0; ++ unsigned long shares = 0; ++ int i; ++ ++ for_each_cpu_mask(i, sd->span) { ++ rq_weight += tg->cfs_rq[i]->load.weight; ++ shares += tg->cfs_rq[i]->shares; ++ } ++ ++ if ((!shares && rq_weight) || shares > tg->shares) ++ shares = tg->shares; ++ ++ if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) ++ shares = tg->shares; ++ ++ if (!rq_weight) ++ rq_weight = cpus_weight(sd->span) * NICE_0_LOAD; ++ ++ for_each_cpu_mask(i, sd->span) { ++ struct rq *rq = cpu_rq(i); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&rq->lock, flags); ++ __update_group_shares_cpu(tg, i, shares, rq_weight); ++ spin_unlock_irqrestore(&rq->lock, flags); ++ } ++} ++ ++/* ++ * Compute the cpu's hierarchical load factor for each task group. ++ * This needs to be done in a top-down fashion because the load of a child ++ * group is a fraction of its parents load. 
++ */ ++static void ++tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd) ++{ ++ unsigned long load; ++ ++ if (!tg->parent) { ++ load = cpu_rq(cpu)->load.weight; ++ } else { ++ load = tg->parent->cfs_rq[cpu]->h_load; ++ load *= tg->cfs_rq[cpu]->shares; ++ load /= tg->parent->cfs_rq[cpu]->load.weight + 1; ++ } ++ ++ tg->cfs_rq[cpu]->h_load = load; ++} ++ ++static void ++tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd) ++{ ++} ++ ++static void update_shares(struct sched_domain *sd) ++{ ++ u64 now = cpu_clock(raw_smp_processor_id()); ++ s64 elapsed = now - sd->last_update; ++ ++ if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { ++ sd->last_update = now; ++ walk_tg_tree(tg_nop, tg_shares_up, 0, sd); ++ } ++} ++ ++static void update_shares_locked(struct rq *rq, struct sched_domain *sd) ++{ ++ spin_unlock(&rq->lock); ++ update_shares(sd); ++ spin_lock(&rq->lock); ++} ++ ++static void update_h_load(int cpu) ++{ ++ walk_tg_tree(tg_load_down, tg_nop, cpu, NULL); ++} ++ ++#else ++ ++static inline void update_shares(struct sched_domain *sd) ++{ ++} ++ ++static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd) ++{ ++} ++ ++#endif ++ ++#endif ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) ++{ ++#ifdef CONFIG_SMP ++ cfs_rq->shares = shares; ++#endif ++} ++#endif ++ ++#include "sched_stats.h" ++#include "sched_idletask.c" ++#include "sched_fair.c" ++#include "sched_rt.c" ++#ifdef CONFIG_SCHED_DEBUG ++# include "sched_debug.c" ++#endif ++ ++#define sched_class_highest (&rt_sched_class) ++#define for_each_class(class) \ ++ for (class = sched_class_highest; class; class = class->next) ++ ++static void inc_nr_running(struct rq *rq) ++{ ++ rq->nr_running++; ++} ++ ++static void dec_nr_running(struct rq *rq) ++{ ++ rq->nr_running--; ++} ++ ++static void set_load_weight(struct task_struct *p) ++{ ++ if (task_has_rt_policy(p)) { ++ p->se.load.weight = prio_to_weight[0] * 2; ++ p->se.load.inv_weight = prio_to_wmult[0] >> 1; ++ return; ++ } ++ ++ /* ++ * SCHED_IDLE tasks get minimal weight: ++ */ ++ if (p->policy == SCHED_IDLE) { ++ p->se.load.weight = WEIGHT_IDLEPRIO; ++ p->se.load.inv_weight = WMULT_IDLEPRIO; ++ return; ++ } ++ ++ p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO]; ++ p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; ++} ++ ++static void update_avg(u64 *avg, u64 sample) ++{ ++ s64 diff = sample - *avg; ++ *avg += diff >> 3; ++} ++ ++static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) ++{ ++ // BUG_ON(p->state & TASK_ONHOLD); ++ sched_info_queued(p); ++ p->sched_class->enqueue_task(rq, p, wakeup); ++ p->se.on_rq = 1; ++} ++ ++static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) ++{ ++ if (sleep && p->se.last_wakeup) { ++ update_avg(&p->se.avg_overlap, ++ p->se.sum_exec_runtime - p->se.last_wakeup); ++ p->se.last_wakeup = 0; ++ } ++ ++ sched_info_dequeued(p); ++ p->sched_class->dequeue_task(rq, p, sleep); ++ p->se.on_rq = 0; ++} ++ ++/* ++ * __normal_prio - return the priority that is based on the static prio ++ */ ++static inline int __normal_prio(struct task_struct *p) ++{ ++ return p->static_prio; ++} ++ ++/* ++ * Calculate the expected normal priority: i.e. priority ++ * without taking RT-inheritance into account. Might be ++ * boosted by interactivity modifiers. Changes upon fork, ++ * setprio syscalls, and whenever the interactivity ++ * estimator recalculates. 
++ */ ++static inline int normal_prio(struct task_struct *p) ++{ ++ int prio; ++ ++ if (task_has_rt_policy(p)) ++ prio = MAX_RT_PRIO-1 - p->rt_priority; ++ else ++ prio = __normal_prio(p); ++ return prio; ++} ++ ++/* ++ * Calculate the current priority, i.e. the priority ++ * taken into account by the scheduler. This value might ++ * be boosted by RT tasks, or might be boosted by ++ * interactivity modifiers. Will be RT if the task got ++ * RT-boosted. If not then it returns p->normal_prio. ++ */ ++static int effective_prio(struct task_struct *p) ++{ ++ p->normal_prio = normal_prio(p); ++ /* ++ * If we are RT tasks or we were boosted to RT priority, ++ * keep the priority unchanged. Otherwise, update priority ++ * to the normal priority: ++ */ ++ if (!rt_prio(p->prio)) ++ return p->normal_prio; ++ return p->prio; ++} ++ ++/* ++ * activate_task - move a task to the runqueue. ++ */ ++static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) ++{ ++ if (task_contributes_to_load(p)) ++ rq->nr_uninterruptible--; ++ ++ enqueue_task(rq, p, wakeup); ++ inc_nr_running(rq); ++} ++ ++/* ++ * deactivate_task - remove a task from the runqueue. ++ */ ++static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) ++{ ++ if (task_contributes_to_load(p)) ++ rq->nr_uninterruptible++; ++ ++ dequeue_task(rq, p, sleep); ++ dec_nr_running(rq); ++} ++ ++/** ++ * task_curr - is this task currently executing on a CPU? ++ * @p: the task in question. ++ */ ++inline int task_curr(const struct task_struct *p) ++{ ++ return cpu_curr(task_cpu(p)) == p; ++} ++ ++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) ++{ ++ set_task_rq(p, cpu); ++#ifdef CONFIG_SMP ++ /* ++ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be ++ * successfuly executed on another CPU. We must ensure that updates of ++ * per-task data have been completed by this moment. 
++ */ ++ smp_wmb(); ++ task_thread_info(p)->cpu = cpu; ++#endif ++} ++ ++static inline void check_class_changed(struct rq *rq, struct task_struct *p, ++ const struct sched_class *prev_class, ++ int oldprio, int running) ++{ ++ if (prev_class != p->sched_class) { ++ if (prev_class->switched_from) ++ prev_class->switched_from(rq, p, running); ++ p->sched_class->switched_to(rq, p, running); ++ } else ++ p->sched_class->prio_changed(rq, p, oldprio, running); ++} ++ ++#ifdef CONFIG_SMP ++ ++/* Used instead of source_load when we know the type == 0 */ ++static unsigned long weighted_cpuload(const int cpu) ++{ ++ return cpu_rq(cpu)->load.weight; ++} ++ ++/* ++ * Is this task likely cache-hot: ++ */ ++static int ++task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) ++{ ++ s64 delta; ++ ++ /* ++ * Buddy candidates are cache hot: ++ */ ++ if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next)) ++ return 1; ++ ++ if (p->sched_class != &fair_sched_class) ++ return 0; ++ ++ if (sysctl_sched_migration_cost == -1) ++ return 1; ++ if (sysctl_sched_migration_cost == 0) ++ return 0; ++ ++ delta = now - p->se.exec_start; ++ ++ return delta < (s64)sysctl_sched_migration_cost; ++} ++ ++ ++void set_task_cpu(struct task_struct *p, unsigned int new_cpu) ++{ ++ int old_cpu = task_cpu(p); ++ struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu); ++ struct cfs_rq *old_cfsrq = task_cfs_rq(p), ++ *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); ++ u64 clock_offset; ++ ++ clock_offset = old_rq->clock - new_rq->clock; ++ ++#ifdef CONFIG_SCHEDSTATS ++ if (p->se.wait_start) ++ p->se.wait_start -= clock_offset; ++ if (p->se.sleep_start) ++ p->se.sleep_start -= clock_offset; ++ if (p->se.block_start) ++ p->se.block_start -= clock_offset; ++ if (old_cpu != new_cpu) { ++ schedstat_inc(p, se.nr_migrations); ++ if (task_hot(p, old_rq->clock, NULL)) ++ schedstat_inc(p, se.nr_forced2_migrations); ++ } ++#endif ++ p->se.vruntime -= old_cfsrq->min_vruntime - ++ new_cfsrq->min_vruntime; ++ ++ __set_task_cpu(p, new_cpu); ++} ++ ++struct migration_req { ++ struct list_head list; ++ ++ struct task_struct *task; ++ int dest_cpu; ++ ++ struct completion done; ++}; ++ ++#include "sched_mon.h" ++ ++ ++/* ++ * The task's runqueue lock must be held. ++ * Returns true if you have to wait for migration thread. ++ */ ++static int ++migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) ++{ ++ struct rq *rq = task_rq(p); ++ ++ vxm_migrate_task(p, rq, dest_cpu); ++ /* ++ * If the task is not on a runqueue (and not running), then ++ * it is sufficient to simply update the task's cpu field. ++ */ ++ if (!p->se.on_rq && !task_running(rq, p)) { ++ set_task_cpu(p, dest_cpu); ++ return 0; ++ } ++ ++ init_completion(&req->done); ++ req->task = p; ++ req->dest_cpu = dest_cpu; ++ list_add(&req->list, &rq->migration_queue); ++ ++ return 1; ++} ++ ++/* ++ * wait_task_inactive - wait for a thread to unschedule. ++ * ++ * If @match_state is nonzero, it's the @p->state value just checked and ++ * not expected to change. If it changes, i.e. @p might have woken up, ++ * then return zero. When we succeed in waiting for @p to be off its CPU, ++ * we return a positive number (its total switch count). If a second call ++ * a short while later returns the same number, the caller can be sure that ++ * @p has remained unscheduled the whole time. ++ * ++ * The caller must ensure that the task *will* unschedule sometime soon, ++ * else this function might spin for a *long* time. 
This function can't ++ * be called with interrupts off, or it may introduce deadlock with ++ * smp_call_function() if an IPI is sent by the same process we are ++ * waiting to become inactive. ++ */ ++unsigned long wait_task_inactive(struct task_struct *p, long match_state) ++{ ++ unsigned long flags; ++ int running, on_rq; ++ unsigned long ncsw; ++ struct rq *rq; ++ ++ for (;;) { ++ /* ++ * We do the initial early heuristics without holding ++ * any task-queue locks at all. We'll only try to get ++ * the runqueue lock when things look like they will ++ * work out! ++ */ ++ rq = task_rq(p); ++ ++ /* ++ * If the task is actively running on another CPU ++ * still, just relax and busy-wait without holding ++ * any locks. ++ * ++ * NOTE! Since we don't hold any locks, it's not ++ * even sure that "rq" stays as the right runqueue! ++ * But we don't care, since "task_running()" will ++ * return false if the runqueue has changed and p ++ * is actually now running somewhere else! ++ */ ++ while (task_running(rq, p)) { ++ if (match_state && unlikely(p->state != match_state)) ++ return 0; ++ cpu_relax(); ++ } ++ ++ /* ++ * Ok, time to look more closely! We need the rq ++ * lock now, to be *sure*. If we're wrong, we'll ++ * just go back and repeat. ++ */ ++ rq = task_rq_lock(p, &flags); ++ running = task_running(rq, p); ++ on_rq = p->se.on_rq; ++ ncsw = 0; ++ if (!match_state || p->state == match_state) { ++ ncsw = p->nivcsw + p->nvcsw; ++ if (unlikely(!ncsw)) ++ ncsw = 1; ++ } ++ task_rq_unlock(rq, &flags); ++ ++ /* ++ * If it changed from the expected state, bail out now. ++ */ ++ if (unlikely(!ncsw)) ++ break; ++ ++ /* ++ * Was it really running after all now that we ++ * checked with the proper locks actually held? ++ * ++ * Oops. Go back and try again.. ++ */ ++ if (unlikely(running)) { ++ cpu_relax(); ++ continue; ++ } ++ ++ /* ++ * It's not enough that it's not actively running, ++ * it must be off the runqueue _entirely_, and not ++ * preempted! ++ * ++ * So if it wa still runnable (but just not actively ++ * running right now), it's preempted, and we should ++ * yield - it could be a while. ++ */ ++ if (unlikely(on_rq)) { ++ schedule_timeout_uninterruptible(1); ++ continue; ++ } ++ ++ /* ++ * Ahh, all good. It wasn't running, and it wasn't ++ * runnable, which means that it will never become ++ * running in the future either. We're all done! ++ */ ++ break; ++ } ++ ++ return ncsw; ++} ++ ++/*** ++ * kick_process - kick a running thread to enter/exit the kernel ++ * @p: the to-be-kicked thread ++ * ++ * Cause a process which is running on another CPU to enter ++ * kernel-mode, without any delay. (to get signals handled.) ++ * ++ * NOTE: this function doesnt have to take the runqueue lock, ++ * because all it wants to ensure is that the remote task enters ++ * the kernel. If the IPI races and the task has been migrated ++ * to another CPU then no harm is done and the purpose has been ++ * achieved as well. ++ */ ++void kick_process(struct task_struct *p) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ cpu = task_cpu(p); ++ if ((cpu != smp_processor_id()) && task_curr(p)) ++ smp_send_reschedule(cpu); ++ preempt_enable(); ++} ++ ++/* ++ * Return a low guess at the load of a migration-source cpu weighted ++ * according to the scheduling class and "nice" value. ++ * ++ * We want to under-estimate the load of migration sources, to ++ * balance conservatively. 
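++ *
++ * Concretely, with LB_BIAS enabled and type > 0 this returns
++ * min(cpu_load[type-1], current weighted load), so a short spike on the
++ * source cpu does not inflate the estimate.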
++ */ ++static unsigned long source_load(int cpu, int type) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long total = weighted_cpuload(cpu); ++ ++ if (type == 0 || !sched_feat(LB_BIAS)) ++ return total; ++ ++ return min(rq->cpu_load[type-1], total); ++} ++ ++/* ++ * Return a high guess at the load of a migration-target cpu weighted ++ * according to the scheduling class and "nice" value. ++ */ ++static unsigned long target_load(int cpu, int type) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long total = weighted_cpuload(cpu); ++ ++ if (type == 0 || !sched_feat(LB_BIAS)) ++ return total; ++ ++ return max(rq->cpu_load[type-1], total); ++} ++ ++/* ++ * find_idlest_group finds and returns the least busy CPU group within the ++ * domain. ++ */ ++static struct sched_group * ++find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) ++{ ++ struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; ++ unsigned long min_load = ULONG_MAX, this_load = 0; ++ int load_idx = sd->forkexec_idx; ++ int imbalance = 100 + (sd->imbalance_pct-100)/2; ++ ++ do { ++ unsigned long load, avg_load; ++ int local_group; ++ int i; ++ ++ /* Skip over this group if it has no CPUs allowed */ ++ if (!cpus_intersects(group->cpumask, p->cpus_allowed)) ++ continue; ++ ++ local_group = cpu_isset(this_cpu, group->cpumask); ++ ++ /* Tally up the load of all CPUs in the group */ ++ avg_load = 0; ++ ++ for_each_cpu_mask_nr(i, group->cpumask) { ++ /* Bias balancing toward cpus of our domain */ ++ if (local_group) ++ load = source_load(i, load_idx); ++ else ++ load = target_load(i, load_idx); ++ ++ avg_load += load; ++ } ++ ++ /* Adjust by relative CPU power of the group */ ++ avg_load = sg_div_cpu_power(group, ++ avg_load * SCHED_LOAD_SCALE); ++ ++ if (local_group) { ++ this_load = avg_load; ++ this = group; ++ } else if (avg_load < min_load) { ++ min_load = avg_load; ++ idlest = group; ++ } ++ } while (group = group->next, group != sd->groups); ++ ++ if (!idlest || 100*this_load < imbalance*min_load) ++ return NULL; ++ return idlest; ++} ++ ++/* ++ * find_idlest_cpu - find the idlest cpu among the cpus in group. ++ */ ++static int ++find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, ++ cpumask_t *tmp) ++{ ++ unsigned long load, min_load = ULONG_MAX; ++ int idlest = -1; ++ int i; ++ ++ /* Traverse only the allowed CPUs */ ++ cpus_and(*tmp, group->cpumask, p->cpus_allowed); ++ ++ for_each_cpu_mask_nr(i, *tmp) { ++ load = weighted_cpuload(i); ++ ++ if (load < min_load || (load == min_load && i == this_cpu)) { ++ min_load = load; ++ idlest = i; ++ } ++ } ++ ++ return idlest; ++} ++ ++/* ++ * sched_balance_self: balance the current task (running on cpu) in domains ++ * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and ++ * SD_BALANCE_EXEC. ++ * ++ * Balance, ie. select the least loaded group. ++ * ++ * Returns the target CPU number, or the same CPU if no balancing is needed. ++ * ++ * preempt must be disabled. ++ */ ++static int sched_balance_self(int cpu, int flag) ++{ ++ struct task_struct *t = current; ++ struct sched_domain *tmp, *sd = NULL; ++ ++ for_each_domain(cpu, tmp) { ++ /* ++ * If power savings logic is enabled for a domain, stop there. 
++ */ ++ if (tmp->flags & SD_POWERSAVINGS_BALANCE) ++ break; ++ if (tmp->flags & flag) ++ sd = tmp; ++ } ++ ++ if (sd) ++ update_shares(sd); ++ ++ while (sd) { ++ cpumask_t span, tmpmask; ++ struct sched_group *group; ++ int new_cpu, weight; ++ ++ if (!(sd->flags & flag)) { ++ sd = sd->child; ++ continue; ++ } ++ ++ span = sd->span; ++ group = find_idlest_group(sd, t, cpu); ++ if (!group) { ++ sd = sd->child; ++ continue; ++ } ++ ++ new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); ++ if (new_cpu == -1 || new_cpu == cpu) { ++ /* Now try balancing at a lower domain level of cpu */ ++ sd = sd->child; ++ continue; ++ } ++ ++ /* Now try balancing at a lower domain level of new_cpu */ ++ cpu = new_cpu; ++ sd = NULL; ++ weight = cpus_weight(span); ++ for_each_domain(cpu, tmp) { ++ if (weight <= cpus_weight(tmp->span)) ++ break; ++ if (tmp->flags & flag) ++ sd = tmp; ++ } ++ /* while loop will break here if sd == NULL */ ++ } ++ ++ return cpu; ++} ++ ++#endif /* CONFIG_SMP */ ++ ++/*** ++ * try_to_wake_up - wake up a thread ++ * @p: the to-be-woken-up thread ++ * @state: the mask of task states that can be woken ++ * @sync: do a synchronous wakeup? ++ * ++ * Put it on the run-queue if it's not already there. The "current" ++ * thread is always on the run-queue (except when the actual ++ * re-schedule is in progress), and as such you're allowed to do ++ * the simpler "current->state = TASK_RUNNING" to mark yourself ++ * runnable without the overhead of this. ++ * ++ * returns failure only if the task is already active. ++ */ ++static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) ++{ ++ int cpu, orig_cpu, this_cpu, success = 0; ++ unsigned long flags; ++ long old_state; ++ struct rq *rq; ++ ++ if (!sched_feat(SYNC_WAKEUPS)) ++ sync = 0; ++ ++#ifdef CONFIG_SMP ++ if (sched_feat(LB_WAKEUP_UPDATE)) { ++ struct sched_domain *sd; ++ ++ this_cpu = raw_smp_processor_id(); ++ cpu = task_cpu(p); ++ ++ for_each_domain(this_cpu, sd) { ++ if (cpu_isset(cpu, sd->span)) { ++ update_shares(sd); ++ break; ++ } ++ } ++ } ++#endif ++ ++ smp_wmb(); ++ rq = task_rq_lock(p, &flags); ++ old_state = p->state; ++ if (!(old_state & state)) ++ goto out; ++ ++ if (p->se.on_rq) ++ goto out_running; ++ ++ cpu = task_cpu(p); ++ orig_cpu = cpu; ++ this_cpu = smp_processor_id(); ++ ++#ifdef CONFIG_SMP ++ if (unlikely(task_running(rq, p))) ++ goto out_activate; ++ ++ cpu = p->sched_class->select_task_rq(p, sync); ++ if (cpu != orig_cpu) { ++ set_task_cpu(p, cpu); ++ task_rq_unlock(rq, &flags); ++ /* might preempt at this point */ ++ rq = task_rq_lock(p, &flags); ++ old_state = p->state; ++ ++ /* we need to unhold suspended tasks ++ if (old_state & TASK_ONHOLD) { ++ vx_unhold_task(p, rq); ++ old_state = p->state; ++ } */ ++ if (!(old_state & state)) ++ goto out; ++ if (p->se.on_rq) ++ goto out_running; ++ ++ this_cpu = smp_processor_id(); ++ cpu = task_cpu(p); ++ } ++ ++#ifdef CONFIG_SCHEDSTATS ++ schedstat_inc(rq, ttwu_count); ++ if (cpu == this_cpu) ++ schedstat_inc(rq, ttwu_local); ++ else { ++ struct sched_domain *sd; ++ for_each_domain(this_cpu, sd) { ++ if (cpu_isset(cpu, sd->span)) { ++ schedstat_inc(sd, ttwu_wake_remote); ++ break; ++ } ++ } ++ } ++#endif /* CONFIG_SCHEDSTATS */ ++ ++out_activate: ++#endif /* CONFIG_SMP */ ++ schedstat_inc(p, se.nr_wakeups); ++ if (sync) ++ schedstat_inc(p, se.nr_wakeups_sync); ++ if (orig_cpu != cpu) ++ schedstat_inc(p, se.nr_wakeups_migrate); ++ if (cpu == this_cpu) ++ schedstat_inc(p, se.nr_wakeups_local); ++ else ++ schedstat_inc(p, se.nr_wakeups_remote); ++ 
update_rq_clock(rq); ++ activate_task(rq, p, 1); ++ success = 1; ++ ++out_running: ++ trace_mark(kernel_sched_wakeup, ++ "pid %d state %ld ## rq %p task %p rq->curr %p", ++ p->pid, p->state, rq, p, rq->curr); ++ check_preempt_curr(rq, p); ++ ++ p->state = TASK_RUNNING; ++#ifdef CONFIG_SMP ++ if (p->sched_class->task_wake_up) ++ p->sched_class->task_wake_up(rq, p); ++#endif ++out: ++ current->se.last_wakeup = current->se.sum_exec_runtime; ++ ++ task_rq_unlock(rq, &flags); ++ ++ return success; ++} ++ ++int wake_up_process(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_ALL, 0); ++} ++EXPORT_SYMBOL(wake_up_process); ++ ++int wake_up_state(struct task_struct *p, unsigned int state) ++{ ++ return try_to_wake_up(p, state, 0); ++} ++ ++/* ++ * Perform scheduler related setup for a newly forked process p. ++ * p is forked by current. ++ * ++ * __sched_fork() is basic setup used by init_idle() too: ++ */ ++static void __sched_fork(struct task_struct *p) ++{ ++ p->se.exec_start = 0; ++ p->se.sum_exec_runtime = 0; ++ p->se.prev_sum_exec_runtime = 0; ++ p->se.last_wakeup = 0; ++ p->se.avg_overlap = 0; ++ ++#ifdef CONFIG_SCHEDSTATS ++ p->se.wait_start = 0; ++ p->se.sum_sleep_runtime = 0; ++ p->se.sleep_start = 0; ++ p->se.block_start = 0; ++ p->se.sleep_max = 0; ++ p->se.block_max = 0; ++ p->se.exec_max = 0; ++ p->se.slice_max = 0; ++ p->se.wait_max = 0; ++#endif ++ ++ INIT_LIST_HEAD(&p->rt.run_list); ++ p->se.on_rq = 0; ++ INIT_LIST_HEAD(&p->se.group_node); ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&p->preempt_notifiers); ++#endif ++ ++ /* ++ * We mark the process as running here, but have not actually ++ * inserted it onto the runqueue yet. This guarantees that ++ * nobody will actually run it, and a signal or other external ++ * event cannot wake it up and insert it on the runqueue either. ++ */ ++ p->state = TASK_RUNNING; ++} ++ ++/* ++ * fork()/clone()-time setup: ++ */ ++void sched_fork(struct task_struct *p, int clone_flags) ++{ ++ int cpu = get_cpu(); ++ ++ __sched_fork(p); ++ ++#ifdef CONFIG_SMP ++ cpu = sched_balance_self(cpu, SD_BALANCE_FORK); ++#endif ++ set_task_cpu(p, cpu); ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child: ++ */ ++ p->prio = current->normal_prio; ++ if (!rt_prio(p->prio)) ++ p->sched_class = &fair_sched_class; ++ ++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) ++ if (likely(sched_info_on())) ++ memset(&p->sched_info, 0, sizeof(p->sched_info)); ++#endif ++#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) ++ p->oncpu = 0; ++#endif ++#ifdef CONFIG_PREEMPT ++ /* Want to start with kernel preemption disabled. */ ++ task_thread_info(p)->preempt_count = 1; ++#endif ++ put_cpu(); ++} ++ ++/* ++ * wake_up_new_task - wake up a newly created task for the first time. ++ * ++ * This function will do some initial scheduler statistics housekeeping ++ * that must be done for every newly created context, then puts the task ++ * on the runqueue and wakes it. 
++ */ ++void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ ++ rq = task_rq_lock(p, &flags); ++ BUG_ON(p->state != TASK_RUNNING); ++ update_rq_clock(rq); ++ ++ p->prio = effective_prio(p); ++ ++ if (!p->sched_class->task_new || !current->se.on_rq) { ++ activate_task(rq, p, 0); ++ } else { ++ /* ++ * Let the scheduling class do new task startup ++ * management (if any): ++ */ ++ p->sched_class->task_new(rq, p); ++ inc_nr_running(rq); ++ } ++ trace_mark(kernel_sched_wakeup_new, ++ "pid %d state %ld ## rq %p task %p rq->curr %p", ++ p->pid, p->state, rq, p, rq->curr); ++ check_preempt_curr(rq, p); ++#ifdef CONFIG_SMP ++ if (p->sched_class->task_wake_up) ++ p->sched_class->task_wake_up(rq, p); ++#endif ++ task_rq_unlock(rq, &flags); ++} ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ ++/** ++ * preempt_notifier_register - tell me when current is being being preempted & rescheduled ++ * @notifier: notifier struct to register ++ */ ++void preempt_notifier_register(struct preempt_notifier *notifier) ++{ ++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_register); ++ ++/** ++ * preempt_notifier_unregister - no longer interested in preemption notifications ++ * @notifier: notifier struct to unregister ++ * ++ * This is safe to call from within a preemption notifier. ++ */ ++void preempt_notifier_unregister(struct preempt_notifier *notifier) ++{ ++ hlist_del(¬ifier->link); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_unregister); ++ ++static void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ struct preempt_notifier *notifier; ++ struct hlist_node *node; ++ ++ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) ++ notifier->ops->sched_in(notifier, raw_smp_processor_id()); ++} ++ ++static void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ struct preempt_notifier *notifier; ++ struct hlist_node *node; ++ ++ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) ++ notifier->ops->sched_out(notifier, next); ++} ++ ++#else /* !CONFIG_PREEMPT_NOTIFIERS */ ++ ++static void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++} ++ ++static void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++} ++ ++#endif /* CONFIG_PREEMPT_NOTIFIERS */ ++ ++/** ++ * prepare_task_switch - prepare to switch tasks ++ * @rq: the runqueue preparing to switch ++ * @prev: the current task that is being switched out ++ * @next: the task we are going to switch to. ++ * ++ * This is called with the rq lock held and interrupts off. It must ++ * be paired with a subsequent finish_task_switch after the context ++ * switch. ++ * ++ * prepare_task_switch sets up locking and calls architecture specific ++ * hooks. ++ */ ++static inline void ++prepare_task_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ fire_sched_out_preempt_notifiers(prev, next); ++ prepare_lock_switch(rq, next); ++ prepare_arch_switch(next); ++} ++ ++/** ++ * finish_task_switch - clean up after a task-switch ++ * @rq: runqueue associated with task-switch ++ * @prev: the thread we just switched away from. ++ * ++ * finish_task_switch must be called after the context switch, paired ++ * with a prepare_task_switch call before the context switch. 
++ * finish_task_switch will reconcile locking set up by prepare_task_switch, ++ * and do any other architecture-specific cleanup actions. ++ * ++ * Note that we may have delayed dropping an mm in context_switch(). If ++ * so, we finish that here outside of the runqueue lock. (Doing it ++ * with the lock held can cause deadlocks; see schedule() for ++ * details.) ++ */ ++static void finish_task_switch(struct rq *rq, struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ struct mm_struct *mm = rq->prev_mm; ++ long prev_state; ++ ++ rq->prev_mm = NULL; ++ ++ /* ++ * A task struct has one reference for the use as "current". ++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls ++ * schedule one last time. The schedule call will never return, and ++ * the scheduled task must drop that reference. ++ * The test for TASK_DEAD must occur while the runqueue locks are ++ * still held, otherwise prev could be scheduled on another cpu, die ++ * there before we look at prev->state, and then the reference would ++ * be dropped twice. ++ * Manfred Spraul ++ */ ++ prev_state = prev->state; ++ finish_arch_switch(prev); ++ finish_lock_switch(rq, prev); ++#ifdef CONFIG_SMP ++ if (current->sched_class->post_schedule) ++ current->sched_class->post_schedule(rq); ++#endif ++ ++ fire_sched_in_preempt_notifiers(current); ++ if (mm) ++ mmdrop(mm); ++ if (unlikely(prev_state == TASK_DEAD)) { ++ /* ++ * Remove function-return probe instances associated with this ++ * task and put them back on the free list. ++ */ ++ kprobe_flush_task(prev); ++ put_task_struct(prev); ++ } ++} ++ ++/** ++ * schedule_tail - first thing a freshly forked thread must call. ++ * @prev: the thread we just switched away from. ++ */ ++asmlinkage void schedule_tail(struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ struct rq *rq = this_rq(); ++ ++ finish_task_switch(rq, prev); ++#ifdef __ARCH_WANT_UNLOCKED_CTXSW ++ /* In this case, finish_task_switch does not reenable preemption */ ++ preempt_enable(); ++#endif ++ if (current->set_child_tid) ++ put_user(task_pid_vnr(current), current->set_child_tid); ++} ++ ++/* ++ * context_switch - switch to the new MM and the new ++ * thread's register state. ++ */ ++static inline void ++context_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ struct mm_struct *mm, *oldmm; ++ ++ prepare_task_switch(rq, prev, next); ++ trace_mark(kernel_sched_schedule, ++ "prev_pid %d next_pid %d prev_state %ld " ++ "## rq %p prev %p next %p", ++ prev->pid, next->pid, prev->state, ++ rq, prev, next); ++ mm = next->mm; ++ oldmm = prev->active_mm; ++ /* ++ * For paravirt, this is coupled with an exit in switch_to to ++ * combine the page table reload and the switch backend into ++ * one hypercall. ++ */ ++ arch_enter_lazy_cpu_mode(); ++ ++ if (unlikely(!mm)) { ++ next->active_mm = oldmm; ++ atomic_inc(&oldmm->mm_count); ++ enter_lazy_tlb(oldmm, next); ++ } else ++ switch_mm(oldmm, mm, next); ++ ++ if (unlikely(!prev->mm)) { ++ prev->active_mm = NULL; ++ rq->prev_mm = oldmm; ++ } ++ /* ++ * Since the runqueue lock will be released by the next ++ * task (which is an invalid locking op but in the case ++ * of the scheduler it's an obvious special-case), so we ++ * do an early lockdep release here: ++ */ ++#ifndef __ARCH_WANT_UNLOCKED_CTXSW ++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_); ++#endif ++ ++ /* Here we just switch the register state and the stack. 
*/ ++ switch_to(prev, next, prev); ++ ++ barrier(); ++ /* ++ * this_rq must be evaluated again because prev may have moved ++ * CPUs since it called schedule(), thus the 'rq' on its stack ++ * frame will be invalid. ++ */ ++ finish_task_switch(this_rq(), prev); ++} ++ ++/* ++ * nr_running, nr_uninterruptible and nr_context_switches: ++ * ++ * externally visible scheduler statistics: current number of runnable ++ * threads, current number of uninterruptible-sleeping threads, total ++ * number of context switches performed since bootup. ++ */ ++unsigned long nr_running(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_online_cpu(i) ++ sum += cpu_rq(i)->nr_running; ++ ++ return sum; ++} ++ ++unsigned long nr_uninterruptible(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += cpu_rq(i)->nr_uninterruptible; ++ ++ /* ++ * Since we read the counters lockless, it might be slightly ++ * inaccurate. Do not allow it to go below zero though: ++ */ ++ if (unlikely((long)sum < 0)) ++ sum = 0; ++ ++ return sum; ++} ++ ++unsigned long long nr_context_switches(void) ++{ ++ int i; ++ unsigned long long sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += cpu_rq(i)->nr_switches; ++ ++ return sum; ++} ++ ++unsigned long nr_iowait(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += atomic_read(&cpu_rq(i)->nr_iowait); ++ ++ return sum; ++} ++ ++unsigned long nr_active(void) ++{ ++ unsigned long i, running = 0, uninterruptible = 0; ++ ++ for_each_online_cpu(i) { ++ running += cpu_rq(i)->nr_running; ++ uninterruptible += cpu_rq(i)->nr_uninterruptible; ++ } ++ ++ if (unlikely((long)uninterruptible < 0)) ++ uninterruptible = 0; ++ ++ return running + uninterruptible; ++} ++ ++/* ++ * Update rq->cpu_load[] statistics. This function is usually called every ++ * scheduler tick (TICK_NSEC). ++ */ ++static void update_cpu_load(struct rq *this_rq) ++{ ++ unsigned long this_load = this_rq->load.weight; ++ int i, scale; ++ ++ this_rq->nr_load_updates++; ++ ++ /* Update our load: */ ++ for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { ++ unsigned long old_load, new_load; ++ ++ /* scale is effectively 1 << i now, and >> i divides by scale */ ++ ++ old_load = this_rq->cpu_load[i]; ++ new_load = this_load; ++ /* ++ * Round up the averaging division if load is increasing. This ++ * prevents us from getting stuck on 9 if the load is 10, for ++ * example. ++ */ ++ if (new_load > old_load) ++ new_load += scale-1; ++ this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; ++ } ++} ++ ++#ifdef CONFIG_SMP ++ ++/* ++ * double_rq_lock - safely lock two runqueues ++ * ++ * Note this does not disable interrupts like task_rq_lock, ++ * you need to do so manually before calling. ++ */ ++static void double_rq_lock(struct rq *rq1, struct rq *rq2) ++ __acquires(rq1->lock) ++ __acquires(rq2->lock) ++{ ++ BUG_ON(!irqs_disabled()); ++ if (rq1 == rq2) { ++ spin_lock(&rq1->lock); ++ __acquire(rq2->lock); /* Fake it out ;) */ ++ } else { ++ if (rq1 < rq2) { ++ spin_lock(&rq1->lock); ++ spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); ++ } else { ++ spin_lock(&rq2->lock); ++ spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); ++ } ++ } ++ update_rq_clock(rq1); ++ update_rq_clock(rq2); ++} ++ ++/* ++ * double_rq_unlock - safely unlock two runqueues ++ * ++ * Note this does not restore interrupts like task_rq_unlock, ++ * you need to do so manually after calling. 
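++ *
++ * The matching double_rq_lock() above always takes the lower-addressed
++ * runqueue lock first, which is what keeps the rq1/rq2 lock ordering
++ * deadlock-free.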
++ */ ++static void double_rq_unlock(struct rq *rq1, struct rq *rq2) ++ __releases(rq1->lock) ++ __releases(rq2->lock) ++{ ++ spin_unlock(&rq1->lock); ++ if (rq1 != rq2) ++ spin_unlock(&rq2->lock); ++ else ++ __release(rq2->lock); ++} ++ ++/* ++ * double_lock_balance - lock the busiest runqueue, this_rq is locked already. ++ */ ++static int double_lock_balance(struct rq *this_rq, struct rq *busiest) ++ __releases(this_rq->lock) ++ __acquires(busiest->lock) ++ __acquires(this_rq->lock) ++{ ++ int ret = 0; ++ ++ if (unlikely(!irqs_disabled())) { ++ /* printk() doesn't work good under rq->lock */ ++ spin_unlock(&this_rq->lock); ++ BUG_ON(1); ++ } ++ if (unlikely(!spin_trylock(&busiest->lock))) { ++ if (busiest < this_rq) { ++ spin_unlock(&this_rq->lock); ++ spin_lock(&busiest->lock); ++ spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); ++ ret = 1; ++ } else ++ spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); ++ } ++ return ret; ++} ++ ++static void double_unlock_balance(struct rq *this_rq, struct rq *busiest) ++ __releases(busiest->lock) ++{ ++ spin_unlock(&busiest->lock); ++ lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); ++} ++ ++/* ++ * If dest_cpu is allowed for this process, migrate the task to it. ++ * This is accomplished by forcing the cpu_allowed mask to only ++ * allow dest_cpu, which will force the cpu onto dest_cpu. Then ++ * the cpu_allowed mask is restored. ++ */ ++static void sched_migrate_task(struct task_struct *p, int dest_cpu) ++{ ++ struct migration_req req; ++ unsigned long flags; ++ struct rq *rq; ++ ++ rq = task_rq_lock(p, &flags); ++ if (!cpu_isset(dest_cpu, p->cpus_allowed) ++ || unlikely(!cpu_active(dest_cpu))) ++ goto out; ++ ++ /* force the process onto the specified CPU */ ++ if (migrate_task(p, dest_cpu, &req)) { ++ /* Need to wait for migration thread (might exit: take ref). */ ++ struct task_struct *mt = rq->migration_thread; ++ ++ get_task_struct(mt); ++ task_rq_unlock(rq, &flags); ++ wake_up_process(mt); ++ put_task_struct(mt); ++ wait_for_completion(&req.done); ++ ++ return; ++ } ++out: ++ task_rq_unlock(rq, &flags); ++} ++ ++/* ++ * sched_exec - execve() is a valuable balancing opportunity, because at ++ * this point the task has the smallest effective memory and cache footprint. ++ */ ++void sched_exec(void) ++{ ++ int new_cpu, this_cpu = get_cpu(); ++ new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC); ++ put_cpu(); ++ if (new_cpu != this_cpu) ++ sched_migrate_task(current, new_cpu); ++} ++ ++/* ++ * pull_task - move a task from a remote runqueue to the local runqueue. ++ * Both runqueues must be locked. ++ */ ++static void pull_task(struct rq *src_rq, struct task_struct *p, ++ struct rq *this_rq, int this_cpu) ++{ ++ deactivate_task(src_rq, p, 0); ++ set_task_cpu(p, this_cpu); ++ activate_task(this_rq, p, 0); ++ /* ++ * Note that idle threads have a prio of MAX_PRIO, for this test ++ * to be always true for them. ++ */ ++ check_preempt_curr(this_rq, p); ++} ++ ++/* ++ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? ++ */ ++static ++int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, ++ struct sched_domain *sd, enum cpu_idle_type idle, ++ int *all_pinned) ++{ ++ /* ++ * We do not migrate tasks that are: ++ * 1) running (obviously), or ++ * 2) cannot be migrated to this CPU due to cpus_allowed, or ++ * 3) are cache-hot on their current CPU. 
++ */ ++ if (!cpu_isset(this_cpu, p->cpus_allowed)) { ++ schedstat_inc(p, se.nr_failed_migrations_affine); ++ return 0; ++ } ++ *all_pinned = 0; ++ ++ if (task_running(rq, p)) { ++ schedstat_inc(p, se.nr_failed_migrations_running); ++ return 0; ++ } ++ ++ /* ++ * Aggressive migration if: ++ * 1) task is cache cold, or ++ * 2) too many balance attempts have failed. ++ */ ++ ++ if (!task_hot(p, rq->clock, sd) || ++ sd->nr_balance_failed > sd->cache_nice_tries) { ++#ifdef CONFIG_SCHEDSTATS ++ if (task_hot(p, rq->clock, sd)) { ++ schedstat_inc(sd, lb_hot_gained[idle]); ++ schedstat_inc(p, se.nr_forced_migrations); ++ } ++#endif ++ return 1; ++ } ++ ++ if (task_hot(p, rq->clock, sd)) { ++ schedstat_inc(p, se.nr_failed_migrations_hot); ++ return 0; ++ } ++ return 1; ++} ++ ++static unsigned long ++balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, ++ unsigned long max_load_move, struct sched_domain *sd, ++ enum cpu_idle_type idle, int *all_pinned, ++ int *this_best_prio, struct rq_iterator *iterator) ++{ ++ int loops = 0, pulled = 0, pinned = 0; ++ struct task_struct *p; ++ long rem_load_move = max_load_move; ++ ++ if (max_load_move == 0) ++ goto out; ++ ++ pinned = 1; ++ ++ /* ++ * Start the load-balancing iterator: ++ */ ++ p = iterator->start(iterator->arg); ++next: ++ if (!p || loops++ > sysctl_sched_nr_migrate) ++ goto out; ++ ++ if ((p->se.load.weight >> 1) > rem_load_move || ++ !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { ++ p = iterator->next(iterator->arg); ++ goto next; ++ } ++ ++ pull_task(busiest, p, this_rq, this_cpu); ++ pulled++; ++ rem_load_move -= p->se.load.weight; ++ ++ /* ++ * We only want to steal up to the prescribed amount of weighted load. ++ */ ++ if (rem_load_move > 0) { ++ if (p->prio < *this_best_prio) ++ *this_best_prio = p->prio; ++ p = iterator->next(iterator->arg); ++ goto next; ++ } ++out: ++ /* ++ * Right now, this is one of only two places pull_task() is called, ++ * so we can safely collect pull_task() stats here rather than ++ * inside pull_task(). ++ */ ++ schedstat_add(sd, lb_gained[idle], pulled); ++ ++ if (all_pinned) ++ *all_pinned = pinned; ++ ++ return max_load_move - rem_load_move; ++} ++ ++/* ++ * move_tasks tries to move up to max_load_move weighted load from busiest to ++ * this_rq, as part of a balancing operation within domain "sd". ++ * Returns 1 if successful and 0 otherwise. ++ * ++ * Called with both runqueues locked. 
++ */ ++static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, ++ unsigned long max_load_move, ++ struct sched_domain *sd, enum cpu_idle_type idle, ++ int *all_pinned) ++{ ++ const struct sched_class *class = sched_class_highest; ++ unsigned long total_load_moved = 0; ++ int this_best_prio = this_rq->curr->prio; ++ ++ do { ++ total_load_moved += ++ class->load_balance(this_rq, this_cpu, busiest, ++ max_load_move - total_load_moved, ++ sd, idle, all_pinned, &this_best_prio); ++ class = class->next; ++ ++ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) ++ break; ++ ++ } while (class && max_load_move > total_load_moved); ++ ++ return total_load_moved > 0; ++} ++ ++static int ++iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, ++ struct sched_domain *sd, enum cpu_idle_type idle, ++ struct rq_iterator *iterator) ++{ ++ struct task_struct *p = iterator->start(iterator->arg); ++ int pinned = 0; ++ ++ while (p) { ++ if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { ++ pull_task(busiest, p, this_rq, this_cpu); ++ /* ++ * Right now, this is only the second place pull_task() ++ * is called, so we can safely collect pull_task() ++ * stats here rather than inside pull_task(). ++ */ ++ schedstat_inc(sd, lb_gained[idle]); ++ ++ return 1; ++ } ++ p = iterator->next(iterator->arg); ++ } ++ ++ return 0; ++} ++ ++/* ++ * move_one_task tries to move exactly one task from busiest to this_rq, as ++ * part of active balancing operations within "domain". ++ * Returns 1 if successful and 0 otherwise. ++ * ++ * Called with both runqueues locked. ++ */ ++static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, ++ struct sched_domain *sd, enum cpu_idle_type idle) ++{ ++ const struct sched_class *class; ++ ++ for (class = sched_class_highest; class; class = class->next) ++ if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle)) ++ return 1; ++ ++ return 0; ++} ++ ++/* ++ * find_busiest_group finds and returns the busiest CPU group within the ++ * domain. It calculates and returns the amount of weighted load which ++ * should be moved to restore balance via the imbalance parameter. 
++ */ ++static struct sched_group * ++find_busiest_group(struct sched_domain *sd, int this_cpu, ++ unsigned long *imbalance, enum cpu_idle_type idle, ++ int *sd_idle, const cpumask_t *cpus, int *balance) ++{ ++ struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; ++ unsigned long max_load, avg_load, total_load, this_load, total_pwr; ++ unsigned long max_pull; ++ unsigned long busiest_load_per_task, busiest_nr_running; ++ unsigned long this_load_per_task, this_nr_running; ++ int load_idx, group_imb = 0; ++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) ++ int power_savings_balance = 1; ++ unsigned long leader_nr_running = 0, min_load_per_task = 0; ++ unsigned long min_nr_running = ULONG_MAX; ++ struct sched_group *group_min = NULL, *group_leader = NULL; ++#endif ++ ++ max_load = this_load = total_load = total_pwr = 0; ++ busiest_load_per_task = busiest_nr_running = 0; ++ this_load_per_task = this_nr_running = 0; ++ ++ if (idle == CPU_NOT_IDLE) ++ load_idx = sd->busy_idx; ++ else if (idle == CPU_NEWLY_IDLE) ++ load_idx = sd->newidle_idx; ++ else ++ load_idx = sd->idle_idx; ++ ++ do { ++ unsigned long load, group_capacity, max_cpu_load, min_cpu_load; ++ int local_group; ++ int i; ++ int __group_imb = 0; ++ unsigned int balance_cpu = -1, first_idle_cpu = 0; ++ unsigned long sum_nr_running, sum_weighted_load; ++ unsigned long sum_avg_load_per_task; ++ unsigned long avg_load_per_task; ++ ++ local_group = cpu_isset(this_cpu, group->cpumask); ++ ++ if (local_group) ++ balance_cpu = first_cpu(group->cpumask); ++ ++ /* Tally up the load of all CPUs in the group */ ++ sum_weighted_load = sum_nr_running = avg_load = 0; ++ sum_avg_load_per_task = avg_load_per_task = 0; ++ ++ max_cpu_load = 0; ++ min_cpu_load = ~0UL; ++ ++ for_each_cpu_mask_nr(i, group->cpumask) { ++ struct rq *rq; ++ ++ if (!cpu_isset(i, *cpus)) ++ continue; ++ ++ rq = cpu_rq(i); ++ ++ if (*sd_idle && rq->nr_running) ++ *sd_idle = 0; ++ ++ /* Bias balancing toward cpus of our domain */ ++ if (local_group) { ++ if (idle_cpu(i) && !first_idle_cpu) { ++ first_idle_cpu = 1; ++ balance_cpu = i; ++ } ++ ++ load = target_load(i, load_idx); ++ } else { ++ load = source_load(i, load_idx); ++ if (load > max_cpu_load) ++ max_cpu_load = load; ++ if (min_cpu_load > load) ++ min_cpu_load = load; ++ } ++ ++ avg_load += load; ++ sum_nr_running += rq->nr_running; ++ sum_weighted_load += weighted_cpuload(i); ++ ++ sum_avg_load_per_task += cpu_avg_load_per_task(i); ++ } ++ ++ /* ++ * First idle cpu or the first cpu(busiest) in this sched group ++ * is eligible for doing load balancing at this and above ++ * domains. In the newly idle case, we will allow all the cpu's ++ * to do the newly idle load balance. ++ */ ++ if (idle != CPU_NEWLY_IDLE && local_group && ++ balance_cpu != this_cpu && balance) { ++ *balance = 0; ++ goto ret; ++ } ++ ++ total_load += avg_load; ++ total_pwr += group->__cpu_power; ++ ++ /* Adjust by relative CPU power of the group */ ++ avg_load = sg_div_cpu_power(group, ++ avg_load * SCHED_LOAD_SCALE); ++ ++ ++ /* ++ * Consider the group unbalanced when the imbalance is larger ++ * than the average weight of two tasks. ++ * ++ * APZ: with cgroup the avg task weight can vary wildly and ++ * might not be a suitable number - should we keep a ++ * normalized nr_running number somewhere that negates ++ * the hierarchy? 
++ */ ++ avg_load_per_task = sg_div_cpu_power(group, ++ sum_avg_load_per_task * SCHED_LOAD_SCALE); ++ ++ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) ++ __group_imb = 1; ++ ++ group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; ++ ++ if (local_group) { ++ this_load = avg_load; ++ this = group; ++ this_nr_running = sum_nr_running; ++ this_load_per_task = sum_weighted_load; ++ } else if (avg_load > max_load && ++ (sum_nr_running > group_capacity || __group_imb)) { ++ max_load = avg_load; ++ busiest = group; ++ busiest_nr_running = sum_nr_running; ++ busiest_load_per_task = sum_weighted_load; ++ group_imb = __group_imb; ++ } ++ ++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) ++ /* ++ * Busy processors will not participate in power savings ++ * balance. ++ */ ++ if (idle == CPU_NOT_IDLE || ++ !(sd->flags & SD_POWERSAVINGS_BALANCE)) ++ goto group_next; ++ ++ /* ++ * If the local group is idle or completely loaded ++ * no need to do power savings balance at this domain ++ */ ++ if (local_group && (this_nr_running >= group_capacity || ++ !this_nr_running)) ++ power_savings_balance = 0; ++ ++ /* ++ * If a group is already running at full capacity or idle, ++ * don't include that group in power savings calculations ++ */ ++ if (!power_savings_balance || sum_nr_running >= group_capacity ++ || !sum_nr_running) ++ goto group_next; ++ ++ /* ++ * Calculate the group which has the least non-idle load. ++ * This is the group from where we need to pick up the load ++ * for saving power ++ */ ++ if ((sum_nr_running < min_nr_running) || ++ (sum_nr_running == min_nr_running && ++ first_cpu(group->cpumask) < ++ first_cpu(group_min->cpumask))) { ++ group_min = group; ++ min_nr_running = sum_nr_running; ++ min_load_per_task = sum_weighted_load / ++ sum_nr_running; ++ } ++ ++ /* ++ * Calculate the group which is almost near its ++ * capacity but still has some space to pick up some load ++ * from other group and save more power ++ */ ++ if (sum_nr_running <= group_capacity - 1) { ++ if (sum_nr_running > leader_nr_running || ++ (sum_nr_running == leader_nr_running && ++ first_cpu(group->cpumask) > ++ first_cpu(group_leader->cpumask))) { ++ group_leader = group; ++ leader_nr_running = sum_nr_running; ++ } ++ } ++group_next: ++#endif ++ group = group->next; ++ } while (group != sd->groups); ++ ++ if (!busiest || this_load >= max_load || busiest_nr_running == 0) ++ goto out_balanced; ++ ++ avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; ++ ++ if (this_load >= avg_load || ++ 100*max_load <= sd->imbalance_pct*this_load) ++ goto out_balanced; ++ ++ busiest_load_per_task /= busiest_nr_running; ++ if (group_imb) ++ busiest_load_per_task = min(busiest_load_per_task, avg_load); ++ ++ /* ++ * We're trying to get all the cpus to the average_load, so we don't ++ * want to push ourselves above the average load, nor do we wish to ++ * reduce the max loaded cpu below the average load, as either of these ++ * actions would just result in more rebalancing later, and ping-pong ++ * tasks around. Thus we look for the minimum possible imbalance. ++ * Negative imbalances (*we* are more loaded than anyone else) will ++ * be counted as no imbalance for these purposes -- we can't fix that ++ * by pulling tasks to us. Be careful of negative numbers as they'll ++ * appear as very large values with unsigned longs. 
++ */ ++ if (max_load <= busiest_load_per_task) ++ goto out_balanced; ++ ++ /* ++ * In the presence of smp nice balancing, certain scenarios can have ++ * max load less than avg load(as we skip the groups at or below ++ * its cpu_power, while calculating max_load..) ++ */ ++ if (max_load < avg_load) { ++ *imbalance = 0; ++ goto small_imbalance; ++ } ++ ++ /* Don't want to pull so many tasks that a group would go idle */ ++ max_pull = min(max_load - avg_load, max_load - busiest_load_per_task); ++ ++ /* How much load to actually move to equalise the imbalance */ ++ *imbalance = min(max_pull * busiest->__cpu_power, ++ (avg_load - this_load) * this->__cpu_power) ++ / SCHED_LOAD_SCALE; ++ ++ /* ++ * if *imbalance is less than the average load per runnable task ++ * there is no gaurantee that any tasks will be moved so we'll have ++ * a think about bumping its value to force at least one task to be ++ * moved ++ */ ++ if (*imbalance < busiest_load_per_task) { ++ unsigned long tmp, pwr_now, pwr_move; ++ unsigned int imbn; ++ ++small_imbalance: ++ pwr_move = pwr_now = 0; ++ imbn = 2; ++ if (this_nr_running) { ++ this_load_per_task /= this_nr_running; ++ if (busiest_load_per_task > this_load_per_task) ++ imbn = 1; ++ } else ++ this_load_per_task = cpu_avg_load_per_task(this_cpu); ++ ++ if (max_load - this_load + 2*busiest_load_per_task >= ++ busiest_load_per_task * imbn) { ++ *imbalance = busiest_load_per_task; ++ return busiest; ++ } ++ ++ /* ++ * OK, we don't have enough imbalance to justify moving tasks, ++ * however we may be able to increase total CPU power used by ++ * moving them. ++ */ ++ ++ pwr_now += busiest->__cpu_power * ++ min(busiest_load_per_task, max_load); ++ pwr_now += this->__cpu_power * ++ min(this_load_per_task, this_load); ++ pwr_now /= SCHED_LOAD_SCALE; ++ ++ /* Amount of load we'd subtract */ ++ tmp = sg_div_cpu_power(busiest, ++ busiest_load_per_task * SCHED_LOAD_SCALE); ++ if (max_load > tmp) ++ pwr_move += busiest->__cpu_power * ++ min(busiest_load_per_task, max_load - tmp); ++ ++ /* Amount of load we'd add */ ++ if (max_load * busiest->__cpu_power < ++ busiest_load_per_task * SCHED_LOAD_SCALE) ++ tmp = sg_div_cpu_power(this, ++ max_load * busiest->__cpu_power); ++ else ++ tmp = sg_div_cpu_power(this, ++ busiest_load_per_task * SCHED_LOAD_SCALE); ++ pwr_move += this->__cpu_power * ++ min(this_load_per_task, this_load + tmp); ++ pwr_move /= SCHED_LOAD_SCALE; ++ ++ /* Move if we gain throughput */ ++ if (pwr_move > pwr_now) ++ *imbalance = busiest_load_per_task; ++ } ++ ++ return busiest; ++ ++out_balanced: ++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) ++ if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) ++ goto ret; ++ ++ if (this == group_leader && group_leader != group_min) { ++ *imbalance = min_load_per_task; ++ return group_min; ++ } ++#endif ++ret: ++ *imbalance = 0; ++ return NULL; ++} ++ ++/* ++ * find_busiest_queue - find the busiest runqueue among the cpus in group. 
++ */ ++static struct rq * ++find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, ++ unsigned long imbalance, const cpumask_t *cpus) ++{ ++ struct rq *busiest = NULL, *rq; ++ unsigned long max_load = 0; ++ int i; ++ ++ for_each_cpu_mask_nr(i, group->cpumask) { ++ unsigned long wl; ++ ++ if (!cpu_isset(i, *cpus)) ++ continue; ++ ++ rq = cpu_rq(i); ++ wl = weighted_cpuload(i); ++ ++ if (rq->nr_running == 1 && wl > imbalance) ++ continue; ++ ++ if (wl > max_load) { ++ max_load = wl; ++ busiest = rq; ++ } ++ } ++ ++ return busiest; ++} ++ ++/* ++ * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but ++ * so long as it is large enough. ++ */ ++#define MAX_PINNED_INTERVAL 512 ++ ++/* ++ * Check this_cpu to ensure it is balanced within domain. Attempt to move ++ * tasks if there is an imbalance. ++ */ ++static int load_balance(int this_cpu, struct rq *this_rq, ++ struct sched_domain *sd, enum cpu_idle_type idle, ++ int *balance, cpumask_t *cpus) ++{ ++ int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; ++ struct sched_group *group; ++ unsigned long imbalance; ++ struct rq *busiest; ++ unsigned long flags; ++ ++ cpus_setall(*cpus); ++ ++ /* ++ * When power savings policy is enabled for the parent domain, idle ++ * sibling can pick up load irrespective of busy siblings. In this case, ++ * let the state of idle sibling percolate up as CPU_IDLE, instead of ++ * portraying it as CPU_NOT_IDLE. ++ */ ++ if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && ++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) ++ sd_idle = 1; ++ ++ schedstat_inc(sd, lb_count[idle]); ++ ++redo: ++ update_shares(sd); ++ group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, ++ cpus, balance); ++ ++ if (*balance == 0) ++ goto out_balanced; ++ ++ if (!group) { ++ schedstat_inc(sd, lb_nobusyg[idle]); ++ goto out_balanced; ++ } ++ ++ busiest = find_busiest_queue(group, idle, imbalance, cpus); ++ if (!busiest) { ++ schedstat_inc(sd, lb_nobusyq[idle]); ++ goto out_balanced; ++ } ++ ++ BUG_ON(busiest == this_rq); ++ ++ schedstat_add(sd, lb_imbalance[idle], imbalance); ++ ++ ld_moved = 0; ++ if (busiest->nr_running > 1) { ++ /* ++ * Attempt to move tasks. If find_busiest_group has found ++ * an imbalance but busiest->nr_running <= 1, the group is ++ * still unbalanced. ld_moved simply stays zero, so it is ++ * correctly treated as an imbalance. ++ */ ++ local_irq_save(flags); ++ double_rq_lock(this_rq, busiest); ++ ld_moved = move_tasks(this_rq, this_cpu, busiest, ++ imbalance, sd, idle, &all_pinned); ++ double_rq_unlock(this_rq, busiest); ++ local_irq_restore(flags); ++ ++ /* ++ * some other cpu did the load balance for us. 
++ */ ++ if (ld_moved && this_cpu != smp_processor_id()) ++ resched_cpu(this_cpu); ++ ++ /* All tasks on this runqueue were pinned by CPU affinity */ ++ if (unlikely(all_pinned)) { ++ cpu_clear(cpu_of(busiest), *cpus); ++ if (!cpus_empty(*cpus)) ++ goto redo; ++ goto out_balanced; ++ } ++ } ++ ++ if (!ld_moved) { ++ schedstat_inc(sd, lb_failed[idle]); ++ sd->nr_balance_failed++; ++ ++ if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { ++ ++ spin_lock_irqsave(&busiest->lock, flags); ++ ++ /* don't kick the migration_thread, if the curr ++ * task on busiest cpu can't be moved to this_cpu ++ */ ++ if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { ++ spin_unlock_irqrestore(&busiest->lock, flags); ++ all_pinned = 1; ++ goto out_one_pinned; ++ } ++ ++ if (!busiest->active_balance) { ++ busiest->active_balance = 1; ++ busiest->push_cpu = this_cpu; ++ active_balance = 1; ++ } ++ spin_unlock_irqrestore(&busiest->lock, flags); ++ if (active_balance) ++ wake_up_process(busiest->migration_thread); ++ ++ /* ++ * We've kicked active balancing, reset the failure ++ * counter. ++ */ ++ sd->nr_balance_failed = sd->cache_nice_tries+1; ++ } ++ } else ++ sd->nr_balance_failed = 0; ++ ++ if (likely(!active_balance)) { ++ /* We were unbalanced, so reset the balancing interval */ ++ sd->balance_interval = sd->min_interval; ++ } else { ++ /* ++ * If we've begun active balancing, start to back off. This ++ * case may not be covered by the all_pinned logic if there ++ * is only 1 task on the busy runqueue (because we don't call ++ * move_tasks). ++ */ ++ if (sd->balance_interval < sd->max_interval) ++ sd->balance_interval *= 2; ++ } ++ ++ if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && ++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) ++ ld_moved = -1; ++ ++ goto out; ++ ++out_balanced: ++ schedstat_inc(sd, lb_balanced[idle]); ++ ++ sd->nr_balance_failed = 0; ++ ++out_one_pinned: ++ /* tune up the balancing interval */ ++ if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || ++ (sd->balance_interval < sd->max_interval)) ++ sd->balance_interval *= 2; ++ ++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && ++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) ++ ld_moved = -1; ++ else ++ ld_moved = 0; ++out: ++ if (ld_moved) ++ update_shares(sd); ++ return ld_moved; ++} ++ ++/* ++ * Check this_cpu to ensure it is balanced within domain. Attempt to move ++ * tasks if there is an imbalance. ++ * ++ * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE). ++ * this_rq is locked. ++ */ ++static int ++load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, ++ cpumask_t *cpus) ++{ ++ struct sched_group *group; ++ struct rq *busiest = NULL; ++ unsigned long imbalance; ++ int ld_moved = 0; ++ int sd_idle = 0; ++ int all_pinned = 0; ++ ++ cpus_setall(*cpus); ++ ++ /* ++ * When power savings policy is enabled for the parent domain, idle ++ * sibling can pick up load irrespective of busy siblings. In this case, ++ * let the state of idle sibling percolate up as IDLE, instead of ++ * portraying it as CPU_NOT_IDLE. 
++ */ ++ if (sd->flags & SD_SHARE_CPUPOWER && ++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) ++ sd_idle = 1; ++ ++ schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]); ++redo: ++ update_shares_locked(this_rq, sd); ++ group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE, ++ &sd_idle, cpus, NULL); ++ if (!group) { ++ schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]); ++ goto out_balanced; ++ } ++ ++ busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus); ++ if (!busiest) { ++ schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]); ++ goto out_balanced; ++ } ++ ++ BUG_ON(busiest == this_rq); ++ ++ schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance); ++ ++ ld_moved = 0; ++ if (busiest->nr_running > 1) { ++ /* Attempt to move tasks */ ++ double_lock_balance(this_rq, busiest); ++ /* this_rq->clock is already updated */ ++ update_rq_clock(busiest); ++ ld_moved = move_tasks(this_rq, this_cpu, busiest, ++ imbalance, sd, CPU_NEWLY_IDLE, ++ &all_pinned); ++ double_unlock_balance(this_rq, busiest); ++ ++ if (unlikely(all_pinned)) { ++ cpu_clear(cpu_of(busiest), *cpus); ++ if (!cpus_empty(*cpus)) ++ goto redo; ++ } ++ } ++ ++ if (!ld_moved) { ++ schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); ++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && ++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) ++ return -1; ++ } else ++ sd->nr_balance_failed = 0; ++ ++ update_shares_locked(this_rq, sd); ++ return ld_moved; ++ ++out_balanced: ++ schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]); ++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && ++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) ++ return -1; ++ sd->nr_balance_failed = 0; ++ ++ return 0; ++} ++ ++/* ++ * idle_balance is called by schedule() if this_cpu is about to become ++ * idle. Attempts to pull tasks from other CPUs. ++ */ ++static void idle_balance(int this_cpu, struct rq *this_rq) ++{ ++ struct sched_domain *sd; ++ int pulled_task = -1; ++ unsigned long next_balance = jiffies + HZ; ++ cpumask_t tmpmask; ++ ++ for_each_domain(this_cpu, sd) { ++ unsigned long interval; ++ ++ if (!(sd->flags & SD_LOAD_BALANCE)) ++ continue; ++ ++ if (sd->flags & SD_BALANCE_NEWIDLE) ++ /* If we've pulled tasks over stop searching: */ ++ pulled_task = load_balance_newidle(this_cpu, this_rq, ++ sd, &tmpmask); ++ ++ interval = msecs_to_jiffies(sd->balance_interval); ++ if (time_after(next_balance, sd->last_balance + interval)) ++ next_balance = sd->last_balance + interval; ++ if (pulled_task) ++ break; ++ } ++ if (pulled_task || time_after(jiffies, this_rq->next_balance)) { ++ /* ++ * We are going idle. next_balance may be set based on ++ * a busy processor. So reset next_balance. ++ */ ++ this_rq->next_balance = next_balance; ++ } ++} ++ ++/* ++ * active_load_balance is run by migration threads. It pushes running tasks ++ * off the busiest CPU onto idle CPUs. It requires at least 1 task to be ++ * running on each physical CPU where possible, and avoids physical / ++ * logical imbalances. ++ * ++ * Called with busiest_rq locked. ++ */ ++static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) ++{ ++ int target_cpu = busiest_rq->push_cpu; ++ struct sched_domain *sd; ++ struct rq *target_rq; ++ ++ /* Is there any task to move? */ ++ if (busiest_rq->nr_running <= 1) ++ return; ++ ++ target_rq = cpu_rq(target_cpu); ++ ++ /* ++ * This condition is "impossible", if it occurs ++ * we need to fix it. Originally reported by ++ * Bjorn Helgaas on a 128-cpu setup. 
++ */ ++ BUG_ON(busiest_rq == target_rq); ++ ++ /* move a task from busiest_rq to target_rq */ ++ double_lock_balance(busiest_rq, target_rq); ++ update_rq_clock(busiest_rq); ++ update_rq_clock(target_rq); ++ ++ /* Search for an sd spanning us and the target CPU. */ ++ for_each_domain(target_cpu, sd) { ++ if ((sd->flags & SD_LOAD_BALANCE) && ++ cpu_isset(busiest_cpu, sd->span)) ++ break; ++ } ++ ++ if (likely(sd)) { ++ schedstat_inc(sd, alb_count); ++ ++ if (move_one_task(target_rq, target_cpu, busiest_rq, ++ sd, CPU_IDLE)) ++ schedstat_inc(sd, alb_pushed); ++ else ++ schedstat_inc(sd, alb_failed); ++ } ++ double_unlock_balance(busiest_rq, target_rq); ++} ++ ++#ifdef CONFIG_NO_HZ ++static struct { ++ atomic_t load_balancer; ++ cpumask_t cpu_mask; ++} nohz ____cacheline_aligned = { ++ .load_balancer = ATOMIC_INIT(-1), ++ .cpu_mask = CPU_MASK_NONE, ++}; ++ ++/* ++ * This routine will try to nominate the ilb (idle load balancing) ++ * owner among the cpus whose ticks are stopped. ilb owner will do the idle ++ * load balancing on behalf of all those cpus. If all the cpus in the system ++ * go into this tickless mode, then there will be no ilb owner (as there is ++ * no need for one) and all the cpus will sleep till the next wakeup event ++ * arrives... ++ * ++ * For the ilb owner, tick is not stopped. And this tick will be used ++ * for idle load balancing. ilb owner will still be part of ++ * nohz.cpu_mask.. ++ * ++ * While stopping the tick, this cpu will become the ilb owner if there ++ * is no other owner. And will be the owner till that cpu becomes busy ++ * or if all cpus in the system stop their ticks at which point ++ * there is no need for ilb owner. ++ * ++ * When the ilb owner becomes busy, it nominates another owner, during the ++ * next busy scheduler_tick() ++ */ ++int select_nohz_load_balancer(int stop_tick) ++{ ++ int cpu = smp_processor_id(); ++ ++ if (stop_tick) { ++ cpu_set(cpu, nohz.cpu_mask); ++ cpu_rq(cpu)->in_nohz_recently = 1; ++ ++ /* ++ * If we are going offline and still the leader, give up! ++ */ ++ if (!cpu_active(cpu) && ++ atomic_read(&nohz.load_balancer) == cpu) { ++ if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) ++ BUG(); ++ return 0; ++ } ++ ++ /* time for ilb owner also to sleep */ ++ if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { ++ if (atomic_read(&nohz.load_balancer) == cpu) ++ atomic_set(&nohz.load_balancer, -1); ++ return 0; ++ } ++ ++ if (atomic_read(&nohz.load_balancer) == -1) { ++ /* make me the ilb owner */ ++ if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1) ++ return 1; ++ } else if (atomic_read(&nohz.load_balancer) == cpu) ++ return 1; ++ } else { ++ if (!cpu_isset(cpu, nohz.cpu_mask)) ++ return 0; ++ ++ cpu_clear(cpu, nohz.cpu_mask); ++ ++ if (atomic_read(&nohz.load_balancer) == cpu) ++ if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) ++ BUG(); ++ } ++ return 0; ++} ++#endif ++ ++static DEFINE_SPINLOCK(balancing); ++ ++/* ++ * It checks each scheduling domain to see if it is due to be balanced, ++ * and initiates a balancing operation if so. ++ * ++ * Balancing parameters are set up in arch_init_sched_domains. 
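The CONFIG_NO_HZ block above elects the idle-load-balancing (ilb) owner with a single atomic compare-and-swap on nohz.load_balancer. A minimal user-space model of that handoff (plain C with a GCC atomic builtin; the CPU ids are made up, this is not kernel code):

/* Illustrative model of the ilb-owner handoff in select_nohz_load_balancer(). */
#include <stdio.h>

static int load_balancer = -1;   /* stands in for nohz.load_balancer */

/* Returns 1 if 'cpu' becomes (or already is) the idle-load-balancing owner. */
static int claim_ilb(int cpu)
{
    if (load_balancer == -1) {
        /* Only one racing CPU can swing -1 -> cpu; the others keep ticking. */
        if (__sync_val_compare_and_swap(&load_balancer, -1, cpu) == -1)
            return 1;
    }
    return load_balancer == cpu;
}

int main(void)
{
    printf("cpu1 owner? %d\n", claim_ilb(1));  /* 1: claimed ownership      */
    printf("cpu2 owner? %d\n", claim_ilb(2));  /* 0: cpu1 already owns it   */
    return 0;
}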
++ */ ++static void rebalance_domains(int cpu, enum cpu_idle_type idle) ++{ ++ int balance = 1; ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long interval; ++ struct sched_domain *sd; ++ /* Earliest time when we have to do rebalance again */ ++ unsigned long next_balance = jiffies + 60*HZ; ++ int update_next_balance = 0; ++ int need_serialize; ++ cpumask_t tmp; ++ ++ for_each_domain(cpu, sd) { ++ if (!(sd->flags & SD_LOAD_BALANCE)) ++ continue; ++ ++ interval = sd->balance_interval; ++ if (idle != CPU_IDLE) ++ interval *= sd->busy_factor; ++ ++ /* scale ms to jiffies */ ++ interval = msecs_to_jiffies(interval); ++ if (unlikely(!interval)) ++ interval = 1; ++ if (interval > HZ*NR_CPUS/10) ++ interval = HZ*NR_CPUS/10; ++ ++ need_serialize = sd->flags & SD_SERIALIZE; ++ ++ if (need_serialize) { ++ if (!spin_trylock(&balancing)) ++ goto out; ++ } ++ ++ if (time_after_eq(jiffies, sd->last_balance + interval)) { ++ if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { ++ /* ++ * We've pulled tasks over so either we're no ++ * longer idle, or one of our SMT siblings is ++ * not idle. ++ */ ++ idle = CPU_NOT_IDLE; ++ } ++ sd->last_balance = jiffies; ++ } ++ if (need_serialize) ++ spin_unlock(&balancing); ++out: ++ if (time_after(next_balance, sd->last_balance + interval)) { ++ next_balance = sd->last_balance + interval; ++ update_next_balance = 1; ++ } ++ ++ /* ++ * Stop the load balance at this level. There is another ++ * CPU in our sched group which is doing load balancing more ++ * actively. ++ */ ++ if (!balance) ++ break; ++ } ++ ++ /* ++ * next_balance will be updated only when there is a need. ++ * When the cpu is attached to null domain for ex, it will not be ++ * updated. ++ */ ++ if (likely(update_next_balance)) ++ rq->next_balance = next_balance; ++} ++ ++/* ++ * run_rebalance_domains is triggered when needed from the scheduler tick. ++ * In CONFIG_NO_HZ case, the idle load balance owner will do the ++ * rebalancing for all the cpus for whom scheduler ticks are stopped. ++ */ ++static void run_rebalance_domains(struct softirq_action *h) ++{ ++ int this_cpu = smp_processor_id(); ++ struct rq *this_rq = cpu_rq(this_cpu); ++ enum cpu_idle_type idle = this_rq->idle_at_tick ? ++ CPU_IDLE : CPU_NOT_IDLE; ++ ++ rebalance_domains(this_cpu, idle); ++ ++#ifdef CONFIG_NO_HZ ++ /* ++ * If this cpu is the owner for idle load balancing, then do the ++ * balancing on behalf of the other idle cpus whose ticks are ++ * stopped. ++ */ ++ if (this_rq->idle_at_tick && ++ atomic_read(&nohz.load_balancer) == this_cpu) { ++ cpumask_t cpus = nohz.cpu_mask; ++ struct rq *rq; ++ int balance_cpu; ++ ++ cpu_clear(this_cpu, cpus); ++ for_each_cpu_mask_nr(balance_cpu, cpus) { ++ /* ++ * If this cpu gets work to do, stop the load balancing ++ * work being done for other cpus. Next load ++ * balancing owner will pick it up. ++ */ ++ if (need_resched()) ++ break; ++ ++ rebalance_domains(balance_cpu, CPU_IDLE); ++ ++ rq = cpu_rq(balance_cpu); ++ if (time_after(this_rq->next_balance, rq->next_balance)) ++ this_rq->next_balance = rq->next_balance; ++ } ++ } ++#endif ++} ++ ++/* ++ * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. ++ * ++ * In case of CONFIG_NO_HZ, this is the place where we nominate a new ++ * idle load balancing owner or decide to stop the periodic load balancing, ++ * if the whole system is idle. 
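rebalance_domains() above scales each domain's balance_interval by busy_factor when the CPU is busy, converts it to jiffies and clamps it to at most HZ*NR_CPUS/10. A stand-alone sketch of that arithmetic, with HZ, NR_CPUS, busy_factor and the interval chosen only for illustration:

/* Sketch of the interval clamping in rebalance_domains(); values are made up. */
#include <stdio.h>

#define HZ      250              /* assumed tick rate        */
#define NR_CPUS 8                /* assumed number of CPUs   */

static unsigned long msecs_to_jiffies(unsigned long ms)
{
    return (ms * HZ + 999) / 1000;       /* rough conversion for HZ <= 1000 */
}

int main(void)
{
    unsigned long interval = 64;         /* sd->balance_interval in ms      */
    int busy = 1;                        /* idle != CPU_IDLE                */

    if (busy)
        interval *= 32;                  /* assumed sd->busy_factor         */
    interval = msecs_to_jiffies(interval);
    if (!interval)
        interval = 1;
    if (interval > HZ * NR_CPUS / 10)
        interval = HZ * NR_CPUS / 10;    /* clamp, as in the hunk above     */

    printf("effective interval: %lu jiffies\n", interval);
    return 0;
}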
++ */ ++static inline void trigger_load_balance(struct rq *rq, int cpu) ++{ ++#ifdef CONFIG_NO_HZ ++ /* ++ * If we were in the nohz mode recently and busy at the current ++ * scheduler tick, then check if we need to nominate new idle ++ * load balancer. ++ */ ++ if (rq->in_nohz_recently && !rq->idle_at_tick) { ++ rq->in_nohz_recently = 0; ++ ++ if (atomic_read(&nohz.load_balancer) == cpu) { ++ cpu_clear(cpu, nohz.cpu_mask); ++ atomic_set(&nohz.load_balancer, -1); ++ } ++ ++ if (atomic_read(&nohz.load_balancer) == -1) { ++ /* ++ * simple selection for now: Nominate the ++ * first cpu in the nohz list to be the next ++ * ilb owner. ++ * ++ * TBD: Traverse the sched domains and nominate ++ * the nearest cpu in the nohz.cpu_mask. ++ */ ++ int ilb = first_cpu(nohz.cpu_mask); ++ ++ if (ilb < nr_cpu_ids) ++ resched_cpu(ilb); ++ } ++ } ++ ++ /* ++ * If this cpu is idle and doing idle load balancing for all the ++ * cpus with ticks stopped, is it time for that to stop? ++ */ ++ if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && ++ cpus_weight(nohz.cpu_mask) == num_online_cpus()) { ++ resched_cpu(cpu); ++ return; ++ } ++ ++ /* ++ * If this cpu is idle and the idle load balancing is done by ++ * someone else, then no need raise the SCHED_SOFTIRQ ++ */ ++ if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && ++ cpu_isset(cpu, nohz.cpu_mask)) ++ return; ++#endif ++ if (time_after_eq(jiffies, rq->next_balance)) ++ raise_softirq(SCHED_SOFTIRQ); ++} ++ ++#else /* CONFIG_SMP */ ++ ++/* ++ * on UP we do not need to balance between CPUs: ++ */ ++static inline void idle_balance(int cpu, struct rq *rq) ++{ ++} ++ ++#endif ++ ++DEFINE_PER_CPU(struct kernel_stat, kstat); ++ ++EXPORT_PER_CPU_SYMBOL(kstat); ++ ++/* ++ * Return p->sum_exec_runtime plus any more ns on the sched_clock ++ * that have not yet been banked in case the task is currently running. ++ */ ++unsigned long long task_sched_runtime(struct task_struct *p) ++{ ++ unsigned long flags; ++ u64 ns, delta_exec; ++ struct rq *rq; ++ ++ rq = task_rq_lock(p, &flags); ++ ns = p->se.sum_exec_runtime; ++ if (task_current(rq, p)) { ++ update_rq_clock(rq); ++ delta_exec = rq->clock - p->se.exec_start; ++ if ((s64)delta_exec > 0) ++ ns += delta_exec; ++ } ++ task_rq_unlock(rq, &flags); ++ ++ return ns; ++} ++ ++/* ++ * Account user cpu time to a process. ++ * @p: the process that the cpu time gets accounted to ++ * @cputime: the cpu time spent in user space since the last update ++ */ ++void account_user_time(struct task_struct *p, cputime_t cputime) ++{ ++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; ++ struct vx_info *vxi = p->vx_info; /* p is _always_ current */ ++ cputime64_t tmp; ++ int nice = (TASK_NICE(p) > 0); ++ ++ p->utime = cputime_add(p->utime, cputime); ++ vx_account_user(vxi, cputime, nice); ++ ++ /* Add user time to cpustat. */ ++ tmp = cputime_to_cputime64(cputime); ++ if (nice) ++ cpustat->nice = cputime64_add(cpustat->nice, tmp); ++ else ++ cpustat->user = cputime64_add(cpustat->user, tmp); ++ /* Account for user time used */ ++ acct_update_integrals(p); ++} ++ ++/* ++ * Account guest cpu time to a process. 
++ * @p: the process that the cpu time gets accounted to ++ * @cputime: the cpu time spent in virtual machine since the last update ++ */ ++static void account_guest_time(struct task_struct *p, cputime_t cputime) ++{ ++ cputime64_t tmp; ++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; ++ ++ tmp = cputime_to_cputime64(cputime); ++ ++ p->utime = cputime_add(p->utime, cputime); ++ p->gtime = cputime_add(p->gtime, cputime); ++ ++ cpustat->user = cputime64_add(cpustat->user, tmp); ++ cpustat->guest = cputime64_add(cpustat->guest, tmp); ++} ++ ++/* ++ * Account scaled user cpu time to a process. ++ * @p: the process that the cpu time gets accounted to ++ * @cputime: the cpu time spent in user space since the last update ++ */ ++void account_user_time_scaled(struct task_struct *p, cputime_t cputime) ++{ ++ p->utimescaled = cputime_add(p->utimescaled, cputime); ++} ++ ++/* ++ * Account system cpu time to a process. ++ * @p: the process that the cpu time gets accounted to ++ * @hardirq_offset: the offset to subtract from hardirq_count() ++ * @cputime: the cpu time spent in kernel space since the last update ++ */ ++void account_system_time(struct task_struct *p, int hardirq_offset, ++ cputime_t cputime) ++{ ++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; ++ struct vx_info *vxi = p->vx_info; /* p is _always_ current */ ++ struct rq *rq = this_rq(); ++ cputime64_t tmp; ++ ++ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { ++ account_guest_time(p, cputime); ++ return; ++ } ++ ++ p->stime = cputime_add(p->stime, cputime); ++ vx_account_system(vxi, cputime, (p == rq->idle)); ++ ++ /* Add system time to cpustat. */ ++ tmp = cputime_to_cputime64(cputime); ++ if (hardirq_count() - hardirq_offset) ++ cpustat->irq = cputime64_add(cpustat->irq, tmp); ++ else if (softirq_count()) ++ cpustat->softirq = cputime64_add(cpustat->softirq, tmp); ++ else if (p != rq->idle) ++ cpustat->system = cputime64_add(cpustat->system, tmp); ++ else if (atomic_read(&rq->nr_iowait) > 0) ++ cpustat->iowait = cputime64_add(cpustat->iowait, tmp); ++ else ++ cpustat->idle = cputime64_add(cpustat->idle, tmp); ++ /* Account for system time used */ ++ acct_update_integrals(p); ++} ++ ++/* ++ * Account scaled system cpu time to a process. ++ * @p: the process that the cpu time gets accounted to ++ * @hardirq_offset: the offset to subtract from hardirq_count() ++ * @cputime: the cpu time spent in kernel space since the last update ++ */ ++void account_system_time_scaled(struct task_struct *p, cputime_t cputime) ++{ ++ p->stimescaled = cputime_add(p->stimescaled, cputime); ++} ++ ++/* ++ * Account for involuntary wait time. 
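account_system_time() above classifies each tick strictly in this order: hardirq, softirq, ordinary system time, iowait, idle. A tiny stand-alone sketch of that decision chain, with plain booleans standing in for the preempt-count and runqueue checks:

/* Sketch of the classification order in account_system_time(). */
#include <stdio.h>

static const char *classify(int in_hardirq, int in_softirq,
                            int is_idle_task, int iowait_pending)
{
    if (in_hardirq)
        return "irq";
    if (in_softirq)
        return "softirq";
    if (!is_idle_task)
        return "system";
    if (iowait_pending)
        return "iowait";
    return "idle";
}

int main(void)
{
    printf("%s\n", classify(0, 1, 0, 0));   /* softirq */
    printf("%s\n", classify(0, 0, 1, 1));   /* iowait  */
    return 0;
}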
++ * @p: the process from which the cpu time has been stolen ++ * @steal: the cpu time spent in involuntary wait ++ */ ++void account_steal_time(struct task_struct *p, cputime_t steal) ++{ ++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; ++ cputime64_t tmp = cputime_to_cputime64(steal); ++ struct rq *rq = this_rq(); ++ ++ if (p == rq->idle) { ++ p->stime = cputime_add(p->stime, steal); ++ if (atomic_read(&rq->nr_iowait) > 0) ++ cpustat->iowait = cputime64_add(cpustat->iowait, tmp); ++ else ++ cpustat->idle = cputime64_add(cpustat->idle, tmp); ++ } else ++ cpustat->steal = cputime64_add(cpustat->steal, tmp); ++} ++ ++/* ++ * Use precise platform statistics if available: ++ */ ++#ifdef CONFIG_VIRT_CPU_ACCOUNTING ++cputime_t task_utime(struct task_struct *p) ++{ ++ return p->utime; ++} ++ ++cputime_t task_stime(struct task_struct *p) ++{ ++ return p->stime; ++} ++#else ++cputime_t task_utime(struct task_struct *p) ++{ ++ clock_t utime = cputime_to_clock_t(p->utime), ++ total = utime + cputime_to_clock_t(p->stime); ++ u64 temp; ++ ++ /* ++ * Use CFS's precise accounting: ++ */ ++ temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime); ++ ++ if (total) { ++ temp *= utime; ++ do_div(temp, total); ++ } ++ utime = (clock_t)temp; ++ ++ p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime)); ++ return p->prev_utime; ++} ++ ++cputime_t task_stime(struct task_struct *p) ++{ ++ clock_t stime; ++ ++ /* ++ * Use CFS's precise accounting. (we subtract utime from ++ * the total, to make sure the total observed by userspace ++ * grows monotonically - apps rely on that): ++ */ ++ stime = nsec_to_clock_t(p->se.sum_exec_runtime) - ++ cputime_to_clock_t(task_utime(p)); ++ ++ if (stime >= 0) ++ p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime)); ++ ++ return p->prev_stime; ++} ++#endif ++ ++inline cputime_t task_gtime(struct task_struct *p) ++{ ++ return p->gtime; ++} ++ ++/* ++ * This function gets called by the timer code, with HZ frequency. ++ * We call it with interrupts disabled. ++ * ++ * It also gets called by the fork code, when changing the parent's ++ * timeslices. ++ */ ++void scheduler_tick(void) ++{ ++ int cpu = smp_processor_id(); ++ struct rq *rq = cpu_rq(cpu); ++ struct task_struct *curr = rq->curr; ++ ++ sched_clock_tick(); ++ ++ spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ update_cpu_load(rq); ++ curr->sched_class->task_tick(rq, curr, 0); ++ spin_unlock(&rq->lock); ++ ++#ifdef CONFIG_SMP ++ rq->idle_at_tick = idle_cpu(cpu); ++ trigger_load_balance(rq, cpu); ++#endif ++} ++ ++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ ++ defined(CONFIG_PREEMPT_TRACER)) ++ ++static inline unsigned long get_parent_ip(unsigned long addr) ++{ ++ if (in_lock_functions(addr)) { ++ addr = CALLER_ADDR2; ++ if (in_lock_functions(addr)) ++ addr = CALLER_ADDR3; ++ } ++ return addr; ++} ++ ++void __kprobes add_preempt_count(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) ++ return; ++#endif ++ preempt_count() += val; ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Spinlock count overflowing soon? ++ */ ++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= ++ PREEMPT_MASK - 10); ++#endif ++ if (preempt_count() == val) ++ trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); ++} ++EXPORT_SYMBOL(add_preempt_count); ++ ++void __kprobes sub_preempt_count(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? 
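task_utime()/task_stime() above split the precise CFS runtime (sum_exec_runtime) in the ratio of the tick-sampled utime:stime counters, so the totals seen by user space stay monotonic. A worked example of that scaling, with made-up tick counts:

/* Sketch of the task_utime() scaling: split the CFS runtime in the same
 * utime:stime ratio as the tick-based counters. Units are clock_t ticks;
 * the numbers are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t utime = 300, stime = 100;   /* tick-sampled counters               */
    uint64_t runtime_ticks = 500;        /* nsec_to_clock_t(sum_exec_runtime)   */
    uint64_t total = utime + stime;

    uint64_t scaled_utime = total ? runtime_ticks * utime / total : runtime_ticks;
    uint64_t scaled_stime = runtime_ticks - scaled_utime;

    /* 300:100 ratio applied to 500 precise ticks -> 375 user, 125 system */
    printf("utime=%llu stime=%llu\n",
           (unsigned long long)scaled_utime, (unsigned long long)scaled_stime);
    return 0;
}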
++ */ ++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) ++ return; ++ /* ++ * Is the spinlock portion underflowing? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && ++ !(preempt_count() & PREEMPT_MASK))) ++ return; ++#endif ++ ++ if (preempt_count() == val) ++ trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); ++ preempt_count() -= val; ++} ++EXPORT_SYMBOL(sub_preempt_count); ++ ++#endif ++ ++/* ++ * Print scheduling while atomic bug: ++ */ ++static noinline void __schedule_bug(struct task_struct *prev) ++{ ++ struct pt_regs *regs = get_irq_regs(); ++ ++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", ++ prev->comm, prev->pid, preempt_count()); ++ ++ debug_show_held_locks(prev); ++ print_modules(); ++ if (irqs_disabled()) ++ print_irqtrace_events(prev); ++ ++ if (regs) ++ show_regs(regs); ++ else ++ dump_stack(); ++} ++ ++/* ++ * Various schedule()-time debugging checks and statistics: ++ */ ++static inline void schedule_debug(struct task_struct *prev) ++{ ++ /* ++ * Test if we are atomic. Since do_exit() needs to call into ++ * schedule() atomically, we ignore that path for now. ++ * Otherwise, whine if we are scheduling when we should not be. ++ */ ++ if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) ++ __schedule_bug(prev); ++ ++ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); ++ ++ schedstat_inc(this_rq(), sched_count); ++#ifdef CONFIG_SCHEDSTATS ++ if (unlikely(prev->lock_depth >= 0)) { ++ schedstat_inc(this_rq(), bkl_count); ++ schedstat_inc(prev, sched_info.bkl_count); ++ } ++#endif ++} ++ ++/* ++ * Pick up the highest-prio task: ++ */ ++static inline struct task_struct * ++pick_next_task(struct rq *rq, struct task_struct *prev) ++{ ++ const struct sched_class *class; ++ struct task_struct *p; ++ ++ /* ++ * Optimization: we know that if all tasks are in ++ * the fair class we can call that function directly: ++ */ ++ if (likely(rq->nr_running == rq->cfs.nr_running)) { ++ p = fair_sched_class.pick_next_task(rq); ++ if (likely(p)) ++ return p; ++ } ++ ++ class = sched_class_highest; ++ for ( ; ; ) { ++ p = class->pick_next_task(rq); ++ if (p) ++ return p; ++ /* ++ * Will never be NULL as the idle class always ++ * returns a non-NULL p: ++ */ ++ class = class->next; ++ } ++} ++ ++void (*rec_event)(void *,unsigned int) = NULL; ++EXPORT_SYMBOL(rec_event); ++#ifdef CONFIG_CHOPSTIX ++ ++struct event_spec { ++ unsigned long pc; ++ unsigned long dcookie; ++ unsigned int count; ++ unsigned int reason; ++}; ++ ++/* To support safe calling from asm */ ++asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) { ++ struct pt_regs *regs; ++ struct event_spec *es = event_signature_in->event_data; ++ regs = task_pt_regs(current); ++ event_signature_in->task=current; ++ es->pc=regs->ip; ++ event_signature_in->count=1; ++ (*rec_event)(event_signature_in, count); ++} ++#endif ++ ++/* ++ * schedule() is the main scheduler function. 
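The rec_event pointer and rec_event_asm() above are the Chopstix hook that the instrumented syscall and scheduler paths fire through. A user-space sketch of how a collector would attach to it; the struct event layout here is inferred from the fields this hunk dereferences (event_data, task, count) and is an assumption, not the real Chopstix header:

/* Hypothetical Chopstix consumer, modelled in user space. */
#include <stdio.h>

struct event_spec {
    unsigned long pc;
    unsigned long dcookie;
    unsigned int count;
    unsigned int reason;
};

struct event {                   /* assumed layout, see note above */
    void *event_data;            /* points at a struct event_spec  */
    void *task;                  /* the task being sampled         */
    unsigned int count;
};

/* Mirrors: void (*rec_event)(void *, unsigned int) = NULL; */
static void (*rec_event)(void *, unsigned int) = NULL;

static void my_collector(void *data, unsigned int count)
{
    struct event *ev = data;
    struct event_spec *es = ev->event_data;
    printf("event at pc=0x%lx, reason=%u, count=%u\n", es->pc, es->reason, count);
}

int main(void)
{
    struct event_spec es = { .pc = 0xc0100000, .reason = 0, .count = 1 };
    struct event ev = { .event_data = &es, .task = NULL, .count = 1 };

    rec_event = my_collector;    /* what a probe module would install      */
    if (rec_event)               /* the patch only fires when non-NULL     */
        rec_event(&ev, es.count);
    return 0;
}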
++ */ ++asmlinkage void __sched schedule(void) ++{ ++ struct task_struct *prev, *next; ++ unsigned long *switch_count; ++ struct rq *rq; ++ int cpu; ++ ++need_resched: ++ preempt_disable(); ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ rcu_qsctr_inc(cpu); ++ prev = rq->curr; ++ switch_count = &prev->nivcsw; ++ ++ release_kernel_lock(prev); ++need_resched_nonpreemptible: ++ ++ schedule_debug(prev); ++ ++ if (sched_feat(HRTICK)) ++ hrtick_clear(rq); ++ ++ /* ++ * Do the rq-clock update outside the rq lock: ++ */ ++ local_irq_disable(); ++ update_rq_clock(rq); ++ spin_lock(&rq->lock); ++ clear_tsk_need_resched(prev); ++ ++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { ++ if (unlikely(signal_pending_state(prev->state, prev))) ++ prev->state = TASK_RUNNING; ++ else ++ deactivate_task(rq, prev, 1); ++ switch_count = &prev->nvcsw; ++ } ++ ++#ifdef CONFIG_SMP ++ if (prev->sched_class->pre_schedule) ++ prev->sched_class->pre_schedule(rq, prev); ++#endif ++ ++ if (unlikely(!rq->nr_running)) ++ idle_balance(cpu, rq); ++ ++ prev->sched_class->put_prev_task(rq, prev); ++ next = pick_next_task(rq, prev); ++ ++ if (likely(prev != next)) { ++ sched_info_switch(prev, next); ++ ++ rq->nr_switches++; ++ rq->curr = next; ++ ++*switch_count; ++ ++ context_switch(rq, prev, next); /* unlocks the rq */ ++ /* ++ * the context switch might have flipped the stack from under ++ * us, hence refresh the local variables. ++ */ ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ } else ++ spin_unlock_irq(&rq->lock); ++ ++ if (unlikely(reacquire_kernel_lock(current) < 0)) ++ goto need_resched_nonpreemptible; ++ ++ preempt_enable_no_resched(); ++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) ++ goto need_resched; ++} ++EXPORT_SYMBOL(schedule); ++ ++#ifdef CONFIG_PREEMPT ++/* ++ * this is the entry point to schedule() from in-kernel preemption ++ * off of preempt_enable. Kernel preemptions off return from interrupt ++ * occur there and call schedule directly. ++ */ ++asmlinkage void __sched preempt_schedule(void) ++{ ++ struct thread_info *ti = current_thread_info(); ++ ++ /* ++ * If there is a non-zero preempt_count or interrupts are disabled, ++ * we do not want to preempt the current task. Just return.. ++ */ ++ if (likely(ti->preempt_count || irqs_disabled())) ++ return; ++ ++ do { ++ add_preempt_count(PREEMPT_ACTIVE); ++ schedule(); ++ sub_preempt_count(PREEMPT_ACTIVE); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. ++ */ ++ barrier(); ++ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); ++} ++EXPORT_SYMBOL(preempt_schedule); ++ ++/* ++ * this is the entry point to schedule() from kernel preemption ++ * off of irq context. ++ * Note, that this is called and return with irqs disabled. This will ++ * protect us against recursive calling from irq. ++ */ ++asmlinkage void __sched preempt_schedule_irq(void) ++{ ++ struct thread_info *ti = current_thread_info(); ++ ++ /* Catch callers which need to be fixed */ ++ BUG_ON(ti->preempt_count || !irqs_disabled()); ++ ++ do { ++ add_preempt_count(PREEMPT_ACTIVE); ++ local_irq_enable(); ++ schedule(); ++ local_irq_disable(); ++ sub_preempt_count(PREEMPT_ACTIVE); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. 
++ */ ++ barrier(); ++ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); ++} ++ ++#endif /* CONFIG_PREEMPT */ ++ ++int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, ++ void *key) ++{ ++ return try_to_wake_up(curr->private, mode, sync); ++} ++EXPORT_SYMBOL(default_wake_function); ++ ++/* ++ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just ++ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve ++ * number) then we wake all the non-exclusive tasks and one exclusive task. ++ * ++ * There are circumstances in which we can try to wake a task which has already ++ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns ++ * zero in this (rare) case, and we handle it by continuing to scan the queue. ++ */ ++static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, ++ int nr_exclusive, int sync, void *key) ++{ ++ wait_queue_t *curr, *next; ++ ++ list_for_each_entry_safe(curr, next, &q->task_list, task_list) { ++ unsigned flags = curr->flags; ++ ++ if (curr->func(curr, mode, sync, key) && ++ (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) ++ break; ++ } ++} ++ ++/** ++ * __wake_up - wake up threads blocked on a waitqueue. ++ * @q: the waitqueue ++ * @mode: which threads ++ * @nr_exclusive: how many wake-one or wake-many threads to wake up ++ * @key: is directly passed to the wakeup function ++ */ ++void __wake_up(wait_queue_head_t *q, unsigned int mode, ++ int nr_exclusive, void *key) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&q->lock, flags); ++ __wake_up_common(q, mode, nr_exclusive, 0, key); ++ spin_unlock_irqrestore(&q->lock, flags); ++} ++EXPORT_SYMBOL(__wake_up); ++ ++/* ++ * Same as __wake_up but called with the spinlock in wait_queue_head_t held. ++ */ ++void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) ++{ ++ __wake_up_common(q, mode, 1, 0, NULL); ++} ++ ++/** ++ * __wake_up_sync - wake up threads blocked on a waitqueue. ++ * @q: the waitqueue ++ * @mode: which threads ++ * @nr_exclusive: how many wake-one or wake-many threads to wake up ++ * ++ * The sync wakeup differs that the waker knows that it will schedule ++ * away soon, so while the target thread will be woken up, it will not ++ * be migrated to another CPU - ie. the two threads are 'synchronized' ++ * with each other. This can prevent needless bouncing between CPUs. ++ * ++ * On UP it can prevent extra preemption. 
++ */ ++void ++__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) ++{ ++ unsigned long flags; ++ int sync = 1; ++ ++ if (unlikely(!q)) ++ return; ++ ++ if (unlikely(!nr_exclusive)) ++ sync = 0; ++ ++ spin_lock_irqsave(&q->lock, flags); ++ __wake_up_common(q, mode, nr_exclusive, sync, NULL); ++ spin_unlock_irqrestore(&q->lock, flags); ++} ++EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ ++ ++void complete(struct completion *x) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&x->wait.lock, flags); ++ x->done++; ++ __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); ++ spin_unlock_irqrestore(&x->wait.lock, flags); ++} ++EXPORT_SYMBOL(complete); ++ ++void complete_all(struct completion *x) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&x->wait.lock, flags); ++ x->done += UINT_MAX/2; ++ __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); ++ spin_unlock_irqrestore(&x->wait.lock, flags); ++} ++EXPORT_SYMBOL(complete_all); ++ ++static inline long __sched ++do_wait_for_common(struct completion *x, long timeout, int state) ++{ ++ if (!x->done) { ++ DECLARE_WAITQUEUE(wait, current); ++ ++ wait.flags |= WQ_FLAG_EXCLUSIVE; ++ __add_wait_queue_tail(&x->wait, &wait); ++ do { ++ if ((state == TASK_INTERRUPTIBLE && ++ signal_pending(current)) || ++ (state == TASK_KILLABLE && ++ fatal_signal_pending(current))) { ++ timeout = -ERESTARTSYS; ++ break; ++ } ++ __set_current_state(state); ++ spin_unlock_irq(&x->wait.lock); ++ timeout = schedule_timeout(timeout); ++ spin_lock_irq(&x->wait.lock); ++ } while (!x->done && timeout); ++ __remove_wait_queue(&x->wait, &wait); ++ if (!x->done) ++ return timeout; ++ } ++ x->done--; ++ return timeout ?: 1; ++} ++ ++static long __sched ++wait_for_common(struct completion *x, long timeout, int state) ++{ ++ might_sleep(); ++ ++ spin_lock_irq(&x->wait.lock); ++ timeout = do_wait_for_common(x, timeout, state); ++ spin_unlock_irq(&x->wait.lock); ++ return timeout; ++} ++ ++void __sched wait_for_completion(struct completion *x) ++{ ++ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(wait_for_completion); ++ ++unsigned long __sched ++wait_for_completion_timeout(struct completion *x, unsigned long timeout) ++{ ++ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(wait_for_completion_timeout); ++ ++int __sched wait_for_completion_interruptible(struct completion *x) ++{ ++ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); ++ if (t == -ERESTARTSYS) ++ return t; ++ return 0; ++} ++EXPORT_SYMBOL(wait_for_completion_interruptible); ++ ++unsigned long __sched ++wait_for_completion_interruptible_timeout(struct completion *x, ++ unsigned long timeout) ++{ ++ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); ++} ++EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); ++ ++int __sched wait_for_completion_killable(struct completion *x) ++{ ++ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); ++ if (t == -ERESTARTSYS) ++ return t; ++ return 0; ++} ++EXPORT_SYMBOL(wait_for_completion_killable); ++ ++/** ++ * try_wait_for_completion - try to decrement a completion without blocking ++ * @x: completion structure ++ * ++ * Returns: 0 if a decrement cannot be done without blocking ++ * 1 if a decrement succeeded. ++ * ++ * If a completion is being used as a counting completion, ++ * attempt to decrement the counter without blocking. This ++ * enables us to avoid waiting if the resource the completion ++ * is protecting is not available. 
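complete() and wait_for_completion() above implement a counting handshake: each complete() bumps x->done and each satisfied waiter consumes one. A user-space model of just that counting behaviour, with pthreads standing in for the runqueue wakeups:

/* Minimal model of the completion counting semantics; not kernel code. */
#include <pthread.h>
#include <stdio.h>

struct completion {
    unsigned int done;
    pthread_mutex_t lock;
    pthread_cond_t wait;
};

static void complete(struct completion *x)
{
    pthread_mutex_lock(&x->lock);
    x->done++;
    pthread_cond_signal(&x->wait);
    pthread_mutex_unlock(&x->lock);
}

static void wait_for_completion(struct completion *x)
{
    pthread_mutex_lock(&x->lock);
    while (!x->done)
        pthread_cond_wait(&x->wait, &x->lock);
    x->done--;                   /* consume one completion, as the kernel does */
    pthread_mutex_unlock(&x->lock);
}

static struct completion c = { 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };

static void *worker(void *arg)
{
    (void)arg;
    complete(&c);                /* signal "I'm done" */
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    wait_for_completion(&c);     /* blocks until the worker posts */
    pthread_join(t, NULL);
    puts("completion consumed");
    return 0;
}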
++ */ ++bool try_wait_for_completion(struct completion *x) ++{ ++ int ret = 1; ++ ++ spin_lock_irq(&x->wait.lock); ++ if (!x->done) ++ ret = 0; ++ else ++ x->done--; ++ spin_unlock_irq(&x->wait.lock); ++ return ret; ++} ++EXPORT_SYMBOL(try_wait_for_completion); ++ ++/** ++ * completion_done - Test to see if a completion has any waiters ++ * @x: completion structure ++ * ++ * Returns: 0 if there are waiters (wait_for_completion() in progress) ++ * 1 if there are no waiters. ++ * ++ */ ++bool completion_done(struct completion *x) ++{ ++ int ret = 1; ++ ++ spin_lock_irq(&x->wait.lock); ++ if (!x->done) ++ ret = 0; ++ spin_unlock_irq(&x->wait.lock); ++ return ret; ++} ++EXPORT_SYMBOL(completion_done); ++ ++static long __sched ++sleep_on_common(wait_queue_head_t *q, int state, long timeout) ++{ ++ unsigned long flags; ++ wait_queue_t wait; ++ ++ init_waitqueue_entry(&wait, current); ++ ++ __set_current_state(state); ++ ++ spin_lock_irqsave(&q->lock, flags); ++ __add_wait_queue(q, &wait); ++ spin_unlock(&q->lock); ++ timeout = schedule_timeout(timeout); ++ spin_lock_irq(&q->lock); ++ __remove_wait_queue(q, &wait); ++ spin_unlock_irqrestore(&q->lock, flags); ++ ++ return timeout; ++} ++ ++void __sched interruptible_sleep_on(wait_queue_head_t *q) ++{ ++ sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); ++} ++EXPORT_SYMBOL(interruptible_sleep_on); ++ ++long __sched ++interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) ++{ ++ return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); ++} ++EXPORT_SYMBOL(interruptible_sleep_on_timeout); ++ ++void __sched sleep_on(wait_queue_head_t *q) ++{ ++ sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); ++} ++EXPORT_SYMBOL(sleep_on); ++ ++long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) ++{ ++ return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); ++} ++EXPORT_SYMBOL(sleep_on_timeout); ++ ++#ifdef CONFIG_RT_MUTEXES ++ ++/* ++ * rt_mutex_setprio - set the current priority of a task ++ * @p: task ++ * @prio: prio value (kernel-internal form) ++ * ++ * This function changes the 'effective' priority of a task. It does ++ * not touch ->normal_prio like __setscheduler(). ++ * ++ * Used by the rt_mutex code to implement priority inheritance logic. ++ */ ++void rt_mutex_setprio(struct task_struct *p, int prio) ++{ ++ unsigned long flags; ++ int oldprio, on_rq, running; ++ struct rq *rq; ++ const struct sched_class *prev_class = p->sched_class; ++ ++ BUG_ON(prio < 0 || prio > MAX_PRIO); ++ ++ rq = task_rq_lock(p, &flags); ++ update_rq_clock(rq); ++ ++ oldprio = p->prio; ++ on_rq = p->se.on_rq; ++ running = task_current(rq, p); ++ if (on_rq) ++ dequeue_task(rq, p, 0); ++ if (running) ++ p->sched_class->put_prev_task(rq, p); ++ ++ if (rt_prio(prio)) ++ p->sched_class = &rt_sched_class; ++ else ++ p->sched_class = &fair_sched_class; ++ ++ p->prio = prio; ++ ++ if (running) ++ p->sched_class->set_curr_task(rq); ++ if (on_rq) { ++ enqueue_task(rq, p, 0); ++ ++ check_class_changed(rq, p, prev_class, oldprio, running); ++ } ++ task_rq_unlock(rq, &flags); ++} ++ ++#endif ++ ++void set_user_nice(struct task_struct *p, long nice) ++{ ++ int old_prio, delta, on_rq; ++ unsigned long flags; ++ struct rq *rq; ++ ++ if (TASK_NICE(p) == nice || nice < -20 || nice > 19) ++ return; ++ /* ++ * We have to be careful, if called from sys_setpriority(), ++ * the task might be in the middle of scheduling on another CPU. 
++ */ ++ rq = task_rq_lock(p, &flags); ++ update_rq_clock(rq); ++ /* ++ * The RT priorities are set via sched_setscheduler(), but we still ++ * allow the 'normal' nice value to be set - but as expected ++ * it wont have any effect on scheduling until the task is ++ * SCHED_FIFO/SCHED_RR: ++ */ ++ if (task_has_rt_policy(p)) { ++ p->static_prio = NICE_TO_PRIO(nice); ++ goto out_unlock; ++ } ++ on_rq = p->se.on_rq; ++ if (on_rq) ++ dequeue_task(rq, p, 0); ++ ++ p->static_prio = NICE_TO_PRIO(nice); ++ set_load_weight(p); ++ old_prio = p->prio; ++ p->prio = effective_prio(p); ++ delta = p->prio - old_prio; ++ ++ if (on_rq) { ++ enqueue_task(rq, p, 0); ++ /* ++ * If the task increased its priority or is running and ++ * lowered its priority, then reschedule its CPU: ++ */ ++ if (delta < 0 || (delta > 0 && task_running(rq, p))) ++ resched_task(rq->curr); ++ } ++out_unlock: ++ task_rq_unlock(rq, &flags); ++} ++EXPORT_SYMBOL(set_user_nice); ++ ++/* ++ * can_nice - check if a task can reduce its nice value ++ * @p: task ++ * @nice: nice value ++ */ ++int can_nice(const struct task_struct *p, const int nice) ++{ ++ /* convert nice value [19,-20] to rlimit style value [1,40] */ ++ int nice_rlim = 20 - nice; ++ ++ return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || ++ capable(CAP_SYS_NICE)); ++} ++ ++#ifdef __ARCH_WANT_SYS_NICE ++ ++/* ++ * sys_nice - change the priority of the current process. ++ * @increment: priority increment ++ * ++ * sys_setpriority is a more generic, but much slower function that ++ * does similar things. ++ */ ++SYSCALL_DEFINE1(nice, int, increment) ++{ ++ long nice, retval; ++ ++ /* ++ * Setpriority might change our priority at the same moment. ++ * We don't have to worry. Conceptually one call occurs first ++ * and we have a single winner. ++ */ ++ if (increment < -40) ++ increment = -40; ++ if (increment > 40) ++ increment = 40; ++ ++ nice = PRIO_TO_NICE(current->static_prio) + increment; ++ if (nice < -20) ++ nice = -20; ++ if (nice > 19) ++ nice = 19; ++ ++ if (increment < 0 && !can_nice(current, nice)) ++ return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM; ++ ++ retval = security_task_setnice(current, nice); ++ if (retval) ++ return retval; ++ ++ set_user_nice(current, nice); ++ return 0; ++} ++ ++#endif ++ ++/** ++ * task_prio - return the priority value of a given task. ++ * @p: the task in question. ++ * ++ * This is the priority value as seen by users in /proc. ++ * RT tasks are offset by -200. Normal tasks are centered ++ * around 0, value goes from -16 to +15. ++ */ ++int task_prio(const struct task_struct *p) ++{ ++ return p->prio - MAX_RT_PRIO; ++} ++ ++/** ++ * task_nice - return the nice value of a given task. ++ * @p: the task in question. ++ */ ++int task_nice(const struct task_struct *p) ++{ ++ return TASK_NICE(p); ++} ++EXPORT_SYMBOL(task_nice); ++ ++/** ++ * idle_cpu - is a given cpu idle currently? ++ * @cpu: the processor in question. ++ */ ++int idle_cpu(int cpu) ++{ ++ return cpu_curr(cpu) == cpu_rq(cpu)->idle; ++} ++ ++/** ++ * idle_task - return the idle task for a given cpu. ++ * @cpu: the processor in question. ++ */ ++struct task_struct *idle_task(int cpu) ++{ ++ return cpu_rq(cpu)->idle; ++} ++ ++/** ++ * find_process_by_pid - find a process with a matching PID value. ++ * @pid: the pid in question. ++ */ ++static struct task_struct *find_process_by_pid(pid_t pid) ++{ ++ return pid ? find_task_by_vpid(pid) : current; ++} ++ ++/* Actually do priority change: must hold rq lock. 
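set_user_nice()/can_nice()/sys_nice() above are what nice(2) and setpriority(2) land in; can_nice() maps the nice range 19..-20 onto RLIMIT_NICE values 1..40. A small user-space probe of that path, with the nice value chosen only for illustration:

/* User-space view of the nice path above. */
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
    int nice_val = 5;

    /* same mapping as can_nice(): nice 19..-20 <-> rlimit value 1..40 */
    printf("nice %d needs RLIMIT_NICE >= %d\n", nice_val, 20 - nice_val);

    if (setpriority(PRIO_PROCESS, 0, nice_val) == -1)
        perror("setpriority");
    else
        printf("current nice: %d\n", getpriority(PRIO_PROCESS, 0));
    return 0;
}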
*/ ++static void ++__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) ++{ ++ BUG_ON(p->se.on_rq); ++ ++ p->policy = policy; ++ switch (p->policy) { ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ p->sched_class = &fair_sched_class; ++ break; ++ case SCHED_FIFO: ++ case SCHED_RR: ++ p->sched_class = &rt_sched_class; ++ break; ++ } ++ ++ p->rt_priority = prio; ++ p->normal_prio = normal_prio(p); ++ /* we are holding p->pi_lock already */ ++ p->prio = rt_mutex_getprio(p); ++ set_load_weight(p); ++} ++ ++static int __sched_setscheduler(struct task_struct *p, int policy, ++ struct sched_param *param, bool user) ++{ ++ int retval, oldprio, oldpolicy = -1, on_rq, running; ++ unsigned long flags; ++ const struct sched_class *prev_class = p->sched_class; ++ struct rq *rq; ++ ++ /* may grab non-irq protected spin_locks */ ++ BUG_ON(in_interrupt()); ++recheck: ++ /* double check policy once rq lock held */ ++ if (policy < 0) ++ policy = oldpolicy = p->policy; ++ else if (policy != SCHED_FIFO && policy != SCHED_RR && ++ policy != SCHED_NORMAL && policy != SCHED_BATCH && ++ policy != SCHED_IDLE) ++ return -EINVAL; ++ /* ++ * Valid priorities for SCHED_FIFO and SCHED_RR are ++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, ++ * SCHED_BATCH and SCHED_IDLE is 0. ++ */ ++ if (param->sched_priority < 0 || ++ (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || ++ (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) ++ return -EINVAL; ++ if (rt_policy(policy) != (param->sched_priority != 0)) ++ return -EINVAL; ++ ++ /* ++ * Allow unprivileged RT tasks to decrease priority: ++ */ ++ if (user && !capable(CAP_SYS_NICE)) { ++ if (rt_policy(policy)) { ++ unsigned long rlim_rtprio; ++ ++ if (!lock_task_sighand(p, &flags)) ++ return -ESRCH; ++ rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur; ++ unlock_task_sighand(p, &flags); ++ ++ /* can't set/change the rt policy */ ++ if (policy != p->policy && !rlim_rtprio) ++ return -EPERM; ++ ++ /* can't increase priority */ ++ if (param->sched_priority > p->rt_priority && ++ param->sched_priority > rlim_rtprio) ++ return -EPERM; ++ } ++ /* ++ * Like positive nice levels, dont allow tasks to ++ * move out of SCHED_IDLE either: ++ */ ++ if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) ++ return -EPERM; ++ ++ /* can't change other user's priorities */ ++ if ((current->euid != p->euid) && ++ (current->euid != p->uid)) ++ return -EPERM; ++ } ++ ++ if (user) { ++#ifdef CONFIG_RT_GROUP_SCHED ++ /* ++ * Do not allow realtime tasks into groups that have no runtime ++ * assigned. ++ */ ++ if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) ++ return -EPERM; ++#endif ++ ++ retval = security_task_setscheduler(p, policy, param); ++ if (retval) ++ return retval; ++ } ++ ++ /* ++ * make sure no PI-waiters arrive (or leave) while we are ++ * changing the priority of the task: ++ */ ++ spin_lock_irqsave(&p->pi_lock, flags); ++ /* ++ * To be able to change p->policy safely, the apropriate ++ * runqueue lock must be held. 
++ */ ++ rq = __task_rq_lock(p); ++ /* recheck policy now with rq lock held */ ++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { ++ policy = oldpolicy = -1; ++ __task_rq_unlock(rq); ++ spin_unlock_irqrestore(&p->pi_lock, flags); ++ goto recheck; ++ } ++ update_rq_clock(rq); ++ on_rq = p->se.on_rq; ++ running = task_current(rq, p); ++ if (on_rq) ++ deactivate_task(rq, p, 0); ++ if (running) ++ p->sched_class->put_prev_task(rq, p); ++ ++ oldprio = p->prio; ++ __setscheduler(rq, p, policy, param->sched_priority); ++ ++ if (running) ++ p->sched_class->set_curr_task(rq); ++ if (on_rq) { ++ activate_task(rq, p, 0); ++ ++ check_class_changed(rq, p, prev_class, oldprio, running); ++ } ++ __task_rq_unlock(rq); ++ spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ rt_mutex_adjust_pi(p); ++ ++ return 0; ++} ++ ++/** ++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * NOTE that the task may be already dead. ++ */ ++int sched_setscheduler(struct task_struct *p, int policy, ++ struct sched_param *param) ++{ ++ return __sched_setscheduler(p, policy, param, true); ++} ++EXPORT_SYMBOL_GPL(sched_setscheduler); ++ ++/** ++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * Just like sched_setscheduler, only don't bother checking if the ++ * current context has permission. For example, this is needed in ++ * stop_machine(): we create temporary high priority worker threads, ++ * but our caller might not have that capability. ++ */ ++int sched_setscheduler_nocheck(struct task_struct *p, int policy, ++ struct sched_param *param) ++{ ++ return __sched_setscheduler(p, policy, param, false); ++} ++ ++static int ++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) ++{ ++ struct sched_param lparam; ++ struct task_struct *p; ++ int retval; ++ ++ if (!param || pid < 0) ++ return -EINVAL; ++ if (copy_from_user(&lparam, param, sizeof(struct sched_param))) ++ return -EFAULT; ++ ++ rcu_read_lock(); ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (p != NULL) ++ retval = sched_setscheduler(p, policy, &lparam); ++ rcu_read_unlock(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority ++ * @pid: the pid in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ */ ++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, ++ struct sched_param __user *, param) ++{ ++ /* negative values for policy are not valid */ ++ if (policy < 0) ++ return -EINVAL; ++ ++ return do_sched_setscheduler(pid, policy, param); ++} ++ ++/** ++ * sys_sched_setparam - set/change the RT priority of a thread ++ * @pid: the pid in question. ++ * @param: structure containing the new RT priority. ++ */ ++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) ++{ ++ return do_sched_setscheduler(pid, -1, param); ++} ++ ++/** ++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread ++ * @pid: the pid in question. 
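sched_setscheduler() and the sched_setscheduler/sched_setparam syscalls above are reached from user space through the libc wrappers of the same name. A minimal caller that asks for SCHED_FIFO priority 10, which exercises exactly the permission checks __sched_setscheduler() performs:

/* User-space caller of the syscall defined above. */
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
    struct sched_param sp = { .sched_priority = 10 };

    if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
        fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
    else
        printf("now SCHED_FIFO, policy=%d\n", sched_getscheduler(0));
    return 0;
}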
++ */ ++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) ++{ ++ struct task_struct *p; ++ int retval; ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ read_lock(&tasklist_lock); ++ p = find_process_by_pid(pid); ++ if (p) { ++ retval = security_task_getscheduler(p); ++ if (!retval) ++ retval = p->policy; ++ } ++ read_unlock(&tasklist_lock); ++ return retval; ++} ++ ++/** ++ * sys_sched_getscheduler - get the RT priority of a thread ++ * @pid: the pid in question. ++ * @param: structure containing the RT priority. ++ */ ++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) ++{ ++ struct sched_param lp; ++ struct task_struct *p; ++ int retval; ++ ++ if (!param || pid < 0) ++ return -EINVAL; ++ ++ read_lock(&tasklist_lock); ++ p = find_process_by_pid(pid); ++ retval = -ESRCH; ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ lp.sched_priority = p->rt_priority; ++ read_unlock(&tasklist_lock); ++ ++ /* ++ * This one might sleep, we cannot do it with a spinlock held ... ++ */ ++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; ++ ++ return retval; ++ ++out_unlock: ++ read_unlock(&tasklist_lock); ++ return retval; ++} ++ ++long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) ++{ ++ cpumask_t cpus_allowed; ++ cpumask_t new_mask = *in_mask; ++ struct task_struct *p; ++ int retval; ++ ++ get_online_cpus(); ++ read_lock(&tasklist_lock); ++ ++ p = find_process_by_pid(pid); ++ if (!p) { ++ read_unlock(&tasklist_lock); ++ put_online_cpus(); ++ return -ESRCH; ++ } ++ ++ /* ++ * It is not safe to call set_cpus_allowed with the ++ * tasklist_lock held. We will bump the task_struct's ++ * usage count and then drop tasklist_lock. ++ */ ++ get_task_struct(p); ++ read_unlock(&tasklist_lock); ++ ++ ++ retval = -EPERM; ++ if ((current->euid != p->euid) && (current->euid != p->uid) && ++ !capable(CAP_SYS_NICE)) ++ goto out_unlock; ++ ++ retval = security_task_setscheduler(p, 0, NULL); ++ if (retval) ++ goto out_unlock; ++ ++ cpuset_cpus_allowed(p, &cpus_allowed); ++ cpus_and(new_mask, new_mask, cpus_allowed); ++ again: ++ retval = set_cpus_allowed_ptr(p, &new_mask); ++ ++ if (!retval) { ++ cpuset_cpus_allowed(p, &cpus_allowed); ++ if (!cpus_subset(new_mask, cpus_allowed)) { ++ /* ++ * We must have raced with a concurrent cpuset ++ * update. Just reset the cpus_allowed to the ++ * cpuset's cpus_allowed ++ */ ++ new_mask = cpus_allowed; ++ goto again; ++ } ++ } ++out_unlock: ++ put_task_struct(p); ++ put_online_cpus(); ++ return retval; ++} ++ ++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, ++ cpumask_t *new_mask) ++{ ++ if (len < sizeof(cpumask_t)) { ++ memset(new_mask, 0, sizeof(cpumask_t)); ++ } else if (len > sizeof(cpumask_t)) { ++ len = sizeof(cpumask_t); ++ } ++ return copy_from_user(new_mask, user_mask_ptr, len) ? 
-EFAULT : 0; ++} ++ ++/** ++ * sys_sched_setaffinity - set the cpu affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to the new cpu mask ++ */ ++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ cpumask_t new_mask; ++ int retval; ++ ++ retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); ++ if (retval) ++ return retval; ++ ++ return sched_setaffinity(pid, &new_mask); ++} ++ ++long sched_getaffinity(pid_t pid, cpumask_t *mask) ++{ ++ struct task_struct *p; ++ int retval; ++ ++ get_online_cpus(); ++ read_lock(&tasklist_lock); ++ ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ cpus_and(*mask, p->cpus_allowed, cpu_online_map); ++ ++out_unlock: ++ read_unlock(&tasklist_lock); ++ put_online_cpus(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getaffinity - get the cpu affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to hold the current cpu mask ++ */ ++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ int ret; ++ cpumask_t mask; ++ ++ if (len < sizeof(cpumask_t)) ++ return -EINVAL; ++ ++ ret = sched_getaffinity(pid, &mask); ++ if (ret < 0) ++ return ret; ++ ++ if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) ++ return -EFAULT; ++ ++ return sizeof(cpumask_t); ++} ++ ++/** ++ * sys_sched_yield - yield the current processor to other threads. ++ * ++ * This function yields the current CPU to other tasks. If there are no ++ * other threads running on this CPU then this function will return. ++ */ ++SYSCALL_DEFINE0(sched_yield) ++{ ++ struct rq *rq = this_rq_lock(); ++ ++ schedstat_inc(rq, yld_count); ++ current->sched_class->yield_task(rq); ++ ++ /* ++ * Since we are going to call schedule() anyway, there's ++ * no need to preempt or enable interrupts: ++ */ ++ __release(rq->lock); ++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_); ++ _raw_spin_unlock(&rq->lock); ++ preempt_enable_no_resched(); ++ ++ schedule(); ++ ++ return 0; ++} ++ ++static void __cond_resched(void) ++{ ++#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP ++ __might_sleep(__FILE__, __LINE__); ++#endif ++ /* ++ * The BKS might be reacquired before we have dropped ++ * PREEMPT_ACTIVE, which could trigger a second ++ * cond_resched() call. ++ */ ++ do { ++ add_preempt_count(PREEMPT_ACTIVE); ++ schedule(); ++ sub_preempt_count(PREEMPT_ACTIVE); ++ } while (need_resched()); ++} ++ ++int __sched _cond_resched(void) ++{ ++ if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) && ++ system_state == SYSTEM_RUNNING) { ++ __cond_resched(); ++ return 1; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(_cond_resched); ++ ++/* ++ * cond_resched_lock() - if a reschedule is pending, drop the given lock, ++ * call schedule, and on return reacquire the lock. ++ * ++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level ++ * operations here to prevent schedule() from being called twice (once via ++ * spin_unlock(), once by hand). 
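sys_sched_setaffinity()/sys_sched_getaffinity() above copy a cpumask_t in and out of user space; the glibc wrappers do the same with cpu_set_t. A small example that pins the caller to CPU 0 and reads the mask back:

/* User-space counterpart of the affinity syscalls above. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t set;

    CPU_ZERO(&set);
    CPU_SET(0, &set);
    if (sched_setaffinity(0, sizeof(set), &set) == -1) {
        perror("sched_setaffinity");
        return 1;
    }

    CPU_ZERO(&set);
    sched_getaffinity(0, sizeof(set), &set);
    printf("running on cpu0 only? %s\n", CPU_ISSET(0, &set) ? "yes" : "no");
    return 0;
}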
++ */ ++int cond_resched_lock(spinlock_t *lock) ++{ ++ int resched = need_resched() && system_state == SYSTEM_RUNNING; ++ int ret = 0; ++ ++ if (spin_needbreak(lock) || resched) { ++ spin_unlock(lock); ++ if (resched && need_resched()) ++ __cond_resched(); ++ else ++ cpu_relax(); ++ ret = 1; ++ spin_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(cond_resched_lock); ++ ++int __sched cond_resched_softirq(void) ++{ ++ BUG_ON(!in_softirq()); ++ ++ if (need_resched() && system_state == SYSTEM_RUNNING) { ++ local_bh_enable(); ++ __cond_resched(); ++ local_bh_disable(); ++ return 1; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(cond_resched_softirq); ++ ++/** ++ * yield - yield the current processor to other threads. ++ * ++ * This is a shortcut for kernel-space yielding - it marks the ++ * thread runnable and calls sys_sched_yield(). ++ */ ++void __sched yield(void) ++{ ++ set_current_state(TASK_RUNNING); ++ sys_sched_yield(); ++} ++EXPORT_SYMBOL(yield); ++ ++/* ++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so ++ * that process accounting knows that this is a task in IO wait state. ++ * ++ * But don't do that if it is a deliberate, throttling IO wait (this task ++ * has set its backing_dev_info: the queue against which it should throttle) ++ */ ++void __sched io_schedule(void) ++{ ++ struct rq *rq = &__raw_get_cpu_var(runqueues); ++ ++ delayacct_blkio_start(); ++ atomic_inc(&rq->nr_iowait); ++ schedule(); ++ atomic_dec(&rq->nr_iowait); ++ delayacct_blkio_end(); ++} ++EXPORT_SYMBOL(io_schedule); ++ ++long __sched io_schedule_timeout(long timeout) ++{ ++ struct rq *rq = &__raw_get_cpu_var(runqueues); ++ long ret; ++ ++ delayacct_blkio_start(); ++ atomic_inc(&rq->nr_iowait); ++ ret = schedule_timeout(timeout); ++ atomic_dec(&rq->nr_iowait); ++ delayacct_blkio_end(); ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_max - return maximum RT priority. ++ * @policy: scheduling class. ++ * ++ * this syscall returns the maximum rt_priority that can be used ++ * by a given scheduling class. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_max, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = MAX_USER_RT_PRIO-1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_min - return minimum RT priority. ++ * @policy: scheduling class. ++ * ++ * this syscall returns the minimum rt_priority that can be used ++ * by a given scheduling class. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_min, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ ret = 0; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_rr_get_interval - return the default timeslice of a process. ++ * @pid: pid of the process. ++ * @interval: userspace pointer to the timeslice value. ++ * ++ * this syscall writes the default timeslice value of a given process ++ * into the user-space timespec buffer. A value of '0' means infinity. 
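sched_get_priority_max()/min() above report the valid RT priority range for each policy, and sched_rr_get_interval() reports the round-robin timeslice. A quick user-space probe of the three syscalls:

/* User-space probe of the priority-range and RR-interval syscalls. */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;

    printf("SCHED_FIFO priority range: %d..%d\n",
           sched_get_priority_min(SCHED_FIFO),
           sched_get_priority_max(SCHED_FIFO));

    /* pid 0 means "the calling process"; a zero timespec means no RR slice */
    if (sched_rr_get_interval(0, &ts) == 0)
        printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
    return 0;
}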
++ */ ++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, ++ struct timespec __user *, interval) ++{ ++ struct task_struct *p; ++ unsigned int time_slice; ++ int retval; ++ struct timespec t; ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ read_lock(&tasklist_lock); ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ /* ++ * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER ++ * tasks that are on an otherwise idle runqueue: ++ */ ++ time_slice = 0; ++ if (p->policy == SCHED_RR) { ++ time_slice = DEF_TIMESLICE; ++ } else if (p->policy != SCHED_FIFO) { ++ struct sched_entity *se = &p->se; ++ unsigned long flags; ++ struct rq *rq; ++ ++ rq = task_rq_lock(p, &flags); ++ if (rq->cfs.load.weight) ++ time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); ++ task_rq_unlock(rq, &flags); ++ } ++ read_unlock(&tasklist_lock); ++ jiffies_to_timespec(time_slice, &t); ++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; ++ return retval; ++ ++out_unlock: ++ read_unlock(&tasklist_lock); ++ return retval; ++} ++ ++static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; ++ ++void sched_show_task(struct task_struct *p) ++{ ++ unsigned long free = 0; ++ unsigned state; ++ ++ state = p->state ? __ffs(p->state) + 1 : 0; ++ printk(KERN_INFO "%-13.13s %c", p->comm, ++ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); ++#if BITS_PER_LONG == 32 ++ if (state == TASK_RUNNING) ++ printk(KERN_CONT " running "); ++ else ++ printk(KERN_CONT " %08lx ", thread_saved_pc(p)); ++#else ++ if (state == TASK_RUNNING) ++ printk(KERN_CONT " running task "); ++ else ++ printk(KERN_CONT " %016lx ", thread_saved_pc(p)); ++#endif ++#ifdef CONFIG_DEBUG_STACK_USAGE ++ { ++ unsigned long *n = end_of_stack(p); ++ while (!*n) ++ n++; ++ free = (unsigned long)n - (unsigned long)end_of_stack(p); ++ } ++#endif ++ printk(KERN_CONT "%5lu %5d %6d\n", free, ++ task_pid_nr(p), task_pid_nr(p->real_parent)); ++ ++ show_stack(p, NULL); ++} ++ ++void show_state_filter(unsigned long state_filter) ++{ ++ struct task_struct *g, *p; ++ ++#if BITS_PER_LONG == 32 ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#else ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#endif ++ read_lock(&tasklist_lock); ++ do_each_thread(g, p) { ++ /* ++ * reset the NMI-timeout, listing all files on a slow ++ * console might take alot of time: ++ */ ++ touch_nmi_watchdog(); ++ if (!state_filter || (p->state & state_filter)) ++ sched_show_task(p); ++ } while_each_thread(g, p); ++ ++ touch_all_softlockup_watchdogs(); ++ ++#ifdef CONFIG_SCHED_DEBUG ++ sysrq_sched_debug_show(); ++#endif ++ read_unlock(&tasklist_lock); ++ /* ++ * Only show locks if all tasks are dumped: ++ */ ++ if (state_filter == -1) ++ debug_show_all_locks(); ++} ++ ++void __cpuinit init_idle_bootup_task(struct task_struct *idle) ++{ ++ idle->sched_class = &idle_sched_class; ++} ++ ++/** ++ * init_idle - set up an idle thread for a given CPU ++ * @idle: task in question ++ * @cpu: cpu the idle task belongs to ++ * ++ * NOTE: this function does not set the idle thread's NEED_RESCHED ++ * flag, to make booting more robust. 
++ */ ++void __cpuinit init_idle(struct task_struct *idle, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ __sched_fork(idle); ++ idle->se.exec_start = sched_clock(); ++ ++ idle->prio = idle->normal_prio = MAX_PRIO; ++ idle->cpus_allowed = cpumask_of_cpu(cpu); ++ __set_task_cpu(idle, cpu); ++ ++ spin_lock_irqsave(&rq->lock, flags); ++ rq->curr = rq->idle = idle; ++#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) ++ idle->oncpu = 1; ++#endif ++ spin_unlock_irqrestore(&rq->lock, flags); ++ ++ /* Set the preempt count _outside_ the spinlocks! */ ++#if defined(CONFIG_PREEMPT) ++ task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); ++#else ++ task_thread_info(idle)->preempt_count = 0; ++#endif ++ /* ++ * The idle tasks have their own, simple scheduling class: ++ */ ++ idle->sched_class = &idle_sched_class; ++} ++ ++/* ++ * In a system that switches off the HZ timer nohz_cpu_mask ++ * indicates which cpus entered this state. This is used ++ * in the rcu update to wait only for active cpus. For system ++ * which do not switch off the HZ timer nohz_cpu_mask should ++ * always be CPU_MASK_NONE. ++ */ ++cpumask_t nohz_cpu_mask = CPU_MASK_NONE; ++ ++/* ++ * Increase the granularity value when there are more CPUs, ++ * because with more CPUs the 'effective latency' as visible ++ * to users decreases. But the relationship is not linear, ++ * so pick a second-best guess by going with the log2 of the ++ * number of CPUs. ++ * ++ * This idea comes from the SD scheduler of Con Kolivas: ++ */ ++static inline void sched_init_granularity(void) ++{ ++ unsigned int factor = 1 + ilog2(num_online_cpus()); ++ const unsigned long limit = 200000000; ++ ++ sysctl_sched_min_granularity *= factor; ++ if (sysctl_sched_min_granularity > limit) ++ sysctl_sched_min_granularity = limit; ++ ++ sysctl_sched_latency *= factor; ++ if (sysctl_sched_latency > limit) ++ sysctl_sched_latency = limit; ++ ++ sysctl_sched_wakeup_granularity *= factor; ++ ++ sysctl_sched_shares_ratelimit *= factor; ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * This is how migration works: ++ * ++ * 1) we queue a struct migration_req structure in the source CPU's ++ * runqueue and wake up that CPU's migration thread. ++ * 2) we down() the locked semaphore => thread blocks. ++ * 3) migration thread wakes up (implicitly it forces the migrated ++ * thread off the CPU) ++ * 4) it gets the migration request and checks whether the migrated ++ * task is still in the wrong runqueue. ++ * 5) if it's in the wrong runqueue then the migration thread removes ++ * it and puts it into the right queue. ++ * 6) migration thread up()s the semaphore. ++ * 7) we wake up and the migration is done. ++ */ ++ ++/* ++ * Change a given task's CPU affinity. Migrate the thread to a ++ * proper CPU and schedule it away if the CPU it's executing on ++ * is removed from the allowed bitmask. ++ * ++ * NOTE: the caller must have a valid reference to the task, the ++ * task must not exit() & deallocate itself prematurely. The ++ * call is not atomic; no spinlocks may be held. 
++ */ ++int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) ++{ ++ struct migration_req req; ++ unsigned long flags; ++ struct rq *rq; ++ int ret = 0; ++ ++ rq = task_rq_lock(p, &flags); ++ if (!cpus_intersects(*new_mask, cpu_online_map)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && ++ !cpus_equal(p->cpus_allowed, *new_mask))) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, new_mask); ++ else { ++ p->cpus_allowed = *new_mask; ++ p->rt.nr_cpus_allowed = cpus_weight(*new_mask); ++ } ++ ++ /* Can the task run on the task's current CPU? If so, we're done */ ++ if (cpu_isset(task_cpu(p), *new_mask)) ++ goto out; ++ ++ if (migrate_task(p, any_online_cpu(*new_mask), &req)) { ++ /* Need help from migration thread: drop lock and wait. */ ++ task_rq_unlock(rq, &flags); ++ wake_up_process(rq->migration_thread); ++ wait_for_completion(&req.done); ++ tlb_migrate_finish(p->mm); ++ return 0; ++ } ++out: ++ task_rq_unlock(rq, &flags); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); ++ ++/* ++ * Move (not current) task off this cpu, onto dest cpu. We're doing ++ * this because either it can't run here any more (set_cpus_allowed() ++ * away from this CPU, or CPU going down), or because we're ++ * attempting to rebalance this task on exec (sched_exec). ++ * ++ * So we race with normal scheduler movements, but that's OK, as long ++ * as the task is no longer on this CPU. ++ * ++ * Returns non-zero if task was successfully migrated. ++ */ ++static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) ++{ ++ struct rq *rq_dest, *rq_src; ++ int ret = 0, on_rq; ++ ++ if (unlikely(!cpu_active(dest_cpu))) ++ return ret; ++ ++ rq_src = cpu_rq(src_cpu); ++ rq_dest = cpu_rq(dest_cpu); ++ ++ double_rq_lock(rq_src, rq_dest); ++ /* Already moved. */ ++ if (task_cpu(p) != src_cpu) ++ goto done; ++ /* Affinity changed (again). */ ++ if (!cpu_isset(dest_cpu, p->cpus_allowed)) ++ goto fail; ++ ++ on_rq = p->se.on_rq; ++ if (on_rq) ++ deactivate_task(rq_src, p, 0); ++ ++ set_task_cpu(p, dest_cpu); ++ if (on_rq) { ++ activate_task(rq_dest, p, 0); ++ check_preempt_curr(rq_dest, p); ++ } ++done: ++ ret = 1; ++fail: ++ double_rq_unlock(rq_src, rq_dest); ++ return ret; ++} ++ ++/* ++ * migration_thread - this is a highprio system thread that performs ++ * thread migration by bumping thread off CPU then 'pushing' onto ++ * another runqueue. 
++ */ ++static int migration_thread(void *data) ++{ ++ int cpu = (long)data; ++ struct rq *rq; ++ ++ rq = cpu_rq(cpu); ++ BUG_ON(rq->migration_thread != current); ++ ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ struct migration_req *req; ++ struct list_head *head; ++ ++ spin_lock_irq(&rq->lock); ++ ++ if (cpu_is_offline(cpu)) { ++ spin_unlock_irq(&rq->lock); ++ goto wait_to_die; ++ } ++ ++ if (rq->active_balance) { ++ active_load_balance(rq, cpu); ++ rq->active_balance = 0; ++ } ++ ++ head = &rq->migration_queue; ++ ++ if (list_empty(head)) { ++ spin_unlock_irq(&rq->lock); ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ continue; ++ } ++ req = list_entry(head->next, struct migration_req, list); ++ list_del_init(head->next); ++ ++ spin_unlock(&rq->lock); ++ __migrate_task(req->task, cpu, req->dest_cpu); ++ local_irq_enable(); ++ ++ complete(&req->done); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++ ++wait_to_die: ++ /* Wait for kthread_stop */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++ ++static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) ++{ ++ int ret; ++ ++ local_irq_disable(); ++ ret = __migrate_task(p, src_cpu, dest_cpu); ++ local_irq_enable(); ++ return ret; ++} ++ ++/* ++ * Figure out where task on dead CPU should go, use force if necessary. ++ * NOTE: interrupts should be disabled by the caller ++ */ ++static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) ++{ ++ unsigned long flags; ++ cpumask_t mask; ++ struct rq *rq; ++ int dest_cpu; ++ ++ do { ++ /* On same node? */ ++ mask = node_to_cpumask(cpu_to_node(dead_cpu)); ++ cpus_and(mask, mask, p->cpus_allowed); ++ dest_cpu = any_online_cpu(mask); ++ ++ /* On any allowed CPU? */ ++ if (dest_cpu >= nr_cpu_ids) ++ dest_cpu = any_online_cpu(p->cpus_allowed); ++ ++ /* No more Mr. Nice Guy. */ ++ if (dest_cpu >= nr_cpu_ids) { ++ cpumask_t cpus_allowed; ++ ++ cpuset_cpus_allowed_locked(p, &cpus_allowed); ++ /* ++ * Try to stay on the same cpuset, where the ++ * current cpuset may be a subset of all cpus. ++ * The cpuset_cpus_allowed_locked() variant of ++ * cpuset_cpus_allowed() will not block. It must be ++ * called within calls to cpuset_lock/cpuset_unlock. ++ */ ++ rq = task_rq_lock(p, &flags); ++ p->cpus_allowed = cpus_allowed; ++ dest_cpu = any_online_cpu(p->cpus_allowed); ++ task_rq_unlock(rq, &flags); ++ ++ /* ++ * Don't tell them about moving exiting tasks or ++ * kernel threads (both mm NULL), since they never ++ * leave kernel. ++ */ ++ if (p->mm && printk_ratelimit()) { ++ printk(KERN_INFO "process %d (%s) no " ++ "longer affine to cpu%d\n", ++ task_pid_nr(p), p->comm, dead_cpu); ++ } ++ } ++ } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); ++} ++ ++/* ++ * While a dead CPU has no uninterruptible tasks queued at this point, ++ * it might still have a nonzero ->nr_uninterruptible counter, because ++ * for performance reasons the counter is not stricly tracking tasks to ++ * their home CPUs. 
So we just add the counter to another CPU's counter, ++ * to keep the global sum constant after CPU-down: ++ */ ++static void migrate_nr_uninterruptible(struct rq *rq_src) ++{ ++ struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ double_rq_lock(rq_src, rq_dest); ++ rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; ++ rq_src->nr_uninterruptible = 0; ++ double_rq_unlock(rq_src, rq_dest); ++ local_irq_restore(flags); ++} ++ ++/* Run through task list and migrate tasks from the dead cpu. */ ++static void migrate_live_tasks(int src_cpu) ++{ ++ struct task_struct *p, *t; ++ ++ read_lock(&tasklist_lock); ++ ++ do_each_thread(t, p) { ++ if (p == current) ++ continue; ++ ++ if (task_cpu(p) == src_cpu) ++ move_task_off_dead_cpu(src_cpu, p); ++ } while_each_thread(t, p); ++ ++ read_unlock(&tasklist_lock); ++} ++ ++/* ++ * Schedules idle task to be the next runnable task on current CPU. ++ * It does so by boosting its priority to highest possible. ++ * Used by CPU offline code. ++ */ ++void sched_idle_next(void) ++{ ++ int this_cpu = smp_processor_id(); ++ struct rq *rq = cpu_rq(this_cpu); ++ struct task_struct *p = rq->idle; ++ unsigned long flags; ++ ++ /* cpu has to be offline */ ++ BUG_ON(cpu_online(this_cpu)); ++ ++ /* ++ * Strictly not necessary since rest of the CPUs are stopped by now ++ * and interrupts disabled on the current cpu. ++ */ ++ spin_lock_irqsave(&rq->lock, flags); ++ ++ __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); ++ ++ update_rq_clock(rq); ++ activate_task(rq, p, 0); ++ ++ spin_unlock_irqrestore(&rq->lock, flags); ++} ++ ++/* ++ * Ensures that the idle task is using init_mm right before its cpu goes ++ * offline. ++ */ ++void idle_task_exit(void) ++{ ++ struct mm_struct *mm = current->active_mm; ++ ++ BUG_ON(cpu_online(smp_processor_id())); ++ ++ if (mm != &init_mm) ++ switch_mm(mm, &init_mm, current); ++ mmdrop(mm); ++} ++ ++/* called under rq->lock with disabled interrupts */ ++static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) ++{ ++ struct rq *rq = cpu_rq(dead_cpu); ++ ++ /* Must be exiting, otherwise would be on tasklist. */ ++ BUG_ON(!p->exit_state); ++ ++ /* Cannot have done final schedule yet: would have vanished. */ ++ BUG_ON(p->state == TASK_DEAD); ++ ++ get_task_struct(p); ++ ++ /* ++ * Drop lock around migration; if someone else moves it, ++ * that's OK. No task can be added to this CPU, so iteration is ++ * fine. ++ */ ++ spin_unlock_irq(&rq->lock); ++ move_task_off_dead_cpu(dead_cpu, p); ++ spin_lock_irq(&rq->lock); ++ ++ put_task_struct(p); ++} ++ ++/* release_task() removes task from tasklist, so we won't find dead tasks. 
*/ ++static void migrate_dead_tasks(unsigned int dead_cpu) ++{ ++ struct rq *rq = cpu_rq(dead_cpu); ++ struct task_struct *next; ++ ++ for ( ; ; ) { ++ if (!rq->nr_running) ++ break; ++ update_rq_clock(rq); ++ next = pick_next_task(rq, rq->curr); ++ if (!next) ++ break; ++ next->sched_class->put_prev_task(rq, next); ++ migrate_dead(dead_cpu, next); ++ ++ } ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) ++ ++static struct ctl_table sd_ctl_dir[] = { ++ { ++ .procname = "sched_domain", ++ .mode = 0555, ++ }, ++ {0, }, ++}; ++ ++static struct ctl_table sd_ctl_root[] = { ++ { ++ .ctl_name = CTL_KERN, ++ .procname = "kernel", ++ .mode = 0555, ++ .child = sd_ctl_dir, ++ }, ++ {0, }, ++}; ++ ++static struct ctl_table *sd_alloc_ctl_entry(int n) ++{ ++ struct ctl_table *entry = ++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); ++ ++ return entry; ++} ++ ++static void sd_free_ctl_entry(struct ctl_table **tablep) ++{ ++ struct ctl_table *entry; ++ ++ /* ++ * In the intermediate directories, both the child directory and ++ * procname are dynamically allocated and could fail but the mode ++ * will always be set. In the lowest directory the names are ++ * static strings and all have proc handlers. ++ */ ++ for (entry = *tablep; entry->mode; entry++) { ++ if (entry->child) ++ sd_free_ctl_entry(&entry->child); ++ if (entry->proc_handler == NULL) ++ kfree(entry->procname); ++ } ++ ++ kfree(*tablep); ++ *tablep = NULL; ++} ++ ++static void ++set_table_entry(struct ctl_table *entry, ++ const char *procname, void *data, int maxlen, ++ mode_t mode, proc_handler *proc_handler) ++{ ++ entry->procname = procname; ++ entry->data = data; ++ entry->maxlen = maxlen; ++ entry->mode = mode; ++ entry->proc_handler = proc_handler; ++} ++ ++static struct ctl_table * ++sd_alloc_ctl_domain_table(struct sched_domain *sd) ++{ ++ struct ctl_table *table = sd_alloc_ctl_entry(12); ++ ++ if (table == NULL) ++ return NULL; ++ ++ set_table_entry(&table[0], "min_interval", &sd->min_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax); ++ set_table_entry(&table[1], "max_interval", &sd->max_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax); ++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[9], "cache_nice_tries", ++ &sd->cache_nice_tries, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[10], "flags", &sd->flags, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ /* &table[11] is terminator */ ++ ++ return table; ++} ++ ++static ctl_table *sd_alloc_ctl_cpu_table(int cpu) ++{ ++ struct ctl_table *entry, *table; ++ struct sched_domain *sd; ++ int domain_num = 0, i; ++ char buf[32]; ++ ++ for_each_domain(cpu, sd) ++ domain_num++; ++ entry = table = sd_alloc_ctl_entry(domain_num + 1); ++ if (table == NULL) ++ return NULL; ++ ++ i = 0; ++ 
for_each_domain(cpu, sd) { ++ snprintf(buf, 32, "domain%d", i); ++ entry->procname = kstrdup(buf, GFP_KERNEL); ++ entry->mode = 0555; ++ entry->child = sd_alloc_ctl_domain_table(sd); ++ entry++; ++ i++; ++ } ++ return table; ++} ++ ++static struct ctl_table_header *sd_sysctl_header; ++static void register_sched_domain_sysctl(void) ++{ ++ int i, cpu_num = num_online_cpus(); ++ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); ++ char buf[32]; ++ ++ WARN_ON(sd_ctl_dir[0].child); ++ sd_ctl_dir[0].child = entry; ++ ++ if (entry == NULL) ++ return; ++ ++ for_each_online_cpu(i) { ++ snprintf(buf, 32, "cpu%d", i); ++ entry->procname = kstrdup(buf, GFP_KERNEL); ++ entry->mode = 0555; ++ entry->child = sd_alloc_ctl_cpu_table(i); ++ entry++; ++ } ++ ++ WARN_ON(sd_sysctl_header); ++ sd_sysctl_header = register_sysctl_table(sd_ctl_root); ++} ++ ++/* may be called multiple times per register */ ++static void unregister_sched_domain_sysctl(void) ++{ ++ if (sd_sysctl_header) ++ unregister_sysctl_table(sd_sysctl_header); ++ sd_sysctl_header = NULL; ++ if (sd_ctl_dir[0].child) ++ sd_free_ctl_entry(&sd_ctl_dir[0].child); ++} ++#else ++static void register_sched_domain_sysctl(void) ++{ ++} ++static void unregister_sched_domain_sysctl(void) ++{ ++} ++#endif ++ ++static void set_rq_online(struct rq *rq) ++{ ++ if (!rq->online) { ++ const struct sched_class *class; ++ ++ cpu_set(rq->cpu, rq->rd->online); ++ rq->online = 1; ++ ++ for_each_class(class) { ++ if (class->rq_online) ++ class->rq_online(rq); ++ } ++ } ++} ++ ++static void set_rq_offline(struct rq *rq) ++{ ++ if (rq->online) { ++ const struct sched_class *class; ++ ++ for_each_class(class) { ++ if (class->rq_offline) ++ class->rq_offline(rq); ++ } ++ ++ cpu_clear(rq->cpu, rq->rd->online); ++ rq->online = 0; ++ } ++} ++ ++/* ++ * migration_call - callback that gets triggered when a CPU is added. ++ * Here we can start up the necessary migration thread for the new CPU. ++ */ ++static int __cpuinit ++migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) ++{ ++ struct task_struct *p; ++ int cpu = (long)hcpu; ++ unsigned long flags; ++ struct rq *rq; ++ ++ switch (action) { ++ ++ case CPU_UP_PREPARE: ++ case CPU_UP_PREPARE_FROZEN: ++ p = kthread_create(migration_thread, hcpu, "migration/%d", cpu); ++ if (IS_ERR(p)) ++ return NOTIFY_BAD; ++ kthread_bind(p, cpu); ++ /* Must be high prio: stop_machine expects to yield to it. */ ++ rq = task_rq_lock(p, &flags); ++ __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); ++ task_rq_unlock(rq, &flags); ++ cpu_rq(cpu)->migration_thread = p; ++ break; ++ ++ case CPU_ONLINE: ++ case CPU_ONLINE_FROZEN: ++ /* Strictly unnecessary, as first user will wake it. */ ++ wake_up_process(cpu_rq(cpu)->migration_thread); ++ ++ /* Update our root-domain */ ++ rq = cpu_rq(cpu); ++ spin_lock_irqsave(&rq->lock, flags); ++ if (rq->rd) { ++ BUG_ON(!cpu_isset(cpu, rq->rd->span)); ++ ++ set_rq_online(rq); ++ } ++ spin_unlock_irqrestore(&rq->lock, flags); ++ break; ++ ++#ifdef CONFIG_HOTPLUG_CPU ++ case CPU_UP_CANCELED: ++ case CPU_UP_CANCELED_FROZEN: ++ if (!cpu_rq(cpu)->migration_thread) ++ break; ++ /* Unbind it from offline cpu so it can run. Fall thru. 
*/ ++ kthread_bind(cpu_rq(cpu)->migration_thread, ++ any_online_cpu(cpu_online_map)); ++ kthread_stop(cpu_rq(cpu)->migration_thread); ++ cpu_rq(cpu)->migration_thread = NULL; ++ break; ++ ++ case CPU_DEAD: ++ case CPU_DEAD_FROZEN: ++ cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */ ++ migrate_live_tasks(cpu); ++ rq = cpu_rq(cpu); ++ kthread_stop(rq->migration_thread); ++ rq->migration_thread = NULL; ++ /* Idle task back to normal (off runqueue, low prio) */ ++ spin_lock_irq(&rq->lock); ++ update_rq_clock(rq); ++ deactivate_task(rq, rq->idle, 0); ++ rq->idle->static_prio = MAX_PRIO; ++ __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); ++ rq->idle->sched_class = &idle_sched_class; ++ migrate_dead_tasks(cpu); ++ spin_unlock_irq(&rq->lock); ++ cpuset_unlock(); ++ migrate_nr_uninterruptible(rq); ++ BUG_ON(rq->nr_running != 0); ++ ++ /* ++ * No need to migrate the tasks: it was best-effort if ++ * they didn't take sched_hotcpu_mutex. Just wake up ++ * the requestors. ++ */ ++ spin_lock_irq(&rq->lock); ++ while (!list_empty(&rq->migration_queue)) { ++ struct migration_req *req; ++ ++ req = list_entry(rq->migration_queue.next, ++ struct migration_req, list); ++ list_del_init(&req->list); ++ spin_unlock_irq(&rq->lock); ++ complete(&req->done); ++ spin_lock_irq(&rq->lock); ++ } ++ spin_unlock_irq(&rq->lock); ++ break; ++ ++ case CPU_DYING: ++ case CPU_DYING_FROZEN: ++ /* Update our root-domain */ ++ rq = cpu_rq(cpu); ++ spin_lock_irqsave(&rq->lock, flags); ++ if (rq->rd) { ++ BUG_ON(!cpu_isset(cpu, rq->rd->span)); ++ set_rq_offline(rq); ++ } ++ spin_unlock_irqrestore(&rq->lock, flags); ++ break; ++#endif ++ } ++ return NOTIFY_OK; ++} ++ ++/* Register at highest priority so that task migration (migrate_all_tasks) ++ * happens before everything else. 
++ */ ++static struct notifier_block __cpuinitdata migration_notifier = { ++ .notifier_call = migration_call, ++ .priority = 10 ++}; ++ ++static int __init migration_init(void) ++{ ++ void *cpu = (void *)(long)smp_processor_id(); ++ int err; ++ ++ /* Start one for the boot CPU: */ ++ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); ++ BUG_ON(err == NOTIFY_BAD); ++ migration_call(&migration_notifier, CPU_ONLINE, cpu); ++ register_cpu_notifier(&migration_notifier); ++ ++ return err; ++} ++early_initcall(migration_init); ++#endif ++ ++#ifdef CONFIG_SMP ++ ++#ifdef CONFIG_SCHED_DEBUG ++ ++static inline const char *sd_level_to_string(enum sched_domain_level lvl) ++{ ++ switch (lvl) { ++ case SD_LV_NONE: ++ return "NONE"; ++ case SD_LV_SIBLING: ++ return "SIBLING"; ++ case SD_LV_MC: ++ return "MC"; ++ case SD_LV_CPU: ++ return "CPU"; ++ case SD_LV_NODE: ++ return "NODE"; ++ case SD_LV_ALLNODES: ++ return "ALLNODES"; ++ case SD_LV_MAX: ++ return "MAX"; ++ ++ } ++ return "MAX"; ++} ++ ++static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, ++ cpumask_t *groupmask) ++{ ++ struct sched_group *group = sd->groups; ++ char str[256]; ++ ++ cpulist_scnprintf(str, sizeof(str), sd->span); ++ cpus_clear(*groupmask); ++ ++ printk(KERN_DEBUG "%*s domain %d: ", level, "", level); ++ ++ if (!(sd->flags & SD_LOAD_BALANCE)) { ++ printk("does not load-balance\n"); ++ if (sd->parent) ++ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" ++ " has parent"); ++ return -1; ++ } ++ ++ printk(KERN_CONT "span %s level %s\n", ++ str, sd_level_to_string(sd->level)); ++ ++ if (!cpu_isset(cpu, sd->span)) { ++ printk(KERN_ERR "ERROR: domain->span does not contain " ++ "CPU%d\n", cpu); ++ } ++ if (!cpu_isset(cpu, group->cpumask)) { ++ printk(KERN_ERR "ERROR: domain->groups does not contain" ++ " CPU%d\n", cpu); ++ } ++ ++ printk(KERN_DEBUG "%*s groups:", level + 1, ""); ++ do { ++ if (!group) { ++ printk("\n"); ++ printk(KERN_ERR "ERROR: group is NULL\n"); ++ break; ++ } ++ ++ if (!group->__cpu_power) { ++ printk(KERN_CONT "\n"); ++ printk(KERN_ERR "ERROR: domain->cpu_power not " ++ "set\n"); ++ break; ++ } ++ ++ if (!cpus_weight(group->cpumask)) { ++ printk(KERN_CONT "\n"); ++ printk(KERN_ERR "ERROR: empty group\n"); ++ break; ++ } ++ ++ if (cpus_intersects(*groupmask, group->cpumask)) { ++ printk(KERN_CONT "\n"); ++ printk(KERN_ERR "ERROR: repeated CPUs\n"); ++ break; ++ } ++ ++ cpus_or(*groupmask, *groupmask, group->cpumask); ++ ++ cpulist_scnprintf(str, sizeof(str), group->cpumask); ++ printk(KERN_CONT " %s", str); ++ ++ group = group->next; ++ } while (group != sd->groups); ++ printk(KERN_CONT "\n"); ++ ++ if (!cpus_equal(sd->span, *groupmask)) ++ printk(KERN_ERR "ERROR: groups don't span domain->span\n"); ++ ++ if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) ++ printk(KERN_ERR "ERROR: parent span is not a superset " ++ "of domain->span\n"); ++ return 0; ++} ++ ++static void sched_domain_debug(struct sched_domain *sd, int cpu) ++{ ++ cpumask_t *groupmask; ++ int level = 0; ++ ++ if (!sd) { ++ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); ++ return; ++ } ++ ++ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); ++ ++ groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); ++ if (!groupmask) { ++ printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); ++ return; ++ } ++ ++ for (;;) { ++ if (sched_domain_debug_one(sd, cpu, level, groupmask)) ++ break; ++ level++; ++ sd = sd->parent; ++ if (!sd) ++ break; ++ } ++ kfree(groupmask); ++} ++#else /* 
!CONFIG_SCHED_DEBUG */ ++# define sched_domain_debug(sd, cpu) do { } while (0) ++#endif /* CONFIG_SCHED_DEBUG */ ++ ++static int sd_degenerate(struct sched_domain *sd) ++{ ++ if (cpus_weight(sd->span) == 1) ++ return 1; ++ ++ /* Following flags need at least 2 groups */ ++ if (sd->flags & (SD_LOAD_BALANCE | ++ SD_BALANCE_NEWIDLE | ++ SD_BALANCE_FORK | ++ SD_BALANCE_EXEC | ++ SD_SHARE_CPUPOWER | ++ SD_SHARE_PKG_RESOURCES)) { ++ if (sd->groups != sd->groups->next) ++ return 0; ++ } ++ ++ /* Following flags don't use groups */ ++ if (sd->flags & (SD_WAKE_IDLE | ++ SD_WAKE_AFFINE | ++ SD_WAKE_BALANCE)) ++ return 0; ++ ++ return 1; ++} ++ ++static int ++sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) ++{ ++ unsigned long cflags = sd->flags, pflags = parent->flags; ++ ++ if (sd_degenerate(parent)) ++ return 1; ++ ++ if (!cpus_equal(sd->span, parent->span)) ++ return 0; ++ ++ /* Does parent contain flags not in child? */ ++ /* WAKE_BALANCE is a subset of WAKE_AFFINE */ ++ if (cflags & SD_WAKE_AFFINE) ++ pflags &= ~SD_WAKE_BALANCE; ++ /* Flags needing groups don't count if only 1 group in parent */ ++ if (parent->groups == parent->groups->next) { ++ pflags &= ~(SD_LOAD_BALANCE | ++ SD_BALANCE_NEWIDLE | ++ SD_BALANCE_FORK | ++ SD_BALANCE_EXEC | ++ SD_SHARE_CPUPOWER | ++ SD_SHARE_PKG_RESOURCES); ++ } ++ if (~cflags & pflags) ++ return 0; ++ ++ return 1; ++} ++ ++static void rq_attach_root(struct rq *rq, struct root_domain *rd) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&rq->lock, flags); ++ ++ if (rq->rd) { ++ struct root_domain *old_rd = rq->rd; ++ ++ if (cpu_isset(rq->cpu, old_rd->online)) ++ set_rq_offline(rq); ++ ++ cpu_clear(rq->cpu, old_rd->span); ++ ++ if (atomic_dec_and_test(&old_rd->refcount)) ++ kfree(old_rd); ++ } ++ ++ atomic_inc(&rd->refcount); ++ rq->rd = rd; ++ ++ cpu_set(rq->cpu, rd->span); ++ if (cpu_isset(rq->cpu, cpu_online_map)) ++ set_rq_online(rq); ++ ++ spin_unlock_irqrestore(&rq->lock, flags); ++} ++ ++static void init_rootdomain(struct root_domain *rd) ++{ ++ memset(rd, 0, sizeof(*rd)); ++ ++ cpus_clear(rd->span); ++ cpus_clear(rd->online); ++ ++ cpupri_init(&rd->cpupri); ++} ++ ++static void init_defrootdomain(void) ++{ ++ init_rootdomain(&def_root_domain); ++ atomic_set(&def_root_domain.refcount, 1); ++} ++ ++static struct root_domain *alloc_rootdomain(void) ++{ ++ struct root_domain *rd; ++ ++ rd = kmalloc(sizeof(*rd), GFP_KERNEL); ++ if (!rd) ++ return NULL; ++ ++ init_rootdomain(rd); ++ ++ return rd; ++} ++ ++/* ++ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must ++ * hold the hotplug lock. ++ */ ++static void ++cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct sched_domain *tmp; ++ ++ /* Remove the sched domains which do not contribute to scheduling. 
*/ ++ for (tmp = sd; tmp; ) { ++ struct sched_domain *parent = tmp->parent; ++ if (!parent) ++ break; ++ ++ if (sd_parent_degenerate(tmp, parent)) { ++ tmp->parent = parent->parent; ++ if (parent->parent) ++ parent->parent->child = tmp; ++ } else ++ tmp = tmp->parent; ++ } ++ ++ if (sd && sd_degenerate(sd)) { ++ sd = sd->parent; ++ if (sd) ++ sd->child = NULL; ++ } ++ ++ sched_domain_debug(sd, cpu); ++ ++ rq_attach_root(rq, rd); ++ rcu_assign_pointer(rq->sd, sd); ++} ++ ++/* cpus with isolated domains */ ++static cpumask_t cpu_isolated_map = CPU_MASK_NONE; ++ ++/* Setup the mask of cpus configured for isolated domains */ ++static int __init isolated_cpu_setup(char *str) ++{ ++ static int __initdata ints[NR_CPUS]; ++ int i; ++ ++ str = get_options(str, ARRAY_SIZE(ints), ints); ++ cpus_clear(cpu_isolated_map); ++ for (i = 1; i <= ints[0]; i++) ++ if (ints[i] < NR_CPUS) ++ cpu_set(ints[i], cpu_isolated_map); ++ return 1; ++} ++ ++__setup("isolcpus=", isolated_cpu_setup); ++ ++/* ++ * init_sched_build_groups takes the cpumask we wish to span, and a pointer ++ * to a function which identifies what group(along with sched group) a CPU ++ * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS ++ * (due to the fact that we keep track of groups covered with a cpumask_t). ++ * ++ * init_sched_build_groups will build a circular linked list of the groups ++ * covered by the given span, and will set each group's ->cpumask correctly, ++ * and ->cpu_power to 0. ++ */ ++static void ++init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, ++ int (*group_fn)(int cpu, const cpumask_t *cpu_map, ++ struct sched_group **sg, ++ cpumask_t *tmpmask), ++ cpumask_t *covered, cpumask_t *tmpmask) ++{ ++ struct sched_group *first = NULL, *last = NULL; ++ int i; ++ ++ cpus_clear(*covered); ++ ++ for_each_cpu_mask_nr(i, *span) { ++ struct sched_group *sg; ++ int group = group_fn(i, cpu_map, &sg, tmpmask); ++ int j; ++ ++ if (cpu_isset(i, *covered)) ++ continue; ++ ++ cpus_clear(sg->cpumask); ++ sg->__cpu_power = 0; ++ ++ for_each_cpu_mask_nr(j, *span) { ++ if (group_fn(j, cpu_map, NULL, tmpmask) != group) ++ continue; ++ ++ cpu_set(j, *covered); ++ cpu_set(j, sg->cpumask); ++ } ++ if (!first) ++ first = sg; ++ if (last) ++ last->next = sg; ++ last = sg; ++ } ++ last->next = first; ++} ++ ++#define SD_NODES_PER_DOMAIN 16 ++ ++#ifdef CONFIG_NUMA ++ ++/** ++ * find_next_best_node - find the next node to include in a sched_domain ++ * @node: node whose sched_domain we're building ++ * @used_nodes: nodes already in the sched_domain ++ * ++ * Find the next node to include in a given scheduling domain. Simply ++ * finds the closest node not already in the @used_nodes map. ++ * ++ * Should use nodemask_t. 
++ */ ++static int find_next_best_node(int node, nodemask_t *used_nodes) ++{ ++ int i, n, val, min_val, best_node = 0; ++ ++ min_val = INT_MAX; ++ ++ for (i = 0; i < nr_node_ids; i++) { ++ /* Start at @node */ ++ n = (node + i) % nr_node_ids; ++ ++ if (!nr_cpus_node(n)) ++ continue; ++ ++ /* Skip already used nodes */ ++ if (node_isset(n, *used_nodes)) ++ continue; ++ ++ /* Simple min distance search */ ++ val = node_distance(node, n); ++ ++ if (val < min_val) { ++ min_val = val; ++ best_node = n; ++ } ++ } ++ ++ node_set(best_node, *used_nodes); ++ return best_node; ++} ++ ++/** ++ * sched_domain_node_span - get a cpumask for a node's sched_domain ++ * @node: node whose cpumask we're constructing ++ * @span: resulting cpumask ++ * ++ * Given a node, construct a good cpumask for its sched_domain to span. It ++ * should be one that prevents unnecessary balancing, but also spreads tasks ++ * out optimally. ++ */ ++static void sched_domain_node_span(int node, cpumask_t *span) ++{ ++ nodemask_t used_nodes; ++ node_to_cpumask_ptr(nodemask, node); ++ int i; ++ ++ cpus_clear(*span); ++ nodes_clear(used_nodes); ++ ++ cpus_or(*span, *span, *nodemask); ++ node_set(node, used_nodes); ++ ++ for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { ++ int next_node = find_next_best_node(node, &used_nodes); ++ ++ node_to_cpumask_ptr_next(nodemask, next_node); ++ cpus_or(*span, *span, *nodemask); ++ } ++} ++#endif /* CONFIG_NUMA */ ++ ++int sched_smt_power_savings = 0, sched_mc_power_savings = 0; ++ ++/* ++ * SMT sched-domains: ++ */ ++#ifdef CONFIG_SCHED_SMT ++static DEFINE_PER_CPU(struct sched_domain, cpu_domains); ++static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); ++ ++static int ++cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, ++ cpumask_t *unused) ++{ ++ if (sg) ++ *sg = &per_cpu(sched_group_cpus, cpu); ++ return cpu; ++} ++#endif /* CONFIG_SCHED_SMT */ ++ ++/* ++ * multi-core sched-domains: ++ */ ++#ifdef CONFIG_SCHED_MC ++static DEFINE_PER_CPU(struct sched_domain, core_domains); ++static DEFINE_PER_CPU(struct sched_group, sched_group_core); ++#endif /* CONFIG_SCHED_MC */ ++ ++#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) ++static int ++cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, ++ cpumask_t *mask) ++{ ++ int group; ++ ++ *mask = per_cpu(cpu_sibling_map, cpu); ++ cpus_and(*mask, *mask, *cpu_map); ++ group = first_cpu(*mask); ++ if (sg) ++ *sg = &per_cpu(sched_group_core, group); ++ return group; ++} ++#elif defined(CONFIG_SCHED_MC) ++static int ++cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, ++ cpumask_t *unused) ++{ ++ if (sg) ++ *sg = &per_cpu(sched_group_core, cpu); ++ return cpu; ++} ++#endif ++ ++static DEFINE_PER_CPU(struct sched_domain, phys_domains); ++static DEFINE_PER_CPU(struct sched_group, sched_group_phys); ++ ++static int ++cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, ++ cpumask_t *mask) ++{ ++ int group; ++#ifdef CONFIG_SCHED_MC ++ *mask = cpu_coregroup_map(cpu); ++ cpus_and(*mask, *mask, *cpu_map); ++ group = first_cpu(*mask); ++#elif defined(CONFIG_SCHED_SMT) ++ *mask = per_cpu(cpu_sibling_map, cpu); ++ cpus_and(*mask, *mask, *cpu_map); ++ group = first_cpu(*mask); ++#else ++ group = cpu; ++#endif ++ if (sg) ++ *sg = &per_cpu(sched_group_phys, group); ++ return group; ++} ++ ++#ifdef CONFIG_NUMA ++/* ++ * The init_sched_build_groups can't handle what we want to do with node ++ * groups, so roll our own. 
Now each node has its own list of groups which ++ * gets dynamically allocated. ++ */ ++static DEFINE_PER_CPU(struct sched_domain, node_domains); ++static struct sched_group ***sched_group_nodes_bycpu; ++ ++static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); ++static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); ++ ++static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, ++ struct sched_group **sg, cpumask_t *nodemask) ++{ ++ int group; ++ ++ *nodemask = node_to_cpumask(cpu_to_node(cpu)); ++ cpus_and(*nodemask, *nodemask, *cpu_map); ++ group = first_cpu(*nodemask); ++ ++ if (sg) ++ *sg = &per_cpu(sched_group_allnodes, group); ++ return group; ++} ++ ++static void init_numa_sched_groups_power(struct sched_group *group_head) ++{ ++ struct sched_group *sg = group_head; ++ int j; ++ ++ if (!sg) ++ return; ++ do { ++ for_each_cpu_mask_nr(j, sg->cpumask) { ++ struct sched_domain *sd; ++ ++ sd = &per_cpu(phys_domains, j); ++ if (j != first_cpu(sd->groups->cpumask)) { ++ /* ++ * Only add "power" once for each ++ * physical package. ++ */ ++ continue; ++ } ++ ++ sg_inc_cpu_power(sg, sd->groups->__cpu_power); ++ } ++ sg = sg->next; ++ } while (sg != group_head); ++} ++#endif /* CONFIG_NUMA */ ++ ++#ifdef CONFIG_NUMA ++/* Free memory allocated for various sched_group structures */ ++static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) ++{ ++ int cpu, i; ++ ++ for_each_cpu_mask_nr(cpu, *cpu_map) { ++ struct sched_group **sched_group_nodes ++ = sched_group_nodes_bycpu[cpu]; ++ ++ if (!sched_group_nodes) ++ continue; ++ ++ for (i = 0; i < nr_node_ids; i++) { ++ struct sched_group *oldsg, *sg = sched_group_nodes[i]; ++ ++ *nodemask = node_to_cpumask(i); ++ cpus_and(*nodemask, *nodemask, *cpu_map); ++ if (cpus_empty(*nodemask)) ++ continue; ++ ++ if (sg == NULL) ++ continue; ++ sg = sg->next; ++next_sg: ++ oldsg = sg; ++ sg = sg->next; ++ kfree(oldsg); ++ if (oldsg != sched_group_nodes[i]) ++ goto next_sg; ++ } ++ kfree(sched_group_nodes); ++ sched_group_nodes_bycpu[cpu] = NULL; ++ } ++} ++#else /* !CONFIG_NUMA */ ++static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) ++{ ++} ++#endif /* CONFIG_NUMA */ ++ ++/* ++ * Initialize sched groups cpu_power. ++ * ++ * cpu_power indicates the capacity of sched group, which is used while ++ * distributing the load between different sched groups in a sched domain. ++ * Typically cpu_power for all the groups in a sched domain will be same unless ++ * there are asymmetries in the topology. If there are asymmetries, group ++ * having more cpu_power will pickup more load compared to the group having ++ * less cpu_power. ++ * ++ * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents ++ * the maximum number of tasks a group can handle in the presence of other idle ++ * or lightly loaded groups in the same sched domain. ++ */ ++static void init_sched_groups_power(int cpu, struct sched_domain *sd) ++{ ++ struct sched_domain *child; ++ struct sched_group *group; ++ ++ WARN_ON(!sd || !sd->groups); ++ ++ if (cpu != first_cpu(sd->groups->cpumask)) ++ return; ++ ++ child = sd->child; ++ ++ sd->groups->__cpu_power = 0; ++ ++ /* ++ * For perf policy, if the groups in child domain share resources ++ * (for example cores sharing some portions of the cache hierarchy ++ * or SMT), then set this domain groups cpu_power such that each group ++ * can handle only one task, when there are other idle groups in the ++ * same sched domain. 
++ */ ++ if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) && ++ (child->flags & ++ (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) { ++ sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE); ++ return; ++ } ++ ++ /* ++ * add cpu_power of each child group to this groups cpu_power ++ */ ++ group = child->groups; ++ do { ++ sg_inc_cpu_power(sd->groups, group->__cpu_power); ++ group = group->next; ++ } while (group != child->groups); ++} ++ ++/* ++ * Initializers for schedule domains ++ * Non-inlined to reduce accumulated stack pressure in build_sched_domains() ++ */ ++ ++#define SD_INIT(sd, type) sd_init_##type(sd) ++#define SD_INIT_FUNC(type) \ ++static noinline void sd_init_##type(struct sched_domain *sd) \ ++{ \ ++ memset(sd, 0, sizeof(*sd)); \ ++ *sd = SD_##type##_INIT; \ ++ sd->level = SD_LV_##type; \ ++} ++ ++SD_INIT_FUNC(CPU) ++#ifdef CONFIG_NUMA ++ SD_INIT_FUNC(ALLNODES) ++ SD_INIT_FUNC(NODE) ++#endif ++#ifdef CONFIG_SCHED_SMT ++ SD_INIT_FUNC(SIBLING) ++#endif ++#ifdef CONFIG_SCHED_MC ++ SD_INIT_FUNC(MC) ++#endif ++ ++/* ++ * To minimize stack usage kmalloc room for cpumasks and share the ++ * space as the usage in build_sched_domains() dictates. Used only ++ * if the amount of space is significant. ++ */ ++struct allmasks { ++ cpumask_t tmpmask; /* make this one first */ ++ union { ++ cpumask_t nodemask; ++ cpumask_t this_sibling_map; ++ cpumask_t this_core_map; ++ }; ++ cpumask_t send_covered; ++ ++#ifdef CONFIG_NUMA ++ cpumask_t domainspan; ++ cpumask_t covered; ++ cpumask_t notcovered; ++#endif ++}; ++ ++#if NR_CPUS > 128 ++#define SCHED_CPUMASK_ALLOC 1 ++#define SCHED_CPUMASK_FREE(v) kfree(v) ++#define SCHED_CPUMASK_DECLARE(v) struct allmasks *v ++#else ++#define SCHED_CPUMASK_ALLOC 0 ++#define SCHED_CPUMASK_FREE(v) ++#define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v ++#endif ++ ++#define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ ++ ((unsigned long)(a) + offsetof(struct allmasks, v)) ++ ++static int default_relax_domain_level = -1; ++ ++static int __init setup_relax_domain_level(char *str) ++{ ++ unsigned long val; ++ ++ val = simple_strtoul(str, NULL, 0); ++ if (val < SD_LV_MAX) ++ default_relax_domain_level = val; ++ ++ return 1; ++} ++__setup("relax_domain_level=", setup_relax_domain_level); ++ ++static void set_domain_attribute(struct sched_domain *sd, ++ struct sched_domain_attr *attr) ++{ ++ int request; ++ ++ if (!attr || attr->relax_domain_level < 0) { ++ if (default_relax_domain_level < 0) ++ return; ++ else ++ request = default_relax_domain_level; ++ } else ++ request = attr->relax_domain_level; ++ if (request < sd->level) { ++ /* turn off idle balance on this domain */ ++ sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE); ++ } else { ++ /* turn on idle balance on this domain */ ++ sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE); ++ } ++} ++ ++/* ++ * Build sched domains for a given set of cpus and attach the sched domains ++ * to the individual cpus ++ */ ++static int __build_sched_domains(const cpumask_t *cpu_map, ++ struct sched_domain_attr *attr) ++{ ++ int i; ++ struct root_domain *rd; ++ SCHED_CPUMASK_DECLARE(allmasks); ++ cpumask_t *tmpmask; ++#ifdef CONFIG_NUMA ++ struct sched_group **sched_group_nodes = NULL; ++ int sd_allnodes = 0; ++ ++ /* ++ * Allocate the per-node list of sched groups ++ */ ++ sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *), ++ GFP_KERNEL); ++ if (!sched_group_nodes) { ++ printk(KERN_WARNING "Can not alloc sched group node list\n"); ++ return -ENOMEM; ++ } ++#endif ++ ++ rd = alloc_rootdomain(); 
++ if (!rd) { ++ printk(KERN_WARNING "Cannot alloc root domain\n"); ++#ifdef CONFIG_NUMA ++ kfree(sched_group_nodes); ++#endif ++ return -ENOMEM; ++ } ++ ++#if SCHED_CPUMASK_ALLOC ++ /* get space for all scratch cpumask variables */ ++ allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL); ++ if (!allmasks) { ++ printk(KERN_WARNING "Cannot alloc cpumask array\n"); ++ kfree(rd); ++#ifdef CONFIG_NUMA ++ kfree(sched_group_nodes); ++#endif ++ return -ENOMEM; ++ } ++#endif ++ tmpmask = (cpumask_t *)allmasks; ++ ++ ++#ifdef CONFIG_NUMA ++ sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; ++#endif ++ ++ /* ++ * Set up domains for cpus specified by the cpu_map. ++ */ ++ for_each_cpu_mask_nr(i, *cpu_map) { ++ struct sched_domain *sd = NULL, *p; ++ SCHED_CPUMASK_VAR(nodemask, allmasks); ++ ++ *nodemask = node_to_cpumask(cpu_to_node(i)); ++ cpus_and(*nodemask, *nodemask, *cpu_map); ++ ++#ifdef CONFIG_NUMA ++ if (cpus_weight(*cpu_map) > ++ SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { ++ sd = &per_cpu(allnodes_domains, i); ++ SD_INIT(sd, ALLNODES); ++ set_domain_attribute(sd, attr); ++ sd->span = *cpu_map; ++ cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); ++ p = sd; ++ sd_allnodes = 1; ++ } else ++ p = NULL; ++ ++ sd = &per_cpu(node_domains, i); ++ SD_INIT(sd, NODE); ++ set_domain_attribute(sd, attr); ++ sched_domain_node_span(cpu_to_node(i), &sd->span); ++ sd->parent = p; ++ if (p) ++ p->child = sd; ++ cpus_and(sd->span, sd->span, *cpu_map); ++#endif ++ ++ p = sd; ++ sd = &per_cpu(phys_domains, i); ++ SD_INIT(sd, CPU); ++ set_domain_attribute(sd, attr); ++ sd->span = *nodemask; ++ sd->parent = p; ++ if (p) ++ p->child = sd; ++ cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask); ++ ++#ifdef CONFIG_SCHED_MC ++ p = sd; ++ sd = &per_cpu(core_domains, i); ++ SD_INIT(sd, MC); ++ set_domain_attribute(sd, attr); ++ sd->span = cpu_coregroup_map(i); ++ cpus_and(sd->span, sd->span, *cpu_map); ++ sd->parent = p; ++ p->child = sd; ++ cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); ++#endif ++ ++#ifdef CONFIG_SCHED_SMT ++ p = sd; ++ sd = &per_cpu(cpu_domains, i); ++ SD_INIT(sd, SIBLING); ++ set_domain_attribute(sd, attr); ++ sd->span = per_cpu(cpu_sibling_map, i); ++ cpus_and(sd->span, sd->span, *cpu_map); ++ sd->parent = p; ++ p->child = sd; ++ cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); ++#endif ++ } ++ ++#ifdef CONFIG_SCHED_SMT ++ /* Set up CPU (sibling) groups */ ++ for_each_cpu_mask_nr(i, *cpu_map) { ++ SCHED_CPUMASK_VAR(this_sibling_map, allmasks); ++ SCHED_CPUMASK_VAR(send_covered, allmasks); ++ ++ *this_sibling_map = per_cpu(cpu_sibling_map, i); ++ cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); ++ if (i != first_cpu(*this_sibling_map)) ++ continue; ++ ++ init_sched_build_groups(this_sibling_map, cpu_map, ++ &cpu_to_cpu_group, ++ send_covered, tmpmask); ++ } ++#endif ++ ++#ifdef CONFIG_SCHED_MC ++ /* Set up multi-core groups */ ++ for_each_cpu_mask_nr(i, *cpu_map) { ++ SCHED_CPUMASK_VAR(this_core_map, allmasks); ++ SCHED_CPUMASK_VAR(send_covered, allmasks); ++ ++ *this_core_map = cpu_coregroup_map(i); ++ cpus_and(*this_core_map, *this_core_map, *cpu_map); ++ if (i != first_cpu(*this_core_map)) ++ continue; ++ ++ init_sched_build_groups(this_core_map, cpu_map, ++ &cpu_to_core_group, ++ send_covered, tmpmask); ++ } ++#endif ++ ++ /* Set up physical groups */ ++ for (i = 0; i < nr_node_ids; i++) { ++ SCHED_CPUMASK_VAR(nodemask, allmasks); ++ SCHED_CPUMASK_VAR(send_covered, allmasks); ++ ++ *nodemask = node_to_cpumask(i); ++ cpus_and(*nodemask, *nodemask, 
*cpu_map); ++ if (cpus_empty(*nodemask)) ++ continue; ++ ++ init_sched_build_groups(nodemask, cpu_map, ++ &cpu_to_phys_group, ++ send_covered, tmpmask); ++ } ++ ++#ifdef CONFIG_NUMA ++ /* Set up node groups */ ++ if (sd_allnodes) { ++ SCHED_CPUMASK_VAR(send_covered, allmasks); ++ ++ init_sched_build_groups(cpu_map, cpu_map, ++ &cpu_to_allnodes_group, ++ send_covered, tmpmask); ++ } ++ ++ for (i = 0; i < nr_node_ids; i++) { ++ /* Set up node groups */ ++ struct sched_group *sg, *prev; ++ SCHED_CPUMASK_VAR(nodemask, allmasks); ++ SCHED_CPUMASK_VAR(domainspan, allmasks); ++ SCHED_CPUMASK_VAR(covered, allmasks); ++ int j; ++ ++ *nodemask = node_to_cpumask(i); ++ cpus_clear(*covered); ++ ++ cpus_and(*nodemask, *nodemask, *cpu_map); ++ if (cpus_empty(*nodemask)) { ++ sched_group_nodes[i] = NULL; ++ continue; ++ } ++ ++ sched_domain_node_span(i, domainspan); ++ cpus_and(*domainspan, *domainspan, *cpu_map); ++ ++ sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); ++ if (!sg) { ++ printk(KERN_WARNING "Can not alloc domain group for " ++ "node %d\n", i); ++ goto error; ++ } ++ sched_group_nodes[i] = sg; ++ for_each_cpu_mask_nr(j, *nodemask) { ++ struct sched_domain *sd; ++ ++ sd = &per_cpu(node_domains, j); ++ sd->groups = sg; ++ } ++ sg->__cpu_power = 0; ++ sg->cpumask = *nodemask; ++ sg->next = sg; ++ cpus_or(*covered, *covered, *nodemask); ++ prev = sg; ++ ++ for (j = 0; j < nr_node_ids; j++) { ++ SCHED_CPUMASK_VAR(notcovered, allmasks); ++ int n = (i + j) % nr_node_ids; ++ node_to_cpumask_ptr(pnodemask, n); ++ ++ cpus_complement(*notcovered, *covered); ++ cpus_and(*tmpmask, *notcovered, *cpu_map); ++ cpus_and(*tmpmask, *tmpmask, *domainspan); ++ if (cpus_empty(*tmpmask)) ++ break; ++ ++ cpus_and(*tmpmask, *tmpmask, *pnodemask); ++ if (cpus_empty(*tmpmask)) ++ continue; ++ ++ sg = kmalloc_node(sizeof(struct sched_group), ++ GFP_KERNEL, i); ++ if (!sg) { ++ printk(KERN_WARNING ++ "Can not alloc domain group for node %d\n", j); ++ goto error; ++ } ++ sg->__cpu_power = 0; ++ sg->cpumask = *tmpmask; ++ sg->next = prev->next; ++ cpus_or(*covered, *covered, *tmpmask); ++ prev->next = sg; ++ prev = sg; ++ } ++ } ++#endif ++ ++ /* Calculate CPU power for physical packages and nodes */ ++#ifdef CONFIG_SCHED_SMT ++ for_each_cpu_mask_nr(i, *cpu_map) { ++ struct sched_domain *sd = &per_cpu(cpu_domains, i); ++ ++ init_sched_groups_power(i, sd); ++ } ++#endif ++#ifdef CONFIG_SCHED_MC ++ for_each_cpu_mask_nr(i, *cpu_map) { ++ struct sched_domain *sd = &per_cpu(core_domains, i); ++ ++ init_sched_groups_power(i, sd); ++ } ++#endif ++ ++ for_each_cpu_mask_nr(i, *cpu_map) { ++ struct sched_domain *sd = &per_cpu(phys_domains, i); ++ ++ init_sched_groups_power(i, sd); ++ } ++ ++#ifdef CONFIG_NUMA ++ for (i = 0; i < nr_node_ids; i++) ++ init_numa_sched_groups_power(sched_group_nodes[i]); ++ ++ if (sd_allnodes) { ++ struct sched_group *sg; ++ ++ cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, ++ tmpmask); ++ init_numa_sched_groups_power(sg); ++ } ++#endif ++ ++ /* Attach the domains */ ++ for_each_cpu_mask_nr(i, *cpu_map) { ++ struct sched_domain *sd; ++#ifdef CONFIG_SCHED_SMT ++ sd = &per_cpu(cpu_domains, i); ++#elif defined(CONFIG_SCHED_MC) ++ sd = &per_cpu(core_domains, i); ++#else ++ sd = &per_cpu(phys_domains, i); ++#endif ++ cpu_attach_domain(sd, rd, i); ++ } ++ ++ SCHED_CPUMASK_FREE((void *)allmasks); ++ return 0; ++ ++#ifdef CONFIG_NUMA ++error: ++ free_sched_groups(cpu_map, tmpmask); ++ SCHED_CPUMASK_FREE((void *)allmasks); ++ return -ENOMEM; ++#endif ++} ++ ++static int 
build_sched_domains(const cpumask_t *cpu_map) ++{ ++ return __build_sched_domains(cpu_map, NULL); ++} ++ ++static cpumask_t *doms_cur; /* current sched domains */ ++static int ndoms_cur; /* number of sched domains in 'doms_cur' */ ++static struct sched_domain_attr *dattr_cur; ++ /* attribues of custom domains in 'doms_cur' */ ++ ++/* ++ * Special case: If a kmalloc of a doms_cur partition (array of ++ * cpumask_t) fails, then fallback to a single sched domain, ++ * as determined by the single cpumask_t fallback_doms. ++ */ ++static cpumask_t fallback_doms; ++ ++void __attribute__((weak)) arch_update_cpu_topology(void) ++{ ++} ++ ++/* ++ * Set up scheduler domains and groups. Callers must hold the hotplug lock. ++ * For now this just excludes isolated cpus, but could be used to ++ * exclude other special cases in the future. ++ */ ++static int arch_init_sched_domains(const cpumask_t *cpu_map) ++{ ++ int err; ++ ++ arch_update_cpu_topology(); ++ ndoms_cur = 1; ++ doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); ++ if (!doms_cur) ++ doms_cur = &fallback_doms; ++ cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); ++ dattr_cur = NULL; ++ err = build_sched_domains(doms_cur); ++ register_sched_domain_sysctl(); ++ ++ return err; ++} ++ ++static void arch_destroy_sched_domains(const cpumask_t *cpu_map, ++ cpumask_t *tmpmask) ++{ ++ free_sched_groups(cpu_map, tmpmask); ++} ++ ++/* ++ * Detach sched domains from a group of cpus specified in cpu_map ++ * These cpus will now be attached to the NULL domain ++ */ ++static void detach_destroy_domains(const cpumask_t *cpu_map) ++{ ++ cpumask_t tmpmask; ++ int i; ++ ++ unregister_sched_domain_sysctl(); ++ ++ for_each_cpu_mask_nr(i, *cpu_map) ++ cpu_attach_domain(NULL, &def_root_domain, i); ++ synchronize_sched(); ++ arch_destroy_sched_domains(cpu_map, &tmpmask); ++} ++ ++/* handle null as "default" */ ++static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, ++ struct sched_domain_attr *new, int idx_new) ++{ ++ struct sched_domain_attr tmp; ++ ++ /* fast path */ ++ if (!new && !cur) ++ return 1; ++ ++ tmp = SD_ATTR_INIT; ++ return !memcmp(cur ? (cur + idx_cur) : &tmp, ++ new ? (new + idx_new) : &tmp, ++ sizeof(struct sched_domain_attr)); ++} ++ ++/* ++ * Partition sched domains as specified by the 'ndoms_new' ++ * cpumasks in the array doms_new[] of cpumasks. This compares ++ * doms_new[] to the current sched domain partitioning, doms_cur[]. ++ * It destroys each deleted domain and builds each new domain. ++ * ++ * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. ++ * The masks don't intersect (don't overlap.) We should setup one ++ * sched domain for each mask. CPUs not in any of the cpumasks will ++ * not be load balanced. If the same cpumask appears both in the ++ * current 'doms_cur' domains and in the new 'doms_new', we can leave ++ * it as it is. ++ * ++ * The passed in 'doms_new' should be kmalloc'd. This routine takes ++ * ownership of it and will kfree it when done with it. If the caller ++ * failed the kmalloc call, then it can pass in doms_new == NULL && ++ * ndoms_new == 1, and partition_sched_domains() will fallback to ++ * the single partition 'fallback_doms', it also forces the domains ++ * to be rebuilt. ++ * ++ * If doms_new == NULL it will be replaced with cpu_online_map. ++ * ndoms_new == 0 is a special case for destroying existing domains, ++ * and it will not create the default domain. 
++ * ++ * Call with hotplug lock held ++ */ ++void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, ++ struct sched_domain_attr *dattr_new) ++{ ++ int i, j, n; ++ ++ mutex_lock(&sched_domains_mutex); ++ ++ /* always unregister in case we don't destroy any domains */ ++ unregister_sched_domain_sysctl(); ++ ++ n = doms_new ? ndoms_new : 0; ++ ++ /* Destroy deleted domains */ ++ for (i = 0; i < ndoms_cur; i++) { ++ for (j = 0; j < n; j++) { ++ if (cpus_equal(doms_cur[i], doms_new[j]) ++ && dattrs_equal(dattr_cur, i, dattr_new, j)) ++ goto match1; ++ } ++ /* no match - a current sched domain not in new doms_new[] */ ++ detach_destroy_domains(doms_cur + i); ++match1: ++ ; ++ } ++ ++ if (doms_new == NULL) { ++ ndoms_cur = 0; ++ doms_new = &fallback_doms; ++ cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); ++ dattr_new = NULL; ++ } ++ ++ /* Build new domains */ ++ for (i = 0; i < ndoms_new; i++) { ++ for (j = 0; j < ndoms_cur; j++) { ++ if (cpus_equal(doms_new[i], doms_cur[j]) ++ && dattrs_equal(dattr_new, i, dattr_cur, j)) ++ goto match2; ++ } ++ /* no match - add a new doms_new */ ++ __build_sched_domains(doms_new + i, ++ dattr_new ? dattr_new + i : NULL); ++match2: ++ ; ++ } ++ ++ /* Remember the new sched domains */ ++ if (doms_cur != &fallback_doms) ++ kfree(doms_cur); ++ kfree(dattr_cur); /* kfree(NULL) is safe */ ++ doms_cur = doms_new; ++ dattr_cur = dattr_new; ++ ndoms_cur = ndoms_new; ++ ++ register_sched_domain_sysctl(); ++ ++ mutex_unlock(&sched_domains_mutex); ++} ++ ++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) ++int arch_reinit_sched_domains(void) ++{ ++ get_online_cpus(); ++ ++ /* Destroy domains first to force the rebuild */ ++ partition_sched_domains(0, NULL, NULL); ++ ++ rebuild_sched_domains(); ++ put_online_cpus(); ++ ++ return 0; ++} ++ ++static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) ++{ ++ int ret; ++ ++ if (buf[0] != '0' && buf[0] != '1') ++ return -EINVAL; ++ ++ if (smt) ++ sched_smt_power_savings = (buf[0] == '1'); ++ else ++ sched_mc_power_savings = (buf[0] == '1'); ++ ++ ret = arch_reinit_sched_domains(); ++ ++ return ret ? 
ret : count; ++} ++ ++#ifdef CONFIG_SCHED_MC ++static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, ++ char *page) ++{ ++ return sprintf(page, "%u\n", sched_mc_power_savings); ++} ++static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, ++ const char *buf, size_t count) ++{ ++ return sched_power_savings_store(buf, count, 0); ++} ++static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, ++ sched_mc_power_savings_show, ++ sched_mc_power_savings_store); ++#endif ++ ++#ifdef CONFIG_SCHED_SMT ++static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, ++ char *page) ++{ ++ return sprintf(page, "%u\n", sched_smt_power_savings); ++} ++static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, ++ const char *buf, size_t count) ++{ ++ return sched_power_savings_store(buf, count, 1); ++} ++static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, ++ sched_smt_power_savings_show, ++ sched_smt_power_savings_store); ++#endif ++ ++int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) ++{ ++ int err = 0; ++ ++#ifdef CONFIG_SCHED_SMT ++ if (smt_capable()) ++ err = sysfs_create_file(&cls->kset.kobj, ++ &attr_sched_smt_power_savings.attr); ++#endif ++#ifdef CONFIG_SCHED_MC ++ if (!err && mc_capable()) ++ err = sysfs_create_file(&cls->kset.kobj, ++ &attr_sched_mc_power_savings.attr); ++#endif ++ return err; ++} ++#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ ++ ++#ifndef CONFIG_CPUSETS ++/* ++ * Add online and remove offline CPUs from the scheduler domains. ++ * When cpusets are enabled they take over this function. ++ */ ++static int update_sched_domains(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ switch (action) { ++ case CPU_ONLINE: ++ case CPU_ONLINE_FROZEN: ++ case CPU_DEAD: ++ case CPU_DEAD_FROZEN: ++ partition_sched_domains(1, NULL, NULL); ++ return NOTIFY_OK; ++ ++ default: ++ return NOTIFY_DONE; ++ } ++} ++#endif ++ ++static int update_runtime(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ int cpu = (int)(long)hcpu; ++ ++ switch (action) { ++ case CPU_DOWN_PREPARE: ++ case CPU_DOWN_PREPARE_FROZEN: ++ disable_runtime(cpu_rq(cpu)); ++ return NOTIFY_OK; ++ ++ case CPU_DOWN_FAILED: ++ case CPU_DOWN_FAILED_FROZEN: ++ case CPU_ONLINE: ++ case CPU_ONLINE_FROZEN: ++ enable_runtime(cpu_rq(cpu)); ++ return NOTIFY_OK; ++ ++ default: ++ return NOTIFY_DONE; ++ } ++} ++ ++void __init sched_init_smp(void) ++{ ++ cpumask_t non_isolated_cpus; ++ ++#if defined(CONFIG_NUMA) ++ sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), ++ GFP_KERNEL); ++ BUG_ON(sched_group_nodes_bycpu == NULL); ++#endif ++ get_online_cpus(); ++ mutex_lock(&sched_domains_mutex); ++ arch_init_sched_domains(&cpu_online_map); ++ cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); ++ if (cpus_empty(non_isolated_cpus)) ++ cpu_set(smp_processor_id(), non_isolated_cpus); ++ mutex_unlock(&sched_domains_mutex); ++ put_online_cpus(); ++ ++#ifndef CONFIG_CPUSETS ++ /* XXX: Theoretical race here - CPU may be hotplugged now */ ++ hotcpu_notifier(update_sched_domains, 0); ++#endif ++ ++ /* RT runtime code needs to handle some hotplug events */ ++ hotcpu_notifier(update_runtime, 0); ++ ++ init_hrtick(); ++ ++ /* Move init over to a non-isolated CPU */ ++ if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) ++ BUG(); ++ sched_init_granularity(); ++} ++#else ++void __init sched_init_smp(void) ++{ ++ sched_init_granularity(); ++} ++#endif /* CONFIG_SMP */ ++ ++int in_sched_functions(unsigned long 
addr) ++{ ++ return in_lock_functions(addr) || ++ (addr >= (unsigned long)__sched_text_start ++ && addr < (unsigned long)__sched_text_end); ++} ++ ++static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) ++{ ++ cfs_rq->tasks_timeline = RB_ROOT; ++ INIT_LIST_HEAD(&cfs_rq->tasks); ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ cfs_rq->rq = rq; ++#endif ++ cfs_rq->min_vruntime = (u64)(-(1LL << 20)); ++} ++ ++static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) ++{ ++ struct rt_prio_array *array; ++ int i; ++ ++ array = &rt_rq->active; ++ for (i = 0; i < MAX_RT_PRIO; i++) { ++ INIT_LIST_HEAD(array->queue + i); ++ __clear_bit(i, array->bitmap); ++ } ++ /* delimiter for bitsearch: */ ++ __set_bit(MAX_RT_PRIO, array->bitmap); ++ ++#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED ++ rt_rq->highest_prio = MAX_RT_PRIO; ++#endif ++#ifdef CONFIG_SMP ++ rt_rq->rt_nr_migratory = 0; ++ rt_rq->overloaded = 0; ++#endif ++ ++ rt_rq->rt_time = 0; ++ rt_rq->rt_throttled = 0; ++ rt_rq->rt_runtime = 0; ++ spin_lock_init(&rt_rq->rt_runtime_lock); ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++ rt_rq->rt_nr_boosted = 0; ++ rt_rq->rq = rq; ++#endif ++} ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, ++ struct sched_entity *se, int cpu, int add, ++ struct sched_entity *parent) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ tg->cfs_rq[cpu] = cfs_rq; ++ init_cfs_rq(cfs_rq, rq); ++ cfs_rq->tg = tg; ++ if (add) ++ list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); ++ ++ tg->se[cpu] = se; ++ /* se could be NULL for init_task_group */ ++ if (!se) ++ return; ++ ++ if (!parent) ++ se->cfs_rq = &rq->cfs; ++ else ++ se->cfs_rq = parent->my_q; ++ ++ se->my_q = cfs_rq; ++ se->load.weight = tg->shares; ++ se->load.inv_weight = 0; ++ se->parent = parent; ++} ++#endif ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, ++ struct sched_rt_entity *rt_se, int cpu, int add, ++ struct sched_rt_entity *parent) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ tg->rt_rq[cpu] = rt_rq; ++ init_rt_rq(rt_rq, rq); ++ rt_rq->tg = tg; ++ rt_rq->rt_se = rt_se; ++ rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; ++ if (add) ++ list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); ++ ++ tg->rt_se[cpu] = rt_se; ++ if (!rt_se) ++ return; ++ ++ if (!parent) ++ rt_se->rt_rq = &rq->rt; ++ else ++ rt_se->rt_rq = parent->my_q; ++ ++ rt_se->my_q = rt_rq; ++ rt_se->parent = parent; ++ INIT_LIST_HEAD(&rt_se->run_list); ++} ++#endif ++ ++void __init sched_init(void) ++{ ++ int i, j; ++ unsigned long alloc_size = 0, ptr; ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ alloc_size += 2 * nr_cpu_ids * sizeof(void **); ++#endif ++#ifdef CONFIG_RT_GROUP_SCHED ++ alloc_size += 2 * nr_cpu_ids * sizeof(void **); ++#endif ++#ifdef CONFIG_USER_SCHED ++ alloc_size *= 2; ++#endif ++ /* ++ * As sched_init() is called before page_alloc is setup, ++ * we use alloc_bootmem(). 
++ */ ++ if (alloc_size) { ++ ptr = (unsigned long)alloc_bootmem(alloc_size); ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ init_task_group.se = (struct sched_entity **)ptr; ++ ptr += nr_cpu_ids * sizeof(void **); ++ ++ init_task_group.cfs_rq = (struct cfs_rq **)ptr; ++ ptr += nr_cpu_ids * sizeof(void **); ++ ++#ifdef CONFIG_USER_SCHED ++ root_task_group.se = (struct sched_entity **)ptr; ++ ptr += nr_cpu_ids * sizeof(void **); ++ ++ root_task_group.cfs_rq = (struct cfs_rq **)ptr; ++ ptr += nr_cpu_ids * sizeof(void **); ++#endif /* CONFIG_USER_SCHED */ ++#endif /* CONFIG_FAIR_GROUP_SCHED */ ++#ifdef CONFIG_RT_GROUP_SCHED ++ init_task_group.rt_se = (struct sched_rt_entity **)ptr; ++ ptr += nr_cpu_ids * sizeof(void **); ++ ++ init_task_group.rt_rq = (struct rt_rq **)ptr; ++ ptr += nr_cpu_ids * sizeof(void **); ++ ++#ifdef CONFIG_USER_SCHED ++ root_task_group.rt_se = (struct sched_rt_entity **)ptr; ++ ptr += nr_cpu_ids * sizeof(void **); ++ ++ root_task_group.rt_rq = (struct rt_rq **)ptr; ++ ptr += nr_cpu_ids * sizeof(void **); ++#endif /* CONFIG_USER_SCHED */ ++#endif /* CONFIG_RT_GROUP_SCHED */ ++ } ++ ++#ifdef CONFIG_SMP ++ init_defrootdomain(); ++#endif ++ ++ init_rt_bandwidth(&def_rt_bandwidth, ++ global_rt_period(), global_rt_runtime()); ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++ init_rt_bandwidth(&init_task_group.rt_bandwidth, ++ global_rt_period(), global_rt_runtime()); ++#ifdef CONFIG_USER_SCHED ++ init_rt_bandwidth(&root_task_group.rt_bandwidth, ++ global_rt_period(), RUNTIME_INF); ++#endif /* CONFIG_USER_SCHED */ ++#endif /* CONFIG_RT_GROUP_SCHED */ ++ ++#ifdef CONFIG_GROUP_SCHED ++ list_add(&init_task_group.list, &task_groups); ++ INIT_LIST_HEAD(&init_task_group.children); ++ ++#ifdef CONFIG_USER_SCHED ++ INIT_LIST_HEAD(&root_task_group.children); ++ init_task_group.parent = &root_task_group; ++ list_add(&init_task_group.siblings, &root_task_group.children); ++#endif /* CONFIG_USER_SCHED */ ++#endif /* CONFIG_GROUP_SCHED */ ++ ++ for_each_possible_cpu(i) { ++ struct rq *rq; ++ ++ rq = cpu_rq(i); ++ spin_lock_init(&rq->lock); ++ rq->nr_running = 0; ++ init_cfs_rq(&rq->cfs, rq); ++ init_rt_rq(&rq->rt, rq); ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ init_task_group.shares = init_task_group_load; ++ INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); ++#ifdef CONFIG_CGROUP_SCHED ++ /* ++ * How much cpu bandwidth does init_task_group get? ++ * ++ * In case of task-groups formed thr' the cgroup filesystem, it ++ * gets 100% of the cpu resources in the system. This overall ++ * system cpu resource is divided among the tasks of ++ * init_task_group and its child task-groups in a fair manner, ++ * based on each entity's (task or task-group's) weight ++ * (se->load.weight). ++ * ++ * In other words, if init_task_group has 10 tasks of weight ++ * 1024) and two child groups A0 and A1 (of weight 1024 each), ++ * then A0's share of the cpu resource is: ++ * ++ * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% ++ * ++ * We achieve this by letting init_task_group's tasks sit ++ * directly in rq->cfs (i.e init_task_group->se[] = NULL). ++ */ ++ init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL); ++#elif defined CONFIG_USER_SCHED ++ root_task_group.shares = NICE_0_LOAD; ++ init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL); ++ /* ++ * In case of task-groups formed thr' the user id of tasks, ++ * init_task_group represents tasks belonging to root user. ++ * Hence it forms a sibling of all subsequent groups formed. 
++ * In this case, init_task_group gets only a fraction of overall ++ * system cpu resource, based on the weight assigned to root ++ * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished ++ * by letting tasks of init_task_group sit in a separate cfs_rq ++ * (init_cfs_rq) and having one entity represent this group of ++ * tasks in rq->cfs (i.e init_task_group->se[] != NULL). ++ */ ++ init_tg_cfs_entry(&init_task_group, ++ &per_cpu(init_cfs_rq, i), ++ &per_cpu(init_sched_entity, i), i, 1, ++ root_task_group.se[i]); ++ ++#endif ++#endif /* CONFIG_FAIR_GROUP_SCHED */ ++ ++ rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; ++#ifdef CONFIG_RT_GROUP_SCHED ++ INIT_LIST_HEAD(&rq->leaf_rt_rq_list); ++#ifdef CONFIG_CGROUP_SCHED ++ init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL); ++#elif defined CONFIG_USER_SCHED ++ init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); ++ init_tg_rt_entry(&init_task_group, ++ &per_cpu(init_rt_rq, i), ++ &per_cpu(init_sched_rt_entity, i), i, 1, ++ root_task_group.rt_se[i]); ++#endif ++#endif ++ ++ for (j = 0; j < CPU_LOAD_IDX_MAX; j++) ++ rq->cpu_load[j] = 0; ++#ifdef CONFIG_SMP ++ rq->sd = NULL; ++ rq->rd = NULL; ++ rq->active_balance = 0; ++ rq->next_balance = jiffies; ++ rq->push_cpu = 0; ++ rq->cpu = i; ++ rq->online = 0; ++ rq->migration_thread = NULL; ++ INIT_LIST_HEAD(&rq->migration_queue); ++ rq_attach_root(rq, &def_root_domain); ++#endif ++ init_rq_hrtick(rq); ++ atomic_set(&rq->nr_iowait, 0); ++ } ++ ++ set_load_weight(&init_task); ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&init_task.preempt_notifiers); ++#endif ++ ++#ifdef CONFIG_SMP ++ open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); ++#endif ++ ++#ifdef CONFIG_RT_MUTEXES ++ plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); ++#endif ++ ++ /* ++ * The boot idle thread does lazy MMU switching as well: ++ */ ++ atomic_inc(&init_mm.mm_count); ++ enter_lazy_tlb(&init_mm, current); ++ ++ /* ++ * Make us the idle thread. Technically, schedule() should not be ++ * called from this thread, however somewhere below it might be, ++ * but because we are the idle thread, we just pick up running again ++ * when this runqueue becomes "idle". 
++ */ ++ init_idle(current, smp_processor_id()); ++ /* ++ * During early bootup we pretend to be a normal task: ++ */ ++ current->sched_class = &fair_sched_class; ++ ++ scheduler_running = 1; ++} ++ ++#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP ++void __might_sleep(char *file, int line) ++{ ++#ifdef in_atomic ++ static unsigned long prev_jiffy; /* ratelimiting */ ++ ++ if ((in_atomic() || irqs_disabled()) && ++ system_state == SYSTEM_RUNNING && !oops_in_progress) { ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ printk(KERN_ERR "BUG: sleeping function called from invalid" ++ " context at %s:%d\n", file, line); ++ printk("in_atomic():%d, irqs_disabled():%d\n", ++ in_atomic(), irqs_disabled()); ++ debug_show_held_locks(current); ++ if (irqs_disabled()) ++ print_irqtrace_events(current); ++ dump_stack(); ++ } ++#endif ++} ++EXPORT_SYMBOL(__might_sleep); ++#endif ++ ++#ifdef CONFIG_MAGIC_SYSRQ ++static void normalize_task(struct rq *rq, struct task_struct *p) ++{ ++ int on_rq; ++ ++ update_rq_clock(rq); ++ on_rq = p->se.on_rq; ++ if (on_rq) ++ deactivate_task(rq, p, 0); ++ __setscheduler(rq, p, SCHED_NORMAL, 0); ++ if (on_rq) { ++ activate_task(rq, p, 0); ++ resched_task(rq->curr); ++ } ++} ++ ++void normalize_rt_tasks(void) ++{ ++ struct task_struct *g, *p; ++ unsigned long flags; ++ struct rq *rq; ++ ++ read_lock_irqsave(&tasklist_lock, flags); ++ do_each_thread(g, p) { ++ /* ++ * Only normalize user tasks: ++ */ ++ if (!p->mm) ++ continue; ++ ++ p->se.exec_start = 0; ++#ifdef CONFIG_SCHEDSTATS ++ p->se.wait_start = 0; ++ p->se.sleep_start = 0; ++ p->se.block_start = 0; ++#endif ++ ++ if (!rt_task(p)) { ++ /* ++ * Renice negative nice level userspace ++ * tasks back to 0: ++ */ ++ if (TASK_NICE(p) < 0 && p->mm) ++ set_user_nice(p, 0); ++ continue; ++ } ++ ++ spin_lock(&p->pi_lock); ++ rq = __task_rq_lock(p); ++ ++ normalize_task(rq, p); ++ ++ __task_rq_unlock(rq); ++ spin_unlock(&p->pi_lock); ++ } while_each_thread(g, p); ++ ++ read_unlock_irqrestore(&tasklist_lock, flags); ++} ++ ++#endif /* CONFIG_MAGIC_SYSRQ */ ++ ++#ifdef CONFIG_IA64 ++/* ++ * These functions are only useful for the IA64 MCA handling. ++ * ++ * They can only be called when the whole system has been ++ * stopped - every CPU needs to be quiescent, and no scheduling ++ * activity can take place. Using them for anything else would ++ * be a serious bug, and as a result, they aren't even visible ++ * under any other configuration. ++ */ ++ ++/** ++ * curr_task - return the current task for a given cpu. ++ * @cpu: the processor in question. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! ++ */ ++struct task_struct *curr_task(int cpu) ++{ ++ return cpu_curr(cpu); ++} ++ ++/** ++ * set_curr_task - set the current task for a given cpu. ++ * @cpu: the processor in question. ++ * @p: the task pointer to set. ++ * ++ * Description: This function must only be used when non-maskable interrupts ++ * are serviced on a separate stack. It allows the architecture to switch the ++ * notion of the current task on a cpu in a non-blocking manner. This function ++ * must be called with all CPU's synchronized, and interrupts disabled, the ++ * and caller must save the original value of the current task (see ++ * curr_task() above) and restore that value before reenabling interrupts and ++ * re-starting the system. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
++ */ ++void set_curr_task(int cpu, struct task_struct *p) ++{ ++ cpu_curr(cpu) = p; ++} ++ ++#endif ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++static void free_fair_sched_group(struct task_group *tg) ++{ ++ int i; ++ ++ for_each_possible_cpu(i) { ++ if (tg->cfs_rq) ++ kfree(tg->cfs_rq[i]); ++ if (tg->se) ++ kfree(tg->se[i]); ++ } ++ ++ kfree(tg->cfs_rq); ++ kfree(tg->se); ++} ++ ++static ++int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) ++{ ++ struct cfs_rq *cfs_rq; ++ struct sched_entity *se, *parent_se; ++ struct rq *rq; ++ int i; ++ ++ tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); ++ if (!tg->cfs_rq) ++ goto err; ++ tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); ++ if (!tg->se) ++ goto err; ++ ++ tg->shares = NICE_0_LOAD; ++ ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ ++ cfs_rq = kmalloc_node(sizeof(struct cfs_rq), ++ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); ++ if (!cfs_rq) ++ goto err; ++ ++ se = kmalloc_node(sizeof(struct sched_entity), ++ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); ++ if (!se) ++ goto err; ++ ++ parent_se = parent ? parent->se[i] : NULL; ++ init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se); ++ } ++ ++ return 1; ++ ++ err: ++ return 0; ++} ++ ++static inline void register_fair_sched_group(struct task_group *tg, int cpu) ++{ ++ list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list, ++ &cpu_rq(cpu)->leaf_cfs_rq_list); ++} ++ ++static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) ++{ ++ list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list); ++} ++#else /* !CONFG_FAIR_GROUP_SCHED */ ++static inline void free_fair_sched_group(struct task_group *tg) ++{ ++} ++ ++static inline ++int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) ++{ ++ return 1; ++} ++ ++static inline void register_fair_sched_group(struct task_group *tg, int cpu) ++{ ++} ++ ++static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) ++{ ++} ++#endif /* CONFIG_FAIR_GROUP_SCHED */ ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++static void free_rt_sched_group(struct task_group *tg) ++{ ++ int i; ++ ++ destroy_rt_bandwidth(&tg->rt_bandwidth); ++ ++ for_each_possible_cpu(i) { ++ if (tg->rt_rq) ++ kfree(tg->rt_rq[i]); ++ if (tg->rt_se) ++ kfree(tg->rt_se[i]); ++ } ++ ++ kfree(tg->rt_rq); ++ kfree(tg->rt_se); ++} ++ ++static ++int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) ++{ ++ struct rt_rq *rt_rq; ++ struct sched_rt_entity *rt_se, *parent_se; ++ struct rq *rq; ++ int i; ++ ++ tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); ++ if (!tg->rt_rq) ++ goto err; ++ tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); ++ if (!tg->rt_se) ++ goto err; ++ ++ init_rt_bandwidth(&tg->rt_bandwidth, ++ ktime_to_ns(def_rt_bandwidth.rt_period), 0); ++ ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ ++ rt_rq = kmalloc_node(sizeof(struct rt_rq), ++ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); ++ if (!rt_rq) ++ goto err; ++ ++ rt_se = kmalloc_node(sizeof(struct sched_rt_entity), ++ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); ++ if (!rt_se) ++ goto err; ++ ++ parent_se = parent ? 
parent->rt_se[i] : NULL; ++ init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se); ++ } ++ ++ return 1; ++ ++ err: ++ return 0; ++} ++ ++static inline void register_rt_sched_group(struct task_group *tg, int cpu) ++{ ++ list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list, ++ &cpu_rq(cpu)->leaf_rt_rq_list); ++} ++ ++static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) ++{ ++ list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list); ++} ++#else /* !CONFIG_RT_GROUP_SCHED */ ++static inline void free_rt_sched_group(struct task_group *tg) ++{ ++} ++ ++static inline ++int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) ++{ ++ return 1; ++} ++ ++static inline void register_rt_sched_group(struct task_group *tg, int cpu) ++{ ++} ++ ++static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) ++{ ++} ++#endif /* CONFIG_RT_GROUP_SCHED */ ++ ++#ifdef CONFIG_GROUP_SCHED ++static void free_sched_group(struct task_group *tg) ++{ ++ free_fair_sched_group(tg); ++ free_rt_sched_group(tg); ++ kfree(tg); ++} ++ ++/* allocate runqueue etc for a new task group */ ++struct task_group *sched_create_group(struct task_group *parent) ++{ ++ struct task_group *tg; ++ unsigned long flags; ++ int i; ++ ++ tg = kzalloc(sizeof(*tg), GFP_KERNEL); ++ if (!tg) ++ return ERR_PTR(-ENOMEM); ++ ++ if (!alloc_fair_sched_group(tg, parent)) ++ goto err; ++ ++ if (!alloc_rt_sched_group(tg, parent)) ++ goto err; ++ ++ spin_lock_irqsave(&task_group_lock, flags); ++ for_each_possible_cpu(i) { ++ register_fair_sched_group(tg, i); ++ register_rt_sched_group(tg, i); ++ } ++ list_add_rcu(&tg->list, &task_groups); ++ ++ WARN_ON(!parent); /* root should already exist */ ++ ++ tg->parent = parent; ++ INIT_LIST_HEAD(&tg->children); ++ list_add_rcu(&tg->siblings, &parent->children); ++ spin_unlock_irqrestore(&task_group_lock, flags); ++ ++ return tg; ++ ++err: ++ free_sched_group(tg); ++ return ERR_PTR(-ENOMEM); ++} ++ ++/* rcu callback to free various structures associated with a task group */ ++static void free_sched_group_rcu(struct rcu_head *rhp) ++{ ++ /* now it should be safe to free those cfs_rqs */ ++ free_sched_group(container_of(rhp, struct task_group, rcu)); ++} ++ ++/* Destroy runqueue etc associated with a task group */ ++void sched_destroy_group(struct task_group *tg) ++{ ++ unsigned long flags; ++ int i; ++ ++ spin_lock_irqsave(&task_group_lock, flags); ++ for_each_possible_cpu(i) { ++ unregister_fair_sched_group(tg, i); ++ unregister_rt_sched_group(tg, i); ++ } ++ list_del_rcu(&tg->list); ++ list_del_rcu(&tg->siblings); ++ spin_unlock_irqrestore(&task_group_lock, flags); ++ ++ /* wait for possible concurrent references to cfs_rqs complete */ ++ call_rcu(&tg->rcu, free_sched_group_rcu); ++} ++ ++/* change task's runqueue when it moves between groups. ++ * The caller of this function should have put the task in its new group ++ * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to ++ * reflect its new group. 
++ */ ++void sched_move_task(struct task_struct *tsk) ++{ ++ int on_rq, running; ++ unsigned long flags; ++ struct rq *rq; ++ ++ rq = task_rq_lock(tsk, &flags); ++ ++ update_rq_clock(rq); ++ ++ running = task_current(rq, tsk); ++ on_rq = tsk->se.on_rq; ++ ++ if (on_rq) ++ dequeue_task(rq, tsk, 0); ++ if (unlikely(running)) ++ tsk->sched_class->put_prev_task(rq, tsk); ++ ++ set_task_rq(tsk, task_cpu(tsk)); ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ if (tsk->sched_class->moved_group) ++ tsk->sched_class->moved_group(tsk); ++#endif ++ ++ if (unlikely(running)) ++ tsk->sched_class->set_curr_task(rq); ++ if (on_rq) ++ enqueue_task(rq, tsk, 0); ++ ++ task_rq_unlock(rq, &flags); ++} ++#endif /* CONFIG_GROUP_SCHED */ ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++static void __set_se_shares(struct sched_entity *se, unsigned long shares) ++{ ++ struct cfs_rq *cfs_rq = se->cfs_rq; ++ int on_rq; ++ ++ on_rq = se->on_rq; ++ if (on_rq) ++ dequeue_entity(cfs_rq, se, 0); ++ ++ se->load.weight = shares; ++ se->load.inv_weight = 0; ++ ++ if (on_rq) ++ enqueue_entity(cfs_rq, se, 0); ++} ++ ++static void set_se_shares(struct sched_entity *se, unsigned long shares) ++{ ++ struct cfs_rq *cfs_rq = se->cfs_rq; ++ struct rq *rq = cfs_rq->rq; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&rq->lock, flags); ++ __set_se_shares(se, shares); ++ spin_unlock_irqrestore(&rq->lock, flags); ++} ++ ++static DEFINE_MUTEX(shares_mutex); ++ ++int sched_group_set_shares(struct task_group *tg, unsigned long shares) ++{ ++ int i; ++ unsigned long flags; ++ ++ /* ++ * We can't change the weight of the root cgroup. ++ */ ++ if (!tg->se[0]) ++ return -EINVAL; ++ ++ if (shares < MIN_SHARES) ++ shares = MIN_SHARES; ++ else if (shares > MAX_SHARES) ++ shares = MAX_SHARES; ++ ++ mutex_lock(&shares_mutex); ++ if (tg->shares == shares) ++ goto done; ++ ++ spin_lock_irqsave(&task_group_lock, flags); ++ for_each_possible_cpu(i) ++ unregister_fair_sched_group(tg, i); ++ list_del_rcu(&tg->siblings); ++ spin_unlock_irqrestore(&task_group_lock, flags); ++ ++ /* wait for any ongoing reference to this group to finish */ ++ synchronize_sched(); ++ ++ /* ++ * Now we are free to modify the group's share on each cpu ++ * w/o tripping rebalance_share or load_balance_fair. ++ */ ++ tg->shares = shares; ++ for_each_possible_cpu(i) { ++ /* ++ * force a rebalance ++ */ ++ cfs_rq_set_shares(tg->cfs_rq[i], 0); ++ set_se_shares(tg->se[i], shares); ++ } ++ ++ /* ++ * Enable load balance activity on this group, by inserting it back on ++ * each cpu's rq->leaf_cfs_rq_list. ++ */ ++ spin_lock_irqsave(&task_group_lock, flags); ++ for_each_possible_cpu(i) ++ register_fair_sched_group(tg, i); ++ list_add_rcu(&tg->siblings, &tg->parent->children); ++ spin_unlock_irqrestore(&task_group_lock, flags); ++done: ++ mutex_unlock(&shares_mutex); ++ return 0; ++} ++ ++unsigned long sched_group_shares(struct task_group *tg) ++{ ++ return tg->shares; ++} ++#endif ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++/* ++ * Ensure that the real time constraints are schedulable. 
++ */ ++static DEFINE_MUTEX(rt_constraints_mutex); ++ ++static unsigned long to_ratio(u64 period, u64 runtime) ++{ ++ if (runtime == RUNTIME_INF) ++ return 1ULL << 16; ++ ++ return div64_u64(runtime << 16, period); ++} ++ ++#ifdef CONFIG_CGROUP_SCHED ++static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) ++{ ++ struct task_group *tgi, *parent = tg->parent; ++ unsigned long total = 0; ++ ++ if (!parent) { ++ if (global_rt_period() < period) ++ return 0; ++ ++ return to_ratio(period, runtime) < ++ to_ratio(global_rt_period(), global_rt_runtime()); ++ } ++ ++ if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period) ++ return 0; ++ ++ rcu_read_lock(); ++ list_for_each_entry_rcu(tgi, &parent->children, siblings) { ++ if (tgi == tg) ++ continue; ++ ++ total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), ++ tgi->rt_bandwidth.rt_runtime); ++ } ++ rcu_read_unlock(); ++ ++ return total + to_ratio(period, runtime) <= ++ to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period), ++ parent->rt_bandwidth.rt_runtime); ++} ++#elif defined CONFIG_USER_SCHED ++static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) ++{ ++ struct task_group *tgi; ++ unsigned long total = 0; ++ unsigned long global_ratio = ++ to_ratio(global_rt_period(), global_rt_runtime()); ++ ++ rcu_read_lock(); ++ list_for_each_entry_rcu(tgi, &task_groups, list) { ++ if (tgi == tg) ++ continue; ++ ++ total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), ++ tgi->rt_bandwidth.rt_runtime); ++ } ++ rcu_read_unlock(); ++ ++ return total + to_ratio(period, runtime) < global_ratio; ++} ++#endif ++ ++/* Must be called with tasklist_lock held */ ++static inline int tg_has_rt_tasks(struct task_group *tg) ++{ ++ struct task_struct *g, *p; ++ do_each_thread(g, p) { ++ if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) ++ return 1; ++ } while_each_thread(g, p); ++ return 0; ++} ++ ++static int tg_set_bandwidth(struct task_group *tg, ++ u64 rt_period, u64 rt_runtime) ++{ ++ int i, err = 0; ++ ++ mutex_lock(&rt_constraints_mutex); ++ read_lock(&tasklist_lock); ++ if (rt_runtime == 0 && tg_has_rt_tasks(tg)) { ++ err = -EBUSY; ++ goto unlock; ++ } ++ if (!__rt_schedulable(tg, rt_period, rt_runtime)) { ++ err = -EINVAL; ++ goto unlock; ++ } ++ ++ spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); ++ tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); ++ tg->rt_bandwidth.rt_runtime = rt_runtime; ++ ++ for_each_possible_cpu(i) { ++ struct rt_rq *rt_rq = tg->rt_rq[i]; ++ ++ spin_lock(&rt_rq->rt_runtime_lock); ++ rt_rq->rt_runtime = rt_runtime; ++ spin_unlock(&rt_rq->rt_runtime_lock); ++ } ++ spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); ++ unlock: ++ read_unlock(&tasklist_lock); ++ mutex_unlock(&rt_constraints_mutex); ++ ++ return err; ++} ++ ++int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) ++{ ++ u64 rt_runtime, rt_period; ++ ++ rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); ++ rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; ++ if (rt_runtime_us < 0) ++ rt_runtime = RUNTIME_INF; ++ ++ return tg_set_bandwidth(tg, rt_period, rt_runtime); ++} ++ ++long sched_group_rt_runtime(struct task_group *tg) ++{ ++ u64 rt_runtime_us; ++ ++ if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) ++ return -1; ++ ++ rt_runtime_us = tg->rt_bandwidth.rt_runtime; ++ do_div(rt_runtime_us, NSEC_PER_USEC); ++ return rt_runtime_us; ++} ++ ++int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) ++{ ++ u64 rt_runtime, rt_period; ++ ++ rt_period = (u64)rt_period_us * NSEC_PER_USEC; ++ 
rt_runtime = tg->rt_bandwidth.rt_runtime; ++ ++ if (rt_period == 0) ++ return -EINVAL; ++ ++ return tg_set_bandwidth(tg, rt_period, rt_runtime); ++} ++ ++long sched_group_rt_period(struct task_group *tg) ++{ ++ u64 rt_period_us; ++ ++ rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); ++ do_div(rt_period_us, NSEC_PER_USEC); ++ return rt_period_us; ++} ++ ++static int sched_rt_global_constraints(void) ++{ ++ struct task_group *tg = &root_task_group; ++ u64 rt_runtime, rt_period; ++ int ret = 0; ++ ++ if (sysctl_sched_rt_period <= 0) ++ return -EINVAL; ++ ++ rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); ++ rt_runtime = tg->rt_bandwidth.rt_runtime; ++ ++ mutex_lock(&rt_constraints_mutex); ++ if (!__rt_schedulable(tg, rt_period, rt_runtime)) ++ ret = -EINVAL; ++ mutex_unlock(&rt_constraints_mutex); ++ ++ return ret; ++} ++#else /* !CONFIG_RT_GROUP_SCHED */ ++static int sched_rt_global_constraints(void) ++{ ++ unsigned long flags; ++ int i; ++ ++ if (sysctl_sched_rt_period <= 0) ++ return -EINVAL; ++ ++ spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); ++ for_each_possible_cpu(i) { ++ struct rt_rq *rt_rq = &cpu_rq(i)->rt; ++ ++ spin_lock(&rt_rq->rt_runtime_lock); ++ rt_rq->rt_runtime = global_rt_runtime(); ++ spin_unlock(&rt_rq->rt_runtime_lock); ++ } ++ spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); ++ ++ return 0; ++} ++#endif /* CONFIG_RT_GROUP_SCHED */ ++ ++int sched_rt_handler(struct ctl_table *table, int write, ++ struct file *filp, void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ int ret; ++ int old_period, old_runtime; ++ static DEFINE_MUTEX(mutex); ++ ++ mutex_lock(&mutex); ++ old_period = sysctl_sched_rt_period; ++ old_runtime = sysctl_sched_rt_runtime; ++ ++ ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); ++ ++ if (!ret && write) { ++ ret = sched_rt_global_constraints(); ++ if (ret) { ++ sysctl_sched_rt_period = old_period; ++ sysctl_sched_rt_runtime = old_runtime; ++ } else { ++ def_rt_bandwidth.rt_runtime = global_rt_runtime(); ++ def_rt_bandwidth.rt_period = ++ ns_to_ktime(global_rt_period()); ++ } ++ } ++ mutex_unlock(&mutex); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_CGROUP_SCHED ++ ++/* return corresponding task_group object of a cgroup */ ++static inline struct task_group *cgroup_tg(struct cgroup *cgrp) ++{ ++ return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), ++ struct task_group, css); ++} ++ ++static struct cgroup_subsys_state * ++cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) ++{ ++ struct task_group *tg, *parent; ++ ++ if (!cgrp->parent) { ++ /* This is early initialization for the top cgroup */ ++ init_task_group.css.cgroup = cgrp; ++ return &init_task_group.css; ++ } ++ ++ parent = cgroup_tg(cgrp->parent); ++ tg = sched_create_group(parent); ++ if (IS_ERR(tg)) ++ return ERR_PTR(-ENOMEM); ++ ++ /* Bind the cgroup to task_group object we just created */ ++ tg->css.cgroup = cgrp; ++ ++ return &tg->css; ++} ++ ++static void ++cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) ++{ ++ struct task_group *tg = cgroup_tg(cgrp); ++ ++ sched_destroy_group(tg); ++} ++ ++static int ++cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, ++ struct task_struct *tsk) ++{ ++#ifdef CONFIG_RT_GROUP_SCHED ++ /* Don't accept realtime tasks when there is no way for them to run */ ++ if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0) ++ return -EINVAL; ++#else ++ /* We don't support RT-tasks being in separate groups */ ++ if (tsk->sched_class != 
&fair_sched_class) ++ return -EINVAL; ++#endif ++ ++ return 0; ++} ++ ++static void ++cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, ++ struct cgroup *old_cont, struct task_struct *tsk) ++{ ++ sched_move_task(tsk); ++} ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, ++ u64 shareval) ++{ ++ return sched_group_set_shares(cgroup_tg(cgrp), shareval); ++} ++ ++static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) ++{ ++ struct task_group *tg = cgroup_tg(cgrp); ++ ++ return (u64) tg->shares; ++} ++#endif /* CONFIG_FAIR_GROUP_SCHED */ ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, ++ s64 val) ++{ ++ return sched_group_set_rt_runtime(cgroup_tg(cgrp), val); ++} ++ ++static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft) ++{ ++ return sched_group_rt_runtime(cgroup_tg(cgrp)); ++} ++ ++static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, ++ u64 rt_period_us) ++{ ++ return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us); ++} ++ ++static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft) ++{ ++ return sched_group_rt_period(cgroup_tg(cgrp)); ++} ++#endif /* CONFIG_RT_GROUP_SCHED */ ++ ++static struct cftype cpu_files[] = { ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ { ++ .name = "shares", ++ .read_u64 = cpu_shares_read_u64, ++ .write_u64 = cpu_shares_write_u64, ++ }, ++#endif ++#ifdef CONFIG_RT_GROUP_SCHED ++ { ++ .name = "rt_runtime_us", ++ .read_s64 = cpu_rt_runtime_read, ++ .write_s64 = cpu_rt_runtime_write, ++ }, ++ { ++ .name = "rt_period_us", ++ .read_u64 = cpu_rt_period_read_uint, ++ .write_u64 = cpu_rt_period_write_uint, ++ }, ++#endif ++}; ++ ++static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) ++{ ++ return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files)); ++} ++ ++struct cgroup_subsys cpu_cgroup_subsys = { ++ .name = "cpu", ++ .create = cpu_cgroup_create, ++ .destroy = cpu_cgroup_destroy, ++ .can_attach = cpu_cgroup_can_attach, ++ .attach = cpu_cgroup_attach, ++ .populate = cpu_cgroup_populate, ++ .subsys_id = cpu_cgroup_subsys_id, ++ .early_init = 1, ++}; ++ ++#endif /* CONFIG_CGROUP_SCHED */ ++ ++#ifdef CONFIG_CGROUP_CPUACCT ++ ++/* ++ * CPU accounting code for task groups. ++ * ++ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh ++ * (balbir@in.ibm.com). 
++ */ ++ ++/* track cpu usage of a group of tasks */ ++struct cpuacct { ++ struct cgroup_subsys_state css; ++ /* cpuusage holds pointer to a u64-type object on every cpu */ ++ u64 *cpuusage; ++}; ++ ++struct cgroup_subsys cpuacct_subsys; ++ ++/* return cpu accounting group corresponding to this container */ ++static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) ++{ ++ return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id), ++ struct cpuacct, css); ++} ++ ++/* return cpu accounting group to which this task belongs */ ++static inline struct cpuacct *task_ca(struct task_struct *tsk) ++{ ++ return container_of(task_subsys_state(tsk, cpuacct_subsys_id), ++ struct cpuacct, css); ++} ++ ++/* create a new cpu accounting group */ ++static struct cgroup_subsys_state *cpuacct_create( ++ struct cgroup_subsys *ss, struct cgroup *cgrp) ++{ ++ struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); ++ ++ if (!ca) ++ return ERR_PTR(-ENOMEM); ++ ++ ca->cpuusage = alloc_percpu(u64); ++ if (!ca->cpuusage) { ++ kfree(ca); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ return &ca->css; ++} ++ ++/* destroy an existing cpu accounting group */ ++static void ++cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) ++{ ++ struct cpuacct *ca = cgroup_ca(cgrp); ++ ++ free_percpu(ca->cpuusage); ++ kfree(ca); ++} ++ ++/* return total cpu usage (in nanoseconds) of a group */ ++static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) ++{ ++ struct cpuacct *ca = cgroup_ca(cgrp); ++ u64 totalcpuusage = 0; ++ int i; ++ ++ for_each_possible_cpu(i) { ++ u64 *cpuusage = percpu_ptr(ca->cpuusage, i); ++ ++ /* ++ * Take rq->lock to make 64-bit addition safe on 32-bit ++ * platforms. ++ */ ++ spin_lock_irq(&cpu_rq(i)->lock); ++ totalcpuusage += *cpuusage; ++ spin_unlock_irq(&cpu_rq(i)->lock); ++ } ++ ++ return totalcpuusage; ++} ++ ++static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, ++ u64 reset) ++{ ++ struct cpuacct *ca = cgroup_ca(cgrp); ++ int err = 0; ++ int i; ++ ++ if (reset) { ++ err = -EINVAL; ++ goto out; ++ } ++ ++ for_each_possible_cpu(i) { ++ u64 *cpuusage = percpu_ptr(ca->cpuusage, i); ++ ++ spin_lock_irq(&cpu_rq(i)->lock); ++ *cpuusage = 0; ++ spin_unlock_irq(&cpu_rq(i)->lock); ++ } ++out: ++ return err; ++} ++ ++static struct cftype files[] = { ++ { ++ .name = "usage", ++ .read_u64 = cpuusage_read, ++ .write_u64 = cpuusage_write, ++ }, ++}; ++ ++static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) ++{ ++ return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files)); ++} ++ ++/* ++ * charge this task's execution time to its accounting group. ++ * ++ * called with rq->lock held. 
++ */ ++static void cpuacct_charge(struct task_struct *tsk, u64 cputime) ++{ ++ struct cpuacct *ca; ++ ++ if (!cpuacct_subsys.active) ++ return; ++ ++ ca = task_ca(tsk); ++ if (ca) { ++ u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk)); ++ ++ *cpuusage += cputime; ++ } ++} ++ ++struct cgroup_subsys cpuacct_subsys = { ++ .name = "cpuacct", ++ .create = cpuacct_create, ++ .destroy = cpuacct_destroy, ++ .populate = cpuacct_populate, ++ .subsys_id = cpuacct_subsys_id, ++}; ++#endif /* CONFIG_CGROUP_CPUACCT */ +diff -Nurb linux-2.6.27-590/kernel/sched.c.rej linux-2.6.27-591/kernel/sched.c.rej +--- linux-2.6.27-590/kernel/sched.c.rej 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.27-591/kernel/sched.c.rej 2010-02-01 19:43:07.000000000 -0500 +@@ -0,0 +1,258 @@ ++*************** ++*** 23,28 **** ++ #include ++ #include ++ #include ++ #include ++ #include ++ #include ++--- 23,29 ---- ++ #include ++ #include ++ #include +++ #include ++ #include ++ #include ++ #include ++*************** ++*** 451,456 **** ++ ++ repeat_lock_task: ++ rq = task_rq(p); ++ spin_lock(&rq->lock); ++ if (unlikely(rq != task_rq(p))) { ++ spin_unlock(&rq->lock); ++--- 455,461 ---- ++ ++ repeat_lock_task: ++ rq = task_rq(p); +++ ++ spin_lock(&rq->lock); ++ if (unlikely(rq != task_rq(p))) { ++ spin_unlock(&rq->lock); ++*************** ++*** 1761,1766 **** ++ * event cannot wake it up and insert it on the runqueue either. ++ */ ++ p->state = TASK_RUNNING; ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child: ++--- 1766,1786 ---- ++ * event cannot wake it up and insert it on the runqueue either. ++ */ ++ p->state = TASK_RUNNING; +++ #ifdef CONFIG_CHOPSTIX +++ /* The jiffy of last interruption */ +++ if (p->state & TASK_UNINTERRUPTIBLE) { +++ p->last_interrupted=jiffies; +++ } +++ else +++ if (p->state & TASK_INTERRUPTIBLE) { +++ p->last_interrupted=INTERRUPTIBLE; +++ } +++ else +++ p->last_interrupted=RUNNING; +++ +++ /* The jiffy of last execution */ +++ p->last_ran_j=jiffies; +++ #endif ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child: ++*************** ++*** 3628,3633 **** ++ ++ #endif ++ ++ static inline int interactive_sleep(enum sleep_type sleep_type) ++ { ++ return (sleep_type == SLEEP_INTERACTIVE || ++--- 3648,3654 ---- ++ ++ #endif ++ +++ ++ static inline int interactive_sleep(enum sleep_type sleep_type) ++ { ++ return (sleep_type == SLEEP_INTERACTIVE || ++*************** ++*** 3637,3652 **** ++ /* ++ * schedule() is the main scheduler function. ++ */ ++ asmlinkage void __sched schedule(void) ++ { ++ struct task_struct *prev, *next; ++ struct prio_array *array; ++ struct list_head *queue; ++ unsigned long long now; ++- unsigned long run_time; ++ int cpu, idx, new_prio; ++ long *switch_count; ++ struct rq *rq; ++ ++ /* ++ * Test if we are atomic. Since do_exit() needs to call into ++--- 3658,3685 ---- ++ /* ++ * schedule() is the main scheduler function. ++ */ +++ +++ #ifdef CONFIG_CHOPSTIX +++ extern void (*rec_event)(void *,unsigned int); +++ struct event_spec { +++ unsigned long pc; +++ unsigned long dcookie; +++ unsigned int count; +++ unsigned int reason; +++ }; +++ #endif +++ ++ asmlinkage void __sched schedule(void) ++ { ++ struct task_struct *prev, *next; ++ struct prio_array *array; ++ struct list_head *queue; ++ unsigned long long now; +++ unsigned long run_time, diff; ++ int cpu, idx, new_prio; ++ long *switch_count; ++ struct rq *rq; +++ int sampling_reason; ++ ++ /* ++ * Test if we are atomic. 
Since do_exit() needs to call into ++*************** ++*** 3700,3705 **** ++ switch_count = &prev->nivcsw; ++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { ++ switch_count = &prev->nvcsw; ++ if (unlikely((prev->state & TASK_INTERRUPTIBLE) && ++ unlikely(signal_pending(prev)))) ++ prev->state = TASK_RUNNING; ++--- 3733,3739 ---- ++ switch_count = &prev->nivcsw; ++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { ++ switch_count = &prev->nvcsw; +++ ++ if (unlikely((prev->state & TASK_INTERRUPTIBLE) && ++ unlikely(signal_pending(prev)))) ++ prev->state = TASK_RUNNING; ++*************** ++*** 3709,3714 **** ++ vx_uninterruptible_inc(prev); ++ } ++ deactivate_task(prev, rq); ++ } ++ } ++ ++--- 3743,3759 ---- ++ vx_uninterruptible_inc(prev); ++ } ++ deactivate_task(prev, rq); +++ #ifdef CONFIG_CHOPSTIX +++ /* An uninterruptible process just yielded. Record the current jiffie */ +++ if (prev->state & TASK_UNINTERRUPTIBLE) { +++ prev->last_interrupted=jiffies; +++ } +++ /* An interruptible process just yielded, or it got preempted. +++ * Mark it as interruptible */ +++ else if (prev->state & TASK_INTERRUPTIBLE) { +++ prev->last_interrupted=INTERRUPTIBLE; +++ } +++ #endif ++ } ++ } ++ ++*************** ++*** 3785,3790 **** ++ prev->sleep_avg = 0; ++ prev->timestamp = prev->last_ran = now; ++ ++ sched_info_switch(prev, next); ++ if (likely(prev != next)) { ++ next->timestamp = next->last_ran = now; ++--- 3830,3869 ---- ++ prev->sleep_avg = 0; ++ prev->timestamp = prev->last_ran = now; ++ +++ #ifdef CONFIG_CHOPSTIX +++ /* Run only if the Chopstix module so decrees it */ +++ if (rec_event) { +++ prev->last_ran_j = jiffies; +++ if (next->last_interrupted!=INTERRUPTIBLE) { +++ if (next->last_interrupted!=RUNNING) { +++ diff = (jiffies-next->last_interrupted); +++ sampling_reason = 0;/* BLOCKING */ +++ } +++ else { +++ diff = jiffies-next->last_ran_j; +++ sampling_reason = 1;/* PREEMPTION */ +++ } +++ +++ if (diff >= HZ/10) { +++ struct event event; +++ struct event_spec espec; +++ struct pt_regs *regs; +++ regs = task_pt_regs(current); +++ +++ espec.reason = sampling_reason; +++ event.event_data=&espec; +++ event.task=next; +++ espec.pc=regs->eip; +++ event.event_type=2; +++ /* index in the event array currently set up */ +++ /* make sure the counters are loaded in the order we want them to show up*/ +++ (*rec_event)(&event, diff); +++ } +++ } +++ /* next has been elected to run */ +++ next->last_interrupted=0; +++ } +++ #endif ++ sched_info_switch(prev, next); ++ if (likely(prev != next)) { ++ next->timestamp = next->last_ran = now; ++*************** ++*** 5737,5742 **** ++ jiffies_to_timespec(p->policy == SCHED_FIFO ? ++ 0 : task_timeslice(p), &t); ++ read_unlock(&tasklist_lock); ++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; ++ out_nounlock: ++ return retval; ++--- 5817,5823 ---- ++ jiffies_to_timespec(p->policy == SCHED_FIFO ? ++ 0 : task_timeslice(p), &t); ++ read_unlock(&tasklist_lock); +++ ++ retval = copy_to_user(interval, &t, sizeof(t)) ? 
-EFAULT : 0; ++ out_nounlock: ++ return retval; ++*************** ++*** 7980,7982 **** ++ } ++ ++ #endif ++--- 8061,8080 ---- ++ } ++ ++ #endif +++ +++ #ifdef CONFIG_CHOPSTIX +++ void (*rec_event)(void *,unsigned int) = NULL; +++ +++ /* To support safe calling from asm */ +++ asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) { +++ struct pt_regs *regs; +++ struct event_spec *es = event_signature_in->event_data; +++ regs = task_pt_regs(current); +++ event_signature_in->task=current; +++ es->pc=regs->eip; +++ event_signature_in->count=1; +++ (*rec_event)(event_signature_in, count); +++ } +++ EXPORT_SYMBOL(rec_event); +++ EXPORT_SYMBOL(in_sched_functions); +++ #endif +diff -Nurb linux-2.6.27-590/mm/memory.c linux-2.6.27-591/mm/memory.c +--- linux-2.6.27-590/mm/memory.c 2010-02-01 19:42:07.000000000 -0500 ++++ linux-2.6.27-591/mm/memory.c 2010-02-01 19:43:07.000000000 -0500 +@@ -61,6 +61,7 @@ + + #include + #include ++#include + + #include "internal.h" + +@@ -2690,6 +2691,15 @@ + return ret; + } + ++extern void (*rec_event)(void *,unsigned int); ++struct event_spec { ++ unsigned long pc; ++ unsigned long dcookie; ++ unsigned count; ++ unsigned char reason; ++}; ++ ++ + /* + * By the time we get here, we already hold the mm semaphore + */ +@@ -2719,6 +2729,24 @@ + if (!pte) + return VM_FAULT_OOM; + ++#ifdef CONFIG_CHOPSTIX ++ if (rec_event) { ++ struct event event; ++ struct event_spec espec; ++ struct pt_regs *regs; ++ unsigned int pc; ++ regs = task_pt_regs(current); ++ pc = regs->ip & (unsigned int) ~4095; ++ ++ espec.reason = 0; /* alloc */ ++ event.event_data=&espec; ++ event.task = current; ++ espec.pc=pc; ++ event.event_type=5; ++ (*rec_event)(&event, 1); ++ } ++#endif ++ + return handle_pte_fault(mm, vma, address, pte, pmd, write_access); + } + +diff -Nurb linux-2.6.27-590/mm/memory.c.orig linux-2.6.27-591/mm/memory.c.orig +--- linux-2.6.27-590/mm/memory.c.orig 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.27-591/mm/memory.c.orig 2010-02-01 19:42:07.000000000 -0500 +@@ -0,0 +1,3035 @@ ++/* ++ * linux/mm/memory.c ++ * ++ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds ++ */ ++ ++/* ++ * demand-loading started 01.12.91 - seems it is high on the list of ++ * things wanted, and it should be easy to implement. - Linus ++ */ ++ ++/* ++ * Ok, demand-loading was easy, shared pages a little bit tricker. Shared ++ * pages started 02.12.91, seems to work. - Linus. ++ * ++ * Tested sharing by executing about 30 /bin/sh: under the old kernel it ++ * would have taken more than the 6M I have free, but it worked well as ++ * far as I could see. ++ * ++ * Also corrected some "invalidate()"s - I wasn't doing enough of them. ++ */ ++ ++/* ++ * Real VM (paging to/from disk) started 18.12.91. Much more work and ++ * thought has to go into this. Oh, well.. ++ * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why. ++ * Found it. Everything seems to work now. ++ * 20.12.91 - Ok, making the swap-device changeable like the root. ++ */ ++ ++/* ++ * 05.04.94 - Multi-page memory management added for v1.1. 
++ * Idea by Alex Bligh (alex@cconcepts.co.uk) ++ * ++ * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG ++ * (Gerhard.Wichert@pdb.siemens.de) ++ * ++ * Aug/Sep 2004 Changed to four level page tables (Andi Kleen) ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "internal.h" ++ ++#ifndef CONFIG_NEED_MULTIPLE_NODES ++/* use the per-pgdat data instead for discontigmem - mbligh */ ++unsigned long max_mapnr; ++struct page *mem_map; ++ ++EXPORT_SYMBOL(max_mapnr); ++EXPORT_SYMBOL(mem_map); ++#endif ++ ++unsigned long num_physpages; ++/* ++ * A number of key systems in x86 including ioremap() rely on the assumption ++ * that high_memory defines the upper bound on direct map memory, then end ++ * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and ++ * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL ++ * and ZONE_HIGHMEM. ++ */ ++void * high_memory; ++ ++EXPORT_SYMBOL(num_physpages); ++EXPORT_SYMBOL(high_memory); ++ ++/* ++ * Randomize the address space (stacks, mmaps, brk, etc.). ++ * ++ * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, ++ * as ancient (libc5 based) binaries can segfault. ) ++ */ ++int randomize_va_space __read_mostly = ++#ifdef CONFIG_COMPAT_BRK ++ 1; ++#else ++ 2; ++#endif ++ ++static int __init disable_randmaps(char *s) ++{ ++ randomize_va_space = 0; ++ return 1; ++} ++__setup("norandmaps", disable_randmaps); ++ ++ ++/* ++ * If a p?d_bad entry is found while walking page tables, report ++ * the error, before resetting entry to p?d_none. Usually (but ++ * very seldom) called out from the p?d_none_or_clear_bad macros. ++ */ ++ ++void pgd_clear_bad(pgd_t *pgd) ++{ ++ pgd_ERROR(*pgd); ++ pgd_clear(pgd); ++} ++ ++void pud_clear_bad(pud_t *pud) ++{ ++ pud_ERROR(*pud); ++ pud_clear(pud); ++} ++ ++void pmd_clear_bad(pmd_t *pmd) ++{ ++ pmd_ERROR(*pmd); ++ pmd_clear(pmd); ++} ++ ++/* ++ * Note: this doesn't free the actual pages themselves. That ++ * has been handled earlier when unmapping all the memory regions. 
++ */ ++static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd) ++{ ++ pgtable_t token = pmd_pgtable(*pmd); ++ pmd_clear(pmd); ++ pte_free_tlb(tlb, token); ++ tlb->mm->nr_ptes--; ++} ++ ++static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, ++ unsigned long addr, unsigned long end, ++ unsigned long floor, unsigned long ceiling) ++{ ++ pmd_t *pmd; ++ unsigned long next; ++ unsigned long start; ++ ++ start = addr; ++ pmd = pmd_offset(pud, addr); ++ do { ++ next = pmd_addr_end(addr, end); ++ if (pmd_none_or_clear_bad(pmd)) ++ continue; ++ free_pte_range(tlb, pmd); ++ } while (pmd++, addr = next, addr != end); ++ ++ start &= PUD_MASK; ++ if (start < floor) ++ return; ++ if (ceiling) { ++ ceiling &= PUD_MASK; ++ if (!ceiling) ++ return; ++ } ++ if (end - 1 > ceiling - 1) ++ return; ++ ++ pmd = pmd_offset(pud, start); ++ pud_clear(pud); ++ pmd_free_tlb(tlb, pmd); ++} ++ ++static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, ++ unsigned long addr, unsigned long end, ++ unsigned long floor, unsigned long ceiling) ++{ ++ pud_t *pud; ++ unsigned long next; ++ unsigned long start; ++ ++ start = addr; ++ pud = pud_offset(pgd, addr); ++ do { ++ next = pud_addr_end(addr, end); ++ if (pud_none_or_clear_bad(pud)) ++ continue; ++ free_pmd_range(tlb, pud, addr, next, floor, ceiling); ++ } while (pud++, addr = next, addr != end); ++ ++ start &= PGDIR_MASK; ++ if (start < floor) ++ return; ++ if (ceiling) { ++ ceiling &= PGDIR_MASK; ++ if (!ceiling) ++ return; ++ } ++ if (end - 1 > ceiling - 1) ++ return; ++ ++ pud = pud_offset(pgd, start); ++ pgd_clear(pgd); ++ pud_free_tlb(tlb, pud); ++} ++ ++/* ++ * This function frees user-level page tables of a process. ++ * ++ * Must be called with pagetable lock held. ++ */ ++void free_pgd_range(struct mmu_gather *tlb, ++ unsigned long addr, unsigned long end, ++ unsigned long floor, unsigned long ceiling) ++{ ++ pgd_t *pgd; ++ unsigned long next; ++ unsigned long start; ++ ++ /* ++ * The next few lines have given us lots of grief... ++ * ++ * Why are we testing PMD* at this top level? Because often ++ * there will be no work to do at all, and we'd prefer not to ++ * go all the way down to the bottom just to discover that. ++ * ++ * Why all these "- 1"s? Because 0 represents both the bottom ++ * of the address space and the top of it (using -1 for the ++ * top wouldn't help much: the masks would do the wrong thing). ++ * The rule is that addr 0 and floor 0 refer to the bottom of ++ * the address space, but end 0 and ceiling 0 refer to the top ++ * Comparisons need to use "end - 1" and "ceiling - 1" (though ++ * that end 0 case should be mythical). ++ * ++ * Wherever addr is brought up or ceiling brought down, we must ++ * be careful to reject "the opposite 0" before it confuses the ++ * subsequent tests. But what about where end is brought down ++ * by PMD_SIZE below? no, end can't go down to 0 there. ++ * ++ * Whereas we round start (addr) and ceiling down, by different ++ * masks at different levels, in order to test whether a table ++ * now has no other vmas using it, so can be freed, we don't ++ * bother to round floor or end up - the tests don't need that. 
++ */ ++ ++ addr &= PMD_MASK; ++ if (addr < floor) { ++ addr += PMD_SIZE; ++ if (!addr) ++ return; ++ } ++ if (ceiling) { ++ ceiling &= PMD_MASK; ++ if (!ceiling) ++ return; ++ } ++ if (end - 1 > ceiling - 1) ++ end -= PMD_SIZE; ++ if (addr > end - 1) ++ return; ++ ++ start = addr; ++ pgd = pgd_offset(tlb->mm, addr); ++ do { ++ next = pgd_addr_end(addr, end); ++ if (pgd_none_or_clear_bad(pgd)) ++ continue; ++ free_pud_range(tlb, pgd, addr, next, floor, ceiling); ++ } while (pgd++, addr = next, addr != end); ++} ++ ++void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, ++ unsigned long floor, unsigned long ceiling) ++{ ++ while (vma) { ++ struct vm_area_struct *next = vma->vm_next; ++ unsigned long addr = vma->vm_start; ++ ++ /* ++ * Hide vma from rmap and vmtruncate before freeing pgtables ++ */ ++ anon_vma_unlink(vma); ++ unlink_file_vma(vma); ++ ++ if (is_vm_hugetlb_page(vma)) { ++ hugetlb_free_pgd_range(tlb, addr, vma->vm_end, ++ floor, next? next->vm_start: ceiling); ++ } else { ++ /* ++ * Optimization: gather nearby vmas into one call down ++ */ ++ while (next && next->vm_start <= vma->vm_end + PMD_SIZE ++ && !is_vm_hugetlb_page(next)) { ++ vma = next; ++ next = vma->vm_next; ++ anon_vma_unlink(vma); ++ unlink_file_vma(vma); ++ } ++ free_pgd_range(tlb, addr, vma->vm_end, ++ floor, next? next->vm_start: ceiling); ++ } ++ vma = next; ++ } ++} ++ ++int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) ++{ ++ pgtable_t new = pte_alloc_one(mm, address); ++ if (!new) ++ return -ENOMEM; ++ ++ /* ++ * Ensure all pte setup (eg. pte page lock and page clearing) are ++ * visible before the pte is made visible to other CPUs by being ++ * put into page tables. ++ * ++ * The other side of the story is the pointer chasing in the page ++ * table walking code (when walking the page table without locking; ++ * ie. most of the time). Fortunately, these data accesses consist ++ * of a chain of data-dependent loads, meaning most CPUs (alpha ++ * being the notable exception) will already guarantee loads are ++ * seen in-order. See the alpha page table accessors for the ++ * smp_read_barrier_depends() barriers in page table walking code. ++ */ ++ smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ ++ ++ spin_lock(&mm->page_table_lock); ++ if (!pmd_present(*pmd)) { /* Has another populated it ? */ ++ mm->nr_ptes++; ++ pmd_populate(mm, pmd, new); ++ new = NULL; ++ } ++ spin_unlock(&mm->page_table_lock); ++ if (new) ++ pte_free(mm, new); ++ return 0; ++} ++ ++int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) ++{ ++ pte_t *new = pte_alloc_one_kernel(&init_mm, address); ++ if (!new) ++ return -ENOMEM; ++ ++ smp_wmb(); /* See comment in __pte_alloc */ ++ ++ spin_lock(&init_mm.page_table_lock); ++ if (!pmd_present(*pmd)) { /* Has another populated it ? */ ++ pmd_populate_kernel(&init_mm, pmd, new); ++ new = NULL; ++ } ++ spin_unlock(&init_mm.page_table_lock); ++ if (new) ++ pte_free_kernel(&init_mm, new); ++ return 0; ++} ++ ++static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss) ++{ ++ if (file_rss) ++ add_mm_counter(mm, file_rss, file_rss); ++ if (anon_rss) ++ add_mm_counter(mm, anon_rss, anon_rss); ++} ++ ++/* ++ * This function is called to print an error when a bad pte ++ * is found. For example, we might have a PFN-mapped pte in ++ * a region that doesn't allow it. ++ * ++ * The calling function must still handle the error. 
++ */ ++static void print_bad_pte(struct vm_area_struct *vma, pte_t pte, ++ unsigned long vaddr) ++{ ++ printk(KERN_ERR "Bad pte = %08llx, process = %s, " ++ "vm_flags = %lx, vaddr = %lx\n", ++ (long long)pte_val(pte), ++ (vma->vm_mm == current->mm ? current->comm : "???"), ++ vma->vm_flags, vaddr); ++ dump_stack(); ++} ++ ++static inline int is_cow_mapping(unsigned int flags) ++{ ++ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; ++} ++ ++/* ++ * vm_normal_page -- This function gets the "struct page" associated with a pte. ++ * ++ * "Special" mappings do not wish to be associated with a "struct page" (either ++ * it doesn't exist, or it exists but they don't want to touch it). In this ++ * case, NULL is returned here. "Normal" mappings do have a struct page. ++ * ++ * There are 2 broad cases. Firstly, an architecture may define a pte_special() ++ * pte bit, in which case this function is trivial. Secondly, an architecture ++ * may not have a spare pte bit, which requires a more complicated scheme, ++ * described below. ++ * ++ * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a ++ * special mapping (even if there are underlying and valid "struct pages"). ++ * COWed pages of a VM_PFNMAP are always normal. ++ * ++ * The way we recognize COWed pages within VM_PFNMAP mappings is through the ++ * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit ++ * set, and the vm_pgoff will point to the first PFN mapped: thus every special ++ * mapping will always honor the rule ++ * ++ * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) ++ * ++ * And for normal mappings this is false. ++ * ++ * This restricts such mappings to be a linear translation from virtual address ++ * to pfn. To get around this restriction, we allow arbitrary mappings so long ++ * as the vma is not a COW mapping; in that case, we know that all ptes are ++ * special (because none can have been COWed). ++ * ++ * ++ * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. ++ * ++ * VM_MIXEDMAP mappings can likewise contain memory with or without "struct ++ * page" backing, however the difference is that _all_ pages with a struct ++ * page (that is, those where pfn_valid is true) are refcounted and considered ++ * normal pages by the VM. The disadvantage is that pages are refcounted ++ * (which can be slower and simply not an option for some PFNMAP users). The ++ * advantage is that we don't have to follow the strict linearity rule of ++ * PFNMAP mappings in order to support COWable mappings. ++ * ++ */ ++#ifdef __HAVE_ARCH_PTE_SPECIAL ++# define HAVE_PTE_SPECIAL 1 ++#else ++# define HAVE_PTE_SPECIAL 0 ++#endif ++struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, ++ pte_t pte) ++{ ++ unsigned long pfn; ++ ++ if (HAVE_PTE_SPECIAL) { ++ if (likely(!pte_special(pte))) { ++ VM_BUG_ON(!pfn_valid(pte_pfn(pte))); ++ return pte_page(pte); ++ } ++ VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); ++ return NULL; ++ } ++ ++ /* !HAVE_PTE_SPECIAL case follows: */ ++ ++ pfn = pte_pfn(pte); ++ ++ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { ++ if (vma->vm_flags & VM_MIXEDMAP) { ++ if (!pfn_valid(pfn)) ++ return NULL; ++ goto out; ++ } else { ++ unsigned long off; ++ off = (addr - vma->vm_start) >> PAGE_SHIFT; ++ if (pfn == vma->vm_pgoff + off) ++ return NULL; ++ if (!is_cow_mapping(vma->vm_flags)) ++ return NULL; ++ } ++ } ++ ++ VM_BUG_ON(!pfn_valid(pfn)); ++ ++ /* ++ * NOTE! 
We still have PageReserved() pages in the page tables. ++ * ++ * eg. VDSO mappings can cause them to exist. ++ */ ++out: ++ return pfn_to_page(pfn); ++} ++ ++/* ++ * copy one vm_area from one task to the other. Assumes the page tables ++ * already present in the new task to be cleared in the whole range ++ * covered by this vma. ++ */ ++ ++static inline void ++copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, ++ pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, ++ unsigned long addr, int *rss) ++{ ++ unsigned long vm_flags = vma->vm_flags; ++ pte_t pte = *src_pte; ++ struct page *page; ++ ++ /* pte contains position in swap or file, so copy. */ ++ if (unlikely(!pte_present(pte))) { ++ if (!pte_file(pte)) { ++ swp_entry_t entry = pte_to_swp_entry(pte); ++ ++ swap_duplicate(entry); ++ /* make sure dst_mm is on swapoff's mmlist. */ ++ if (unlikely(list_empty(&dst_mm->mmlist))) { ++ spin_lock(&mmlist_lock); ++ if (list_empty(&dst_mm->mmlist)) ++ list_add(&dst_mm->mmlist, ++ &src_mm->mmlist); ++ spin_unlock(&mmlist_lock); ++ } ++ if (is_write_migration_entry(entry) && ++ is_cow_mapping(vm_flags)) { ++ /* ++ * COW mappings require pages in both parent ++ * and child to be set to read. ++ */ ++ make_migration_entry_read(&entry); ++ pte = swp_entry_to_pte(entry); ++ set_pte_at(src_mm, addr, src_pte, pte); ++ } ++ } ++ goto out_set_pte; ++ } ++ ++ /* ++ * If it's a COW mapping, write protect it both ++ * in the parent and the child ++ */ ++ if (is_cow_mapping(vm_flags)) { ++ ptep_set_wrprotect(src_mm, addr, src_pte); ++ pte = pte_wrprotect(pte); ++ } ++ ++ /* ++ * If it's a shared mapping, mark it clean in ++ * the child ++ */ ++ if (vm_flags & VM_SHARED) ++ pte = pte_mkclean(pte); ++ pte = pte_mkold(pte); ++ ++ page = vm_normal_page(vma, addr, pte); ++ if (page) { ++ get_page(page); ++ page_dup_rmap(page, vma, addr); ++ rss[!!PageAnon(page)]++; ++ } ++ ++out_set_pte: ++ set_pte_at(dst_mm, addr, dst_pte, pte); ++} ++ ++static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, ++ pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, ++ unsigned long addr, unsigned long end) ++{ ++ pte_t *src_pte, *dst_pte; ++ spinlock_t *src_ptl, *dst_ptl; ++ int progress = 0; ++ int rss[2]; ++ ++ if (!vx_rss_avail(dst_mm, ((end - addr)/PAGE_SIZE + 1))) ++ return -ENOMEM; ++ ++again: ++ rss[1] = rss[0] = 0; ++ dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); ++ if (!dst_pte) ++ return -ENOMEM; ++ src_pte = pte_offset_map_nested(src_pmd, addr); ++ src_ptl = pte_lockptr(src_mm, src_pmd); ++ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); ++ arch_enter_lazy_mmu_mode(); ++ ++ do { ++ /* ++ * We are holding two locks at this point - either of them ++ * could generate latencies in another task on another CPU. 
++ */ ++ if (progress >= 32) { ++ progress = 0; ++ if (need_resched() || ++ spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) ++ break; ++ } ++ if (pte_none(*src_pte)) { ++ progress++; ++ continue; ++ } ++ copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss); ++ progress += 8; ++ } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); ++ ++ arch_leave_lazy_mmu_mode(); ++ spin_unlock(src_ptl); ++ pte_unmap_nested(src_pte - 1); ++ add_mm_rss(dst_mm, rss[0], rss[1]); ++ pte_unmap_unlock(dst_pte - 1, dst_ptl); ++ cond_resched(); ++ if (addr != end) ++ goto again; ++ return 0; ++} ++ ++static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, ++ pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, ++ unsigned long addr, unsigned long end) ++{ ++ pmd_t *src_pmd, *dst_pmd; ++ unsigned long next; ++ ++ dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); ++ if (!dst_pmd) ++ return -ENOMEM; ++ src_pmd = pmd_offset(src_pud, addr); ++ do { ++ next = pmd_addr_end(addr, end); ++ if (pmd_none_or_clear_bad(src_pmd)) ++ continue; ++ if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, ++ vma, addr, next)) ++ return -ENOMEM; ++ } while (dst_pmd++, src_pmd++, addr = next, addr != end); ++ return 0; ++} ++ ++static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, ++ pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, ++ unsigned long addr, unsigned long end) ++{ ++ pud_t *src_pud, *dst_pud; ++ unsigned long next; ++ ++ dst_pud = pud_alloc(dst_mm, dst_pgd, addr); ++ if (!dst_pud) ++ return -ENOMEM; ++ src_pud = pud_offset(src_pgd, addr); ++ do { ++ next = pud_addr_end(addr, end); ++ if (pud_none_or_clear_bad(src_pud)) ++ continue; ++ if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, ++ vma, addr, next)) ++ return -ENOMEM; ++ } while (dst_pud++, src_pud++, addr = next, addr != end); ++ return 0; ++} ++ ++int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, ++ struct vm_area_struct *vma) ++{ ++ pgd_t *src_pgd, *dst_pgd; ++ unsigned long next; ++ unsigned long addr = vma->vm_start; ++ unsigned long end = vma->vm_end; ++ int ret; ++ ++ /* ++ * Don't copy ptes where a page fault will fill them correctly. ++ * Fork becomes much lighter when there are big shared or private ++ * readonly mappings. The tradeoff is that copy_page_range is more ++ * efficient than faulting. ++ */ ++ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) { ++ if (!vma->anon_vma) ++ return 0; ++ } ++ ++ if (is_vm_hugetlb_page(vma)) ++ return copy_hugetlb_page_range(dst_mm, src_mm, vma); ++ ++ /* ++ * We need to invalidate the secondary MMU mappings only when ++ * there could be a permission downgrade on the ptes of the ++ * parent mm. And a permission downgrade will only happen if ++ * is_cow_mapping() returns true. 
++ */ ++ if (is_cow_mapping(vma->vm_flags)) ++ mmu_notifier_invalidate_range_start(src_mm, addr, end); ++ ++ ret = 0; ++ dst_pgd = pgd_offset(dst_mm, addr); ++ src_pgd = pgd_offset(src_mm, addr); ++ do { ++ next = pgd_addr_end(addr, end); ++ if (pgd_none_or_clear_bad(src_pgd)) ++ continue; ++ if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, ++ vma, addr, next))) { ++ ret = -ENOMEM; ++ break; ++ } ++ } while (dst_pgd++, src_pgd++, addr = next, addr != end); ++ ++ if (is_cow_mapping(vma->vm_flags)) ++ mmu_notifier_invalidate_range_end(src_mm, ++ vma->vm_start, end); ++ return ret; ++} ++ ++static unsigned long zap_pte_range(struct mmu_gather *tlb, ++ struct vm_area_struct *vma, pmd_t *pmd, ++ unsigned long addr, unsigned long end, ++ long *zap_work, struct zap_details *details) ++{ ++ struct mm_struct *mm = tlb->mm; ++ pte_t *pte; ++ spinlock_t *ptl; ++ int file_rss = 0; ++ int anon_rss = 0; ++ ++ pte = pte_offset_map_lock(mm, pmd, addr, &ptl); ++ arch_enter_lazy_mmu_mode(); ++ do { ++ pte_t ptent = *pte; ++ if (pte_none(ptent)) { ++ (*zap_work)--; ++ continue; ++ } ++ ++ (*zap_work) -= PAGE_SIZE; ++ ++ if (pte_present(ptent)) { ++ struct page *page; ++ ++ page = vm_normal_page(vma, addr, ptent); ++ if (unlikely(details) && page) { ++ /* ++ * unmap_shared_mapping_pages() wants to ++ * invalidate cache without truncating: ++ * unmap shared but keep private pages. ++ */ ++ if (details->check_mapping && ++ details->check_mapping != page->mapping) ++ continue; ++ /* ++ * Each page->index must be checked when ++ * invalidating or truncating nonlinear. ++ */ ++ if (details->nonlinear_vma && ++ (page->index < details->first_index || ++ page->index > details->last_index)) ++ continue; ++ } ++ ptent = ptep_get_and_clear_full(mm, addr, pte, ++ tlb->fullmm); ++ tlb_remove_tlb_entry(tlb, pte, addr); ++ if (unlikely(!page)) ++ continue; ++ if (unlikely(details) && details->nonlinear_vma ++ && linear_page_index(details->nonlinear_vma, ++ addr) != page->index) ++ set_pte_at(mm, addr, pte, ++ pgoff_to_pte(page->index)); ++ if (PageAnon(page)) ++ anon_rss--; ++ else { ++ if (pte_dirty(ptent)) ++ set_page_dirty(page); ++ if (pte_young(ptent)) ++ SetPageReferenced(page); ++ file_rss--; ++ } ++ page_remove_rmap(page, vma); ++ tlb_remove_page(tlb, page); ++ continue; ++ } ++ /* ++ * If details->check_mapping, we leave swap entries; ++ * if details->nonlinear_vma, we leave file entries. 
++ */ ++ if (unlikely(details)) ++ continue; ++ if (!pte_file(ptent)) ++ free_swap_and_cache(pte_to_swp_entry(ptent)); ++ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); ++ } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0)); ++ ++ add_mm_rss(mm, file_rss, anon_rss); ++ arch_leave_lazy_mmu_mode(); ++ pte_unmap_unlock(pte - 1, ptl); ++ ++ return addr; ++} ++ ++static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, ++ struct vm_area_struct *vma, pud_t *pud, ++ unsigned long addr, unsigned long end, ++ long *zap_work, struct zap_details *details) ++{ ++ pmd_t *pmd; ++ unsigned long next; ++ ++ pmd = pmd_offset(pud, addr); ++ do { ++ next = pmd_addr_end(addr, end); ++ if (pmd_none_or_clear_bad(pmd)) { ++ (*zap_work)--; ++ continue; ++ } ++ next = zap_pte_range(tlb, vma, pmd, addr, next, ++ zap_work, details); ++ } while (pmd++, addr = next, (addr != end && *zap_work > 0)); ++ ++ return addr; ++} ++ ++static inline unsigned long zap_pud_range(struct mmu_gather *tlb, ++ struct vm_area_struct *vma, pgd_t *pgd, ++ unsigned long addr, unsigned long end, ++ long *zap_work, struct zap_details *details) ++{ ++ pud_t *pud; ++ unsigned long next; ++ ++ pud = pud_offset(pgd, addr); ++ do { ++ next = pud_addr_end(addr, end); ++ if (pud_none_or_clear_bad(pud)) { ++ (*zap_work)--; ++ continue; ++ } ++ next = zap_pmd_range(tlb, vma, pud, addr, next, ++ zap_work, details); ++ } while (pud++, addr = next, (addr != end && *zap_work > 0)); ++ ++ return addr; ++} ++ ++static unsigned long unmap_page_range(struct mmu_gather *tlb, ++ struct vm_area_struct *vma, ++ unsigned long addr, unsigned long end, ++ long *zap_work, struct zap_details *details) ++{ ++ pgd_t *pgd; ++ unsigned long next; ++ ++ if (details && !details->check_mapping && !details->nonlinear_vma) ++ details = NULL; ++ ++ BUG_ON(addr >= end); ++ tlb_start_vma(tlb, vma); ++ pgd = pgd_offset(vma->vm_mm, addr); ++ do { ++ next = pgd_addr_end(addr, end); ++ if (pgd_none_or_clear_bad(pgd)) { ++ (*zap_work)--; ++ continue; ++ } ++ next = zap_pud_range(tlb, vma, pgd, addr, next, ++ zap_work, details); ++ } while (pgd++, addr = next, (addr != end && *zap_work > 0)); ++ tlb_end_vma(tlb, vma); ++ ++ return addr; ++} ++ ++#ifdef CONFIG_PREEMPT ++# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE) ++#else ++/* No preempt: go for improved straight-line efficiency */ ++# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE) ++#endif ++ ++/** ++ * unmap_vmas - unmap a range of memory covered by a list of vma's ++ * @tlbp: address of the caller's struct mmu_gather ++ * @vma: the starting vma ++ * @start_addr: virtual address at which to start unmapping ++ * @end_addr: virtual address at which to end unmapping ++ * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here ++ * @details: details of nonlinear truncation or shared cache invalidation ++ * ++ * Returns the end address of the unmapping (restart addr if interrupted). ++ * ++ * Unmap all pages in the vma list. ++ * ++ * We aim to not hold locks for too long (for scheduling latency reasons). ++ * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to ++ * return the ending mmu_gather to the caller. ++ * ++ * Only addresses between `start' and `end' will be unmapped. ++ * ++ * The VMA list must be sorted in ascending virtual address order. ++ * ++ * unmap_vmas() assumes that the caller will flush the whole unmapped address ++ * range after unmap_vmas() returns. 
So the only responsibility here is to ++ * ensure that any thus-far unmapped pages are flushed before unmap_vmas() ++ * drops the lock and schedules. ++ */ ++unsigned long unmap_vmas(struct mmu_gather **tlbp, ++ struct vm_area_struct *vma, unsigned long start_addr, ++ unsigned long end_addr, unsigned long *nr_accounted, ++ struct zap_details *details) ++{ ++ long zap_work = ZAP_BLOCK_SIZE; ++ unsigned long tlb_start = 0; /* For tlb_finish_mmu */ ++ int tlb_start_valid = 0; ++ unsigned long start = start_addr; ++ spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL; ++ int fullmm = (*tlbp)->fullmm; ++ struct mm_struct *mm = vma->vm_mm; ++ ++ mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); ++ for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) { ++ unsigned long end; ++ ++ start = max(vma->vm_start, start_addr); ++ if (start >= vma->vm_end) ++ continue; ++ end = min(vma->vm_end, end_addr); ++ if (end <= vma->vm_start) ++ continue; ++ ++ if (vma->vm_flags & VM_ACCOUNT) ++ *nr_accounted += (end - start) >> PAGE_SHIFT; ++ ++ while (start != end) { ++ if (!tlb_start_valid) { ++ tlb_start = start; ++ tlb_start_valid = 1; ++ } ++ ++ if (unlikely(is_vm_hugetlb_page(vma))) { ++ /* ++ * It is undesirable to test vma->vm_file as it ++ * should be non-null for valid hugetlb area. ++ * However, vm_file will be NULL in the error ++ * cleanup path of do_mmap_pgoff. When ++ * hugetlbfs ->mmap method fails, ++ * do_mmap_pgoff() nullifies vma->vm_file ++ * before calling this function to clean up. ++ * Since no pte has actually been setup, it is ++ * safe to do nothing in this case. ++ */ ++ if (vma->vm_file) { ++ unmap_hugepage_range(vma, start, end, NULL); ++ zap_work -= (end - start) / ++ pages_per_huge_page(hstate_vma(vma)); ++ } ++ ++ start = end; ++ } else ++ start = unmap_page_range(*tlbp, vma, ++ start, end, &zap_work, details); ++ ++ if (zap_work > 0) { ++ BUG_ON(start != end); ++ break; ++ } ++ ++ tlb_finish_mmu(*tlbp, tlb_start, start); ++ ++ if (need_resched() || ++ (i_mmap_lock && spin_needbreak(i_mmap_lock))) { ++ if (i_mmap_lock) { ++ *tlbp = NULL; ++ goto out; ++ } ++ cond_resched(); ++ } ++ ++ *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm); ++ tlb_start_valid = 0; ++ zap_work = ZAP_BLOCK_SIZE; ++ } ++ } ++out: ++ mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); ++ return start; /* which is now the end (or restart) address */ ++} ++ ++/** ++ * zap_page_range - remove user pages in a given range ++ * @vma: vm_area_struct holding the applicable pages ++ * @address: starting address of pages to zap ++ * @size: number of bytes to zap ++ * @details: details of nonlinear truncation or shared cache invalidation ++ */ ++unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, ++ unsigned long size, struct zap_details *details) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ struct mmu_gather *tlb; ++ unsigned long end = address + size; ++ unsigned long nr_accounted = 0; ++ ++ lru_add_drain(); ++ tlb = tlb_gather_mmu(mm, 0); ++ update_hiwater_rss(mm); ++ end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details); ++ if (tlb) ++ tlb_finish_mmu(tlb, address, end); ++ return end; ++} ++ ++/** ++ * zap_vma_ptes - remove ptes mapping the vma ++ * @vma: vm_area_struct holding ptes to be zapped ++ * @address: starting address of pages to zap ++ * @size: number of bytes to zap ++ * ++ * This function only unmaps ptes assigned to VM_PFNMAP vmas. ++ * ++ * The entire address range must be fully contained within the vma. 
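++ * Requests that fall outside the vma, or that target a vma without
++ * VM_PFNMAP set, are rejected with -1 rather than clamped.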
++ * ++ * Returns 0 if successful. ++ */ ++int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, ++ unsigned long size) ++{ ++ if (address < vma->vm_start || address + size > vma->vm_end || ++ !(vma->vm_flags & VM_PFNMAP)) ++ return -1; ++ zap_page_range(vma, address, size, NULL); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(zap_vma_ptes); ++ ++/* ++ * Do a quick page-table lookup for a single page. ++ */ ++struct page *follow_page(struct vm_area_struct *vma, unsigned long address, ++ unsigned int flags) ++{ ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *ptep, pte; ++ spinlock_t *ptl; ++ struct page *page; ++ struct mm_struct *mm = vma->vm_mm; ++ ++ page = follow_huge_addr(mm, address, flags & FOLL_WRITE); ++ if (!IS_ERR(page)) { ++ BUG_ON(flags & FOLL_GET); ++ goto out; ++ } ++ ++ page = NULL; ++ pgd = pgd_offset(mm, address); ++ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) ++ goto no_page_table; ++ ++ pud = pud_offset(pgd, address); ++ if (pud_none(*pud)) ++ goto no_page_table; ++ if (pud_huge(*pud)) { ++ BUG_ON(flags & FOLL_GET); ++ page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE); ++ goto out; ++ } ++ if (unlikely(pud_bad(*pud))) ++ goto no_page_table; ++ ++ pmd = pmd_offset(pud, address); ++ if (pmd_none(*pmd)) ++ goto no_page_table; ++ if (pmd_huge(*pmd)) { ++ BUG_ON(flags & FOLL_GET); ++ page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); ++ goto out; ++ } ++ if (unlikely(pmd_bad(*pmd))) ++ goto no_page_table; ++ ++ ptep = pte_offset_map_lock(mm, pmd, address, &ptl); ++ ++ pte = *ptep; ++ if (!pte_present(pte)) ++ goto no_page; ++ if ((flags & FOLL_WRITE) && !pte_write(pte)) ++ goto unlock; ++ page = vm_normal_page(vma, address, pte); ++ if (unlikely(!page)) ++ goto bad_page; ++ ++ if (flags & FOLL_GET) ++ get_page(page); ++ if (flags & FOLL_TOUCH) { ++ if ((flags & FOLL_WRITE) && ++ !pte_dirty(pte) && !PageDirty(page)) ++ set_page_dirty(page); ++ mark_page_accessed(page); ++ } ++unlock: ++ pte_unmap_unlock(ptep, ptl); ++out: ++ return page; ++ ++bad_page: ++ pte_unmap_unlock(ptep, ptl); ++ return ERR_PTR(-EFAULT); ++ ++no_page: ++ pte_unmap_unlock(ptep, ptl); ++ if (!pte_none(pte)) ++ return page; ++ /* Fall through to ZERO_PAGE handling */ ++no_page_table: ++ /* ++ * When core dumping an enormous anonymous area that nobody ++ * has touched so far, we don't want to allocate page tables. ++ */ ++ if (flags & FOLL_ANON) { ++ page = ZERO_PAGE(0); ++ if (flags & FOLL_GET) ++ get_page(page); ++ BUG_ON(flags & FOLL_WRITE); ++ } ++ return page; ++} ++ ++/* Can we do the FOLL_ANON optimization? */ ++static inline int use_zero_page(struct vm_area_struct *vma) ++{ ++ /* ++ * We don't want to optimize FOLL_ANON for make_pages_present() ++ * when it tries to page in a VM_LOCKED region. As to VM_SHARED, ++ * we want to get the page from the page tables to make sure ++ * that we serialize and update with any other user of that ++ * mapping. ++ */ ++ if (vma->vm_flags & (VM_LOCKED | VM_SHARED)) ++ return 0; ++ /* ++ * And if we have a fault routine, it's not an anonymous region. ++ */ ++ return !vma->vm_ops || !vma->vm_ops->fault; ++} ++ ++int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, ++ unsigned long start, int len, int write, int force, ++ struct page **pages, struct vm_area_struct **vmas) ++{ ++ int i; ++ unsigned int vm_flags; ++ ++ if (len <= 0) ++ return 0; ++ /* ++ * Require read or write permissions. ++ * If 'force' is set, we only require the "MAY" flags. ++ */ ++ vm_flags = write ? 
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); ++ vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); ++ i = 0; ++ ++ do { ++ struct vm_area_struct *vma; ++ unsigned int foll_flags; ++ ++ vma = find_extend_vma(mm, start); ++ if (!vma && in_gate_area(tsk, start)) { ++ unsigned long pg = start & PAGE_MASK; ++ struct vm_area_struct *gate_vma = get_gate_vma(tsk); ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *pte; ++ if (write) /* user gate pages are read-only */ ++ return i ? : -EFAULT; ++ if (pg > TASK_SIZE) ++ pgd = pgd_offset_k(pg); ++ else ++ pgd = pgd_offset_gate(mm, pg); ++ BUG_ON(pgd_none(*pgd)); ++ pud = pud_offset(pgd, pg); ++ BUG_ON(pud_none(*pud)); ++ pmd = pmd_offset(pud, pg); ++ if (pmd_none(*pmd)) ++ return i ? : -EFAULT; ++ pte = pte_offset_map(pmd, pg); ++ if (pte_none(*pte)) { ++ pte_unmap(pte); ++ return i ? : -EFAULT; ++ } ++ if (pages) { ++ struct page *page = vm_normal_page(gate_vma, start, *pte); ++ pages[i] = page; ++ if (page) ++ get_page(page); ++ } ++ pte_unmap(pte); ++ if (vmas) ++ vmas[i] = gate_vma; ++ i++; ++ start += PAGE_SIZE; ++ len--; ++ continue; ++ } ++ ++ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP)) ++ || !(vm_flags & vma->vm_flags)) ++ return i ? : -EFAULT; ++ ++ if (is_vm_hugetlb_page(vma)) { ++ i = follow_hugetlb_page(mm, vma, pages, vmas, ++ &start, &len, i, write); ++ continue; ++ } ++ ++ foll_flags = FOLL_TOUCH; ++ if (pages) ++ foll_flags |= FOLL_GET; ++ if (!write && use_zero_page(vma)) ++ foll_flags |= FOLL_ANON; ++ ++ do { ++ struct page *page; ++ ++ /* ++ * If tsk is ooming, cut off its access to large memory ++ * allocations. It has a pending SIGKILL, but it can't ++ * be processed until returning to user space. ++ */ ++ if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE))) ++ return i ? i : -ENOMEM; ++ ++ if (write) ++ foll_flags |= FOLL_WRITE; ++ ++ cond_resched(); ++ while (!(page = follow_page(vma, start, foll_flags))) { ++ int ret; ++ ret = handle_mm_fault(mm, vma, start, ++ foll_flags & FOLL_WRITE); ++ if (ret & VM_FAULT_ERROR) { ++ if (ret & VM_FAULT_OOM) ++ return i ? i : -ENOMEM; ++ else if (ret & VM_FAULT_SIGBUS) ++ return i ? i : -EFAULT; ++ BUG(); ++ } ++ if (ret & VM_FAULT_MAJOR) ++ tsk->maj_flt++; ++ else ++ tsk->min_flt++; ++ ++ /* ++ * The VM_FAULT_WRITE bit tells us that ++ * do_wp_page has broken COW when necessary, ++ * even if maybe_mkwrite decided not to set ++ * pte_write. We can thus safely do subsequent ++ * page lookups as if they were reads. ++ */ ++ if (ret & VM_FAULT_WRITE) ++ foll_flags &= ~FOLL_WRITE; ++ ++ cond_resched(); ++ } ++ if (IS_ERR(page)) ++ return i ? i : PTR_ERR(page); ++ if (pages) { ++ pages[i] = page; ++ ++ flush_anon_page(vma, page, start); ++ flush_dcache_page(page); ++ } ++ if (vmas) ++ vmas[i] = vma; ++ i++; ++ start += PAGE_SIZE; ++ len--; ++ } while (len && start < vma->vm_end); ++ } while (len); ++ return i; ++} ++EXPORT_SYMBOL(get_user_pages); ++ ++pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, ++ spinlock_t **ptl) ++{ ++ pgd_t * pgd = pgd_offset(mm, addr); ++ pud_t * pud = pud_alloc(mm, pgd, addr); ++ if (pud) { ++ pmd_t * pmd = pmd_alloc(mm, pud, addr); ++ if (pmd) ++ return pte_alloc_map_lock(mm, pmd, addr, ptl); ++ } ++ return NULL; ++} ++ ++/* ++ * This is the old fallback for page remapping. ++ * ++ * For historical reasons, it only allows reserved pages. Only ++ * old drivers should use this, and they needed to mark their ++ * pages reserved for the old functions anyway. 
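++ * The page is charged to the memory cgroup up front; anonymous pages are
++ * refused with -EINVAL and an already-populated pte with -EBUSY.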
++ */ ++static int insert_page(struct vm_area_struct *vma, unsigned long addr, ++ struct page *page, pgprot_t prot) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ int retval; ++ pte_t *pte; ++ spinlock_t *ptl; ++ ++ retval = mem_cgroup_charge(page, mm, GFP_KERNEL); ++ if (retval) ++ goto out; ++ ++ retval = -EINVAL; ++ if (PageAnon(page)) ++ goto out_uncharge; ++ retval = -ENOMEM; ++ flush_dcache_page(page); ++ pte = get_locked_pte(mm, addr, &ptl); ++ if (!pte) ++ goto out_uncharge; ++ retval = -EBUSY; ++ if (!pte_none(*pte)) ++ goto out_unlock; ++ ++ /* Ok, finally just insert the thing.. */ ++ get_page(page); ++ inc_mm_counter(mm, file_rss); ++ page_add_file_rmap(page); ++ set_pte_at(mm, addr, pte, mk_pte(page, prot)); ++ ++ retval = 0; ++ pte_unmap_unlock(pte, ptl); ++ return retval; ++out_unlock: ++ pte_unmap_unlock(pte, ptl); ++out_uncharge: ++ mem_cgroup_uncharge_page(page); ++out: ++ return retval; ++} ++ ++/** ++ * vm_insert_page - insert single page into user vma ++ * @vma: user vma to map to ++ * @addr: target user address of this page ++ * @page: source kernel page ++ * ++ * This allows drivers to insert individual pages they've allocated ++ * into a user vma. ++ * ++ * The page has to be a nice clean _individual_ kernel allocation. ++ * If you allocate a compound page, you need to have marked it as ++ * such (__GFP_COMP), or manually just split the page up yourself ++ * (see split_page()). ++ * ++ * NOTE! Traditionally this was done with "remap_pfn_range()" which ++ * took an arbitrary page protection parameter. This doesn't allow ++ * that. Your vma protection will have to be set up correctly, which ++ * means that if you want a shared writable mapping, you'd better ++ * ask for a shared writable mapping! ++ * ++ * The page does not need to be reserved. ++ */ ++int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, ++ struct page *page) ++{ ++ if (addr < vma->vm_start || addr >= vma->vm_end) ++ return -EFAULT; ++ if (!page_count(page)) ++ return -EINVAL; ++ vma->vm_flags |= VM_INSERTPAGE; ++ return insert_page(vma, addr, page, vma->vm_page_prot); ++} ++EXPORT_SYMBOL(vm_insert_page); ++ ++static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn, pgprot_t prot) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ int retval; ++ pte_t *pte, entry; ++ spinlock_t *ptl; ++ ++ retval = -ENOMEM; ++ pte = get_locked_pte(mm, addr, &ptl); ++ if (!pte) ++ goto out; ++ retval = -EBUSY; ++ if (!pte_none(*pte)) ++ goto out_unlock; ++ ++ /* Ok, finally just insert the thing.. */ ++ entry = pte_mkspecial(pfn_pte(pfn, prot)); ++ set_pte_at(mm, addr, pte, entry); ++ update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */ ++ ++ retval = 0; ++out_unlock: ++ pte_unmap_unlock(pte, ptl); ++out: ++ return retval; ++} ++ ++/** ++ * vm_insert_pfn - insert single pfn into user vma ++ * @vma: user vma to map to ++ * @addr: target user address of this page ++ * @pfn: source kernel pfn ++ * ++ * Similar to vm_inert_page, this allows drivers to insert individual pages ++ * they've allocated into a user vma. Same comments apply. ++ * ++ * This function should only be called from a vm_ops->fault handler, and ++ * in that case the handler should return NULL. ++ * ++ * vma cannot be a COW mapping. ++ * ++ * As this is called only for pages that do not currently exist, we ++ * do not need to flush old virtual caches or the TLB. 
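++ * The BUG_ONs below insist that exactly one of VM_PFNMAP or VM_MIXEDMAP is
++ * set and that a VM_PFNMAP vma is not a COW mapping.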
++ */ ++int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn) ++{ ++ /* ++ * Technically, architectures with pte_special can avoid all these ++ * restrictions (same for remap_pfn_range). However we would like ++ * consistency in testing and feature parity among all, so we should ++ * try to keep these invariants in place for everybody. ++ */ ++ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); ++ BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == ++ (VM_PFNMAP|VM_MIXEDMAP)); ++ BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); ++ BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); ++ ++ if (addr < vma->vm_start || addr >= vma->vm_end) ++ return -EFAULT; ++ return insert_pfn(vma, addr, pfn, vma->vm_page_prot); ++} ++EXPORT_SYMBOL(vm_insert_pfn); ++ ++int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn) ++{ ++ BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); ++ ++ if (addr < vma->vm_start || addr >= vma->vm_end) ++ return -EFAULT; ++ ++ /* ++ * If we don't have pte special, then we have to use the pfn_valid() ++ * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* ++ * refcount the page if pfn_valid is true (hence insert_page rather ++ * than insert_pfn). ++ */ ++ if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) { ++ struct page *page; ++ ++ page = pfn_to_page(pfn); ++ return insert_page(vma, addr, page, vma->vm_page_prot); ++ } ++ return insert_pfn(vma, addr, pfn, vma->vm_page_prot); ++} ++EXPORT_SYMBOL(vm_insert_mixed); ++ ++/* ++ * maps a range of physical memory into the requested pages. the old ++ * mappings are removed. any references to nonexistent pages results ++ * in null mappings (currently treated as "copy-on-access") ++ */ ++static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, ++ unsigned long addr, unsigned long end, ++ unsigned long pfn, pgprot_t prot) ++{ ++ pte_t *pte; ++ spinlock_t *ptl; ++ ++ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); ++ if (!pte) ++ return -ENOMEM; ++ arch_enter_lazy_mmu_mode(); ++ do { ++ BUG_ON(!pte_none(*pte)); ++ set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); ++ pfn++; ++ } while (pte++, addr += PAGE_SIZE, addr != end); ++ arch_leave_lazy_mmu_mode(); ++ pte_unmap_unlock(pte - 1, ptl); ++ return 0; ++} ++ ++static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, ++ unsigned long addr, unsigned long end, ++ unsigned long pfn, pgprot_t prot) ++{ ++ pmd_t *pmd; ++ unsigned long next; ++ ++ pfn -= addr >> PAGE_SHIFT; ++ pmd = pmd_alloc(mm, pud, addr); ++ if (!pmd) ++ return -ENOMEM; ++ do { ++ next = pmd_addr_end(addr, end); ++ if (remap_pte_range(mm, pmd, addr, next, ++ pfn + (addr >> PAGE_SHIFT), prot)) ++ return -ENOMEM; ++ } while (pmd++, addr = next, addr != end); ++ return 0; ++} ++ ++static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, ++ unsigned long addr, unsigned long end, ++ unsigned long pfn, pgprot_t prot) ++{ ++ pud_t *pud; ++ unsigned long next; ++ ++ pfn -= addr >> PAGE_SHIFT; ++ pud = pud_alloc(mm, pgd, addr); ++ if (!pud) ++ return -ENOMEM; ++ do { ++ next = pud_addr_end(addr, end); ++ if (remap_pmd_range(mm, pud, addr, next, ++ pfn + (addr >> PAGE_SHIFT), prot)) ++ return -ENOMEM; ++ } while (pud++, addr = next, addr != end); ++ return 0; ++} ++ ++/** ++ * remap_pfn_range - remap kernel memory to userspace ++ * @vma: user vma to map to ++ * @addr: target user address to start at ++ * @pfn: physical address of kernel memory ++ * @size: size of map area ++ * @prot: page protection flags for 
this mapping ++ * ++ * Note: this is only safe if the mm semaphore is held when called. ++ */ ++int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn, unsigned long size, pgprot_t prot) ++{ ++ pgd_t *pgd; ++ unsigned long next; ++ unsigned long end = addr + PAGE_ALIGN(size); ++ struct mm_struct *mm = vma->vm_mm; ++ int err; ++ ++ /* ++ * Physically remapped pages are special. Tell the ++ * rest of the world about it: ++ * VM_IO tells people not to look at these pages ++ * (accesses can have side effects). ++ * VM_RESERVED is specified all over the place, because ++ * in 2.4 it kept swapout's vma scan off this vma; but ++ * in 2.6 the LRU scan won't even find its pages, so this ++ * flag means no more than count its pages in reserved_vm, ++ * and omit it from core dump, even when VM_IO turned off. ++ * VM_PFNMAP tells the core MM that the base pages are just ++ * raw PFN mappings, and do not have a "struct page" associated ++ * with them. ++ * ++ * There's a horrible special case to handle copy-on-write ++ * behaviour that some programs depend on. We mark the "original" ++ * un-COW'ed pages by matching them up with "vma->vm_pgoff". ++ */ ++ if (is_cow_mapping(vma->vm_flags)) { ++ if (addr != vma->vm_start || end != vma->vm_end) ++ return -EINVAL; ++ vma->vm_pgoff = pfn; ++ } ++ ++ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; ++ ++ BUG_ON(addr >= end); ++ pfn -= addr >> PAGE_SHIFT; ++ pgd = pgd_offset(mm, addr); ++ flush_cache_range(vma, addr, end); ++ do { ++ next = pgd_addr_end(addr, end); ++ err = remap_pud_range(mm, pgd, addr, next, ++ pfn + (addr >> PAGE_SHIFT), prot); ++ if (err) ++ break; ++ } while (pgd++, addr = next, addr != end); ++ return err; ++} ++EXPORT_SYMBOL(remap_pfn_range); ++ ++static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, ++ unsigned long addr, unsigned long end, ++ pte_fn_t fn, void *data) ++{ ++ pte_t *pte; ++ int err; ++ pgtable_t token; ++ spinlock_t *uninitialized_var(ptl); ++ ++ pte = (mm == &init_mm) ? ++ pte_alloc_kernel(pmd, addr) : ++ pte_alloc_map_lock(mm, pmd, addr, &ptl); ++ if (!pte) ++ return -ENOMEM; ++ ++ BUG_ON(pmd_huge(*pmd)); ++ ++ token = pmd_pgtable(*pmd); ++ ++ do { ++ err = fn(pte, token, addr, data); ++ if (err) ++ break; ++ } while (pte++, addr += PAGE_SIZE, addr != end); ++ ++ if (mm != &init_mm) ++ pte_unmap_unlock(pte-1, ptl); ++ return err; ++} ++ ++static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, ++ unsigned long addr, unsigned long end, ++ pte_fn_t fn, void *data) ++{ ++ pmd_t *pmd; ++ unsigned long next; ++ int err; ++ ++ BUG_ON(pud_huge(*pud)); ++ ++ pmd = pmd_alloc(mm, pud, addr); ++ if (!pmd) ++ return -ENOMEM; ++ do { ++ next = pmd_addr_end(addr, end); ++ err = apply_to_pte_range(mm, pmd, addr, next, fn, data); ++ if (err) ++ break; ++ } while (pmd++, addr = next, addr != end); ++ return err; ++} ++ ++static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, ++ unsigned long addr, unsigned long end, ++ pte_fn_t fn, void *data) ++{ ++ pud_t *pud; ++ unsigned long next; ++ int err; ++ ++ pud = pud_alloc(mm, pgd, addr); ++ if (!pud) ++ return -ENOMEM; ++ do { ++ next = pud_addr_end(addr, end); ++ err = apply_to_pmd_range(mm, pud, addr, next, fn, data); ++ if (err) ++ break; ++ } while (pud++, addr = next, addr != end); ++ return err; ++} ++ ++/* ++ * Scan a region of virtual memory, filling in page tables as necessary ++ * and calling a provided function on each leaf page table. 
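++ * Missing pud/pmd/pte levels are allocated on the way down, and the
++ * callback is invoked once for every pte slot in [addr, addr + size).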
++ */ ++int apply_to_page_range(struct mm_struct *mm, unsigned long addr, ++ unsigned long size, pte_fn_t fn, void *data) ++{ ++ pgd_t *pgd; ++ unsigned long next; ++ unsigned long start = addr, end = addr + size; ++ int err; ++ ++ BUG_ON(addr >= end); ++ mmu_notifier_invalidate_range_start(mm, start, end); ++ pgd = pgd_offset(mm, addr); ++ do { ++ next = pgd_addr_end(addr, end); ++ err = apply_to_pud_range(mm, pgd, addr, next, fn, data); ++ if (err) ++ break; ++ } while (pgd++, addr = next, addr != end); ++ mmu_notifier_invalidate_range_end(mm, start, end); ++ return err; ++} ++EXPORT_SYMBOL_GPL(apply_to_page_range); ++ ++/* ++ * handle_pte_fault chooses page fault handler according to an entry ++ * which was read non-atomically. Before making any commitment, on ++ * those architectures or configurations (e.g. i386 with PAE) which ++ * might give a mix of unmatched parts, do_swap_page and do_file_page ++ * must check under lock before unmapping the pte and proceeding ++ * (but do_wp_page is only called after already making such a check; ++ * and do_anonymous_page and do_no_page can safely check later on). ++ */ ++static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, ++ pte_t *page_table, pte_t orig_pte) ++{ ++ int same = 1; ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) ++ if (sizeof(pte_t) > sizeof(unsigned long)) { ++ spinlock_t *ptl = pte_lockptr(mm, pmd); ++ spin_lock(ptl); ++ same = pte_same(*page_table, orig_pte); ++ spin_unlock(ptl); ++ } ++#endif ++ pte_unmap(page_table); ++ return same; ++} ++ ++/* ++ * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when ++ * servicing faults for write access. In the normal case, do always want ++ * pte_mkwrite. But get_user_pages can cause write faults for mappings ++ * that do not have writing enabled, when used by access_process_vm. ++ */ ++static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) ++{ ++ if (likely(vma->vm_flags & VM_WRITE)) ++ pte = pte_mkwrite(pte); ++ return pte; ++} ++ ++static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) ++{ ++ /* ++ * If the source page was a PFN mapping, we don't have ++ * a "struct page" for it. We do a best-effort copy by ++ * just copying from the original user address. If that ++ * fails, we just zero-fill it. Live with it. ++ */ ++ if (unlikely(!src)) { ++ void *kaddr = kmap_atomic(dst, KM_USER0); ++ void __user *uaddr = (void __user *)(va & PAGE_MASK); ++ ++ /* ++ * This really shouldn't fail, because the page is there ++ * in the page tables. But it might just be unreadable, ++ * in which case we just give up and fill the result with ++ * zeroes. ++ */ ++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) ++ memset(kaddr, 0, PAGE_SIZE); ++ kunmap_atomic(kaddr, KM_USER0); ++ flush_dcache_page(dst); ++ } else ++ copy_user_highpage(dst, src, va, vma); ++} ++ ++/* ++ * This routine handles present pages, when users try to write ++ * to a shared page. It is done by copying the page to a new address ++ * and decrementing the shared-page counter for the old page. ++ * ++ * Note that this routine assumes that the protection checks have been ++ * done by the caller (the low-level page fault routine in most cases). ++ * Thus we can safely just mark it writable once we've done any necessary ++ * COW. ++ * ++ * We also mark the page dirty at this point even though the page will ++ * change only once the write actually happens. 
This avoids a few races, ++ * and potentially makes it more efficient. ++ * ++ * We enter with non-exclusive mmap_sem (to exclude vma changes, ++ * but allow concurrent faults), with pte both mapped and locked. ++ * We return with mmap_sem still held, but pte unmapped and unlocked. ++ */ ++static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ++ unsigned long address, pte_t *page_table, pmd_t *pmd, ++ spinlock_t *ptl, pte_t orig_pte) ++{ ++ struct page *old_page, *new_page; ++ pte_t entry; ++ int reuse = 0, ret = 0; ++ int page_mkwrite = 0; ++ struct page *dirty_page = NULL; ++ ++ old_page = vm_normal_page(vma, address, orig_pte); ++ if (!old_page) { ++ /* ++ * VM_MIXEDMAP !pfn_valid() case ++ * ++ * We should not cow pages in a shared writeable mapping. ++ * Just mark the pages writable as we can't do any dirty ++ * accounting on raw pfn maps. ++ */ ++ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == ++ (VM_WRITE|VM_SHARED)) ++ goto reuse; ++ goto gotten; ++ } ++ ++ /* ++ * Take out anonymous pages first, anonymous shared vmas are ++ * not dirty accountable. ++ */ ++ if (PageAnon(old_page)) { ++ if (trylock_page(old_page)) { ++ reuse = can_share_swap_page(old_page); ++ unlock_page(old_page); ++ } ++ } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == ++ (VM_WRITE|VM_SHARED))) { ++ /* ++ * Only catch write-faults on shared writable pages, ++ * read-only shared pages can get COWed by ++ * get_user_pages(.write=1, .force=1). ++ */ ++ if (vma->vm_ops && vma->vm_ops->page_mkwrite) { ++ /* ++ * Notify the address space that the page is about to ++ * become writable so that it can prohibit this or wait ++ * for the page to get into an appropriate state. ++ * ++ * We do this without the lock held, so that it can ++ * sleep if it needs to. ++ */ ++ page_cache_get(old_page); ++ pte_unmap_unlock(page_table, ptl); ++ ++ if (vma->vm_ops->page_mkwrite(vma, old_page) < 0) ++ goto unwritable_page; ++ ++ /* ++ * Since we dropped the lock we need to revalidate ++ * the PTE as someone else may have changed it. If ++ * they did, we just return, as we can count on the ++ * MMU to tell us if they didn't also make it writable. ++ */ ++ page_table = pte_offset_map_lock(mm, pmd, address, ++ &ptl); ++ page_cache_release(old_page); ++ if (!pte_same(*page_table, orig_pte)) ++ goto unlock; ++ ++ page_mkwrite = 1; ++ } ++ dirty_page = old_page; ++ get_page(dirty_page); ++ reuse = 1; ++ } ++ ++ if (reuse) { ++reuse: ++ flush_cache_page(vma, address, pte_pfn(orig_pte)); ++ entry = pte_mkyoung(orig_pte); ++ entry = maybe_mkwrite(pte_mkdirty(entry), vma); ++ if (ptep_set_access_flags(vma, address, page_table, entry,1)) ++ update_mmu_cache(vma, address, entry); ++ ret |= VM_FAULT_WRITE; ++ goto unlock; ++ } ++ ++ /* ++ * Ok, we need to copy. Oh, well.. 
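++ * Take a reference on the old page, drop the pte lock, allocate and fill
++ * the copy, then retake the lock and install it only if the pte is unchanged.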
++ */ ++ page_cache_get(old_page); ++gotten: ++ pte_unmap_unlock(page_table, ptl); ++ ++ if (unlikely(anon_vma_prepare(vma))) ++ goto oom; ++ VM_BUG_ON(old_page == ZERO_PAGE(0)); ++ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); ++ if (!new_page) ++ goto oom; ++ cow_user_page(new_page, old_page, address, vma); ++ __SetPageUptodate(new_page); ++ ++ if (mem_cgroup_charge(new_page, mm, GFP_KERNEL)) ++ goto oom_free_new; ++ ++ /* ++ * Re-check the pte - we dropped the lock ++ */ ++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ if (likely(pte_same(*page_table, orig_pte))) { ++ if (old_page) { ++ if (!PageAnon(old_page)) { ++ dec_mm_counter(mm, file_rss); ++ inc_mm_counter(mm, anon_rss); ++ } ++ } else ++ inc_mm_counter(mm, anon_rss); ++ flush_cache_page(vma, address, pte_pfn(orig_pte)); ++ entry = mk_pte(new_page, vma->vm_page_prot); ++ entry = maybe_mkwrite(pte_mkdirty(entry), vma); ++ /* ++ * Clear the pte entry and flush it first, before updating the ++ * pte with the new entry. This will avoid a race condition ++ * seen in the presence of one thread doing SMC and another ++ * thread doing COW. ++ */ ++ ptep_clear_flush_notify(vma, address, page_table); ++ set_pte_at(mm, address, page_table, entry); ++ update_mmu_cache(vma, address, entry); ++ lru_cache_add_active(new_page); ++ page_add_new_anon_rmap(new_page, vma, address); ++ ++ if (old_page) { ++ /* ++ * Only after switching the pte to the new page may ++ * we remove the mapcount here. Otherwise another ++ * process may come and find the rmap count decremented ++ * before the pte is switched to the new page, and ++ * "reuse" the old page writing into it while our pte ++ * here still points into it and can be read by other ++ * threads. ++ * ++ * The critical issue is to order this ++ * page_remove_rmap with the ptp_clear_flush above. ++ * Those stores are ordered by (if nothing else,) ++ * the barrier present in the atomic_add_negative ++ * in page_remove_rmap. ++ * ++ * Then the TLB flush in ptep_clear_flush ensures that ++ * no process can access the old page before the ++ * decremented mapcount is visible. And the old page ++ * cannot be reused until after the decremented ++ * mapcount is visible. So transitively, TLBs to ++ * old page will be flushed before it can be reused. ++ */ ++ page_remove_rmap(old_page, vma); ++ } ++ ++ /* Free the old page.. */ ++ new_page = old_page; ++ ret |= VM_FAULT_WRITE; ++ } else ++ mem_cgroup_uncharge_page(new_page); ++ ++ if (new_page) ++ page_cache_release(new_page); ++ if (old_page) ++ page_cache_release(old_page); ++unlock: ++ pte_unmap_unlock(page_table, ptl); ++ if (dirty_page) { ++ if (vma->vm_file) ++ file_update_time(vma->vm_file); ++ ++ /* ++ * Yes, Virginia, this is actually required to prevent a race ++ * with clear_page_dirty_for_io() from clearing the page dirty ++ * bit after it clear all dirty ptes, but before a racing ++ * do_wp_page installs a dirty pte. ++ * ++ * do_no_page is protected similarly. ++ */ ++ wait_on_page_locked(dirty_page); ++ set_page_dirty_balance(dirty_page, page_mkwrite); ++ put_page(dirty_page); ++ } ++ return ret; ++oom_free_new: ++ page_cache_release(new_page); ++oom: ++ if (old_page) ++ page_cache_release(old_page); ++ return VM_FAULT_OOM; ++ ++unwritable_page: ++ page_cache_release(old_page); ++ return VM_FAULT_SIGBUS; ++} ++ ++/* ++ * Helper functions for unmap_mapping_range(). 
++ * ++ * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __ ++ * ++ * We have to restart searching the prio_tree whenever we drop the lock, ++ * since the iterator is only valid while the lock is held, and anyway ++ * a later vma might be split and reinserted earlier while lock dropped. ++ * ++ * The list of nonlinear vmas could be handled more efficiently, using ++ * a placeholder, but handle it in the same way until a need is shown. ++ * It is important to search the prio_tree before nonlinear list: a vma ++ * may become nonlinear and be shifted from prio_tree to nonlinear list ++ * while the lock is dropped; but never shifted from list to prio_tree. ++ * ++ * In order to make forward progress despite restarting the search, ++ * vm_truncate_count is used to mark a vma as now dealt with, so we can ++ * quickly skip it next time around. Since the prio_tree search only ++ * shows us those vmas affected by unmapping the range in question, we ++ * can't efficiently keep all vmas in step with mapping->truncate_count: ++ * so instead reset them all whenever it wraps back to 0 (then go to 1). ++ * mapping->truncate_count and vma->vm_truncate_count are protected by ++ * i_mmap_lock. ++ * ++ * In order to make forward progress despite repeatedly restarting some ++ * large vma, note the restart_addr from unmap_vmas when it breaks out: ++ * and restart from that address when we reach that vma again. It might ++ * have been split or merged, shrunk or extended, but never shifted: so ++ * restart_addr remains valid so long as it remains in the vma's range. ++ * unmap_mapping_range forces truncate_count to leap over page-aligned ++ * values so we can save vma's restart_addr in its truncate_count field. ++ */ ++#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK)) ++ ++static void reset_vma_truncate_counts(struct address_space *mapping) ++{ ++ struct vm_area_struct *vma; ++ struct prio_tree_iter iter; ++ ++ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) ++ vma->vm_truncate_count = 0; ++ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) ++ vma->vm_truncate_count = 0; ++} ++ ++static int unmap_mapping_range_vma(struct vm_area_struct *vma, ++ unsigned long start_addr, unsigned long end_addr, ++ struct zap_details *details) ++{ ++ unsigned long restart_addr; ++ int need_break; ++ ++ /* ++ * files that support invalidating or truncating portions of the ++ * file from under mmaped areas must have their ->fault function ++ * return a locked page (and set VM_FAULT_LOCKED in the return). ++ * This provides synchronisation against concurrent unmapping here. 
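++ * vm_truncate_count doubles as a saved restart address: if an earlier pass
++ * was interrupted, resume from it instead of rescanning the whole vma.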
++ */ ++ ++again: ++ restart_addr = vma->vm_truncate_count; ++ if (is_restart_addr(restart_addr) && start_addr < restart_addr) { ++ start_addr = restart_addr; ++ if (start_addr >= end_addr) { ++ /* Top of vma has been split off since last time */ ++ vma->vm_truncate_count = details->truncate_count; ++ return 0; ++ } ++ } ++ ++ restart_addr = zap_page_range(vma, start_addr, ++ end_addr - start_addr, details); ++ need_break = need_resched() || spin_needbreak(details->i_mmap_lock); ++ ++ if (restart_addr >= end_addr) { ++ /* We have now completed this vma: mark it so */ ++ vma->vm_truncate_count = details->truncate_count; ++ if (!need_break) ++ return 0; ++ } else { ++ /* Note restart_addr in vma's truncate_count field */ ++ vma->vm_truncate_count = restart_addr; ++ if (!need_break) ++ goto again; ++ } ++ ++ spin_unlock(details->i_mmap_lock); ++ cond_resched(); ++ spin_lock(details->i_mmap_lock); ++ return -EINTR; ++} ++ ++static inline void unmap_mapping_range_tree(struct prio_tree_root *root, ++ struct zap_details *details) ++{ ++ struct vm_area_struct *vma; ++ struct prio_tree_iter iter; ++ pgoff_t vba, vea, zba, zea; ++ ++restart: ++ vma_prio_tree_foreach(vma, &iter, root, ++ details->first_index, details->last_index) { ++ /* Skip quickly over those we have already dealt with */ ++ if (vma->vm_truncate_count == details->truncate_count) ++ continue; ++ ++ vba = vma->vm_pgoff; ++ vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; ++ /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ ++ zba = details->first_index; ++ if (zba < vba) ++ zba = vba; ++ zea = details->last_index; ++ if (zea > vea) ++ zea = vea; ++ ++ if (unmap_mapping_range_vma(vma, ++ ((zba - vba) << PAGE_SHIFT) + vma->vm_start, ++ ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, ++ details) < 0) ++ goto restart; ++ } ++} ++ ++static inline void unmap_mapping_range_list(struct list_head *head, ++ struct zap_details *details) ++{ ++ struct vm_area_struct *vma; ++ ++ /* ++ * In nonlinear VMAs there is no correspondence between virtual address ++ * offset and file offset. So we must perform an exhaustive search ++ * across *all* the pages in each nonlinear VMA, not just the pages ++ * whose virtual address lies outside the file truncation point. ++ */ ++restart: ++ list_for_each_entry(vma, head, shared.vm_set.list) { ++ /* Skip quickly over those we have already dealt with */ ++ if (vma->vm_truncate_count == details->truncate_count) ++ continue; ++ details->nonlinear_vma = vma; ++ if (unmap_mapping_range_vma(vma, vma->vm_start, ++ vma->vm_end, details) < 0) ++ goto restart; ++ } ++} ++ ++/** ++ * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file. ++ * @mapping: the address space containing mmaps to be unmapped. ++ * @holebegin: byte in first page to unmap, relative to the start of ++ * the underlying file. This will be rounded down to a PAGE_SIZE ++ * boundary. Note that this is different from vmtruncate(), which ++ * must keep the partial page. In contrast, we must get rid of ++ * partial pages. ++ * @holelen: size of prospective hole in bytes. This will be rounded ++ * up to a PAGE_SIZE boundary. A holelen of zero truncates to the ++ * end of the file. ++ * @even_cows: 1 when truncating a file, unmap even private COWed pages; ++ * but 0 when invalidating pagecache, don't throw away private data. 
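++ * With even_cows == 0, details.check_mapping is set to @mapping, and
++ * zap_pte_range() then unmaps shared pages but keeps private COWed ones.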
++ */ ++void unmap_mapping_range(struct address_space *mapping, ++ loff_t const holebegin, loff_t const holelen, int even_cows) ++{ ++ struct zap_details details; ++ pgoff_t hba = holebegin >> PAGE_SHIFT; ++ pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ ++ /* Check for overflow. */ ++ if (sizeof(holelen) > sizeof(hlen)) { ++ long long holeend = ++ (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ if (holeend & ~(long long)ULONG_MAX) ++ hlen = ULONG_MAX - hba + 1; ++ } ++ ++ details.check_mapping = even_cows? NULL: mapping; ++ details.nonlinear_vma = NULL; ++ details.first_index = hba; ++ details.last_index = hba + hlen - 1; ++ if (details.last_index < details.first_index) ++ details.last_index = ULONG_MAX; ++ details.i_mmap_lock = &mapping->i_mmap_lock; ++ ++ spin_lock(&mapping->i_mmap_lock); ++ ++ /* Protect against endless unmapping loops */ ++ mapping->truncate_count++; ++ if (unlikely(is_restart_addr(mapping->truncate_count))) { ++ if (mapping->truncate_count == 0) ++ reset_vma_truncate_counts(mapping); ++ mapping->truncate_count++; ++ } ++ details.truncate_count = mapping->truncate_count; ++ ++ if (unlikely(!prio_tree_empty(&mapping->i_mmap))) ++ unmap_mapping_range_tree(&mapping->i_mmap, &details); ++ if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) ++ unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); ++ spin_unlock(&mapping->i_mmap_lock); ++} ++EXPORT_SYMBOL(unmap_mapping_range); ++ ++/** ++ * vmtruncate - unmap mappings "freed" by truncate() syscall ++ * @inode: inode of the file used ++ * @offset: file offset to start truncating ++ * ++ * NOTE! We have to be ready to update the memory sharing ++ * between the file and the memory map for a potential last ++ * incomplete page. Ugly, but necessary. ++ */ ++int vmtruncate(struct inode * inode, loff_t offset) ++{ ++ if (inode->i_size < offset) { ++ unsigned long limit; ++ ++ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; ++ if (limit != RLIM_INFINITY && offset > limit) ++ goto out_sig; ++ if (offset > inode->i_sb->s_maxbytes) ++ goto out_big; ++ i_size_write(inode, offset); ++ } else { ++ struct address_space *mapping = inode->i_mapping; ++ ++ /* ++ * truncation of in-use swapfiles is disallowed - it would ++ * cause subsequent swapout to scribble on the now-freed ++ * blocks. ++ */ ++ if (IS_SWAPFILE(inode)) ++ return -ETXTBSY; ++ i_size_write(inode, offset); ++ ++ /* ++ * unmap_mapping_range is called twice, first simply for ++ * efficiency so that truncate_inode_pages does fewer ++ * single-page unmaps. However after this first call, and ++ * before truncate_inode_pages finishes, it is possible for ++ * private pages to be COWed, which remain after ++ * truncate_inode_pages finishes, hence the second ++ * unmap_mapping_range call must be made for correctness. ++ */ ++ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); ++ truncate_inode_pages(mapping, offset); ++ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); ++ } ++ ++ if (inode->i_op && inode->i_op->truncate) ++ inode->i_op->truncate(inode); ++ return 0; ++ ++out_sig: ++ send_sig(SIGXFSZ, current, 0); ++out_big: ++ return -EFBIG; ++} ++EXPORT_SYMBOL(vmtruncate); ++ ++int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) ++{ ++ struct address_space *mapping = inode->i_mapping; ++ ++ /* ++ * If the underlying filesystem is not going to provide ++ * a way to truncate a range of blocks (punch a hole) - ++ * we should return failure right now. 
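++ * Otherwise unmap the range, drop the pagecache pages, unmap once more to
++ * catch pages COWed in between, and let ->truncate_range punch the hole.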
++ */ ++ if (!inode->i_op || !inode->i_op->truncate_range) ++ return -ENOSYS; ++ ++ mutex_lock(&inode->i_mutex); ++ down_write(&inode->i_alloc_sem); ++ unmap_mapping_range(mapping, offset, (end - offset), 1); ++ truncate_inode_pages_range(mapping, offset, end); ++ unmap_mapping_range(mapping, offset, (end - offset), 1); ++ inode->i_op->truncate_range(inode, offset, end); ++ up_write(&inode->i_alloc_sem); ++ mutex_unlock(&inode->i_mutex); ++ ++ return 0; ++} ++ ++/* ++ * We enter with non-exclusive mmap_sem (to exclude vma changes, ++ * but allow concurrent faults), and pte mapped but not yet locked. ++ * We return with mmap_sem still held, but pte unmapped and unlocked. ++ */ ++static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, ++ unsigned long address, pte_t *page_table, pmd_t *pmd, ++ int write_access, pte_t orig_pte) ++{ ++ spinlock_t *ptl; ++ struct page *page; ++ swp_entry_t entry; ++ pte_t pte; ++ int ret = 0; ++ ++ if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) ++ goto out; ++ ++ entry = pte_to_swp_entry(orig_pte); ++ if (is_migration_entry(entry)) { ++ migration_entry_wait(mm, pmd, address); ++ goto out; ++ } ++ delayacct_set_flag(DELAYACCT_PF_SWAPIN); ++ page = lookup_swap_cache(entry); ++ if (!page) { ++ grab_swap_token(); /* Contend for token _before_ read-in */ ++ page = swapin_readahead(entry, ++ GFP_HIGHUSER_MOVABLE, vma, address); ++ if (!page) { ++ /* ++ * Back out if somebody else faulted in this pte ++ * while we released the pte lock. ++ */ ++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ if (likely(pte_same(*page_table, orig_pte))) ++ ret = VM_FAULT_OOM; ++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN); ++ goto unlock; ++ } ++ ++ /* Had to read the page from swap area: Major fault */ ++ ret = VM_FAULT_MAJOR; ++ count_vm_event(PGMAJFAULT); ++ } ++ ++ if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { ++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN); ++ ret = VM_FAULT_OOM; ++ goto out; ++ } ++ ++ if (!vx_rss_avail(mm, 1)) { ++ ret = VM_FAULT_OOM; ++ goto out; ++ } ++ ++ mark_page_accessed(page); ++ lock_page(page); ++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN); ++ ++ /* ++ * Back out if somebody else already faulted in this pte. ++ */ ++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ if (unlikely(!pte_same(*page_table, orig_pte))) ++ goto out_nomap; ++ ++ if (unlikely(!PageUptodate(page))) { ++ ret = VM_FAULT_SIGBUS; ++ goto out_nomap; ++ } ++ ++ /* The page isn't present yet, go ahead with the fault. 
*/ ++ ++ inc_mm_counter(mm, anon_rss); ++ pte = mk_pte(page, vma->vm_page_prot); ++ if (write_access && can_share_swap_page(page)) { ++ pte = maybe_mkwrite(pte_mkdirty(pte), vma); ++ write_access = 0; ++ } ++ ++ flush_icache_page(vma, page); ++ set_pte_at(mm, address, page_table, pte); ++ page_add_anon_rmap(page, vma, address); ++ ++ swap_free(entry); ++ if (vm_swap_full()) ++ remove_exclusive_swap_page(page); ++ unlock_page(page); ++ ++ if (write_access) { ++ ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); ++ if (ret & VM_FAULT_ERROR) ++ ret &= VM_FAULT_ERROR; ++ goto out; ++ } ++ ++ /* No need to invalidate - it was non-present before */ ++ update_mmu_cache(vma, address, pte); ++unlock: ++ pte_unmap_unlock(page_table, ptl); ++out: ++ return ret; ++out_nomap: ++ mem_cgroup_uncharge_page(page); ++ pte_unmap_unlock(page_table, ptl); ++ unlock_page(page); ++ page_cache_release(page); ++ return ret; ++} ++ ++/* ++ * We enter with non-exclusive mmap_sem (to exclude vma changes, ++ * but allow concurrent faults), and pte mapped but not yet locked. ++ * We return with mmap_sem still held, but pte unmapped and unlocked. ++ */ ++static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, ++ unsigned long address, pte_t *page_table, pmd_t *pmd, ++ int write_access) ++{ ++ struct page *page; ++ spinlock_t *ptl; ++ pte_t entry; ++ ++ /* Allocate our own private page. */ ++ pte_unmap(page_table); ++ ++ if (!vx_rss_avail(mm, 1)) ++ goto oom; ++ if (unlikely(anon_vma_prepare(vma))) ++ goto oom; ++ page = alloc_zeroed_user_highpage_movable(vma, address); ++ if (!page) ++ goto oom; ++ __SetPageUptodate(page); ++ ++ if (mem_cgroup_charge(page, mm, GFP_KERNEL)) ++ goto oom_free_page; ++ ++ entry = mk_pte(page, vma->vm_page_prot); ++ entry = maybe_mkwrite(pte_mkdirty(entry), vma); ++ ++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ if (!pte_none(*page_table)) ++ goto release; ++ inc_mm_counter(mm, anon_rss); ++ lru_cache_add_active(page); ++ page_add_new_anon_rmap(page, vma, address); ++ set_pte_at(mm, address, page_table, entry); ++ ++ /* No need to invalidate - it was non-present before */ ++ update_mmu_cache(vma, address, entry); ++unlock: ++ pte_unmap_unlock(page_table, ptl); ++ return 0; ++release: ++ mem_cgroup_uncharge_page(page); ++ page_cache_release(page); ++ goto unlock; ++oom_free_page: ++ page_cache_release(page); ++oom: ++ return VM_FAULT_OOM; ++} ++ ++/* ++ * __do_fault() tries to create a new page mapping. It aggressively ++ * tries to share with existing pages, but makes a separate copy if ++ * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid ++ * the next page fault. ++ * ++ * As this is called only for pages that do not currently exist, we ++ * do not need to flush old virtual caches or the TLB. ++ * ++ * We enter with non-exclusive mmap_sem (to exclude vma changes, ++ * but allow concurrent faults), and pte neither mapped nor locked. ++ * We return with mmap_sem still held, but pte unmapped and unlocked. 
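++ * For shared writable mappings the backing address space gets a chance to
++ * refuse or delay the write via its ->page_mkwrite handler before the pte
++ * is set up.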
++ */ ++static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, ++ unsigned long address, pmd_t *pmd, ++ pgoff_t pgoff, unsigned int flags, pte_t orig_pte) ++{ ++ pte_t *page_table; ++ spinlock_t *ptl; ++ struct page *page; ++ pte_t entry; ++ int anon = 0; ++ struct page *dirty_page = NULL; ++ struct vm_fault vmf; ++ int ret; ++ int page_mkwrite = 0; ++ ++ vmf.virtual_address = (void __user *)(address & PAGE_MASK); ++ vmf.pgoff = pgoff; ++ vmf.flags = flags; ++ vmf.page = NULL; ++ ++ ret = vma->vm_ops->fault(vma, &vmf); ++ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) ++ return ret; ++ ++ /* ++ * For consistency in subsequent calls, make the faulted page always ++ * locked. ++ */ ++ if (unlikely(!(ret & VM_FAULT_LOCKED))) ++ lock_page(vmf.page); ++ else ++ VM_BUG_ON(!PageLocked(vmf.page)); ++ ++ /* ++ * Should we do an early C-O-W break? ++ */ ++ page = vmf.page; ++ if (flags & FAULT_FLAG_WRITE) { ++ if (!(vma->vm_flags & VM_SHARED)) { ++ anon = 1; ++ if (unlikely(anon_vma_prepare(vma))) { ++ ret = VM_FAULT_OOM; ++ goto out; ++ } ++ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, ++ vma, address); ++ if (!page) { ++ ret = VM_FAULT_OOM; ++ goto out; ++ } ++ copy_user_highpage(page, vmf.page, address, vma); ++ __SetPageUptodate(page); ++ } else { ++ /* ++ * If the page will be shareable, see if the backing ++ * address space wants to know that the page is about ++ * to become writable ++ */ ++ if (vma->vm_ops->page_mkwrite) { ++ unlock_page(page); ++ if (vma->vm_ops->page_mkwrite(vma, page) < 0) { ++ ret = VM_FAULT_SIGBUS; ++ anon = 1; /* no anon but release vmf.page */ ++ goto out_unlocked; ++ } ++ lock_page(page); ++ /* ++ * XXX: this is not quite right (racy vs ++ * invalidate) to unlock and relock the page ++ * like this, however a better fix requires ++ * reworking page_mkwrite locking API, which ++ * is better done later. ++ */ ++ if (!page->mapping) { ++ ret = 0; ++ anon = 1; /* no anon but release vmf.page */ ++ goto out; ++ } ++ page_mkwrite = 1; ++ } ++ } ++ ++ } ++ ++ if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { ++ ret = VM_FAULT_OOM; ++ goto out; ++ } ++ ++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ ++ /* ++ * This silly early PAGE_DIRTY setting removes a race ++ * due to the bad i386 page protection. But it's valid ++ * for other architectures too. ++ * ++ * Note that if write_access is true, we either now have ++ * an exclusive copy of the page, or this is a shared mapping, ++ * so we can make it writable and dirty to avoid having to ++ * handle that later. ++ */ ++ /* Only go through if we didn't race with anybody else... 
*/ ++ if (likely(pte_same(*page_table, orig_pte))) { ++ flush_icache_page(vma, page); ++ entry = mk_pte(page, vma->vm_page_prot); ++ if (flags & FAULT_FLAG_WRITE) ++ entry = maybe_mkwrite(pte_mkdirty(entry), vma); ++ set_pte_at(mm, address, page_table, entry); ++ if (anon) { ++ inc_mm_counter(mm, anon_rss); ++ lru_cache_add_active(page); ++ page_add_new_anon_rmap(page, vma, address); ++ } else { ++ inc_mm_counter(mm, file_rss); ++ page_add_file_rmap(page); ++ if (flags & FAULT_FLAG_WRITE) { ++ dirty_page = page; ++ get_page(dirty_page); ++ } ++ } ++ ++ /* no need to invalidate: a not-present page won't be cached */ ++ update_mmu_cache(vma, address, entry); ++ } else { ++ mem_cgroup_uncharge_page(page); ++ if (anon) ++ page_cache_release(page); ++ else ++ anon = 1; /* no anon but release faulted_page */ ++ } ++ ++ pte_unmap_unlock(page_table, ptl); ++ ++out: ++ unlock_page(vmf.page); ++out_unlocked: ++ if (anon) ++ page_cache_release(vmf.page); ++ else if (dirty_page) { ++ if (vma->vm_file) ++ file_update_time(vma->vm_file); ++ ++ set_page_dirty_balance(dirty_page, page_mkwrite); ++ put_page(dirty_page); ++ } ++ ++ return ret; ++} ++ ++static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, ++ unsigned long address, pte_t *page_table, pmd_t *pmd, ++ int write_access, pte_t orig_pte) ++{ ++ pgoff_t pgoff = (((address & PAGE_MASK) ++ - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; ++ unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0); ++ ++ pte_unmap(page_table); ++ return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); ++} ++ ++/* ++ * Fault of a previously existing named mapping. Repopulate the pte ++ * from the encoded file_pte if possible. This enables swappable ++ * nonlinear vmas. ++ * ++ * We enter with non-exclusive mmap_sem (to exclude vma changes, ++ * but allow concurrent faults), and pte mapped but not yet locked. ++ * We return with mmap_sem still held, but pte unmapped and unlocked. ++ */ ++static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, ++ unsigned long address, pte_t *page_table, pmd_t *pmd, ++ int write_access, pte_t orig_pte) ++{ ++ unsigned int flags = FAULT_FLAG_NONLINEAR | ++ (write_access ? FAULT_FLAG_WRITE : 0); ++ pgoff_t pgoff; ++ ++ if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) ++ return 0; ++ ++ if (unlikely(!(vma->vm_flags & VM_NONLINEAR) || ++ !(vma->vm_flags & VM_CAN_NONLINEAR))) { ++ /* ++ * Page table corrupted: show pte and kill process. ++ */ ++ print_bad_pte(vma, orig_pte, address); ++ return VM_FAULT_OOM; ++ } ++ ++ pgoff = pte_to_pgoff(orig_pte); ++ return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); ++} ++ ++/* ++ * These routines also need to handle stuff like marking pages dirty ++ * and/or accessed for architectures that don't do it in hardware (most ++ * RISC architectures). The early dirtying is also good on the i386. ++ * ++ * There is also a hook called "update_mmu_cache()" that architectures ++ * with external mmu caches can use to update those (ie the Sparc or ++ * PowerPC hashed page tables that act as extended TLBs). ++ * ++ * We enter with non-exclusive mmap_sem (to exclude vma changes, ++ * but allow concurrent faults), and pte mapped but not yet locked. ++ * We return with mmap_sem still held, but pte unmapped and unlocked. 
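++ * Not-present entries are dispatched to do_linear_fault/do_anonymous_page,
++ * do_nonlinear_fault or do_swap_page below; a write fault on a read-only
++ * present pte is handed to do_wp_page.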
++ */ ++static inline int handle_pte_fault(struct mm_struct *mm, ++ struct vm_area_struct *vma, unsigned long address, ++ pte_t *pte, pmd_t *pmd, int write_access) ++{ ++ pte_t entry; ++ spinlock_t *ptl; ++ int ret = 0, type = VXPT_UNKNOWN; ++ ++ entry = *pte; ++ if (!pte_present(entry)) { ++ if (pte_none(entry)) { ++ if (vma->vm_ops) { ++ if (likely(vma->vm_ops->fault)) ++ return do_linear_fault(mm, vma, address, ++ pte, pmd, write_access, entry); ++ } ++ return do_anonymous_page(mm, vma, address, ++ pte, pmd, write_access); ++ } ++ if (pte_file(entry)) ++ return do_nonlinear_fault(mm, vma, address, ++ pte, pmd, write_access, entry); ++ return do_swap_page(mm, vma, address, ++ pte, pmd, write_access, entry); ++ } ++ ++ ptl = pte_lockptr(mm, pmd); ++ spin_lock(ptl); ++ if (unlikely(!pte_same(*pte, entry))) ++ goto unlock; ++ if (write_access) { ++ if (!pte_write(entry)) { ++ ret = do_wp_page(mm, vma, address, ++ pte, pmd, ptl, entry); ++ type = VXPT_WRITE; ++ goto out; ++ } ++ entry = pte_mkdirty(entry); ++ } ++ entry = pte_mkyoung(entry); ++ if (ptep_set_access_flags(vma, address, pte, entry, write_access)) { ++ update_mmu_cache(vma, address, entry); ++ } else { ++ /* ++ * This is needed only for protection faults but the arch code ++ * is not yet telling us if this is a protection fault or not. ++ * This still avoids useless tlb flushes for .text page faults ++ * with threads. ++ */ ++ if (write_access) ++ flush_tlb_page(vma, address); ++ } ++unlock: ++ pte_unmap_unlock(pte, ptl); ++ ret = 0; ++out: ++ vx_page_fault(mm, vma, type, ret); ++ return ret; ++} ++ ++/* ++ * By the time we get here, we already hold the mm semaphore ++ */ ++int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, ++ unsigned long address, int write_access) ++{ ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *pte; ++ ++ __set_current_state(TASK_RUNNING); ++ ++ count_vm_event(PGFAULT); ++ ++ if (unlikely(is_vm_hugetlb_page(vma))) ++ return hugetlb_fault(mm, vma, address, write_access); ++ ++ pgd = pgd_offset(mm, address); ++ pud = pud_alloc(mm, pgd, address); ++ if (!pud) ++ return VM_FAULT_OOM; ++ pmd = pmd_alloc(mm, pud, address); ++ if (!pmd) ++ return VM_FAULT_OOM; ++ pte = pte_alloc_map(mm, pmd, address); ++ if (!pte) ++ return VM_FAULT_OOM; ++ ++ return handle_pte_fault(mm, vma, address, pte, pmd, write_access); ++} ++ ++#ifndef __PAGETABLE_PUD_FOLDED ++/* ++ * Allocate page upper directory. ++ * We've already handled the fast-path in-line. ++ */ ++int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) ++{ ++ pud_t *new = pud_alloc_one(mm, address); ++ if (!new) ++ return -ENOMEM; ++ ++ smp_wmb(); /* See comment in __pte_alloc */ ++ ++ spin_lock(&mm->page_table_lock); ++ if (pgd_present(*pgd)) /* Another has populated it */ ++ pud_free(mm, new); ++ else ++ pgd_populate(mm, pgd, new); ++ spin_unlock(&mm->page_table_lock); ++ return 0; ++} ++#endif /* __PAGETABLE_PUD_FOLDED */ ++ ++#ifndef __PAGETABLE_PMD_FOLDED ++/* ++ * Allocate page middle directory. ++ * We've already handled the fast-path in-line. 
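++ * page_table_lock is taken and the pud re-checked: if another thread
++ * populated it meanwhile, the freshly allocated pmd is freed again.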
++ */ ++int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) ++{ ++ pmd_t *new = pmd_alloc_one(mm, address); ++ if (!new) ++ return -ENOMEM; ++ ++ smp_wmb(); /* See comment in __pte_alloc */ ++ ++ spin_lock(&mm->page_table_lock); ++#ifndef __ARCH_HAS_4LEVEL_HACK ++ if (pud_present(*pud)) /* Another has populated it */ ++ pmd_free(mm, new); ++ else ++ pud_populate(mm, pud, new); ++#else ++ if (pgd_present(*pud)) /* Another has populated it */ ++ pmd_free(mm, new); ++ else ++ pgd_populate(mm, pud, new); ++#endif /* __ARCH_HAS_4LEVEL_HACK */ ++ spin_unlock(&mm->page_table_lock); ++ return 0; ++} ++#endif /* __PAGETABLE_PMD_FOLDED */ ++ ++int make_pages_present(unsigned long addr, unsigned long end) ++{ ++ int ret, len, write; ++ struct vm_area_struct * vma; ++ ++ vma = find_vma(current->mm, addr); ++ if (!vma) ++ return -ENOMEM; ++ write = (vma->vm_flags & VM_WRITE) != 0; ++ BUG_ON(addr >= end); ++ BUG_ON(end > vma->vm_end); ++ len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE; ++ ret = get_user_pages(current, current->mm, addr, ++ len, write, 0, NULL, NULL); ++ if (ret < 0) { ++ /* ++ SUS require strange return value to mlock ++ - invalid addr generate to ENOMEM. ++ - out of memory should generate EAGAIN. ++ */ ++ if (ret == -EFAULT) ++ ret = -ENOMEM; ++ else if (ret == -ENOMEM) ++ ret = -EAGAIN; ++ return ret; ++ } ++ return ret == len ? 0 : -ENOMEM; ++} ++ ++#if !defined(__HAVE_ARCH_GATE_AREA) ++ ++#if defined(AT_SYSINFO_EHDR) ++static struct vm_area_struct gate_vma; ++ ++static int __init gate_vma_init(void) ++{ ++ gate_vma.vm_mm = NULL; ++ gate_vma.vm_start = FIXADDR_USER_START; ++ gate_vma.vm_end = FIXADDR_USER_END; ++ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; ++ gate_vma.vm_page_prot = __P101; ++ /* ++ * Make sure the vDSO gets into every core dump. ++ * Dumping its contents makes post-mortem fully interpretable later ++ * without matching up the same kernel and hardware config to see ++ * what PC values meant. ++ */ ++ gate_vma.vm_flags |= VM_ALWAYSDUMP; ++ return 0; ++} ++__initcall(gate_vma_init); ++#endif ++ ++struct vm_area_struct *get_gate_vma(struct task_struct *tsk) ++{ ++#ifdef AT_SYSINFO_EHDR ++ return &gate_vma; ++#else ++ return NULL; ++#endif ++} ++ ++int in_gate_area_no_task(unsigned long addr) ++{ ++#ifdef AT_SYSINFO_EHDR ++ if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) ++ return 1; ++#endif ++ return 0; ++} ++ ++#endif /* __HAVE_ARCH_GATE_AREA */ ++ ++#ifdef CONFIG_HAVE_IOREMAP_PROT ++static resource_size_t follow_phys(struct vm_area_struct *vma, ++ unsigned long address, unsigned int flags, ++ unsigned long *prot) ++{ ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *ptep, pte; ++ spinlock_t *ptl; ++ resource_size_t phys_addr = 0; ++ struct mm_struct *mm = vma->vm_mm; ++ ++ VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP))); ++ ++ pgd = pgd_offset(mm, address); ++ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) ++ goto no_page_table; ++ ++ pud = pud_offset(pgd, address); ++ if (pud_none(*pud) || unlikely(pud_bad(*pud))) ++ goto no_page_table; ++ ++ pmd = pmd_offset(pud, address); ++ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) ++ goto no_page_table; ++ ++ /* We cannot handle huge page PFN maps. Luckily they don't exist. 
*/ ++ if (pmd_huge(*pmd)) ++ goto no_page_table; ++ ++ ptep = pte_offset_map_lock(mm, pmd, address, &ptl); ++ if (!ptep) ++ goto out; ++ ++ pte = *ptep; ++ if (!pte_present(pte)) ++ goto unlock; ++ if ((flags & FOLL_WRITE) && !pte_write(pte)) ++ goto unlock; ++ phys_addr = pte_pfn(pte); ++ phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */ ++ ++ *prot = pgprot_val(pte_pgprot(pte)); ++ ++unlock: ++ pte_unmap_unlock(ptep, ptl); ++out: ++ return phys_addr; ++no_page_table: ++ return 0; ++} ++ ++int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, ++ void *buf, int len, int write) ++{ ++ resource_size_t phys_addr; ++ unsigned long prot = 0; ++ void *maddr; ++ int offset = addr & (PAGE_SIZE-1); ++ ++ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) ++ return -EINVAL; ++ ++ phys_addr = follow_phys(vma, addr, write, &prot); ++ ++ if (!phys_addr) ++ return -EINVAL; ++ ++ maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); ++ if (write) ++ memcpy_toio(maddr + offset, buf, len); ++ else ++ memcpy_fromio(buf, maddr + offset, len); ++ iounmap(maddr); ++ ++ return len; ++} ++#endif ++ ++/* ++ * Access another process' address space. ++ * Source/target buffer must be kernel space, ++ * Do not walk the page table directly, use get_user_pages ++ */ ++int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) ++{ ++ struct mm_struct *mm; ++ struct vm_area_struct *vma; ++ void *old_buf = buf; ++ ++ mm = get_task_mm(tsk); ++ if (!mm) ++ return 0; ++ ++ down_read(&mm->mmap_sem); ++ /* ignore errors, just check how much was successfully transferred */ ++ while (len) { ++ int bytes, ret, offset; ++ void *maddr; ++ struct page *page = NULL; ++ ++ ret = get_user_pages(tsk, mm, addr, 1, ++ write, 1, &page, &vma); ++ if (ret <= 0) { ++ /* ++ * Check if this is a VM_IO | VM_PFNMAP VMA, which ++ * we can access using slightly different code. ++ */ ++#ifdef CONFIG_HAVE_IOREMAP_PROT ++ vma = find_vma(mm, addr); ++ if (!vma) ++ break; ++ if (vma->vm_ops && vma->vm_ops->access) ++ ret = vma->vm_ops->access(vma, addr, buf, ++ len, write); ++ if (ret <= 0) ++#endif ++ break; ++ bytes = ret; ++ } else { ++ bytes = len; ++ offset = addr & (PAGE_SIZE-1); ++ if (bytes > PAGE_SIZE-offset) ++ bytes = PAGE_SIZE-offset; ++ ++ maddr = kmap(page); ++ if (write) { ++ copy_to_user_page(vma, page, addr, ++ maddr + offset, buf, bytes); ++ set_page_dirty_lock(page); ++ } else { ++ copy_from_user_page(vma, page, addr, ++ buf, maddr + offset, bytes); ++ } ++ kunmap(page); ++ page_cache_release(page); ++ } ++ len -= bytes; ++ buf += bytes; ++ addr += bytes; ++ } ++ up_read(&mm->mmap_sem); ++ mmput(mm); ++ ++ return buf - old_buf; ++} ++ ++/* ++ * Print the name of a VMA. 
++ */
++void print_vma_addr(char *prefix, unsigned long ip)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++
++ /*
++ * Do not print if we are in atomic
++ * contexts (in exception stacks, etc.):
++ */
++ if (preempt_count())
++ return;
++
++ down_read(&mm->mmap_sem);
++ vma = find_vma(mm, ip);
++ if (vma && vma->vm_file) {
++ struct file *f = vma->vm_file;
++ char *buf = (char *)__get_free_page(GFP_KERNEL);
++ if (buf) {
++ char *p, *s;
++
++ p = d_path(&f->f_path, buf, PAGE_SIZE);
++ if (IS_ERR(p))
++ p = "?";
++ s = strrchr(p, '/');
++ if (s)
++ p = s+1;
++ printk("%s%s[%lx+%lx]", prefix, p,
++ vma->vm_start,
++ vma->vm_end - vma->vm_start);
++ free_page((unsigned long)buf);
++ }
++ }
++ up_read(&current->mm->mmap_sem);
++}
+diff -Nurb linux-2.6.27-590/mm/slab.c linux-2.6.27-591/mm/slab.c
+--- linux-2.6.27-590/mm/slab.c 2010-02-01 19:42:07.000000000 -0500
++++ linux-2.6.27-591/mm/slab.c 2010-02-01 19:43:07.000000000 -0500
 @@ -110,6 +110,7 @@
 #include
 #include
@@ -743,7 +13187,7 @@ Index: linux-2.6.27.y/mm/slab.c
 #include
 #include
-@@ -248,6 +249,14 @@ struct slab_rcu {
 void *addr;
 };
@@ -758,7 +13202,7 @@ Index: linux-2.6.27.y/mm/slab.c
 /*
 * struct array_cache
 *
-@@ -3469,6 +3478,19 @@ __cache_alloc(struct kmem_cache *cachep,
 local_irq_restore(save_flags);
 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 prefetchw(objp);
@@ -778,7 +13222,7 @@ Index: linux-2.6.27.y/mm/slab.c
 if (unlikely((flags & __GFP_ZERO) && objp))
 memset(objp, 0, obj_size(cachep));
-@@ -3578,12 +3600,26 @@ free_done:
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released. Called with disabled ints.
 */
@@ -807,7 +13251,7 @@ Index: linux-2.6.27.y/mm/slab.c
 vx_slab_free(cachep);
 /*
-@@ -3714,6 +3750,7 @@ static __always_inline void *__do_kmallo
 void *caller)
 {
 struct kmem_cache *cachep;
@@ -815,7 +13259,7 @@ Index: linux-2.6.27.y/mm/slab.c
 /* If you want to save a few bytes .text space: replace
 * __ with kmem_.
-@@ -3741,10 +3778,17 @@ void *__kmalloc_track_caller(size_t size
 EXPORT_SYMBOL(__kmalloc_track_caller);
 #else
@@ -833,7 +13277,7 @@ Index: linux-2.6.27.y/mm/slab.c
 EXPORT_SYMBOL(__kmalloc);
 #endif
-@@ -3764,7 +3808,7 @@ void kmem_cache_free(struct kmem_cache *
 debug_check_no_locks_freed(objp, obj_size(cachep));
 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 debug_check_no_obj_freed(objp, obj_size(cachep));
@@ -842,7 +13286,7 @@ Index: linux-2.6.27.y/mm/slab.c
 local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
-@@ -3790,7 +3834,7 @@ void kfree(const void *objp)
 c = virt_to_cache(objp);
 debug_check_no_locks_freed(objp, obj_size(c));
 debug_check_no_obj_freed(objp, obj_size(c));