4 * Processor and Memory placement constraints for sets of tasks.
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004 Silicon Graphics, Inc.
9 * Portions derived from Patrick Mochel's sysfs code.
10 * sysfs is Copyright (c) 2001-3 Patrick Mochel
11 * Portions Copyright (c) 2004 Silicon Graphics, Inc.
13 * 2003-10-10 Written by Simon Derr <simon.derr@bull.net>
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson <pj@sgi.com>
17 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file COPYING in the main directory of the Linux
19 * distribution for more details.
22 #include <linux/config.h>
23 #include <linux/cpu.h>
24 #include <linux/cpumask.h>
25 #include <linux/cpuset.h>
26 #include <linux/err.h>
27 #include <linux/errno.h>
28 #include <linux/file.h>
30 #include <linux/init.h>
31 #include <linux/interrupt.h>
32 #include <linux/kernel.h>
33 #include <linux/kmod.h>
34 #include <linux/list.h>
35 #include <linux/mempolicy.h>
37 #include <linux/module.h>
38 #include <linux/mount.h>
39 #include <linux/namei.h>
40 #include <linux/pagemap.h>
41 #include <linux/proc_fs.h>
42 #include <linux/rcupdate.h>
43 #include <linux/sched.h>
44 #include <linux/seq_file.h>
45 #include <linux/slab.h>
46 #include <linux/smp_lock.h>
47 #include <linux/spinlock.h>
48 #include <linux/stat.h>
49 #include <linux/string.h>
50 #include <linux/time.h>
51 #include <linux/backing-dev.h>
52 #include <linux/sort.h>
53 #include <linux/vs_cvirt.h>
55 #include <asm/uaccess.h>
56 #include <asm/atomic.h>
57 #include <asm/semaphore.h>
59 #define CPUSET_SUPER_MAGIC 0x27e0eb
62 * Tracks how many cpusets are currently defined in the system.
63 * When there is only one cpuset (the root cpuset) we can
64 * short circuit some hooks.
66 int number_of_cpusets __read_mostly;
68 /* See "Frequency meter" comments, below. */
71 int cnt; /* unprocessed events count */
72 int val; /* most recent output value */
73 time_t time; /* clock (secs) when val computed */
74 spinlock_t lock; /* guards read or write of above */
78 unsigned long flags; /* "unsigned long" so bitops work */
79 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
80 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
83 * Count is atomic so can incr (fork) or decr (exit) without a lock.
85 atomic_t count; /* count tasks using this cpuset */
88 * We link our 'sibling' struct into our parent's 'children'.
89 * Our children link their 'sibling' into our 'children'.
91 struct list_head sibling; /* my parent's children */
92 struct list_head children; /* my children */
94 struct cpuset *parent; /* my parent */
95 struct dentry *dentry; /* cpuset fs entry */
98 * Copy of global cpuset_mems_generation as of the most
99 * recent time this cpuset changed its mems_allowed.
103 struct fmeter fmeter; /* memory_pressure filter */
106 /* bits in struct cpuset flags field */
115 /* convenient tests for these bits */
116 static inline int is_cpu_exclusive(const struct cpuset *cs)
118 return !!test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
121 static inline int is_mem_exclusive(const struct cpuset *cs)
123 return !!test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
126 static inline int is_removed(const struct cpuset *cs)
128 return !!test_bit(CS_REMOVED, &cs->flags);
131 static inline int notify_on_release(const struct cpuset *cs)
133 return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
136 static inline int is_memory_migrate(const struct cpuset *cs)
138 return !!test_bit(CS_MEMORY_MIGRATE, &cs->flags);
142 * Increment this atomic integer every time any cpuset changes its
143 * mems_allowed value. Users of cpusets can track this generation
144 * number, and avoid having to lock and reload mems_allowed unless
145 * the cpuset they're using changes generation.
147 * A single, global generation is needed because attach_task() could
148 * reattach a task to a different cpuset, which must not have its
149 * generation numbers aliased with those of that task's previous cpuset.
151 * Generations are needed for mems_allowed because one task cannot
152 * modify another's memory placement. So we must enable every task,
153 * on every visit to __alloc_pages(), to efficiently check whether
154 * its current->cpuset->mems_allowed has changed, requiring an update
155 * of its current->mems_allowed.
157 static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);
159 static struct cpuset top_cpuset = {
160 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
161 .cpus_allowed = CPU_MASK_ALL,
162 .mems_allowed = NODE_MASK_ALL,
163 .count = ATOMIC_INIT(0),
164 .sibling = LIST_HEAD_INIT(top_cpuset.sibling),
165 .children = LIST_HEAD_INIT(top_cpuset.children),
168 static struct vfsmount *cpuset_mount;
169 static struct super_block *cpuset_sb;
172 * We have two global cpuset semaphores below. They can nest.
173 * It is ok to first take manage_sem, then nest callback_sem. We also
174 * require taking task_lock() when dereferencing a task's cpuset pointer.
175 * See "The task_lock() exception", at the end of this comment.
177 * A task must hold both semaphores to modify cpusets. If a task
178 * holds manage_sem, then it blocks others wanting that semaphore,
179 * ensuring that it is the only task able to also acquire callback_sem
180 * and be able to modify cpusets. It can perform various checks on
181 * the cpuset structure first, knowing nothing will change. It can
182 * also allocate memory while just holding manage_sem. While it is
183 * performing these checks, various callback routines can briefly
184 * acquire callback_sem to query cpusets. Once it is ready to make
185 * the changes, it takes callback_sem, blocking everyone else.
187 * Calls to the kernel memory allocator can not be made while holding
188 * callback_sem, as that would risk double tripping on callback_sem
189 * from one of the callbacks into the cpuset code from within __alloc_pages().
192 * If a task is only holding callback_sem, then it has read-only access to cpusets.
195 * The task_struct fields mems_allowed and mems_generation may only
196 * be accessed in the context of that task, so require no locks.
198 * Any task can increment and decrement the count field without lock.
199 * So in general, code holding manage_sem or callback_sem can't rely
200 * on the count field not changing. However, if the count goes to
201 * zero, then only attach_task(), which holds both semaphores, can
202 * increment it again. Because a count of zero means that no tasks
203 * are currently attached, therefore there is no way a task attached
204 * to that cpuset can fork (the other way to increment the count).
205 * So code holding manage_sem or callback_sem can safely assume that
206 * if the count is zero, it will stay zero. Similarly, if a task
207 * holds manage_sem or callback_sem on a cpuset with zero count, it
208 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
209 * both of those semaphores.
211 * A possible optimization to improve parallelism would be to make
212 * callback_sem a R/W semaphore (rwsem), allowing the callback routines
213 * to proceed in parallel, with read access, until the holder of
214 * manage_sem needed to take this rwsem for exclusive write access
215 * and modify some cpusets.
217 * The cpuset_common_file_write handler for operations that modify
218 * the cpuset hierarchy holds manage_sem across the entire operation,
219 * single threading all such cpuset modifications across the system.
221 * The cpuset_common_file_read() handlers only hold callback_sem across
222 * small pieces of code, such as when reading out possibly multi-word
223 * cpumasks and nodemasks.
225 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
226 * (usually) take either semaphore. These are the two most performance
227 * critical pieces of code here. The exception occurs on cpuset_exit(),
228 * when a task in a notify_on_release cpuset exits. Then manage_sem
229 * is taken, and if the cpuset count is zero, a usermode call made
230 * to /sbin/cpuset_release_agent with the name of the cpuset (path
231 * relative to the root of cpuset file system) as the argument.
233 * A cpuset can only be deleted if both its 'count' of using tasks
234 * is zero, and its list of 'children' cpusets is empty. Since all
235 * tasks in the system use _some_ cpuset, and since there is always at
236 * least one task in the system (init, pid == 1), therefore, top_cpuset
237 * always has either children cpusets and/or using tasks. So we don't
238 * need a special hack to ensure that top_cpuset cannot be deleted.
240 * The above "Tale of Two Semaphores" would be complete, but for:
242 * The task_lock() exception
244 * The need for this exception arises from the action of attach_task(),
245 * which overwrites one task's cpuset pointer with another. It does
246 * so using both semaphores, however there are several performance
247 * critical places that need to reference task->cpuset without the
248 * expense of grabbing a system global semaphore. Therefore except as
249 * noted below, when dereferencing or, as in attach_task(), modifying
250 * a task's cpuset pointer we use task_lock(), which acts on a spinlock
251 * (task->alloc_lock) already in the task_struct routinely used for
254 * P.S. One more locking exception. RCU is used to guard the
255 * update of a task's cpuset pointer by attach_task() and the
256 * access of task->cpuset->mems_generation via that pointer in
257 * the routine cpuset_update_task_memory_state().
260 static DECLARE_MUTEX(manage_sem);
261 static DECLARE_MUTEX(callback_sem);
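
/*
 * Editorial sketch (not part of the original file): the nesting order
 * described in the comment above, with hypothetical helper names.  A
 * modifier takes manage_sem first and callback_sem second; a query-only
 * callback takes only callback_sem, and only briefly.
 */
static void example_modify_cpusets(void)
{
	down(&manage_sem);	/* exclude other modifiers; readers still run */
	/* ... validate the change, kmalloc() scratch space, etc. ... */
	down(&callback_sem);	/* now exclude the query callbacks too */
	/* ... apply the change to the cpuset structures ... */
	up(&callback_sem);
	up(&manage_sem);
}

static void example_query_cpusets(void)
{
	down(&callback_sem);	/* read-only access to cpusets */
	/* ... read cpus_allowed / mems_allowed ... */
	up(&callback_sem);
}
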
264 * A couple of forward declarations required, due to cyclic reference loop:
265 * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
266 * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
269 static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
270 static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);
272 static struct backing_dev_info cpuset_backing_dev_info = {
273 .ra_pages = 0, /* No readahead */
274 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
277 static struct inode *cpuset_new_inode(mode_t mode)
279 struct inode *inode = new_inode(cpuset_sb);
282 inode->i_mode = mode;
283 inode->i_uid = current->fsuid;
284 inode->i_gid = current->fsgid;
285 inode->i_blksize = PAGE_CACHE_SIZE;
287 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
288 inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
293 static void cpuset_diput(struct dentry *dentry, struct inode *inode)
295 /* is dentry a directory ? if so, kfree() associated cpuset */
296 if (S_ISDIR(inode->i_mode)) {
297 struct cpuset *cs = dentry->d_fsdata;
298 BUG_ON(!(is_removed(cs)));
304 static struct dentry_operations cpuset_dops = {
305 .d_iput = cpuset_diput,
308 static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
310 struct dentry *d = lookup_one_len(name, parent, strlen(name));
312 d->d_op = &cpuset_dops;
316 static void remove_dir(struct dentry *d)
318 struct dentry *parent = dget(d->d_parent);
321 simple_rmdir(parent->d_inode, d);
326 * NOTE : the dentry must have been dget()'ed
328 static void cpuset_d_remove_dir(struct dentry *dentry)
330 struct list_head *node;
332 spin_lock(&dcache_lock);
333 node = dentry->d_subdirs.next;
334 while (node != &dentry->d_subdirs) {
335 struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
339 spin_unlock(&dcache_lock);
341 simple_unlink(dentry->d_inode, d);
343 spin_lock(&dcache_lock);
345 node = dentry->d_subdirs.next;
347 list_del_init(&dentry->d_u.d_child);
348 spin_unlock(&dcache_lock);
352 static struct super_operations cpuset_ops = {
353 .statfs = simple_statfs,
354 .drop_inode = generic_delete_inode,
357 static int cpuset_fill_super(struct super_block *sb, void *unused_data,
363 sb->s_blocksize = PAGE_CACHE_SIZE;
364 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
365 sb->s_magic = CPUSET_SUPER_MAGIC;
366 sb->s_op = &cpuset_ops;
369 inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
371 inode->i_op = &simple_dir_inode_operations;
372 inode->i_fop = &simple_dir_operations;
373 /* directories start off with i_nlink == 2 (for "." entry) */
379 root = d_alloc_root(inode);
388 static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
389 int flags, const char *unused_dev_name,
392 return get_sb_single(fs_type, flags, data, cpuset_fill_super);
395 static struct file_system_type cpuset_fs_type = {
397 .get_sb = cpuset_get_sb,
398 .kill_sb = kill_litter_super,
403 * The files in the cpuset filesystem mostly have a very simple read/write
404 * handling; some common function will take care of it. Nevertheless some cases
405 * (reading the 'tasks' file) are special and therefore I define this structure for every kind of file.
409 * When reading from or writing to a file:
410 * - the cpuset to use is file->f_dentry->d_parent->d_fsdata
411 * - the 'cftype' of the file is file->f_dentry->d_fsdata
417 int (*open) (struct inode *inode, struct file *file);
418 ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
420 int (*write) (struct file *file, const char __user *buf, size_t nbytes,
422 int (*release) (struct inode *inode, struct file *file);
425 static inline struct cpuset *__d_cs(struct dentry *dentry)
427 return dentry->d_fsdata;
430 static inline struct cftype *__d_cft(struct dentry *dentry)
432 return dentry->d_fsdata;
436 * Call with manage_sem held. Writes path of cpuset into buf.
437 * Returns 0 on success, -errno on error.
440 static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
444 start = buf + buflen;
448 int len = cs->dentry->d_name.len;
449 if ((start -= len) < buf)
450 return -ENAMETOOLONG;
451 memcpy(start, cs->dentry->d_name.name, len);
458 return -ENAMETOOLONG;
461 memmove(buf, start, buf + buflen - start);
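
/*
 * Editorial sketch (not part of the original file): the cpuset_path()
 * fragment above is incomplete here, so this shows the same right-to-left
 * construction in isolation, using a hypothetical node type rather than
 * the kernel's dentry chain.  The path is written backwards from the end
 * of the buffer, then shifted to the front with memmove(), in the same
 * spirit as cpuset_path().
 */
struct demo_node { const char *name; struct demo_node *parent; };

static int demo_path(const struct demo_node *n, char *buf, int buflen)
{
	char *start = buf + buflen;

	if (buflen < 1)
		return -ENAMETOOLONG;
	*--start = '\0';
	for (; n; n = n->parent) {
		int len = strlen(n->name);

		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, n->name, len);
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
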
466 * Notify userspace when a cpuset is released, by running
467 * /sbin/cpuset_release_agent with the name of the cpuset (path
468 * relative to the root of cpuset file system) as the argument.
470 * Most likely, this user command will try to rmdir this cpuset.
472 * This races with the possibility that some other task will be
473 * attached to this cpuset before it is removed, or that some other
474 * user task will 'mkdir' a child cpuset of this cpuset. That's ok.
475 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
476 * unused, and this cpuset will be reprieved from its death sentence,
477 * to continue to serve a useful existence. Next time it's released,
478 * we will get notified again, if it still has 'notify_on_release' set.
480 * The final arg to call_usermodehelper() is 0, which means don't
481 * wait. The separate /sbin/cpuset_release_agent task is forked by
482 * call_usermodehelper(), then control in this thread returns here,
483 * without waiting for the release agent task. We don't bother to
484 * wait because the caller of this routine has no use for the exit
485 * status of the /sbin/cpuset_release_agent task, so no sense holding
486 * our caller up for that.
488 * When we had only one cpuset semaphore, we had to call this
489 * without holding it, to avoid deadlock when call_usermodehelper()
490 * allocated memory. With two locks, we could now call this while
491 * holding manage_sem, but we still don't, so as to minimize
492 * the time manage_sem is held.
495 static void cpuset_release_agent(const char *pathbuf)
497 char *argv[3], *envp[3];
504 argv[i++] = "/sbin/cpuset_release_agent";
505 argv[i++] = (char *)pathbuf;
509 /* minimal command environment */
510 envp[i++] = "HOME=/";
511 envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
514 call_usermodehelper(argv[0], argv, envp, 0);
519 * Either cs->count of using tasks transitioned to zero, or the
520 * cs->children list of child cpusets just became empty. If this
521 * cs is notify_on_release() and now both the user count is zero and
522 * the list of children is empty, prepare cpuset path in a kmalloc'd
523 * buffer, to be returned via ppathbuf, so that the caller can invoke
524 * cpuset_release_agent() with it later on, once manage_sem is dropped.
525 * Call here with manage_sem held.
527 * This check_for_release() routine is responsible for kmalloc'ing
528 * pathbuf. The above cpuset_release_agent() is responsible for
529 * kfree'ing pathbuf. The caller of these routines is responsible
530 * for providing a pathbuf pointer, initialized to NULL, then
531 * calling check_for_release() with manage_sem held and the address
532 * of the pathbuf pointer, then dropping manage_sem, then calling
533 * cpuset_release_agent() with pathbuf, as set by check_for_release().
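
/*
 * Editorial sketch (not part of the original file): the pathbuf protocol
 * described above, seen from a hypothetical caller.  check_for_release()
 * runs with manage_sem held; cpuset_release_agent() runs only after it is
 * dropped.  Calling cpuset_release_agent() with a still-NULL pathbuf is
 * harmless, as the exit path later in this file relies on.
 */
static void example_drop_cpuset_reference(struct cpuset *cs)
{
	char *pathbuf = NULL;

	down(&manage_sem);
	if (atomic_dec_and_test(&cs->count))
		check_for_release(cs, &pathbuf);
	up(&manage_sem);
	cpuset_release_agent(pathbuf);	/* kfree()s the buffer when non-NULL */
}
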
536 static void check_for_release(struct cpuset *cs, char **ppathbuf)
538 if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
539 list_empty(&cs->children)) {
542 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
545 if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
553 * Return in *pmask the portion of a cpuset's cpus_allowed that
554 * are online. If none are online, walk up the cpuset hierarchy
555 * until we find one that does have some online cpus. If we get
556 * all the way to the top and still haven't found any online cpus,
557 * return cpu_online_map. Or if passed a NULL cs from an exit'ing
558 * task, return cpu_online_map.
560 * One way or another, we guarantee to return some non-empty subset
563 * Call with callback_sem held.
566 static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
568 while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
571 cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
573 *pmask = cpu_online_map;
574 BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
578 * Return in *pmask the portion of a cpuset's mems_allowed that
579 * are online. If none are online, walk up the cpuset hierarchy
580 * until we find one that does have some online mems. If we get
581 * all the way to the top and still haven't found any online mems,
582 * return node_online_map.
584 * One way or another, we guarantee to return some non-empty subset
585 * of node_online_map.
587 * Call with callback_sem held.
590 static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
592 while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
595 nodes_and(*pmask, cs->mems_allowed, node_online_map);
597 *pmask = node_online_map;
598 BUG_ON(!nodes_intersects(*pmask, node_online_map));
602 * cpuset_update_task_memory_state - update task memory placement
604 * If the current task's cpuset's mems_allowed changed behind our
605 * backs, update current->mems_allowed, mems_generation and task NUMA
606 * mempolicy to the new value.
608 * Task mempolicy is updated by rebinding it relative to the
609 * current->cpuset if a task has its memory placement changed.
610 * Do not call this routine if in_interrupt().
612 * Call without callback_sem or task_lock() held. May be called
613 * with or without manage_sem held. Doesn't need task_lock to guard
614 * against another task changing a non-NULL cpuset pointer to NULL,
615 * as that is only done by a task on itself, and if the current task
616 * is here, it is not simultaneously in the exit code NULL'ing its
617 * cpuset pointer. This routine also might acquire callback_sem and
618 * current->mm->mmap_sem during call.
620 * Reading current->cpuset->mems_generation doesn't need task_lock
621 * to guard the current->cpuset dereference, because it is guarded
622 * from concurrent freeing of current->cpuset by attach_task(),
625 * The rcu_dereference() is technically probably not needed,
626 * as I don't actually mind if I see a new cpuset pointer but
627 * an old value of mems_generation. However this really only
628 * matters on alpha systems using cpusets heavily. If I dropped
629 * that rcu_dereference(), it would save them a memory barrier.
630 * For all other arch's, rcu_dereference is a no-op anyway, and for
631 * alpha systems not using cpusets, another planned optimization,
632 * avoiding the rcu critical section for tasks in the root cpuset
633 * which is statically allocated, so can't vanish, will make this
634 * irrelevant. Better to use RCU as intended, than to engage in
635 * some cute trick to save a memory barrier that is impossible to
636 * test, for alpha systems using cpusets heavily, which might not even exist.
639 * This routine is needed to update the per-task mems_allowed data,
640 * within the tasks context, when it is trying to allocate memory
641 * (in various mm/mempolicy.c routines) and notices that some other
642 * task has been modifying its cpuset.
645 void cpuset_update_task_memory_state(void)
647 int my_cpusets_mem_gen;
648 struct task_struct *tsk = current;
651 if (tsk->cpuset == &top_cpuset) {
652 /* Don't need rcu for top_cpuset. It's never freed. */
653 my_cpusets_mem_gen = top_cpuset.mems_generation;
656 cs = rcu_dereference(tsk->cpuset);
657 my_cpusets_mem_gen = cs->mems_generation;
661 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
664 cs = tsk->cpuset; /* Maybe changed when task not locked */
665 guarantee_online_mems(cs, &tsk->mems_allowed);
666 tsk->cpuset_mems_generation = cs->mems_generation;
669 mpol_rebind_task(tsk, &tsk->mems_allowed);
674 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
676 * One cpuset is a subset of another if all its allowed CPUs and
677 * Memory Nodes are a subset of the other, and its exclusive flags
678 * are only set if the other's are set. Call holding manage_sem.
681 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
683 return cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
684 nodes_subset(p->mems_allowed, q->mems_allowed) &&
685 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
686 is_mem_exclusive(p) <= is_mem_exclusive(q);
690 * validate_change() - Used to validate that any proposed cpuset change
691 * follows the structural rules for cpusets.
693 * If we replaced the flag and mask values of the current cpuset
694 * (cur) with those values in the trial cpuset (trial), would
695 * our various subset and exclusive rules still be valid? Presumes manage_sem held.
698 * 'cur' is the address of an actual, in-use cpuset. Operations
699 * such as list traversal that depend on the actual address of the
700 * cpuset in the list must use cur below, not trial.
702 * 'trial' is the address of bulk structure copy of cur, with
703 * perhaps one or more of the fields cpus_allowed, mems_allowed,
704 * or flags changed to new, trial values.
706 * Return 0 if valid, -errno if not.
709 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
711 struct cpuset *c, *par;
713 /* Each of our child cpusets must be a subset of us */
714 list_for_each_entry(c, &cur->children, sibling) {
715 if (!is_cpuset_subset(c, trial))
719 /* Remaining checks don't apply to root cpuset */
720 if ((par = cur->parent) == NULL)
723 /* We must be a subset of our parent cpuset */
724 if (!is_cpuset_subset(trial, par))
727 /* If either I or some sibling (!= me) is exclusive, we can't overlap */
728 list_for_each_entry(c, &par->children, sibling) {
729 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
731 cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
733 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
735 nodes_intersects(trial->mems_allowed, c->mems_allowed))
743 * For a given cpuset cur, partition the system as follows
744 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
745 * exclusive child cpusets
746 * b. All cpus in the current cpuset's cpus_allowed that are not part of any
747 * exclusive child cpusets
748 * Build these two partitions by calling partition_sched_domains
750 * Call with manage_sem held. May nest a call to the
751 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
754 static void update_cpu_domains(struct cpuset *cur)
756 struct cpuset *c, *par = cur->parent;
757 cpumask_t pspan, cspan;
759 if (par == NULL || cpus_empty(cur->cpus_allowed))
763 * Get all cpus from parent's cpus_allowed not part of exclusive
766 pspan = par->cpus_allowed;
767 list_for_each_entry(c, &par->children, sibling) {
768 if (is_cpu_exclusive(c))
769 cpus_andnot(pspan, pspan, c->cpus_allowed);
771 if (is_removed(cur) || !is_cpu_exclusive(cur)) {
772 cpus_or(pspan, pspan, cur->cpus_allowed);
773 if (cpus_equal(pspan, cur->cpus_allowed))
775 cspan = CPU_MASK_NONE;
777 if (cpus_empty(pspan))
779 cspan = cur->cpus_allowed;
781 * Get all cpus from current cpuset's cpus_allowed not part
782 * of exclusive children
784 list_for_each_entry(c, &cur->children, sibling) {
785 if (is_cpu_exclusive(c))
786 cpus_andnot(cspan, cspan, c->cpus_allowed);
791 partition_sched_domains(&pspan, &cspan);
792 unlock_cpu_hotplug();
796 * Call with manage_sem held. May take callback_sem during call.
799 static int update_cpumask(struct cpuset *cs, char *buf)
801 struct cpuset trialcs;
802 int retval, cpus_unchanged;
805 retval = cpulist_parse(buf, trialcs.cpus_allowed);
808 cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
809 if (cpus_empty(trialcs.cpus_allowed))
811 retval = validate_change(cs, &trialcs);
814 cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
816 cs->cpus_allowed = trialcs.cpus_allowed;
818 if (is_cpu_exclusive(cs) && !cpus_unchanged)
819 update_cpu_domains(cs);
824 * Handle user request to change the 'mems' memory placement
825 * of a cpuset. Needs to validate the request, update the
826 * cpuset's mems_allowed and mems_generation, and for each
827 * task in the cpuset, rebind any vma mempolicies and if
828 * the cpuset is marked 'memory_migrate', migrate the task's
829 * pages to the new memory.
831 * Call with manage_sem held. May take callback_sem during call.
832 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
833 * lock each such task's mm->mmap_sem, scan its vma's and rebind
834 * their mempolicies to the cpuset's new mems_allowed.
837 static int update_nodemask(struct cpuset *cs, char *buf)
839 struct cpuset trialcs;
841 struct task_struct *g, *p;
842 struct mm_struct **mmarray;
849 retval = nodelist_parse(buf, trialcs.mems_allowed);
852 nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
853 oldmem = cs->mems_allowed;
854 if (nodes_equal(oldmem, trialcs.mems_allowed)) {
855 retval = 0; /* Too easy - nothing to do */
858 if (nodes_empty(trialcs.mems_allowed)) {
862 retval = validate_change(cs, &trialcs);
867 cs->mems_allowed = trialcs.mems_allowed;
868 atomic_inc(&cpuset_mems_generation);
869 cs->mems_generation = atomic_read(&cpuset_mems_generation);
872 set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */
874 fudge = 10; /* spare mmarray[] slots */
875 fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
879 * Allocate mmarray[] to hold mm reference for each task
880 * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
881 * tasklist_lock. We could use GFP_ATOMIC, but with a
882 * few more lines of code, we can retry until we get a big
883 * enough mmarray[] w/o using GFP_ATOMIC.
886 ntasks = atomic_read(&cs->count); /* guess */
888 mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
891 write_lock_irq(&tasklist_lock); /* block fork */
892 if (atomic_read(&cs->count) <= ntasks)
893 break; /* got enough */
894 write_unlock_irq(&tasklist_lock); /* try again */
900 /* Load up mmarray[] with mm reference for each task in cpuset. */
901 do_each_thread(g, p) {
902 struct mm_struct *mm;
906 "Cpuset mempolicy rebind incomplete.\n");
915 } while_each_thread(g, p);
916 write_unlock_irq(&tasklist_lock);
919 * Now that we've dropped the tasklist spinlock, we can
920 * rebind the vma mempolicies of each mm in mmarray[] to their
921 * new cpuset, and release that mm. The mpol_rebind_mm()
922 * call takes mmap_sem, which we couldn't take while holding
923 * tasklist_lock. Forks can happen again now - the mpol_copy()
924 * cpuset_being_rebound check will catch such forks, and rebind
925 * their vma mempolicies too. Because we still hold the global
926 * cpuset manage_sem, we know that no other rebind effort will
927 * be contending for the global variable cpuset_being_rebound.
928 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
929 * is idempotent. Also migrate pages in each mm to new nodes.
931 migrate = is_memory_migrate(cs);
932 for (i = 0; i < n; i++) {
933 struct mm_struct *mm = mmarray[i];
935 mpol_rebind_mm(mm, &cs->mems_allowed);
937 do_migrate_pages(mm, &oldmem, &cs->mems_allowed,
943 /* We're done rebinding vma's to this cpusets new mems_allowed. */
945 set_cpuset_being_rebound(NULL);
952 * Call with manage_sem held.
955 static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
957 if (simple_strtoul(buf, NULL, 10) != 0)
958 cpuset_memory_pressure_enabled = 1;
960 cpuset_memory_pressure_enabled = 0;
965 * update_flag - read a 0 or a 1 in a file and update associated flag
966 * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
967 * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE)
968 * cs: the cpuset to update
969 * buf: the buffer where we read the 0 or 1
971 * Call with manage_sem held.
974 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
977 struct cpuset trialcs;
978 int err, cpu_exclusive_changed;
980 turning_on = (simple_strtoul(buf, NULL, 10) != 0);
984 set_bit(bit, &trialcs.flags);
986 clear_bit(bit, &trialcs.flags);
988 err = validate_change(cs, &trialcs);
991 cpu_exclusive_changed =
992 (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
995 set_bit(bit, &cs->flags);
997 clear_bit(bit, &cs->flags);
1000 if (cpu_exclusive_changed)
1001 update_cpu_domains(cs);
1006 * Frequency meter - How fast is some event occurring?
1008 * These routines manage a digitally filtered, constant time based,
1009 * event frequency meter. There are four routines:
1010 * fmeter_init() - initialize a frequency meter.
1011 * fmeter_markevent() - called each time the event happens.
1012 * fmeter_getrate() - returns the recent rate of such events.
1013 * fmeter_update() - internal routine used to update fmeter.
1015 * A common data structure is passed to each of these routines,
1016 * which is used to keep track of the state required to manage the
1017 * frequency meter and its digital filter.
1019 * The filter works on the number of events marked per unit time.
1020 * The filter is single-pole low-pass recursive (IIR). The time unit
1021 * is 1 second. Arithmetic is done using 32-bit integers scaled to
1022 * simulate 3 decimal digits of precision (multiplied by 1000).
1024 * With an FM_COEF of 933, and a time base of 1 second, the filter
1025 * has a half-life of 10 seconds, meaning that if the events quit
1026 * happening, then the rate returned from the fmeter_getrate()
1027 * will be cut in half each 10 seconds, until it converges to zero.
1029 * It is not worth doing a real infinitely recursive filter. If more
1030 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1031 * just compute FM_MAXTICKS ticks worth, by which point the level
1034 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1035 * arithmetic overflow in the fmeter_update() routine.
1037 * Given the simple 32 bit integer arithmetic used, this meter works
1038 * best for reporting rates between one per millisecond (msec) and
1039 * one per 32 (approx) seconds. At constant rates faster than one
1040 * per msec it maxes out at values just under 1,000,000. At constant
1041 * rates between one per msec, and one per second it will stabilize
1042 * to a value N*1000, where N is the rate of events per second.
1043 * At constant rates between one per second and one per 32 seconds,
1044 * it will be choppy, moving up on the seconds that have an event,
1045 * and then decaying until the next event. At rates slower than
1046 * about one in 32 seconds, it decays all the way back to zero between arriving events.
1050 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
1051 #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1052 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
1053 #define FM_SCALE 1000 /* faux fixed point scale */
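
/*
 * Editorial sketch (not part of the original file): a stand-alone,
 * user-space check of the constants above.  Each one-second tick scales
 * the level by FM_COEF/FM_SCALE = 0.933, and 0.933^10 is roughly 0.5, so
 * an idle meter loses about half its value every 10 ticks -- the
 * 10 second half-life described in the comment.
 */
#if 0	/* illustrative user-space code, never compiled into the kernel */
#include <stdio.h>

int main(void)
{
	int val = 1000000;	/* level just after a burst of events */
	int ticks;

	for (ticks = 1; ticks <= 30; ticks++) {
		val = (933 * val) / 1000;	/* one fmeter_update() tick */
		if (ticks % 10 == 0)
			printf("after %2d ticks: %d\n", ticks, val);
	}
	return 0;
}
#endif
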
1055 /* Initialize a frequency meter */
1056 static void fmeter_init(struct fmeter *fmp)
1061 spin_lock_init(&fmp->lock);
1064 /* Internal meter update - process cnt events and update value */
1065 static void fmeter_update(struct fmeter *fmp)
1067 time_t now = get_seconds();
1068 time_t ticks = now - fmp->time;
1073 ticks = min(FM_MAXTICKS, ticks);
1075 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1078 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1082 /* Process any previous ticks, then bump cnt by one (times scale). */
1083 static void fmeter_markevent(struct fmeter *fmp)
1085 spin_lock(&fmp->lock);
1087 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1088 spin_unlock(&fmp->lock);
1091 /* Process any previous ticks, then return current value. */
1092 static int fmeter_getrate(struct fmeter *fmp)
1096 spin_lock(&fmp->lock);
1099 spin_unlock(&fmp->lock);
1104 * Attach task specified by pid in 'pidbuf' to cpuset 'cs', possibly
1105 * writing the path of the old cpuset in 'ppathbuf' if it needs to be
1106 * notified on release.
1108 * Call holding manage_sem. May take callback_sem and task_lock of
1109 * the task 'pid' during call.
1112 static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
1115 struct task_struct *tsk;
1116 struct cpuset *oldcs;
1118 nodemask_t from, to;
1119 struct mm_struct *mm;
1121 if (sscanf(pidbuf, "%d", &pid) != 1)
1123 if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1127 read_lock(&tasklist_lock);
1129 tsk = find_task_by_pid(pid);
1130 if (!tsk || tsk->flags & PF_EXITING) {
1131 read_unlock(&tasklist_lock);
1135 get_task_struct(tsk);
1136 read_unlock(&tasklist_lock);
1138 if ((current->euid) && (current->euid != tsk->uid)
1139 && (current->euid != tsk->suid)) {
1140 put_task_struct(tsk);
1145 get_task_struct(tsk);
1148 down(&callback_sem);
1151 oldcs = tsk->cpuset;
1155 put_task_struct(tsk);
1158 atomic_inc(&cs->count);
1159 rcu_assign_pointer(tsk->cpuset, cs);
1162 guarantee_online_cpus(cs, &cpus);
1163 set_cpus_allowed(tsk, cpus);
1165 from = oldcs->mems_allowed;
1166 to = cs->mems_allowed;
1170 mm = get_task_mm(tsk);
1172 mpol_rebind_mm(mm, &to);
1176 if (is_memory_migrate(cs))
1177 do_migrate_pages(tsk->mm, &from, &to, MPOL_MF_MOVE_ALL);
1178 put_task_struct(tsk);
1180 if (atomic_dec_and_test(&oldcs->count))
1181 check_for_release(oldcs, ppathbuf);
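
/*
 * Editorial sketch (not part of the original file): what the attach path
 * above looks like from user space.  The /dev/cpuset mount point is only
 * a convention (an assumption here, not enforced by this file), and the
 * cpuset must be given cpus and mems before a pid can be attached.
 */
#if 0	/* illustrative user-space code, never compiled into the kernel */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	FILE *f;

	mkdir("/dev/cpuset/demo", 0755);		/* -> cpuset_mkdir() */

	f = fopen("/dev/cpuset/demo/cpus", "w");	/* -> update_cpumask() */
	if (f) { fprintf(f, "0-1\n"); fclose(f); }

	f = fopen("/dev/cpuset/demo/mems", "w");	/* -> update_nodemask() */
	if (f) { fprintf(f, "0\n"); fclose(f); }

	f = fopen("/dev/cpuset/demo/tasks", "w");	/* -> attach_task() */
	if (f) { fprintf(f, "%d\n", getpid()); fclose(f); }

	return 0;
}
#endif
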
1185 /* The various types of files and directories in a cpuset file system */
1190 FILE_MEMORY_MIGRATE,
1195 FILE_NOTIFY_ON_RELEASE,
1196 FILE_MEMORY_PRESSURE_ENABLED,
1197 FILE_MEMORY_PRESSURE,
1199 } cpuset_filetype_t;
1201 static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf,
1202 size_t nbytes, loff_t *unused_ppos)
1204 struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
1205 struct cftype *cft = __d_cft(file->f_dentry);
1206 cpuset_filetype_t type = cft->private;
1208 char *pathbuf = NULL;
1211 /* Crude upper limit on largest legitimate cpulist user might write. */
1212 if (nbytes > 100 + 6 * NR_CPUS)
1215 /* +1 for nul-terminator */
1216 if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
1219 if (copy_from_user(buffer, userbuf, nbytes)) {
1223 buffer[nbytes] = 0; /* nul-terminate */
1227 if (is_removed(cs)) {
1234 retval = update_cpumask(cs, buffer);
1237 retval = update_nodemask(cs, buffer);
1239 case FILE_CPU_EXCLUSIVE:
1240 retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
1242 case FILE_MEM_EXCLUSIVE:
1243 retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
1245 case FILE_NOTIFY_ON_RELEASE:
1246 retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
1248 case FILE_MEMORY_MIGRATE:
1249 retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
1251 case FILE_MEMORY_PRESSURE_ENABLED:
1252 retval = update_memory_pressure_enabled(cs, buffer);
1254 case FILE_MEMORY_PRESSURE:
1258 retval = attach_task(cs, buffer, &pathbuf);
1269 cpuset_release_agent(pathbuf);
1275 static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
1276 size_t nbytes, loff_t *ppos)
1279 struct cftype *cft = __d_cft(file->f_dentry);
1283 /* special function ? */
1285 retval = cft->write(file, buf, nbytes, ppos);
1287 retval = cpuset_common_file_write(file, buf, nbytes, ppos);
1293 * These ascii lists should be read in a single call, by using a user
1294 * buffer large enough to hold the entire map. If read in smaller
1295 * chunks, there is no guarantee of atomicity. Since the display format
1296 * used, list of ranges of sequential numbers, is variable length,
1297 * and since these maps can change value dynamically, one could read
1298 * gibberish by doing partial reads while a list was changing.
1299 * A single large read to a buffer that crosses a page boundary is
1300 * ok, because the result being copied to user land is not recomputed
1301 * across a page fault.
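
/*
 * Editorial sketch (not part of the original file): reading a mask file
 * with one large read(), per the atomicity note above.  Again assumes the
 * conventional /dev/cpuset mount point.
 */
#if 0	/* illustrative user-space code, never compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* big enough for the whole list */
	ssize_t n;
	int fd = open("/dev/cpuset/demo/cpus", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* one call, not a read loop */
	if (n >= 0) {
		buf[n] = '\0';
		printf("cpus: %s", buf);
	}
	close(fd);
	return 0;
}
#endif
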
1304 static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1308 down(&callback_sem);
1309 mask = cs->cpus_allowed;
1312 return cpulist_scnprintf(page, PAGE_SIZE, mask);
1315 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1319 down(&callback_sem);
1320 mask = cs->mems_allowed;
1323 return nodelist_scnprintf(page, PAGE_SIZE, mask);
1326 static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
1327 size_t nbytes, loff_t *ppos)
1329 struct cftype *cft = __d_cft(file->f_dentry);
1330 struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
1331 cpuset_filetype_t type = cft->private;
1336 if (!(page = (char *)__get_free_page(GFP_KERNEL)))
1343 s += cpuset_sprintf_cpulist(s, cs);
1346 s += cpuset_sprintf_memlist(s, cs);
1348 case FILE_CPU_EXCLUSIVE:
1349 *s++ = is_cpu_exclusive(cs) ? '1' : '0';
1351 case FILE_MEM_EXCLUSIVE:
1352 *s++ = is_mem_exclusive(cs) ? '1' : '0';
1354 case FILE_NOTIFY_ON_RELEASE:
1355 *s++ = notify_on_release(cs) ? '1' : '0';
1357 case FILE_MEMORY_MIGRATE:
1358 *s++ = is_memory_migrate(cs) ? '1' : '0';
1360 case FILE_MEMORY_PRESSURE_ENABLED:
1361 *s++ = cpuset_memory_pressure_enabled ? '1' : '0';
1363 case FILE_MEMORY_PRESSURE:
1364 s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
1372 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1374 free_page((unsigned long)page);
1378 static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
1382 struct cftype *cft = __d_cft(file->f_dentry);
1386 /* special function ? */
1388 retval = cft->read(file, buf, nbytes, ppos);
1390 retval = cpuset_common_file_read(file, buf, nbytes, ppos);
1395 static int cpuset_file_open(struct inode *inode, struct file *file)
1400 err = generic_file_open(inode, file);
1404 cft = __d_cft(file->f_dentry);
1408 err = cft->open(inode, file);
1415 static int cpuset_file_release(struct inode *inode, struct file *file)
1417 struct cftype *cft = __d_cft(file->f_dentry);
1419 return cft->release(inode, file);
1424 * cpuset_rename - Only allow simple rename of directories in place.
1426 static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
1427 struct inode *new_dir, struct dentry *new_dentry)
1429 if (!S_ISDIR(old_dentry->d_inode->i_mode))
1431 if (new_dentry->d_inode)
1433 if (old_dir != new_dir)
1435 return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1438 static struct file_operations cpuset_file_operations = {
1439 .read = cpuset_file_read,
1440 .write = cpuset_file_write,
1441 .llseek = generic_file_llseek,
1442 .open = cpuset_file_open,
1443 .release = cpuset_file_release,
1446 static struct inode_operations cpuset_dir_inode_operations = {
1447 .lookup = simple_lookup,
1448 .mkdir = cpuset_mkdir,
1449 .rmdir = cpuset_rmdir,
1450 .rename = cpuset_rename,
1453 static int cpuset_create_file(struct dentry *dentry, int mode)
1455 struct inode *inode;
1459 if (dentry->d_inode)
1462 inode = cpuset_new_inode(mode);
1466 if (S_ISDIR(mode)) {
1467 inode->i_op = &cpuset_dir_inode_operations;
1468 inode->i_fop = &simple_dir_operations;
1470 /* start off with i_nlink == 2 (for "." entry) */
1472 } else if (S_ISREG(mode)) {
1474 inode->i_fop = &cpuset_file_operations;
1477 d_instantiate(dentry, inode);
1478 dget(dentry); /* Extra count - pin the dentry in core */
1483 * cpuset_create_dir - create a directory for an object.
1484 * cs: the cpuset we create the directory for.
1485 * It must have a valid ->parent field
1486 * And we are going to fill its ->dentry field.
1487 * name: The name to give to the cpuset directory. Will be copied.
1488 * mode: mode to set on new directory.
1491 static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
1493 struct dentry *dentry = NULL;
1494 struct dentry *parent;
1497 parent = cs->parent->dentry;
1498 dentry = cpuset_get_dentry(parent, name);
1500 return PTR_ERR(dentry);
1501 error = cpuset_create_file(dentry, S_IFDIR | mode);
1503 dentry->d_fsdata = cs;
1504 parent->d_inode->i_nlink++;
1505 cs->dentry = dentry;
1512 static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
1514 struct dentry *dentry;
1517 mutex_lock(&dir->d_inode->i_mutex);
1518 dentry = cpuset_get_dentry(dir, cft->name);
1519 if (!IS_ERR(dentry)) {
1520 error = cpuset_create_file(dentry, 0644 | S_IFREG);
1522 dentry->d_fsdata = (void *)cft;
1525 error = PTR_ERR(dentry);
1526 mutex_unlock(&dir->d_inode->i_mutex);
1531 * Stuff for reading the 'tasks' file.
1533 * Reading this file can return large amounts of data if a cpuset has
1534 * *lots* of attached tasks. So it may need several calls to read(),
1535 * but we cannot guarantee that the information we produce is correct
1536 * unless we produce it entirely atomically.
1538 * Upon tasks file open(), a struct ctr_struct is allocated, that
1539 * will have a pointer to an array (also allocated here). The struct
1540 * ctr_struct * is stored in file->private_data. Its resources will
1541 * be freed by release() when the file is closed. The array is used
1542 * to sprintf the PIDs and then used by read().
1545 /* cpusets_tasks_read array */
1553 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
1554 * Return actual number of pids loaded. No need to task_lock(p)
1555 * when reading out p->cpuset, as we don't really care if it changes
1556 * on the next cycle, and we are not going to try to dereference it.
1558 static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
1561 struct task_struct *g, *p;
1563 read_lock(&tasklist_lock);
1565 do_each_thread(g, p) {
1566 if (p->cpuset == cs) {
1567 pidarray[n++] = p->pid;
1568 if (unlikely(n == npids))
1571 } while_each_thread(g, p);
1574 read_unlock(&tasklist_lock);
1578 static int cmppid(const void *a, const void *b)
1580 return *(pid_t *)a - *(pid_t *)b;
1584 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
1585 * decimal pids in 'buf'. Don't write more than 'sz' chars, but return
1586 * count 'cnt' of how many chars would be written if buf were large enough.
1588 static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
1593 for (i = 0; i < npids; i++)
1594 cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
1599 * Handle an open on 'tasks' file. Prepare a buffer listing the
1600 * process id's of tasks currently attached to the cpuset being opened.
1602 * Does not require any specific cpuset semaphores, and does not take any.
1604 static int cpuset_tasks_open(struct inode *unused, struct file *file)
1606 struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
1607 struct ctr_struct *ctr;
1612 if (!(file->f_mode & FMODE_READ))
1615 ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
1620 * If cpuset gets more users after we read count, we won't have
1621 * enough space - tough. This race is indistinguishable to the
1622 * caller from the case that the additional cpuset users didn't
1623 * show up until sometime later on.
1625 npids = atomic_read(&cs->count);
1626 pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
1630 npids = pid_array_load(pidarray, npids, cs);
1631 sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
1633 /* Call pid_array_to_buf() twice, first just to get bufsz */
1634 ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
1635 ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
1638 ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
1641 file->private_data = ctr;
1652 static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
1653 size_t nbytes, loff_t *ppos)
1655 struct ctr_struct *ctr = file->private_data;
1657 if (*ppos + nbytes > ctr->bufsz)
1658 nbytes = ctr->bufsz - *ppos;
1659 if (copy_to_user(buf, ctr->buf + *ppos, nbytes))
1665 static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
1667 struct ctr_struct *ctr;
1669 if (file->f_mode & FMODE_READ) {
1670 ctr = file->private_data;
1678 * for the common functions, 'private' gives the type of file
1681 static struct cftype cft_tasks = {
1683 .open = cpuset_tasks_open,
1684 .read = cpuset_tasks_read,
1685 .release = cpuset_tasks_release,
1686 .private = FILE_TASKLIST,
1689 static struct cftype cft_cpus = {
1691 .private = FILE_CPULIST,
1694 static struct cftype cft_mems = {
1696 .private = FILE_MEMLIST,
1699 static struct cftype cft_cpu_exclusive = {
1700 .name = "cpu_exclusive",
1701 .private = FILE_CPU_EXCLUSIVE,
1704 static struct cftype cft_mem_exclusive = {
1705 .name = "mem_exclusive",
1706 .private = FILE_MEM_EXCLUSIVE,
1709 static struct cftype cft_notify_on_release = {
1710 .name = "notify_on_release",
1711 .private = FILE_NOTIFY_ON_RELEASE,
1714 static struct cftype cft_memory_migrate = {
1715 .name = "memory_migrate",
1716 .private = FILE_MEMORY_MIGRATE,
1719 static struct cftype cft_memory_pressure_enabled = {
1720 .name = "memory_pressure_enabled",
1721 .private = FILE_MEMORY_PRESSURE_ENABLED,
1724 static struct cftype cft_memory_pressure = {
1725 .name = "memory_pressure",
1726 .private = FILE_MEMORY_PRESSURE,
1729 static int cpuset_populate_dir(struct dentry *cs_dentry)
1733 if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
1735 if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
1737 if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
1739 if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
1741 if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
1743 if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0)
1745 if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0)
1747 if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
1753 * cpuset_create - create a cpuset
1754 * parent: cpuset that will be parent of the new cpuset.
1755 * name: name of the new cpuset. Will be strcpy'ed.
1756 * mode: mode to set on new inode
1758 * Must be called with the semaphore on the parent inode held
1761 static long cpuset_create(struct cpuset *parent, const char *name, int mode)
1766 cs = kmalloc(sizeof(*cs), GFP_KERNEL);
1771 cpuset_update_task_memory_state();
1773 if (notify_on_release(parent))
1774 set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
1775 cs->cpus_allowed = CPU_MASK_NONE;
1776 cs->mems_allowed = NODE_MASK_NONE;
1777 atomic_set(&cs->count, 0);
1778 INIT_LIST_HEAD(&cs->sibling);
1779 INIT_LIST_HEAD(&cs->children);
1780 atomic_inc(&cpuset_mems_generation);
1781 cs->mems_generation = atomic_read(&cpuset_mems_generation);
1782 fmeter_init(&cs->fmeter);
1784 cs->parent = parent;
1786 down(&callback_sem);
1787 list_add(&cs->sibling, &cs->parent->children);
1788 number_of_cpusets++;
1791 err = cpuset_create_dir(cs, name, mode);
1796 * Release manage_sem before cpuset_populate_dir() because it
1797 * will down() this new directory's i_mutex and if we race with
1798 * another mkdir, we might deadlock.
1802 err = cpuset_populate_dir(cs->dentry);
1803 /* If err < 0, we have a half-filled directory - oh well ;) */
1806 list_del(&cs->sibling);
1812 static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1814 struct cpuset *c_parent = dentry->d_parent->d_fsdata;
1816 /* the vfs holds inode->i_mutex already */
1817 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
1820 static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1822 struct cpuset *cs = dentry->d_fsdata;
1824 struct cpuset *parent;
1825 char *pathbuf = NULL;
1827 /* the vfs holds both inode->i_mutex already */
1830 cpuset_update_task_memory_state();
1831 if (atomic_read(&cs->count) > 0) {
1835 if (!list_empty(&cs->children)) {
1839 parent = cs->parent;
1840 down(&callback_sem);
1841 set_bit(CS_REMOVED, &cs->flags);
1842 if (is_cpu_exclusive(cs))
1843 update_cpu_domains(cs);
1844 list_del(&cs->sibling); /* delete my sibling from parent->children */
1845 spin_lock(&cs->dentry->d_lock);
1846 d = dget(cs->dentry);
1848 spin_unlock(&d->d_lock);
1849 cpuset_d_remove_dir(d);
1851 number_of_cpusets--;
1853 if (list_empty(&parent->children))
1854 check_for_release(parent, &pathbuf);
1856 cpuset_release_agent(pathbuf);
1861 * cpuset_init_early - just enough so that the calls to
1862 * cpuset_update_task_memory_state() in early init code are harmless.
1866 int __init cpuset_init_early(void)
1868 struct task_struct *tsk = current;
1870 tsk->cpuset = &top_cpuset;
1871 tsk->cpuset->mems_generation = atomic_read(&cpuset_mems_generation);
1876 * cpuset_init - initialize cpusets at system boot
1878 * Description: Initialize top_cpuset and the cpuset internal file system,
1881 int __init cpuset_init(void)
1883 struct dentry *root;
1886 top_cpuset.cpus_allowed = CPU_MASK_ALL;
1887 top_cpuset.mems_allowed = NODE_MASK_ALL;
1889 fmeter_init(&top_cpuset.fmeter);
1890 atomic_inc(&cpuset_mems_generation);
1891 top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation);
1893 init_task.cpuset = &top_cpuset;
1895 err = register_filesystem(&cpuset_fs_type);
1898 cpuset_mount = kern_mount(&cpuset_fs_type);
1899 if (IS_ERR(cpuset_mount)) {
1900 printk(KERN_ERR "cpuset: could not mount!\n");
1901 err = PTR_ERR(cpuset_mount);
1902 cpuset_mount = NULL;
1905 root = cpuset_mount->mnt_sb->s_root;
1906 root->d_fsdata = &top_cpuset;
1907 root->d_inode->i_nlink++;
1908 top_cpuset.dentry = root;
1909 root->d_inode->i_op = &cpuset_dir_inode_operations;
1910 number_of_cpusets = 1;
1911 err = cpuset_populate_dir(root);
1912 /* memory_pressure_enabled is in root cpuset only */
1914 err = cpuset_add_file(root, &cft_memory_pressure_enabled);
1920 * cpuset_init_smp - initialize cpus_allowed
1922 * Description: Finish top cpuset after cpu, node maps are initialized
1925 void __init cpuset_init_smp(void)
1927 top_cpuset.cpus_allowed = cpu_online_map;
1928 top_cpuset.mems_allowed = node_online_map;
1932 * cpuset_fork - attach newly forked task to its parent's cpuset.
1933 * @tsk: pointer to task_struct of forking parent process.
1935 * Description: A task inherits its parent's cpuset at fork().
1937 * A pointer to the shared cpuset was automatically copied in fork.c
1938 * by dup_task_struct(). However, we ignore that copy, since it was
1939 * not made under the protection of task_lock(), so might no longer be
1940 * a valid cpuset pointer. attach_task() might have already changed
1941 * current->cpuset, allowing the previously referenced cpuset to
1942 * be removed and freed. Instead, we task_lock(current) and copy
1943 * its present value of current->cpuset for our freshly forked child.
1945 * At the point that cpuset_fork() is called, 'current' is the parent
1946 * task, and the passed argument 'child' points to the child task.
1949 void cpuset_fork(struct task_struct *child)
1952 child->cpuset = current->cpuset;
1953 atomic_inc(&child->cpuset->count);
1954 task_unlock(current);
1958 * cpuset_exit - detach cpuset from exiting task
1959 * @tsk: pointer to task_struct of exiting process
1961 * Description: Detach cpuset from @tsk and release it.
1963 * Note that cpusets marked notify_on_release force every task in
1964 * them to take the global manage_sem semaphore when exiting.
1965 * This could impact scaling on very large systems. Be reluctant to
1966 * use notify_on_release cpusets where very high task exit scaling
1967 * is required on large systems.
1969 * Don't even think about dereferencing 'cs' after the cpuset use count
1970 * goes to zero, except inside a critical section guarded by manage_sem
1971 * or callback_sem. Otherwise a zero cpuset use count is a license to
1972 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
1974 * This routine has to take manage_sem, not callback_sem, because
1975 * it is holding that semaphore while calling check_for_release(),
1976 * which calls kmalloc(), so can't be called holding callback_sem.
1978 * We don't need to task_lock() this reference to tsk->cpuset,
1979 * because tsk is already marked PF_EXITING, so attach_task() won't
1980 * mess with it, or task is a failed fork, never visible to attach_task.
1984 * Set the exiting tasks cpuset to the root cpuset (top_cpuset).
1986 * Don't leave a task unable to allocate memory, as that is an
1987 * accident waiting to happen should someone add a callout in
1988 * do_exit() after the cpuset_exit() call that might allocate.
1989 * If a task tries to allocate memory with an invalid cpuset,
1990 * it will oops in cpuset_update_task_memory_state().
1992 * We call cpuset_exit() while the task is still competent to
1993 * handle notify_on_release(), then leave the task attached to
1994 * the root cpuset (top_cpuset) for the remainder of its exit.
1996 * To do this properly, we would increment the reference count on
1997 * top_cpuset, and near the very end of the kernel/exit.c do_exit()
1998 * code we would add a second cpuset function call, to drop that
1999 * reference. This would just create an unnecessary hot spot on
2000 * the top_cpuset reference count, to no avail.
2002 * Normally, holding a reference to a cpuset without bumping its
2003 * count is unsafe. The cpuset could go away, or someone could
2004 * attach us to a different cpuset, decrementing the count on
2005 * the first cpuset that we never incremented. But in this case,
2006 * top_cpuset isn't going away, and either task has PF_EXITING set,
2007 * which wards off any attach_task() attempts, or task is a failed
2008 * fork, never visible to attach_task.
2010 * Another way to do this would be to set the cpuset pointer
2011 * to NULL here, and check in cpuset_update_task_memory_state()
2012 * for a NULL pointer. This hack avoids that NULL check, for no
2013 * cost (other than this way too long comment ;).
2016 void cpuset_exit(struct task_struct *tsk)
2021 tsk->cpuset = &top_cpuset; /* Hack - see comment above */
2023 if (notify_on_release(cs)) {
2024 char *pathbuf = NULL;
2027 if (atomic_dec_and_test(&cs->count))
2028 check_for_release(cs, &pathbuf);
2030 cpuset_release_agent(pathbuf);
2032 atomic_dec(&cs->count);
2037 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
2038 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2040 * Description: Returns the cpumask_t cpus_allowed of the cpuset
2041 * attached to the specified @tsk. Guaranteed to return some non-empty
2042 * subset of cpu_online_map, even if this means going outside the
2046 cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
2050 down(&callback_sem);
2052 guarantee_online_cpus(tsk->cpuset, &mask);
2059 void cpuset_init_current_mems_allowed(void)
2061 current->mems_allowed = NODE_MASK_ALL;
2065 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
2066 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2068 * Description: Returns the nodemask_t mems_allowed of the cpuset
2069 * attached to the specified @tsk. Guaranteed to return some non-empty
2070 * subset of node_online_map, even if this means going outside the
2074 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2078 down(&callback_sem);
2080 guarantee_online_mems(tsk->cpuset, &mask);
2088 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
2089 * @zl: the zonelist to be checked
2091 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
2093 int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
2097 for (i = 0; zl->zones[i]; i++) {
2098 int nid = zl->zones[i]->zone_pgdat->node_id;
2100 if (node_isset(nid, current->mems_allowed))
2107 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
2108 * ancestor to the specified cpuset. Call holding callback_sem.
2109 * If no ancestor is mem_exclusive (an unusual configuration), then
2110 * returns the root cpuset.
2112 static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
2114 while (!is_mem_exclusive(cs) && cs->parent)
2120 * cpuset_zone_allowed - Can we allocate memory on zone z's memory node?
2121 * @z: is this zone on an allowed node?
2122 * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL)
2124 * If we're in interrupt, yes, we can always allocate. If zone
2125 * z's node is in our tasks mems_allowed, yes. If it's not a
2126 * __GFP_HARDWALL request and this zone's node is in the nearest
2127 * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
2130 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2131 * and do not allow allocations outside the current tasks cpuset.
2132 * GFP_KERNEL allocations are not so marked, so can escape to the
2133 * nearest mem_exclusive ancestor cpuset.
2135 * Scanning up parent cpusets requires callback_sem. The __alloc_pages()
2136 * routine only calls here with __GFP_HARDWALL bit _not_ set if
2137 * it's a GFP_KERNEL allocation, and all nodes in the current tasks
2138 * mems_allowed came up empty on the first pass over the zonelist.
2139 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
2140 * short of memory, might require taking the callback_sem semaphore.
2142 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
2143 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
2144 * hardwall cpusets - no allocation on a node outside the cpuset is
2145 * allowed (unless in interrupt, of course).
2147 * The second loop doesn't even call here for GFP_ATOMIC requests
2148 * (if the __alloc_pages() local variable 'wait' is set). That check
2149 * and the checks below have the combined effect in the second loop of
2150 * the __alloc_pages() routine that:
2151 * in_interrupt - any node ok (current task context irrelevant)
2152 * GFP_ATOMIC - any node ok
2153 * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok
2154 * GFP_USER - only nodes in current tasks mems allowed ok.
2157 int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
2159 int node; /* node that zone z is on */
2160 const struct cpuset *cs; /* current cpuset ancestors */
2161 int allowed = 1; /* is allocation in zone z allowed? */
2165 node = z->zone_pgdat->node_id;
2166 if (node_isset(node, current->mems_allowed))
2168 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
2171 if (current->flags & PF_EXITING) /* Let dying task have memory */
2174 /* Not hardwall and node outside mems_allowed: scan up cpusets */
2175 down(&callback_sem);
2178 cs = nearest_exclusive_ancestor(current->cpuset);
2179 task_unlock(current);
2181 allowed = node_isset(node, cs->mems_allowed);
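
/*
 * Editorial sketch (not part of the original file): how an allocator-side
 * caller might consult this hook per zone, following the table above.
 * Assumes the NULL-terminated zonelist layout of this kernel generation
 * and the cpuset_zone_allowed() wrapper from <linux/cpuset.h>; this is
 * not the actual __alloc_pages() code.
 */
static struct zone *example_pick_zone(struct zonelist *zl, gfp_t gfp_mask)
{
	int i;

	for (i = 0; zl->zones[i] != NULL; i++) {
		struct zone *z = zl->zones[i];

		if (!cpuset_zone_allowed(z, gfp_mask))
			continue;	/* node forbidden to current task */
		/* ... a real caller would try to allocate from z here ... */
		return z;		/* first allowed zone */
	}
	return NULL;
}
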
2187 * cpuset_lock - lock out any changes to cpuset structures
2189 * The out of memory (oom) code needs to lock down cpusets
2190 * from being changed while it scans the tasklist looking for a
2191 * task in an overlapping cpuset. Expose callback_sem via this
2192 * cpuset_lock() routine, so the oom code can lock it, before
2193 * locking the task list. The tasklist_lock is a spinlock, so
2194 * must be taken inside callback_sem.
2197 void cpuset_lock(void)
2199 down(&callback_sem);
2203 * cpuset_unlock - release lock on cpuset changes
2205 * Undo the lock taken in a previous cpuset_lock() call.
2208 void cpuset_unlock(void)
2214 * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
2215 * @p: pointer to task_struct of some other task.
2217 * Description: Return true if the nearest mem_exclusive ancestor
2218 * cpusets of tasks @p and current overlap. Used by oom killer to
2219 * determine if task @p's memory usage might impact the memory
2220 * available to the current task.
2222 * Call while holding callback_sem.
2225 int cpuset_excl_nodes_overlap(const struct task_struct *p)
2227 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
2228 int overlap = 0; /* do cpusets overlap? */
2231 if (current->flags & PF_EXITING) {
2232 task_unlock(current);
2235 cs1 = nearest_exclusive_ancestor(current->cpuset);
2236 task_unlock(current);
2238 task_lock((struct task_struct *)p);
2239 if (p->flags & PF_EXITING) {
2240 task_unlock((struct task_struct *)p);
2243 cs2 = nearest_exclusive_ancestor(p->cpuset);
2244 task_unlock((struct task_struct *)p);
2246 overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
2252 * Collection of memory_pressure is suppressed unless
2253 * this flag is enabled by writing "1" to the special
2254 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2257 int cpuset_memory_pressure_enabled __read_mostly;
2260 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2262 * Keep a running average of the rate of synchronous (direct)
2263 * page reclaim efforts initiated by tasks in each cpuset.
2265 * This represents the rate at which some task in the cpuset
2266 * ran low on memory on all nodes it was allowed to use, and
2267 * had to enter the kernel's page reclaim code in an effort to
2268 * create more free memory by tossing clean pages or swapping
2269 * or writing dirty pages.
2271 * Display to user space in the per-cpuset read-only file
2272 * "memory_pressure". Value displayed is an integer
2273 * representing the recent rate of entry into the synchronous
2274 * (direct) page reclaim by any task attached to the cpuset.
2277 void __cpuset_memory_pressure_bump(void)
2282 cs = current->cpuset;
2283 fmeter_markevent(&cs->fmeter);
2284 task_unlock(current);
2288 * proc_cpuset_show()
2289 * - Print tasks cpuset path into seq_file.
2290 * - Used for /proc/<pid>/cpuset.
2291 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2292 * doesn't really matter if tsk->cpuset changes after we read it,
2293 * and we take manage_sem, keeping attach_task() from changing it
2297 static int proc_cpuset_show(struct seq_file *m, void *v)
2300 struct task_struct *tsk;
2304 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2316 retval = cpuset_path(cs, buf, PAGE_SIZE);
2327 static int cpuset_open(struct inode *inode, struct file *file)
2329 struct task_struct *tsk = PROC_I(inode)->task;
2330 return single_open(file, proc_cpuset_show, tsk);
2333 struct file_operations proc_cpuset_operations = {
2334 .open = cpuset_open,
2336 .llseek = seq_lseek,
2337 .release = single_release,
2340 /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
2341 char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
2343 buffer += sprintf(buffer, "Cpus_allowed:\t");
2344 buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
2345 buffer += sprintf(buffer, "\n");
2346 buffer += sprintf(buffer, "Mems_allowed:\t");
2347 buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
2348 buffer += sprintf(buffer, "\n");