4 * Copyright (C) 1998,2000 Rik van Riel
5 * Thanks go out to Claus Fischer for some serious inspiration and
6 * for goading me into coding this file...
8 * The routines in this file are used to kill a process when
9 * we're seriously out of memory. This gets called from __alloc_pages()
10 * in mm/page_alloc.c when we really run out of memory.
12 * Since we won't call these routines often (on a well-configured
13 * machine) this file will double as a 'coding guide' and a signpost
14 * for newbie kernel hackers. It features several pointers to major
15 * kernel subsystems and hints as to where to find out what things do.
18 #include <linux/oom.h>
20 #include <linux/sched.h>
21 #include <linux/swap.h>
22 #include <linux/timex.h>
23 #include <linux/jiffies.h>
24 #include <linux/cpuset.h>
25 #include <linux/module.h>
26 #include <linux/notifier.h>
27 #include <linux/vs_memory.h>
29 int sysctl_panic_on_oom;
33 * badness - calculate a numeric value for how bad this task has been
34 * @p: task struct of the task we are scoring
35 * @uptime: current uptime in seconds
37 * The formula used is relatively simple and documented inline in the
38 * function. The main rationale is that we want to select a good task
39 * to kill when we run out of memory.
41 * Good in this context means that:
42 * 1) we lose the minimum amount of work done
43 * 2) we recover a large amount of memory
44 * 3) we don't kill anything innocent of eating tons of memory
45 * 4) we want to kill the minimum number of processes (one)
46 * 5) we try to kill the process the user expects us to kill; this
47 * algorithm has been meticulously tuned to meet the principle
48 * of least surprise ... (be careful when you change it)
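/*
 * Illustrative summary of the scoring below: the score starts at the task's
 * total_vm, gains half the total_vm of each child that has its own mm, is
 * reduced by factors derived from the task's CPU time and run time, and is
 * then scaled for nice level, superuser/raw-I/O capabilities, cpuset
 * locality and the per-task oomkilladj value.
 */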
51 unsigned long badness(struct task_struct *p, unsigned long uptime)
53 unsigned long points, cpu_time, run_time, s;
55 struct task_struct *child;
65 * The memory size of the process is the basis for the badness.
67 points = mm->total_vm;
70 * Add points for the badness of the task's vserver context.
73 points += vx_badness(p, mm);
76 * After this unlock we can no longer dereference local variable `mm'
81 * swapoff can easily use up all memory, so kill those first.
83 if (p->flags & PF_SWAPOFF)
87 * Processes which fork a lot of child processes are likely
88 * a good choice. We add half the vmsize of the children if they
89 * have their own mm. This prevents forking servers from flooding the
90 * machine with an endless number of children. In case a single
91 * child is eating the vast majority of memory, adding only half
92 * to the parent's score will make the child our kill candidate of choice.
94 list_for_each_entry(child, &p->children, sibling) {
96 if (child->mm != mm && child->mm)
97 points += child->mm->total_vm/2 + 1;
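/*
 * Worked example (made-up numbers): a parent with a total_vm of 10000 pages
 * and three children of 50000 pages each scores
 * 10000 + 3 * (50000/2 + 1) = 85003 points from memory size alone, so a
 * forking server is also charged for the memory held by its children.
 */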
102 * CPU time is in tens of seconds and run time is in thousands
103 * of seconds. There is no particular reason for this other than
104 * that it turned out to work very well in practice.
106 cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
109 if (uptime >= p->start_time.tv_sec)
110 run_time = (uptime - p->start_time.tv_sec) >> 10;
114 s = int_sqrt(cpu_time);
117 s = int_sqrt(int_sqrt(run_time));
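/*
 * Worked example (made-up numbers): a task that has consumed ~1000 CPU
 * seconds has cpu_time ~ 100 in the "tens of seconds" units above, so the
 * division that follows shrinks its score by int_sqrt(100) = 10; a task
 * that has been running for ~12 days has run_time ~ 1000 in "thousands of
 * seconds", shrinking it by a further int_sqrt(int_sqrt(1000)) = 5.
 * Long-running, CPU-heavy tasks therefore look less attractive to kill.
 */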
122 * Niced processes are most likely less important, so double
123 * their badness points.
125 if (task_nice(p) > 0)
129 * Superuser processes are usually more important, so we make it
130 * less likely that we kill those.
132 if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
133 p->uid == 0 || p->euid == 0)
137 * We don't want to kill a process with direct hardware access.
138 * Not only could that mess up the hardware, but usually users
139 * tend to only have this flag set on applications they think of as important.
142 if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
146 * If p's nodes don't overlap ours, it may still help to kill p
147 * because p may have allocated or otherwise mapped memory on
148 * this node before. However it will be less likely.
150 if (!cpuset_excl_nodes_overlap(p))
154 * Adjust the score by oomkilladj.
157 if (p->oomkilladj > 0)
158 points <<= p->oomkilladj;
160 points >>= -(p->oomkilladj);
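/*
 * Example: oomkilladj = 4 multiplies the score by 16 (points <<= 4), while
 * oomkilladj = -4 divides it by 16.  The value is normally set from user
 * space via /proc/<pid>/oom_adj; OOM_DISABLE exempts the task entirely
 * (see select_bad_process() and oom_kill_task() below).
 */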
164 printk(KERN_DEBUG "OOMkill: task %d:#%u (%s) got %lu points\n",
165 p->pid, p->xid, p->comm, points);
170 #if defined(CONFIG_OOM_PANIC) && defined(CONFIG_OOM_KILLER)
171 #warning Only define CONFIG_OOM_PANIC or CONFIG_OOM_KILLER; not both
174 #ifdef CONFIG_OOM_KILLER
176 * Types of limitations to the nodes from which allocations may occur
178 #define CONSTRAINT_NONE 1
179 #define CONSTRAINT_MEMORY_POLICY 2
180 #define CONSTRAINT_CPUSET 3
183 * Determine the type of allocation constraint.
185 static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
193 /* node has memory? */
194 for_each_online_node(node)
195 if (NODE_DATA(node)->node_present_pages)
196 node_set(node, nodes);
198 for (z = zonelist->zones; *z; z++)
199 if (cpuset_zone_allowed_softwall(*z, gfp_mask))
200 node_clear(zone_to_nid(*z), nodes);
202 return CONSTRAINT_CPUSET;
204 if (!nodes_empty(nodes))
205 return CONSTRAINT_MEMORY_POLICY;
208 return CONSTRAINT_NONE;
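/*
 * For example, a zonelist that skips online nodes which do have memory
 * (typically an MPOL_BIND mempolicy) yields CONSTRAINT_MEMORY_POLICY, while
 * a zone disallowed by the current cpuset yields CONSTRAINT_CPUSET;
 * anything else is CONSTRAINT_NONE.  out_of_memory() below kills only the
 * calling task in the first two cases.
 */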
212 * Simple selection loop. We choose the process with the highest
213 * number of 'points'. We expect the caller to hold tasklist_lock.
215 * (not docbooked, we don't want this one cluttering up the manual)
217 static struct task_struct *select_bad_process(unsigned long *ppoints)
219 struct task_struct *g, *p;
220 struct task_struct *chosen = NULL;
221 struct timespec uptime;
224 do_posix_clock_monotonic_gettime(&uptime);
225 do_each_thread(g, p) {
226 unsigned long points;
229 * skip kernel threads and tasks which have already released their mm.
234 /* skip the init task */
239 * This task already has access to memory reserves and is
240 * being killed. Don't allow any other task access to the memory reserves.
243 * Note: this may have a chance of deadlock if it gets
244 * blocked waiting for another task which itself is waiting
245 * for memory. Is there a better alternative?
247 if (test_tsk_thread_flag(p, TIF_MEMDIE))
248 return ERR_PTR(-1UL);
251 * This is in the process of releasing memory so wait for it
252 * to finish before killing some other task by mistake.
254 * However, if p is the current task, we allow the 'kill' to
255 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
256 * which will allow it to gain access to memory reserves in
257 * the process of exiting and releasing its resources.
258 * Otherwise we could get an easy OOM deadlock.
260 if (p->flags & PF_EXITING) {
262 return ERR_PTR(-1UL);
265 *ppoints = ULONG_MAX;
268 if (p->oomkilladj == OOM_DISABLE)
271 points = badness(p, uptime.tv_sec);
272 if (points > *ppoints || !chosen) {
276 } while_each_thread(g, p);
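/*
 * Return convention used by out_of_memory(): a task pointer names a victim
 * candidate, NULL means no killable task was found, and ERR_PTR(-1UL) means
 * a kill is already in progress (or the caller itself is exiting), so the
 * allocation should simply be retried.
 */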
282 * Send SIGKILL to the selected process irrespective of the CAP_SYS_RAWIO
283 * flag, though it's unlikely that we select a process with CAP_SYS_RAWIO set.
286 static void __oom_kill_task(struct task_struct *p, int verbose)
290 printk(KERN_WARNING "tried to kill init!\n");
296 printk(KERN_WARNING "tried to kill an mm-less task!\n");
301 printk(KERN_ERR "Killed process %d:#%u (%s)\n",
302 p->pid, p->xid, p->comm);
305 * We give our sacrificial lamb high priority and access to
306 * all the memory it needs. That way it should be able to
307 * exit() and clear out its resources quickly...
310 set_tsk_thread_flag(p, TIF_MEMDIE);
312 force_sig(SIGKILL, p);
315 static int oom_kill_task(struct task_struct *p)
317 struct mm_struct *mm;
318 struct task_struct *g, *q;
322 /* WARNING: mm may not be dereferenced since we did not obtain its
323 * value from get_task_mm(p). This is OK since all we need to do is
324 * compare mm to q->mm below.
326 * Furthermore, even if mm contains a non-NULL value, p->mm may
327 * change to NULL at any time since we do not hold task_lock(p).
328 * However, this is of no concern to us.
335 * Don't kill the process if any of its threads are set to OOM_DISABLE
337 do_each_thread(g, q) {
338 if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
340 } while_each_thread(g, q);
342 __oom_kill_task(p, 1);
345 * kill all processes that share the ->mm (i.e. all threads),
346 * but are in a different thread group. Don't let them have access
347 * to memory reserves though, otherwise we might deplete all memory.
349 do_each_thread(g, q) {
350 if (q->mm == mm && q->tgid != p->tgid)
351 force_sig(SIGKILL, q);
352 } while_each_thread(g, q);
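/*
 * Net effect: the chosen task gets TIF_MEMDIE plus SIGKILL via
 * __oom_kill_task(), while tasks in other thread groups that share the same
 * mm (CLONE_VM users) get SIGKILL only, so they exit without being granted
 * access to the memory reserves.
 */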
357 static int oom_kill_process(struct task_struct *p, unsigned long points,
360 struct task_struct *c;
361 struct list_head *tsk;
364 * If the task is already exiting, don't alarm the sysadmin or kill
365 * its children or threads, just set TIF_MEMDIE so it can die quickly
367 if (p->flags & PF_EXITING) {
368 __oom_kill_task(p, 0);
372 printk(KERN_ERR "%s: kill process %d:#%u (%s) score %lu or a child\n",
373 message, p->pid, p->xid, p->comm, points);
375 /* Try to kill a child first */
376 list_for_each(tsk, &p->children) {
377 c = list_entry(tsk, struct task_struct, sibling);
380 if (!oom_kill_task(c))
383 return oom_kill_task(p);
386 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
388 int register_oom_notifier(struct notifier_block *nb)
390 return blocking_notifier_chain_register(&oom_notify_list, nb);
392 EXPORT_SYMBOL_GPL(register_oom_notifier);
394 int unregister_oom_notifier(struct notifier_block *nb)
396 return blocking_notifier_chain_unregister(&oom_notify_list, nb);
398 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
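/*
 * Illustrative sketch (not part of this file): how a driver might hook the
 * notifier chain above.  The callback is invoked from out_of_memory() via
 * blocking_notifier_call_chain(&oom_notify_list, 0, &freed) and should add
 * the number of pages it freed to *freed.  my_cache_shrink() and
 * my_reclaimable_cache are hypothetical names.
 */
#if 0
static int my_oom_notify(struct notifier_block *nb, unsigned long dummy,
                         void *parm)
{
        unsigned long *freed = parm;

        /* Drop a private cache and report how many pages that released. */
        *freed += my_cache_shrink(&my_reclaimable_cache);
        return NOTIFY_OK;
}

static struct notifier_block my_oom_nb = {
        .notifier_call = my_oom_notify,
};

static int __init my_oom_init(void)
{
        return register_oom_notifier(&my_oom_nb);
}
#endif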
401 * out_of_memory - kill the "best" process when we run out of memory
403 * If we run out of memory, we have the choice between either
404 * killing a random task (bad), letting the system crash (worse)
405 * OR trying to be smart about which process to kill. Note that we
406 * don't have to be perfect here, we just have to be good.
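/*
 * Overview of the flow below: run the OOM notifier chain first (a callback
 * may free enough memory that nothing needs to be killed), classify the
 * failed allocation with constrained_alloc(), then either punish the
 * calling task (constrained cases), panic (if so configured), or pick a
 * victim with select_bad_process() and dispatch it with oom_kill_process(),
 * finally yielding briefly so the victim can exit before the allocation is
 * retried.
 */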
408 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
410 struct task_struct *p;
411 unsigned long points = 0;
412 unsigned long freed = 0;
414 blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
416 /* Got some memory back in the last second. */
419 if (printk_ratelimit()) {
420 printk(KERN_WARNING "%s invoked oom-killer: "
421 "gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
422 current->comm, gfp_mask, order, current->oomkilladj);
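/*
 * Example of the resulting console line (values are illustrative only):
 *
 *   myapp invoked oom-killer: gfp_mask=0x201d2, order=0, oomkilladj=0
 */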
428 read_lock(&tasklist_lock);
431 * Check if there were limitations on the allocation (only relevant for
432 * NUMA) that may require different handling.
434 switch (constrained_alloc(zonelist, gfp_mask)) {
435 case CONSTRAINT_MEMORY_POLICY:
436 oom_kill_process(current, points,
437 "No available memory (MPOL_BIND)");
440 case CONSTRAINT_CPUSET:
441 oom_kill_process(current, points,
442 "No available memory in cpuset");
445 case CONSTRAINT_NONE:
446 if (sysctl_panic_on_oom)
447 panic("out of memory. panic_on_oom is selected\n");
450 * Rambo mode: Shoot down a process and hope it solves whatever
451 * issues we may have.
453 p = select_bad_process(&points);
455 if (PTR_ERR(p) == -1UL)
458 /* Found nothing?!?! Either we hang forever, or we panic. */
460 read_unlock(&tasklist_lock);
462 panic("Out of memory and no killable processes...\n");
465 if (oom_kill_process(p, points, "Out of memory"))
472 read_unlock(&tasklist_lock);
476 * Give "p" a good chance of killing itself before we
477 * retry to allocate memory unless "p" is current
479 if (!test_thread_flag(TIF_MEMDIE))
480 schedule_timeout_uninterruptible(1);
482 #endif /* CONFIG_OOM_KILLER */
484 #ifdef CONFIG_OOM_PANIC
486 * out_of_memory - panic if the system is out of memory
488 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
491 * oom_lock protects out_of_memory()'s static variables.
492 * It's a global lock; this is not performance-critical.
494 static spinlock_t oom_lock = SPIN_LOCK_UNLOCKED;
495 static unsigned long count;
497 spin_lock(&oom_lock);
500 * If we have gotten only a few failures,
501 * we're not really oom.
505 * Ok, really out of memory. Panic.
508 printk("oom-killer: gfp_mask=0x%x\n", gfp_mask);
511 panic("Out Of Memory");
513 spin_unlock(&oom_lock);
515 #endif /* CONFIG_OOM_PANIC */