/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks;
static DEFINE_SPINLOCK(zone_scan_lock);	/* protects ZONE_OOM_LOCKED transitions */
/* #define DEBUG */

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task we are evaluating
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill; this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless amount of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent will make the child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}
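
	/*
	 * Illustrative example (hypothetical numbers, not taken from the
	 * original source): a parent with total_vm = 1000 pages whose one
	 * child owns a separate mm with total_vm = 10000 pages scores
	 * 1000 + 10000/2 + 1 = 6001 points here, while the child itself
	 * starts from 10000, so the memory-hungry child ends up the
	 * preferred victim.
	 */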
	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;
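
	/*
	 * To make the units concrete (assuming HZ = 1000, so SHIFT_HZ = 10):
	 * jiffies >> 13 yields roughly one unit per 8 seconds of CPU time,
	 * and seconds >> 10 yields one unit per ~17 minutes of run time,
	 * so the badness of CPU-busy and long-lived tasks is divided by
	 * sqrt(cpu_time) and sqrt(sqrt(run_time)) respectively.
	 */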
	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (__capable(p, CAP_SYS_ADMIN) || __capable(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (__capable(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_mems_allowed_intersects(current, p))
		points /= 8;

	/*
	 * Adjust the score by oomkilladj.
	 */
	if (p->oomkilladj) {
		if (p->oomkilladj > 0) {
			if (!points)
				points = 1;
			points <<= p->oomkilladj;
		} else
			points >>= -(p->oomkilladj);
	}
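
	/*
	 * Worked example (illustrative values): with points = 1000, an
	 * oomkilladj of +2 gives 1000 << 2 = 4000 while -2 gives
	 * 1000 >> 2 = 250.  OOM_DISABLE (-17) is handled separately by
	 * the callers, which skip such tasks entirely.
	 */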
#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
	       p->pid, p->comm, points);
#endif
	return points;
}

/*
 * Determine the type of allocation constraint.  An allocation is
 * CONSTRAINT_CPUSET if some zone in the zonelist is not allowed by the
 * current task's cpuset, CONSTRAINT_MEMORY_POLICY if the zonelist omits
 * a node that has memory (e.g. because of MPOL_BIND), and
 * CONSTRAINT_NONE otherwise.
 */
static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
						    gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	nodemask_t nodes = node_states[N_HIGH_MEMORY];

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		if (cpuset_zone_allowed_softwall(zone, gfp_mask))
			node_clear(zone_to_nid(zone), nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * Returns ERR_PTR(-1UL) if an OOM kill is already in progress, NULL if
 * no killable task was found, or the chosen task otherwise.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
						struct mem_cgroup *mem)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_global_init(p))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->oomkilladj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: target memory controller
 *
 * Dumps the current memory state of all system tasks, excluding kernel
 * threads. State information includes each task's pid, uid, tgid, vm size,
 * rss, cpu, oom_adj score, and name.
 *
 * If @mem is non-NULL, only tasks that are members of that mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *g, *p;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
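	/*
	 * Each line printed below then looks like (values purely
	 * illustrative):
	 * [ 4242]  1000  4242    51234     1200   3       0 firefox
	 */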
	do_each_thread(g, p) {
		/*
		 * total_vm and rss sizes do not exist for tasks with a
		 * detached mm so there's no need to report them.
		 */
		if (!p->mm)
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		task_lock(p);
		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d     %3d %s\n",
		       p->pid, p->uid, p->tgid, p->mm->total_vm,
		       get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj,
		       p->comm);
		task_unlock(p);
	} while_each_thread(g, p);
}

/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAWIO,
 * though it's unlikely that we select a process with CAP_SYS_RAWIO
 * set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_global_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s)\n",
				task_pid_nr(p), p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}
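
/*
 * Returns 0 if p (or another task sharing its ->mm) was killed, and
 * nonzero if the kill was refused: either p has no mm, or some task
 * sharing the mm is set to OOM_DISABLE.
 */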
static int oom_kill_task(struct task_struct *p)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p).  This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */

	if (mm == NULL)
		return 1;

	/*
	 * Don't kill the process if any threads are set to OOM_DISABLE
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
			return 1;
	} while_each_thread(g, q);

	__oom_kill_task(p, 1);

	/*
	 * Kill all other processes sharing p's ->mm (i.e. CLONE_VM tasks
	 * in a different thread group). Don't let them have access
	 * to memory reserves though, otherwise we might deplete all memory.
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && !same_thread_group(q, p))
			force_sig(SIGKILL, q);
	} while_each_thread(g, q);

	return 0;
}

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *c;

	if (printk_ratelimit()) {
		printk(KERN_WARNING "%s invoked oom-killer: "
			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
			current->comm, gfp_mask, order, current->oomkilladj);
		dump_stack();
		show_mem();
		if (sysctl_oom_dump_tasks)
			dump_tasks(mem);
	}

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
					message, task_pid_nr(p), p->comm, points);

	/* Try to kill a child first */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	cgroup_lock();
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem);
	if (PTR_ERR(p) == -1UL)
		goto out;

	if (!p)
		p = current;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
	cgroup_unlock();
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
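
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * may register a callback that tries to shed memory when the system is
 * about to OOM; out_of_memory() hands the chain a pointer to an
 * unsigned long in which the callback accumulates the number of pages
 * it freed. my_shrink_cache() below is a made-up helper:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_shrink_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */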

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zone_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now invoke the OOM
 * killer again, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}
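
/*
 * Typical pairing (sketch only; the real caller lives in
 * mm/page_alloc.c):
 *
 *	if (try_set_zone_oom(zonelist, gfp_mask)) {
 *		out_of_memory(zonelist, gfp_mask, order);
 *		clear_zonelist_oom(zonelist, gfp_mask);
 *	}
 *
 * so at most one OOM kill proceeds at a time for any given set of zones.
 */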

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom == 2)
		panic("out of memory. Compulsory panic_on_oom is selected.\n");

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask);
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, gfp_mask, order, points, NULL,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
		/* Fall-through */
	case CONSTRAINT_CPUSET:
		if (sysctl_oom_kill_allocating_task) {
			oom_kill_process(current, gfp_mask, order, points, NULL,
					"Out of memory (oom_kill_allocating_task)");
			break;
		}
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points, NULL);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, gfp_mask, order, points, NULL,
				     "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry the allocation, unless "p" is current.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}