/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for oom kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * in @start's thread group shares the same mempolicy nodes as current if it
 * is bound by such a policy and whether or not it has the same set of
 * allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}
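
/*
 * Typical usage (a sketch based on the callers in this file): the returned
 * thread is locked, so callers must pair this with task_unlock() once they
 * are done with the returned thread's ->mm:
 *
 *	p = find_lock_task_mm(tsk);
 *	if (p) {
 *		... use p->mm ...
 *		task_unlock(p);
 *	}
 */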

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we calculate
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have already been oom reaped or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that the
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
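
/*
 * Worked example (illustrative numbers, not taken from a real system): on a
 * machine with totalpages = 1048576 (4GB of 4kB pages, no swap), a task
 * charging 262144 pages of rss, page tables and swap starts at
 * points = 262144.  An oom_score_adj of 300 then adds
 * 300 * (1048576 / 1000) = 314400, giving 576544, so the task is treated
 * as if it used roughly 55% of RAM.
 */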

enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we have to kill a random task in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
	 * handle that yet.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by the cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
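
/*
 * Summary (derived from the checks above): oc->totalpages ends up as the
 * memcg limit for CONSTRAINT_MEMCG, swap plus the pages spanned by the
 * mempolicy nodemask for CONSTRAINT_MEMORY_POLICY, swap plus the pages
 * spanned by cpuset_current_mems_allowed for CONSTRAINT_CPUSET, and all of
 * RAM plus swap for CONSTRAINT_NONE.
 */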

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP, because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop.  We choose the process with the highest number of
 * 'points'.  If the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}
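
/*
 * Note: the final statement above rescales chosen_points from pages to a
 * 0..1000 "per mille of totalpages" figure that oom_kill_process() later
 * prints as the score; e.g. a victim charged half of totalpages reports a
 * score of 500.
 */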

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
		current->comm, oc->gfp_mask, &oc->gfp_mask);
	if (oc->nodemask)
		pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
	else
		pr_cont("(null)");
	pr_cont(",  order=%d, oom_score_adj=%hd\n",
		oc->order, current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (oc->memcg)
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
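/* Example: with 4kB pages (PAGE_SHIFT == 12), K(x) == x << 2, i.e. x pages = 4x kB. */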

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task_mm		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto unlock_oom;
	}

	/*
	 * Increase mm_users only after we know we will reap something, so
	 * that mmput_async is called only when we have reaped something and
	 * the delayed __mmput doesn't matter that much.
	 */
	if (!mmget_not_zero(mm)) {
		up_read(&mm->mmap_sem);
		goto unlock_oom;
	}

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable.  No barriers are really needed because
	 * unmapping should imply barriers already, and the reader would hit
	 * a page fault if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!can_madv_dontneed_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 NULL);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * Drop our reference, but make sure the mmput slow path is called
	 * from a different context, because we shouldn't risk getting stuck
	 * there and putting the oom_reaper out of the way.
	 */
	mmput_async(mm);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
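/* Ten retries at HZ/10 each bounds the reaper to roughly one second per victim. */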
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES)
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has either been fully
	 * reaped or somebody is stuck unable to call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and the caller has to guarantee it is stable
 * (either hold task_lock or operate on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
		mmgrab(tsk->signal->oom_mm);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because otherwise the OOM killer would not be
	 * able to free any memory and would livelock.  freezing_slow_path
	 * will tell the freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * userspace would see unexpected allocation failures as a result.  Any new
 * usage of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer.  Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}

	return true;
}
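
/*
 * Typical caller pattern (a sketch; the 20 second timeout is illustrative,
 * not mandated by this file): suspend-style code disables the OOM killer,
 * does its work while allocations may simply fail, and re-enables it
 * afterwards:
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20 * 1000)))
 *		return -EBUSY;
 *	...
 *	oom_killer_enable();
 */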

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space.  This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without an mm because they might have passed exit_mm and
	 * exit_oom_victim already.  The oom_reaper could have rescued that,
	 * but do not rely on it for now.  We can consider using
	 * find_lock_task_mm in the future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper, so there are
	 * only small chances it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well, to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly.
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from the userspace, so we
		 * are OK to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
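
/*
 * Example (a sketch, not part of this file): a driver that can drop a
 * private cache when the system is about to OOM.  The callback and the
 * example_drop_caches() helper are hypothetical; the chain passes a
 * pointer to the running count of freed pages as the last argument (see
 * out_of_memory() below).
 *
 *	static int example_oom_notify(struct notifier_block *nb,
 *				      unsigned long unused, void *freed)
 *	{
 *		*(unsigned long *)freed += example_drop_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_oom_nb = {
 *		.notifier_call = example_oom_notify,
 *	};
 *
 *	register_oom_notifier(&example_oom_nb);
 */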

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context, so we have to make
	 * sure to exclude the 0 mask; all other users should have at least
	 * ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If oom_lock is held by somebody else, a parallel
 * oom killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}
1074