xref: /openbmc/linux/mm/oom_kill.c (revision 11788d9b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/oom_kill.c
4  *
5  *  Copyright (C)  1998,2000  Rik van Riel
6  *	Thanks go out to Claus Fischer for some serious inspiration and
7  *	for goading me into coding this file...
8  *  Copyright (C)  2010  Google, Inc.
9  *	Rewritten by David Rientjes
10  *
11  *  The routines in this file are used to kill a process when
12  *  we're seriously out of memory. This gets called from __alloc_pages()
13  *  in mm/page_alloc.c when we really run out of memory.
14  *
15  *  Since we won't call these routines often (on a well-configured
16  *  machine) this file will double as a 'coding guide' and a signpost
17  *  for newbie kernel hackers. It features several pointers to major
18  *  kernel subsystems and hints as to where to find out what things do.
19  */
20 
21 #include <linux/oom.h>
22 #include <linux/mm.h>
23 #include <linux/err.h>
24 #include <linux/gfp.h>
25 #include <linux/sched.h>
26 #include <linux/sched/mm.h>
27 #include <linux/sched/coredump.h>
28 #include <linux/sched/task.h>
29 #include <linux/sched/debug.h>
30 #include <linux/swap.h>
31 #include <linux/timex.h>
32 #include <linux/jiffies.h>
33 #include <linux/cpuset.h>
34 #include <linux/export.h>
35 #include <linux/notifier.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mempolicy.h>
38 #include <linux/security.h>
39 #include <linux/ptrace.h>
40 #include <linux/freezer.h>
41 #include <linux/ftrace.h>
42 #include <linux/ratelimit.h>
43 #include <linux/kthread.h>
44 #include <linux/init.h>
45 #include <linux/mmu_notifier.h>
46 
47 #include <asm/tlb.h>
48 #include "internal.h"
49 #include "slab.h"
50 
51 #define CREATE_TRACE_POINTS
52 #include <trace/events/oom.h>
53 
54 int sysctl_panic_on_oom;
55 int sysctl_oom_kill_allocating_task;
56 int sysctl_oom_dump_tasks = 1;
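
/*
 * For reference (no new behaviour): these knobs are exposed under
 * /proc/sys/vm/ as panic_on_oom, oom_kill_allocating_task and oom_dump_tasks.
 * E.g. "echo 2 > /proc/sys/vm/panic_on_oom" makes any OOM, including
 * cpuset/mempolicy/memcg constrained ones, panic the machine (see
 * check_panic_on_oom() below).
 */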
57 
58 /*
59  * Serializes oom killer invocations (out_of_memory()) from all contexts to
60  * prevent over-eager oom killing (e.g. when the oom killer is invoked
61  * from different domains).
62  *
63  * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
64  * and mark_oom_victim().
65  */
66 DEFINE_MUTEX(oom_lock);
67 
68 static inline bool is_memcg_oom(struct oom_control *oc)
69 {
70 	return oc->memcg != NULL;
71 }
72 
73 #ifdef CONFIG_NUMA
74 /**
75  * oom_cpuset_eligible() - check task eligibility for kill
76  * @start: task struct of the task to consider
77  * @oc: pointer to struct oom_control
78  *
79  * Task eligibility is determined by whether or not a candidate task (@start
80  * or any of its threads) shares the same mempolicy nodes as current if it is
81  * bound by such a policy, and whether or not it has the same set of allowed
82  * cpuset nodes.
83  *
84  * This function assumes oom-killer context and that 'current' triggered it.
85  */
86 static bool oom_cpuset_eligible(struct task_struct *start,
87 				struct oom_control *oc)
88 {
89 	struct task_struct *tsk;
90 	bool ret = false;
91 	const nodemask_t *mask = oc->nodemask;
92 
93 	if (is_memcg_oom(oc))
94 		return true;
95 
96 	rcu_read_lock();
97 	for_each_thread(start, tsk) {
98 		if (mask) {
99 			/*
100 			 * If this is a mempolicy constrained oom, tsk's
101 			 * cpuset is irrelevant.  Only return true if its
102 			 * mempolicy intersects current, otherwise it may be
103 			 * needlessly killed.
104 			 */
105 			ret = mempolicy_nodemask_intersects(tsk, mask);
106 		} else {
107 			/*
108 			 * This is not a mempolicy constrained oom, so only
109 			 * check the mems of tsk's cpuset.
110 			 */
111 			ret = cpuset_mems_allowed_intersects(current, tsk);
112 		}
113 		if (ret)
114 			break;
115 	}
116 	rcu_read_unlock();
117 
118 	return ret;
119 }
120 #else
121 static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
122 {
123 	return true;
124 }
125 #endif /* CONFIG_NUMA */
126 
127 /*
128  * The process p may have detached its own ->mm while exiting or through
129  * kthread_use_mm(), but one or more of its subthreads may still have a valid
130  * pointer.  Return p, or any of its subthreads with a valid ->mm, with
131  * task_lock() held.
132  */
133 struct task_struct *find_lock_task_mm(struct task_struct *p)
134 {
135 	struct task_struct *t;
136 
137 	rcu_read_lock();
138 
139 	for_each_thread(p, t) {
140 		task_lock(t);
141 		if (likely(t->mm))
142 			goto found;
143 		task_unlock(t);
144 	}
145 	t = NULL;
146 found:
147 	rcu_read_unlock();
148 
149 	return t;
150 }
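
/*
 * Minimal usage sketch (illustrative only, not part of the original source):
 * the thread is returned with task_lock() held, so the caller must unlock it
 * once done with ->mm:
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *	if (t) {
 *		unsigned long rss = get_mm_rss(t->mm);	// t->mm is stable here
 *		task_unlock(t);
 *	}
 */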
151 
152 /*
153  * order == -1 means the oom kill was requested via sysrq; otherwise the
154  * order is only used for display purposes.
155  */
156 static inline bool is_sysrq_oom(struct oom_control *oc)
157 {
158 	return oc->order == -1;
159 }
160 
161 /* Return true if the task is not suitable as a candidate victim task. */
162 static bool oom_unkillable_task(struct task_struct *p)
163 {
164 	if (is_global_init(p))
165 		return true;
166 	if (p->flags & PF_KTHREAD)
167 		return true;
168 	return false;
169 }
170 
171 /*
172  * Print out unreclaimable slab info when the amount of unreclaimable slab
173  * memory is greater than all user memory (LRU pages).
174  */
175 static bool is_dump_unreclaim_slabs(void)
176 {
177 	unsigned long nr_lru;
178 
179 	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
180 		 global_node_page_state(NR_INACTIVE_ANON) +
181 		 global_node_page_state(NR_ACTIVE_FILE) +
182 		 global_node_page_state(NR_INACTIVE_FILE) +
183 		 global_node_page_state(NR_ISOLATED_ANON) +
184 		 global_node_page_state(NR_ISOLATED_FILE) +
185 		 global_node_page_state(NR_UNEVICTABLE);
186 
187 	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
188 }
189 
190 /**
191  * oom_badness - heuristic function to determine which candidate task to kill
192  * @p: task struct of the task whose badness we should calculate
193  * @totalpages: total present RAM allowed for page allocation
194  *
195  * The heuristic for determining which task to kill is made to be as simple and
196  * predictable as possible.  The goal is to return the highest value for the
197  * task consuming the most memory to avoid subsequent oom failures.
198  */
199 long oom_badness(struct task_struct *p, unsigned long totalpages)
200 {
201 	long points;
202 	long adj;
203 
204 	if (oom_unkillable_task(p))
205 		return LONG_MIN;
206 
207 	p = find_lock_task_mm(p);
208 	if (!p)
209 		return LONG_MIN;
210 
211 	/*
212 	 * Do not even consider tasks which are explicitly marked oom
213 	 * unkillable, have already been oom reaped, or are in the
214 	 * middle of vfork.
215 	 */
216 	adj = (long)p->signal->oom_score_adj;
217 	if (adj == OOM_SCORE_ADJ_MIN ||
218 			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
219 			in_vfork(p)) {
220 		task_unlock(p);
221 		return LONG_MIN;
222 	}
223 
224 	/*
225 	 * The baseline for the badness score is the proportion of RAM that each
226 	 * task's rss, pagetable and swap space use.
227 	 */
228 	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
229 		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
230 	task_unlock(p);
231 
232 	/* Normalize to oom_score_adj units */
233 	adj *= totalpages / 1000;
234 	points += adj;
235 
236 	return points;
237 }
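
/*
 * Worked example of the score above (hypothetical numbers, for illustration
 * only): on a machine with 4GiB of RAM, no swap and 4KiB pages,
 * totalpages = 1048576.  A task with rss = 262144 pages (1GiB), no swap
 * entries, 512 pages worth of page tables and oom_score_adj = -100 scores:
 *
 *	adj    = -100 * (1048576 / 1000)      = -104800
 *	points = 262144 + 0 + 512 + (-104800) =  157856
 *
 * A task with oom_score_adj == OOM_SCORE_ADJ_MIN is never considered, and a
 * positive adj inflates the score by roughly adj/1000 of total RAM.
 */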
238 
239 static const char * const oom_constraint_text[] = {
240 	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
241 	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
242 	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
243 	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
244 };
245 
246 /*
247  * Determine the type of allocation constraint.
248  */
249 static enum oom_constraint constrained_alloc(struct oom_control *oc)
250 {
251 	struct zone *zone;
252 	struct zoneref *z;
253 	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
254 	bool cpuset_limited = false;
255 	int nid;
256 
257 	if (is_memcg_oom(oc)) {
258 		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
259 		return CONSTRAINT_MEMCG;
260 	}
261 
262 	/* Default to all available memory */
263 	oc->totalpages = totalram_pages() + total_swap_pages;
264 
265 	if (!IS_ENABLED(CONFIG_NUMA))
266 		return CONSTRAINT_NONE;
267 
268 	if (!oc->zonelist)
269 		return CONSTRAINT_NONE;
270 	/*
271 	 * A __GFP_THISNODE allocation only reaches here when __GFP_NOFAIL is also
272 	 * used, so avoid killing current; a random task has to be killed instead.
273 	 * CONSTRAINT_THISNODE would be the proper answer, but cannot be handled yet.
274 	 */
275 	if (oc->gfp_mask & __GFP_THISNODE)
276 		return CONSTRAINT_NONE;
277 
278 	/*
279 	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
280 	 * the page allocator means a mempolicy is in effect.  Cpuset policy
281 	 * is enforced in get_page_from_freelist().
282 	 */
283 	if (oc->nodemask &&
284 	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
285 		oc->totalpages = total_swap_pages;
286 		for_each_node_mask(nid, *oc->nodemask)
287 			oc->totalpages += node_present_pages(nid);
288 		return CONSTRAINT_MEMORY_POLICY;
289 	}
290 
291 	/* Check this allocation failure is caused by cpuset's wall function */
292 	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
293 			highest_zoneidx, oc->nodemask)
294 		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
295 			cpuset_limited = true;
296 
297 	if (cpuset_limited) {
298 		oc->totalpages = total_swap_pages;
299 		for_each_node_mask(nid, cpuset_current_mems_allowed)
300 			oc->totalpages += node_present_pages(nid);
301 		return CONSTRAINT_CPUSET;
302 	}
303 	return CONSTRAINT_NONE;
304 }
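
/*
 * Rough illustration of the scaling above (numbers hypothetical): on a host
 * with 16GiB of RAM and 4GiB of swap, CONSTRAINT_NONE uses 16GiB + 4GiB worth
 * of pages as oc->totalpages, a mempolicy bound to a single 8GiB node yields
 * 8GiB + 4GiB (CONSTRAINT_MEMORY_POLICY), and a memcg OOM uses the cgroup's
 * own limit (CONSTRAINT_MEMCG).  oc->totalpages is what oom_badness()
 * normalizes oom_score_adj against.
 */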
305 
306 static int oom_evaluate_task(struct task_struct *task, void *arg)
307 {
308 	struct oom_control *oc = arg;
309 	long points;
310 
311 	if (oom_unkillable_task(task))
312 		goto next;
313 
314 	/* p may not have freeable memory in nodemask */
315 	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
316 		goto next;
317 
318 	/*
319 	 * This task already has access to memory reserves and is being killed.
320 	 * Don't allow any other task to have access to the reserves unless
321 	 * the task has MMF_OOM_SKIP because chances that it would release
322 	 * any memory is quite low.
323 	 */
324 	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
325 		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
326 			goto next;
327 		goto abort;
328 	}
329 
330 	/*
331 	 * If task is allocating a lot of memory and has been marked to be
332 	 * killed first if it triggers an oom, then select it.
333 	 */
334 	if (oom_task_origin(task)) {
335 		points = LONG_MAX;
336 		goto select;
337 	}
338 
339 	points = oom_badness(task, oc->totalpages);
340 	if (points == LONG_MIN || points < oc->chosen_points)
341 		goto next;
342 
343 select:
344 	if (oc->chosen)
345 		put_task_struct(oc->chosen);
346 	get_task_struct(task);
347 	oc->chosen = task;
348 	oc->chosen_points = points;
349 next:
350 	return 0;
351 abort:
352 	if (oc->chosen)
353 		put_task_struct(oc->chosen);
354 	oc->chosen = (void *)-1UL;
355 	return 1;
356 }
357 
358 /*
359  * Simple selection loop. We choose the process with the highest number of
360  * 'points'. In case scan was aborted, oc->chosen is set to -1.
361  */
362 static void select_bad_process(struct oom_control *oc)
363 {
364 	oc->chosen_points = LONG_MIN;
365 
366 	if (is_memcg_oom(oc))
367 		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
368 	else {
369 		struct task_struct *p;
370 
371 		rcu_read_lock();
372 		for_each_process(p)
373 			if (oom_evaluate_task(p, oc))
374 				break;
375 		rcu_read_unlock();
376 	}
377 }
378 
379 static int dump_task(struct task_struct *p, void *arg)
380 {
381 	struct oom_control *oc = arg;
382 	struct task_struct *task;
383 
384 	if (oom_unkillable_task(p))
385 		return 0;
386 
387 	/* p may not have freeable memory in nodemask */
388 	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
389 		return 0;
390 
391 	task = find_lock_task_mm(p);
392 	if (!task) {
393 		/*
394 		 * This is a kthread or all of p's threads have already
395 		 * detached their mm's.  There's no need to report
396 		 * them; they can't be oom killed anyway.
397 		 */
398 		return 0;
399 	}
400 
401 	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
402 		task->pid, from_kuid(&init_user_ns, task_uid(task)),
403 		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
404 		mm_pgtables_bytes(task->mm),
405 		get_mm_counter(task->mm, MM_SWAPENTS),
406 		task->signal->oom_score_adj, task->comm);
407 	task_unlock(task);
408 
409 	return 0;
410 }
411 
412 /**
413  * dump_tasks - dump current memory state of all system tasks
414  * @oc: pointer to struct oom_control
415  *
416  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
417  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
418  * are not shown.
419  * State information includes task's pid, uid, tgid, vm size, rss,
420  * pgtables_bytes, swapents, oom_score_adj value, and name.
421  */
422 static void dump_tasks(struct oom_control *oc)
423 {
424 	pr_info("Tasks state (memory values in pages):\n");
425 	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
426 
427 	if (is_memcg_oom(oc))
428 		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
429 	else {
430 		struct task_struct *p;
431 
432 		rcu_read_lock();
433 		for_each_process(p)
434 			dump_task(p, oc);
435 		rcu_read_unlock();
436 	}
437 }
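
/*
 * Example of one resulting line, with hypothetical values (pgtables_bytes is
 * printed in bytes, the other memory columns in pages):
 *
 *	[   1234]  1000  1234    54321     8765   159744        0             0 example-daemon
 */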
438 
439 static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
440 {
441 	/* one line summary of the oom killer context. */
442 	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
443 			oom_constraint_text[oc->constraint],
444 			nodemask_pr_args(oc->nodemask));
445 	cpuset_print_current_mems_allowed();
446 	mem_cgroup_print_oom_context(oc->memcg, victim);
447 	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
448 		from_kuid(&init_user_ns, task_uid(victim)));
449 }
450 
451 static void dump_header(struct oom_control *oc, struct task_struct *p)
452 {
453 	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
454 		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
455 			current->signal->oom_score_adj);
456 	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
457 		pr_warn("COMPACTION is disabled!!!\n");
458 
459 	dump_stack();
460 	if (is_memcg_oom(oc))
461 		mem_cgroup_print_oom_meminfo(oc->memcg);
462 	else {
463 		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
464 		if (is_dump_unreclaim_slabs())
465 			dump_unreclaimable_slab();
466 	}
467 	if (sysctl_oom_dump_tasks)
468 		dump_tasks(oc);
469 	if (p)
470 		dump_oom_summary(oc, p);
471 }
472 
473 /*
474  * Number of OOM victims in flight
475  */
476 static atomic_t oom_victims = ATOMIC_INIT(0);
477 static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
478 
479 static bool oom_killer_disabled __read_mostly;
480 
481 #define K(x) ((x) << (PAGE_SHIFT-10))
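
/*
 * K() converts a page count into KiB for printing, e.g. with 4KiB pages
 * (PAGE_SHIFT == 12) K(300) == 300 << 2 == 1200kB.
 */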
482 
483 /*
484  * task->mm can be NULL if the task is the exited group leader.  So to
485  * determine whether the task is using a particular mm, we examine all the
486  * task's threads: if one of those is using this mm then this task was also
487  * using it.
488  */
489 bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
490 {
491 	struct task_struct *t;
492 
493 	for_each_thread(p, t) {
494 		struct mm_struct *t_mm = READ_ONCE(t->mm);
495 		if (t_mm)
496 			return t_mm == mm;
497 	}
498 	return false;
499 }
500 
501 #ifdef CONFIG_MMU
502 /*
503  * OOM Reaper kernel thread which tries to reap the memory used by the OOM
504  * victim (if that is possible) to help the OOM killer to move on.
505  */
506 static struct task_struct *oom_reaper_th;
507 static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
508 static struct task_struct *oom_reaper_list;
509 static DEFINE_SPINLOCK(oom_reaper_lock);
510 
511 bool __oom_reap_task_mm(struct mm_struct *mm)
512 {
513 	struct vm_area_struct *vma;
514 	bool ret = true;
515 
516 	/*
517 	 * Tell all users of get_user/copy_from_user etc... that the content
518 	 * is no longer stable. No barriers really needed because unmapping
519 	 * should imply barriers already and the reader would hit a page fault
520 	 * if it stumbled over reaped memory.
521 	 */
522 	set_bit(MMF_UNSTABLE, &mm->flags);
523 
524 	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
525 		if (!can_madv_lru_vma(vma))
526 			continue;
527 
528 		/*
529 		 * Only anonymous pages have a good chance of being dropped
530 		 * without additional steps, which we cannot afford as we
531 		 * are OOM already.
532 		 *
533 		 * We do not even care about fs-backed pages because all
534 		 * that are reclaimable have already been reclaimed and
535 		 * we do not want to block exit_mmap by keeping the mm
536 		 * refcount elevated without a good reason.
537 		 */
538 		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
539 			struct mmu_notifier_range range;
540 			struct mmu_gather tlb;
541 
542 			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
543 						vma, mm, vma->vm_start,
544 						vma->vm_end);
545 			tlb_gather_mmu(&tlb, mm, range.start, range.end);
546 			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
547 				tlb_finish_mmu(&tlb, range.start, range.end);
548 				ret = false;
549 				continue;
550 			}
551 			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
552 			mmu_notifier_invalidate_range_end(&range);
553 			tlb_finish_mmu(&tlb, range.start, range.end);
554 		}
555 	}
556 
557 	return ret;
558 }
559 
560 /*
561  * Reaps the address space of the given task.
562  *
563  * Returns true on success; false if none or only part of the address space
564  * could be reclaimed and the caller should retry later.
565  */
566 static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
567 {
568 	bool ret = true;
569 
570 	if (!mmap_read_trylock(mm)) {
571 		trace_skip_task_reaping(tsk->pid);
572 		return false;
573 	}
574 
575 	/*
576 	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
577 	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
578 	 * under mmap_lock for reading because it serializes against the
579 	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
580 	 */
581 	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
582 		trace_skip_task_reaping(tsk->pid);
583 		goto out_unlock;
584 	}
585 
586 	trace_start_task_reaping(tsk->pid);
587 
588 	/* failed to reap part of the address space. Try again later */
589 	ret = __oom_reap_task_mm(mm);
590 	if (!ret)
591 		goto out_finish;
592 
593 	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
594 			task_pid_nr(tsk), tsk->comm,
595 			K(get_mm_counter(mm, MM_ANONPAGES)),
596 			K(get_mm_counter(mm, MM_FILEPAGES)),
597 			K(get_mm_counter(mm, MM_SHMEMPAGES)));
598 out_finish:
599 	trace_finish_task_reaping(tsk->pid);
600 out_unlock:
601 	mmap_read_unlock(mm);
602 
603 	return ret;
604 }
605 
606 #define MAX_OOM_REAP_RETRIES 10
607 static void oom_reap_task(struct task_struct *tsk)
608 {
609 	int attempts = 0;
610 	struct mm_struct *mm = tsk->signal->oom_mm;
611 
612 	/* Retry the mmap_read_trylock(mm) a few times */
613 	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
614 		schedule_timeout_idle(HZ/10);
615 
616 	if (attempts <= MAX_OOM_REAP_RETRIES ||
617 	    test_bit(MMF_OOM_SKIP, &mm->flags))
618 		goto done;
619 
620 	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
621 		task_pid_nr(tsk), tsk->comm);
622 	sched_show_task(tsk);
623 	debug_show_all_locks();
624 
625 done:
626 	tsk->oom_reaper_list = NULL;
627 
628 	/*
629 	 * Hide this mm from the OOM killer because it has either been reaped or
630 	 * reaping failed because somebody can't call mmap_write_unlock(mm).
631 	 */
632 	set_bit(MMF_OOM_SKIP, &mm->flags);
633 
634 	/* Drop a reference taken by wake_oom_reaper */
635 	put_task_struct(tsk);
636 }
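
/*
 * Back-of-the-envelope timing for the retry loop above: MAX_OOM_REAP_RETRIES
 * attempts spaced by schedule_timeout_idle(HZ/10) means the reaper gives up
 * on a persistently unavailable mmap_lock after roughly one second and falls
 * back to setting MMF_OOM_SKIP.
 */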
637 
638 static int oom_reaper(void *unused)
639 {
640 	while (true) {
641 		struct task_struct *tsk = NULL;
642 
643 		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
644 		spin_lock(&oom_reaper_lock);
645 		if (oom_reaper_list != NULL) {
646 			tsk = oom_reaper_list;
647 			oom_reaper_list = tsk->oom_reaper_list;
648 		}
649 		spin_unlock(&oom_reaper_lock);
650 
651 		if (tsk)
652 			oom_reap_task(tsk);
653 	}
654 
655 	return 0;
656 }
657 
658 static void wake_oom_reaper(struct task_struct *tsk)
659 {
660 	/* mm is already queued? */
661 	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
662 		return;
663 
664 	get_task_struct(tsk);
665 
666 	spin_lock(&oom_reaper_lock);
667 	tsk->oom_reaper_list = oom_reaper_list;
668 	oom_reaper_list = tsk;
669 	spin_unlock(&oom_reaper_lock);
670 	trace_wake_reaper(tsk->pid);
671 	wake_up(&oom_reaper_wait);
672 }
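
/*
 * Queueing model, in short: wake_oom_reaper() pushes victims LIFO onto
 * oom_reaper_list under oom_reaper_lock (MMF_OOM_REAP_QUEUED prevents
 * double-queueing and the extra task reference keeps the victim alive), and
 * the oom_reaper kthread pops and reaps them one at a time.
 */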
673 
674 static int __init oom_init(void)
675 {
676 	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
677 	return 0;
678 }
679 subsys_initcall(oom_init)
680 #else
681 static inline void wake_oom_reaper(struct task_struct *tsk)
682 {
683 }
684 #endif /* CONFIG_MMU */
685 
686 /**
687  * mark_oom_victim - mark the given task as OOM victim
688  * @tsk: task to mark
689  *
690  * Has to be called with oom_lock held and never after
691  * oom has been disabled already.
692  *
693  * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
694  * (either by holding task_lock or by operating on current).
695  */
696 static void mark_oom_victim(struct task_struct *tsk)
697 {
698 	struct mm_struct *mm = tsk->mm;
699 
700 	WARN_ON(oom_killer_disabled);
701 	/* OOM killer might race with memcg OOM */
702 	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
703 		return;
704 
705 	/* oom_mm is bound to the signal struct life time. */
706 	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
707 		mmgrab(tsk->signal->oom_mm);
708 		set_bit(MMF_OOM_VICTIM, &mm->flags);
709 	}
710 
711 	/*
712 	 * Make sure that the task is woken up from uninterruptible sleep
713 	 * if it is frozen, because otherwise the OOM killer wouldn't be able
714 	 * to free any memory and would livelock. freezing_slow_path will
715 	 * tell the freezer that TIF_MEMDIE tasks should be ignored.
716 	 */
717 	__thaw_task(tsk);
718 	atomic_inc(&oom_victims);
719 	trace_mark_victim(tsk->pid);
720 }
721 
722 /**
723  * exit_oom_victim - note the exit of an OOM victim
724  */
725 void exit_oom_victim(void)
726 {
727 	clear_thread_flag(TIF_MEMDIE);
728 
729 	if (!atomic_dec_return(&oom_victims))
730 		wake_up_all(&oom_victims_wait);
731 }
732 
733 /**
734  * oom_killer_enable - enable OOM killer
735  */
736 void oom_killer_enable(void)
737 {
738 	oom_killer_disabled = false;
739 	pr_info("OOM killer enabled.\n");
740 }
741 
742 /**
743  * oom_killer_disable - disable OOM killer
744  * @timeout: maximum timeout to wait for oom victims in jiffies
745  *
746  * Forces all page allocations to fail rather than trigger OOM killer.
747  * Will block and wait until all OOM victims are killed or the given
748  * timeout expires.
749  *
750  * The function cannot be called when there are runnable user tasks because
751  * userspace would see unexpected allocation failures as a result. Any
752  * new use of this function should be discussed with the MM people.
753  *
754  * Returns true if successful and false if the OOM killer cannot be
755  * disabled.
756  */
757 bool oom_killer_disable(signed long timeout)
758 {
759 	signed long ret;
760 
761 	/*
762 	 * Make sure to not race with an ongoing OOM killer. Check that the
763 	 * current is not killed (possibly due to sharing the victim's memory).
764 	 */
765 	if (mutex_lock_killable(&oom_lock))
766 		return false;
767 	oom_killer_disabled = true;
768 	mutex_unlock(&oom_lock);
769 
770 	ret = wait_event_interruptible_timeout(oom_victims_wait,
771 			!atomic_read(&oom_victims), timeout);
772 	if (ret <= 0) {
773 		oom_killer_enable();
774 		return false;
775 	}
776 	pr_info("OOM killer disabled.\n");
777 
778 	return true;
779 }
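
/*
 * Rough usage sketch (illustrative only; the suspend/hibernation freezer is
 * the in-tree caller, and the timeout value below is made up):
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20 * 1000)))
 *		return -EBUSY;	// oom victims did not exit in time
 *	... run with no runnable userspace and no OOM killing ...
 *	oom_killer_enable();
 */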
780 
781 static inline bool __task_will_free_mem(struct task_struct *task)
782 {
783 	struct signal_struct *sig = task->signal;
784 
785 	/*
786 	 * A coredumping process may sleep for an extended period in exit_mm(),
787 	 * so the oom killer cannot assume that the process will promptly exit
788 	 * and release memory.
789 	 */
790 	if (sig->flags & SIGNAL_GROUP_COREDUMP)
791 		return false;
792 
793 	if (sig->flags & SIGNAL_GROUP_EXIT)
794 		return true;
795 
796 	if (thread_group_empty(task) && (task->flags & PF_EXITING))
797 		return true;
798 
799 	return false;
800 }
801 
802 /*
803  * Checks whether the given task is dying or exiting and likely to
804  * release its address space. This means that all threads and processes
805  * sharing the same mm have to be killed or exiting.
806  * The caller has to make sure that task->mm is stable (either by holding
807  * task_lock or by operating on current).
808  */
809 static bool task_will_free_mem(struct task_struct *task)
810 {
811 	struct mm_struct *mm = task->mm;
812 	struct task_struct *p;
813 	bool ret = true;
814 
815 	/*
816 	 * Skip tasks without an mm because they might have already passed exit_mm
817 	 * and exit_oom_victim. The oom_reaper could have rescued that, but do not
818 	 * rely on it for now. We can consider using find_lock_task_mm in the future.
819 	 */
820 	if (!mm)
821 		return false;
822 
823 	if (!__task_will_free_mem(task))
824 		return false;
825 
826 	/*
827 	 * This task has already been drained by the oom reaper so there is
828 	 * only a small chance it will free any more memory.
829 	 */
830 	if (test_bit(MMF_OOM_SKIP, &mm->flags))
831 		return false;
832 
833 	if (atomic_read(&mm->mm_users) <= 1)
834 		return true;
835 
836 	/*
837 	 * Make sure that all tasks which share the mm with the given task
838 	 * are dying as well to make sure that a) nobody pins its mm and
839 	 * b) the task is also reapable by the oom reaper.
840 	 */
841 	rcu_read_lock();
842 	for_each_process(p) {
843 		if (!process_shares_mm(p, mm))
844 			continue;
845 		if (same_thread_group(task, p))
846 			continue;
847 		ret = __task_will_free_mem(p);
848 		if (!ret)
849 			break;
850 	}
851 	rcu_read_unlock();
852 
853 	return ret;
854 }
855 
856 static void __oom_kill_process(struct task_struct *victim, const char *message)
857 {
858 	struct task_struct *p;
859 	struct mm_struct *mm;
860 	bool can_oom_reap = true;
861 
862 	p = find_lock_task_mm(victim);
863 	if (!p) {
864 		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
865 			message, task_pid_nr(victim), victim->comm);
866 		put_task_struct(victim);
867 		return;
868 	} else if (victim != p) {
869 		get_task_struct(p);
870 		put_task_struct(victim);
871 		victim = p;
872 	}
873 
874 	/* Get a reference to safely compare mm after task_unlock(victim) */
875 	mm = victim->mm;
876 	mmgrab(mm);
877 
878 	/* Raise event before sending signal: task reaper must see this */
879 	count_vm_event(OOM_KILL);
880 	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
881 
882 	/*
883 	 * We should send SIGKILL before granting access to memory reserves
884 	 * in order to prevent the OOM victim from depleting the memory
885 	 * reserves from the user space under its control.
886 	 */
887 	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
888 	mark_oom_victim(victim);
889 	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
890 		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
891 		K(get_mm_counter(mm, MM_ANONPAGES)),
892 		K(get_mm_counter(mm, MM_FILEPAGES)),
893 		K(get_mm_counter(mm, MM_SHMEMPAGES)),
894 		from_kuid(&init_user_ns, task_uid(victim)),
895 		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
896 	task_unlock(victim);
897 
898 	/*
899 	 * Kill all user processes sharing victim->mm in other thread groups, if
900 	 * any.  They don't get access to memory reserves, though, to avoid
901 	 * depletion of all memory.  This prevents mm->mmap_lock livelock when an
902 	 * oom-killed thread cannot exit because it requires the semaphore and
903 	 * it's contended by another thread trying to allocate memory itself.
904 	 * That thread will now get access to memory reserves since it has a
905 	 * pending fatal signal.
906 	 */
907 	rcu_read_lock();
908 	for_each_process(p) {
909 		if (!process_shares_mm(p, mm))
910 			continue;
911 		if (same_thread_group(p, victim))
912 			continue;
913 		if (is_global_init(p)) {
914 			can_oom_reap = false;
915 			set_bit(MMF_OOM_SKIP, &mm->flags);
916 			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
917 					task_pid_nr(victim), victim->comm,
918 					task_pid_nr(p), p->comm);
919 			continue;
920 		}
921 		/*
922 		 * No kthread_use_mm() user needs to read from the userspace, so
923 		 * we are ok to reap it.
924 		 */
925 		if (unlikely(p->flags & PF_KTHREAD))
926 			continue;
927 		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
928 	}
929 	rcu_read_unlock();
930 
931 	if (can_oom_reap)
932 		wake_oom_reaper(victim);
933 
934 	mmdrop(mm);
935 	put_task_struct(victim);
936 }
937 #undef K
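
/*
 * Example of the resulting log line (hypothetical values):
 *
 *	Out of memory: Killed process 1234 (example-daemon) total-vm:217284kB,
 *	anon-rss:35060kB, file-rss:0kB, shmem-rss:0kB, UID:1000 pgtables:156kB
 *	oom_score_adj:0
 */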
938 
939 /*
940  * Kill the provided task unless it is protected by setting
941  * oom_score_adj to OOM_SCORE_ADJ_MIN.
942  */
943 static int oom_kill_memcg_member(struct task_struct *task, void *message)
944 {
945 	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
946 	    !is_global_init(task)) {
947 		get_task_struct(task);
948 		__oom_kill_process(task, message);
949 	}
950 	return 0;
951 }
952 
953 static void oom_kill_process(struct oom_control *oc, const char *message)
954 {
955 	struct task_struct *victim = oc->chosen;
956 	struct mem_cgroup *oom_group;
957 	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
958 					      DEFAULT_RATELIMIT_BURST);
959 
960 	/*
961 	 * If the task is already exiting, don't alarm the sysadmin or kill
962 	 * its children or threads, just give it access to memory reserves
963 	 * so it can die quickly
964 	 */
965 	task_lock(victim);
966 	if (task_will_free_mem(victim)) {
967 		mark_oom_victim(victim);
968 		wake_oom_reaper(victim);
969 		task_unlock(victim);
970 		put_task_struct(victim);
971 		return;
972 	}
973 	task_unlock(victim);
974 
975 	if (__ratelimit(&oom_rs))
976 		dump_header(oc, victim);
977 
978 	/*
979 	 * Do we need to kill the entire memory cgroup?
980 	 * Or even one of the ancestor memory cgroups?
981 	 * Check this out before killing the victim task.
982 	 */
983 	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
984 
985 	__oom_kill_process(victim, message);
986 
987 	/*
988 	 * If necessary, kill all tasks in the selected memory cgroup.
989 	 */
990 	if (oom_group) {
991 		mem_cgroup_print_oom_group(oom_group);
992 		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
993 				      (void*)message);
994 		mem_cgroup_put(oom_group);
995 	}
996 }
997 
998 /*
999  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
1000  */
1001 static void check_panic_on_oom(struct oom_control *oc)
1002 {
1003 	if (likely(!sysctl_panic_on_oom))
1004 		return;
1005 	if (sysctl_panic_on_oom != 2) {
1006 		/*
1007 		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
1008 		 * does not panic for cpuset, mempolicy, or memcg allocation
1009 		 * failures.
1010 		 */
1011 		if (oc->constraint != CONSTRAINT_NONE)
1012 			return;
1013 	}
1014 	/* Do not panic for oom kills triggered by sysrq */
1015 	if (is_sysrq_oom(oc))
1016 		return;
1017 	dump_header(oc, NULL);
1018 	panic("Out of memory: %s panic_on_oom is enabled\n",
1019 		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
1020 }
1021 
1022 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
1023 
1024 int register_oom_notifier(struct notifier_block *nb)
1025 {
1026 	return blocking_notifier_chain_register(&oom_notify_list, nb);
1027 }
1028 EXPORT_SYMBOL_GPL(register_oom_notifier);
1029 
1030 int unregister_oom_notifier(struct notifier_block *nb)
1031 {
1032 	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
1033 }
1034 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
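
/*
 * Minimal sketch of an oom notifier user (hypothetical driver code, shown for
 * illustration only).  The callback receives a pointer to the 'freed' counter
 * used by out_of_memory() and bumps it by the number of pages it released:
 *
 *	static int example_oom_notify(struct notifier_block *nb,
 *				      unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += example_shrink_cache();	// pages given back (hypothetical helper)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_oom_nb = {
 *		.notifier_call	= example_oom_notify,
 *	};
 *
 *	register_oom_notifier(&example_oom_nb);
 */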
1035 
1036 /**
1037  * out_of_memory - kill the "best" process when we run out of memory
1038  * @oc: pointer to struct oom_control
1039  *
1040  * If we run out of memory, we have the choice between either
1041  * killing a random task (bad), letting the system crash (worse)
1042  * OR try to be smart about which process to kill. Note that we
1043  * don't have to be perfect here, we just have to be good.
1044  */
1045 bool out_of_memory(struct oom_control *oc)
1046 {
1047 	unsigned long freed = 0;
1048 
1049 	if (oom_killer_disabled)
1050 		return false;
1051 
1052 	if (!is_memcg_oom(oc)) {
1053 		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
1054 		if (freed > 0)
1055 			/* Got some memory back in the last second. */
1056 			return true;
1057 	}
1058 
1059 	/*
1060 	 * If current has a pending SIGKILL or is exiting, then automatically
1061 	 * select it.  The goal is to allow it to allocate so that it may
1062 	 * quickly exit and free its memory.
1063 	 */
1064 	if (task_will_free_mem(current)) {
1065 		mark_oom_victim(current);
1066 		wake_oom_reaper(current);
1067 		return true;
1068 	}
1069 
1070 	/*
1071 	 * The OOM killer does not compensate for IO-less reclaim.
1072 	 * pagefault_out_of_memory lost its gfp context so we have to make
1073 	 * sure to exclude a 0 mask - all other users should have at least
1074 	 * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
1075 	 * invoke the OOM killer even if it is a GFP_NOFS allocation.
1076 	 */
1077 	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
1078 		return true;
1079 
1080 	/*
1081 	 * Check if there were limitations on the allocation (only relevant for
1082 	 * NUMA and memcg) that may require different handling.
1083 	 */
1084 	oc->constraint = constrained_alloc(oc);
1085 	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
1086 		oc->nodemask = NULL;
1087 	check_panic_on_oom(oc);
1088 
1089 	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1090 	    current->mm && !oom_unkillable_task(current) &&
1091 	    oom_cpuset_eligible(current, oc) &&
1092 	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1093 		get_task_struct(current);
1094 		oc->chosen = current;
1095 		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
1096 		return true;
1097 	}
1098 
1099 	select_bad_process(oc);
1100 	/* Found nothing?!?! */
1101 	if (!oc->chosen) {
1102 		dump_header(oc, NULL);
1103 		pr_warn("Out of memory and no killable processes...\n");
1104 		/*
1105 		 * If we got here due to an actual allocation at the
1106 		 * system level, we cannot survive this and will enter
1107 		 * an endless loop in the allocator. Bail out now.
1108 		 */
1109 		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
1110 			panic("System is deadlocked on memory\n");
1111 	}
1112 	if (oc->chosen && oc->chosen != (void *)-1UL)
1113 		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
1114 				 "Memory cgroup out of memory");
1115 	return !!oc->chosen;
1116 }
1117 
1118 /*
1119  * The pagefault handler calls here because it is out of memory, so kill a
1120  * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
1121  * killing is already in progress so do nothing.
1122  */
1123 void pagefault_out_of_memory(void)
1124 {
1125 	struct oom_control oc = {
1126 		.zonelist = NULL,
1127 		.nodemask = NULL,
1128 		.memcg = NULL,
1129 		.gfp_mask = 0,
1130 		.order = 0,
1131 	};
1132 
1133 	if (mem_cgroup_oom_synchronize(true))
1134 		return;
1135 
1136 	if (!mutex_trylock(&oom_lock))
1137 		return;
1138 	out_of_memory(&oc);
1139 	mutex_unlock(&oom_lock);
1140 }
1141