xref: /openbmc/linux/mm/oom_kill.c (revision e1e38ea1)
1 /*
2  *  linux/mm/oom_kill.c
3  *
4  *  Copyright (C)  1998,2000  Rik van Riel
5  *	Thanks go out to Claus Fischer for some serious inspiration and
6  *	for goading me into coding this file...
7  *  Copyright (C)  2010  Google, Inc.
8  *	Rewritten by David Rientjes
9  *
10  *  The routines in this file are used to kill a process when
11  *  we're seriously out of memory. This gets called from __alloc_pages()
12  *  in mm/page_alloc.c when we really run out of memory.
13  *
14  *  Since we won't call these routines often (on a well-configured
15  *  machine) this file will double as a 'coding guide' and a signpost
16  *  for newbie kernel hackers. It features several pointers to major
17  *  kernel subsystems and hints as to where to find out what things do.
18  */
19 
20 #include <linux/oom.h>
21 #include <linux/mm.h>
22 #include <linux/err.h>
23 #include <linux/gfp.h>
24 #include <linux/sched.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/coredump.h>
27 #include <linux/sched/task.h>
28 #include <linux/swap.h>
29 #include <linux/timex.h>
30 #include <linux/jiffies.h>
31 #include <linux/cpuset.h>
32 #include <linux/export.h>
33 #include <linux/notifier.h>
34 #include <linux/memcontrol.h>
35 #include <linux/mempolicy.h>
36 #include <linux/security.h>
37 #include <linux/ptrace.h>
38 #include <linux/freezer.h>
39 #include <linux/ftrace.h>
40 #include <linux/ratelimit.h>
41 #include <linux/kthread.h>
42 #include <linux/init.h>
43 #include <linux/mmu_notifier.h>
44 
45 #include <asm/tlb.h>
46 #include "internal.h"
47 #include "slab.h"
48 
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/oom.h>
51 
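/*
 * Sysctl knobs; these are exposed to userspace under /proc/sys/vm/
 * (panic_on_oom, oom_kill_allocating_task, oom_dump_tasks).
 */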
52 int sysctl_panic_on_oom;
53 int sysctl_oom_kill_allocating_task;
54 int sysctl_oom_dump_tasks = 1;
55 
56 /*
57  * Serializes oom killer invocations (out_of_memory()) from all contexts to
58  * prevent overly eager oom killing (e.g. when the oom killer is invoked
59  * from different domains).
60  *
61  * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
62  * and mark_oom_victim().
63  */
64 DEFINE_MUTEX(oom_lock);
65 
66 #ifdef CONFIG_NUMA
67 /**
68  * has_intersects_mems_allowed() - check task eligibility for kill
69  * @start: task struct of the task to consider
70  * @mask: nodemask passed to page allocator for mempolicy ooms
71  *
72  * Task eligibility is determined by whether or not a candidate task, @tsk,
73  * shares the same mempolicy nodes as current if it is bound by such a policy
74  * and whether or not it has the same set of allowed cpuset nodes.
75  */
76 static bool has_intersects_mems_allowed(struct task_struct *start,
77 					const nodemask_t *mask)
78 {
79 	struct task_struct *tsk;
80 	bool ret = false;
81 
82 	rcu_read_lock();
83 	for_each_thread(start, tsk) {
84 		if (mask) {
85 			/*
86 			 * If this is a mempolicy constrained oom, tsk's
87 			 * cpuset is irrelevant.  Only return true if its
88 			 * mempolicy intersects current, otherwise it may be
89 			 * needlessly killed.
90 			 */
91 			ret = mempolicy_nodemask_intersects(tsk, mask);
92 		} else {
93 			/*
94 			 * This is not a mempolicy constrained oom, so only
95 			 * check the mems of tsk's cpuset.
96 			 */
97 			ret = cpuset_mems_allowed_intersects(current, tsk);
98 		}
99 		if (ret)
100 			break;
101 	}
102 	rcu_read_unlock();
103 
104 	return ret;
105 }
106 #else
107 static bool has_intersects_mems_allowed(struct task_struct *tsk,
108 					const nodemask_t *mask)
109 {
110 	return true;
111 }
112 #endif /* CONFIG_NUMA */
113 
114 /*
115  * The process p may have detached its own ->mm while exiting or through
116  * use_mm(), but one or more of its subthreads may still have a valid
117  * pointer.  Return p, or any of its subthreads with a valid ->mm, with
118  * task_lock() held.
119  */
120 struct task_struct *find_lock_task_mm(struct task_struct *p)
121 {
122 	struct task_struct *t;
123 
124 	rcu_read_lock();
125 
126 	for_each_thread(p, t) {
127 		task_lock(t);
128 		if (likely(t->mm))
129 			goto found;
130 		task_unlock(t);
131 	}
132 	t = NULL;
133 found:
134 	rcu_read_unlock();
135 
136 	return t;
137 }
138 
139 /*
140  * order == -1 means the oom kill was requested via sysrq; otherwise the
141  * order is only recorded for display purposes.
142  */
143 static inline bool is_sysrq_oom(struct oom_control *oc)
144 {
145 	return oc->order == -1;
146 }
147 
148 static inline bool is_memcg_oom(struct oom_control *oc)
149 {
150 	return oc->memcg != NULL;
151 }
152 
153 /* Return true if the task is not a suitable candidate victim. */
154 static bool oom_unkillable_task(struct task_struct *p,
155 		struct mem_cgroup *memcg, const nodemask_t *nodemask)
156 {
157 	if (is_global_init(p))
158 		return true;
159 	if (p->flags & PF_KTHREAD)
160 		return true;
161 
162 	/* When called from mem_cgroup_out_of_memory() and p is not a member of the memcg */
163 	if (memcg && !task_in_mem_cgroup(p, memcg))
164 		return true;
165 
166 	/* p may not have freeable memory in nodemask */
167 	if (!has_intersects_mems_allowed(p, nodemask))
168 		return true;
169 
170 	return false;
171 }
172 
173 /*
174  * Print out unreclaimable slab info when the amount of unreclaimable slab
175  * memory is greater than all user memory (LRU pages).
176  */
177 static bool is_dump_unreclaim_slabs(void)
178 {
179 	unsigned long nr_lru;
180 
181 	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
182 		 global_node_page_state(NR_INACTIVE_ANON) +
183 		 global_node_page_state(NR_ACTIVE_FILE) +
184 		 global_node_page_state(NR_INACTIVE_FILE) +
185 		 global_node_page_state(NR_ISOLATED_ANON) +
186 		 global_node_page_state(NR_ISOLATED_FILE) +
187 		 global_node_page_state(NR_UNEVICTABLE);
188 
189 	return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
190 }
191 
192 /**
193  * oom_badness - heuristic function to determine which candidate task to kill
194  * @p: task struct of the task whose badness score we calculate
195  * @totalpages: total present RAM allowed for page allocation
196  * @memcg: task's memory controller, if constrained
197  * @nodemask: nodemask passed to page allocator for mempolicy ooms
198  *
199  * The heuristic for determining which task to kill is made to be as simple and
200  * predictable as possible.  The goal is to return the highest value for the
201  * task consuming the most memory to avoid subsequent oom failures.
202  */
203 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
204 			  const nodemask_t *nodemask, unsigned long totalpages)
205 {
206 	long points;
207 	long adj;
208 
209 	if (oom_unkillable_task(p, memcg, nodemask))
210 		return 0;
211 
212 	p = find_lock_task_mm(p);
213 	if (!p)
214 		return 0;
215 
216 	/*
217 	 * Do not even consider tasks which are explicitly marked oom
218 	 * unkillable, have already been oom reaped or are in
219 	 * the middle of a vfork.
220 	 */
221 	adj = (long)p->signal->oom_score_adj;
222 	if (adj == OOM_SCORE_ADJ_MIN ||
223 			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
224 			in_vfork(p)) {
225 		task_unlock(p);
226 		return 0;
227 	}
228 
229 	/*
230 	 * The baseline for the badness score is the proportion of RAM that each
231 	 * task's rss, pagetable and swap space use.
232 	 */
233 	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
234 		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
235 	task_unlock(p);
236 
237 	/* Normalize to oom_score_adj units */
238 	adj *= totalpages / 1000;
239 	points += adj;
240 
241 	/*
242 	 * Never return 0 for an eligible task regardless of the root bonus and
243 	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
244 	 */
245 	return points > 0 ? points : 1;
246 }
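/*
 * Rough worked example (illustrative numbers, not taken from this file): on a
 * machine with totalpages = 1,000,000 (about 4GB with 4KiB pages), a task
 * using 250,000 pages of rss + swap + page tables scores ~250,000.  An
 * oom_score_adj of 300 adds 300 * (1,000,000 / 1000) = 300,000, for an
 * effective score of ~550,000; i.e. oom_score_adj shifts the score by
 * oom_score_adj/1000 of total memory.
 */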
247 
248 enum oom_constraint {
249 	CONSTRAINT_NONE,
250 	CONSTRAINT_CPUSET,
251 	CONSTRAINT_MEMORY_POLICY,
252 	CONSTRAINT_MEMCG,
253 };
254 
255 /*
256  * Determine the type of allocation constraint.
257  */
258 static enum oom_constraint constrained_alloc(struct oom_control *oc)
259 {
260 	struct zone *zone;
261 	struct zoneref *z;
262 	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
263 	bool cpuset_limited = false;
264 	int nid;
265 
266 	if (is_memcg_oom(oc)) {
267 		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
268 		return CONSTRAINT_MEMCG;
269 	}
270 
271 	/* Default to all available memory */
272 	oc->totalpages = totalram_pages + total_swap_pages;
273 
274 	if (!IS_ENABLED(CONFIG_NUMA))
275 		return CONSTRAINT_NONE;
276 
277 	if (!oc->zonelist)
278 		return CONSTRAINT_NONE;
279 	/*
280 	 * We reach here with __GFP_THISNODE only when __GFP_NOFAIL is used, so
281 	 * we should avoid killing current; a random task is killed in this case.
282 	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to handle it for now.
283 	 */
284 	if (oc->gfp_mask & __GFP_THISNODE)
285 		return CONSTRAINT_NONE;
286 
287 	/*
288 	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
289 	 * the page allocator means a mempolicy is in effect.  Cpuset policy
290 	 * is enforced in get_page_from_freelist().
291 	 */
292 	if (oc->nodemask &&
293 	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
294 		oc->totalpages = total_swap_pages;
295 		for_each_node_mask(nid, *oc->nodemask)
296 			oc->totalpages += node_spanned_pages(nid);
297 		return CONSTRAINT_MEMORY_POLICY;
298 	}
299 
300 	/* Check whether this allocation failure is caused by the cpuset's mems restriction */
301 	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
302 			high_zoneidx, oc->nodemask)
303 		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
304 			cpuset_limited = true;
305 
306 	if (cpuset_limited) {
307 		oc->totalpages = total_swap_pages;
308 		for_each_node_mask(nid, cpuset_current_mems_allowed)
309 			oc->totalpages += node_spanned_pages(nid);
310 		return CONSTRAINT_CPUSET;
311 	}
312 	return CONSTRAINT_NONE;
313 }
314 
315 static int oom_evaluate_task(struct task_struct *task, void *arg)
316 {
317 	struct oom_control *oc = arg;
318 	unsigned long points;
319 
320 	if (oom_unkillable_task(task, NULL, oc->nodemask))
321 		goto next;
322 
323 	/*
324 	 * This task already has access to memory reserves and is being killed.
325 	 * Don't allow any other task to have access to the reserves unless
326 	 * the task has MMF_OOM_SKIP because chances that it would release
327 	 * any memory is quite low.
328 	 */
329 	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
330 		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
331 			goto next;
332 		goto abort;
333 	}
334 
335 	/*
336 	 * If task is allocating a lot of memory and has been marked to be
337 	 * killed first if it triggers an oom, then select it.
338 	 */
339 	if (oom_task_origin(task)) {
340 		points = ULONG_MAX;
341 		goto select;
342 	}
343 
344 	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
345 	if (!points || points < oc->chosen_points)
346 		goto next;
347 
348 	/* Prefer thread group leaders for display purposes */
349 	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
350 		goto next;
351 select:
352 	if (oc->chosen)
353 		put_task_struct(oc->chosen);
354 	get_task_struct(task);
355 	oc->chosen = task;
356 	oc->chosen_points = points;
357 next:
358 	return 0;
359 abort:
360 	if (oc->chosen)
361 		put_task_struct(oc->chosen);
362 	oc->chosen = (void *)-1UL;
363 	return 1;
364 }
365 
366 /*
367  * Simple selection loop. We choose the process with the highest number of
368  * 'points'. If the scan was aborted, oc->chosen is set to -1.
369  */
370 static void select_bad_process(struct oom_control *oc)
371 {
372 	if (is_memcg_oom(oc))
373 		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
374 	else {
375 		struct task_struct *p;
376 
377 		rcu_read_lock();
378 		for_each_process(p)
379 			if (oom_evaluate_task(p, oc))
380 				break;
381 		rcu_read_unlock();
382 	}
383 
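	/*
	 * Scale the raw badness (in pages) down to the 0..1000 range used for
	 * reporting; this matches the scale of /proc/<pid>/oom_score.
	 */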
384 	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
385 }
386 
387 /**
388  * dump_tasks - dump current memory state of all system tasks
389  * @memcg: current's memory controller, if constrained
390  * @nodemask: nodemask passed to page allocator for mempolicy ooms
391  *
392  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
393  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
394  * are not shown.
395  * State information includes task's pid, uid, tgid, vm size, rss,
396  * pgtables_bytes, swapents, oom_score_adj value, and name.
397  */
398 static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
399 {
400 	struct task_struct *p;
401 	struct task_struct *task;
402 
403 	pr_info("Tasks state (memory values in pages):\n");
404 	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
405 	rcu_read_lock();
406 	for_each_process(p) {
407 		if (oom_unkillable_task(p, memcg, nodemask))
408 			continue;
409 
410 		task = find_lock_task_mm(p);
411 		if (!task) {
412 			/*
413 			 * This is a kthread or all of p's threads have already
414 			 * detached their mm's.  There's no need to report
415 			 * them; they can't be oom killed anyway.
416 			 */
417 			continue;
418 		}
419 
420 		pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
421 			task->pid, from_kuid(&init_user_ns, task_uid(task)),
422 			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
423 			mm_pgtables_bytes(task->mm),
424 			get_mm_counter(task->mm, MM_SWAPENTS),
425 			task->signal->oom_score_adj, task->comm);
426 		task_unlock(task);
427 	}
428 	rcu_read_unlock();
429 }
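/*
 * A resulting line looks roughly like this (values are illustrative):
 * [   1234]  1000  1234   123456     7890    81920       12             0 bash
 */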
430 
431 static void dump_header(struct oom_control *oc, struct task_struct *p)
432 {
433 	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
434 		current->comm, oc->gfp_mask, &oc->gfp_mask,
435 		nodemask_pr_args(oc->nodemask), oc->order,
436 			current->signal->oom_score_adj);
437 	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
438 		pr_warn("COMPACTION is disabled!!!\n");
439 
440 	cpuset_print_current_mems_allowed();
441 	dump_stack();
442 	if (is_memcg_oom(oc))
443 		mem_cgroup_print_oom_info(oc->memcg, p);
444 	else {
445 		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
446 		if (is_dump_unreclaim_slabs())
447 			dump_unreclaimable_slab();
448 	}
449 	if (sysctl_oom_dump_tasks)
450 		dump_tasks(oc->memcg, oc->nodemask);
451 }
452 
453 /*
454  * Number of OOM victims in flight
455  */
456 static atomic_t oom_victims = ATOMIC_INIT(0);
457 static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
458 
459 static bool oom_killer_disabled __read_mostly;
460 
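/*
 * Convert a page count to KiB for printing: a page is 2^PAGE_SHIFT bytes
 * and a KiB is 2^10 bytes, hence the shift.
 */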
461 #define K(x) ((x) << (PAGE_SHIFT-10))
462 
463 /*
464  * task->mm can be NULL if the task is the exited group leader.  So to
465  * determine whether the task is using a particular mm, we examine all the
466  * task's threads: if one of those is using this mm then this task was also
467  * using it.
468  */
469 bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
470 {
471 	struct task_struct *t;
472 
473 	for_each_thread(p, t) {
474 		struct mm_struct *t_mm = READ_ONCE(t->mm);
475 		if (t_mm)
476 			return t_mm == mm;
477 	}
478 	return false;
479 }
480 
481 #ifdef CONFIG_MMU
482 /*
483  * OOM Reaper kernel thread which tries to reap the memory used by the OOM
484  * victim (if that is possible) to help the OOM killer to move on.
485  */
486 static struct task_struct *oom_reaper_th;
487 static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
488 static struct task_struct *oom_reaper_list;
489 static DEFINE_SPINLOCK(oom_reaper_lock);
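/*
 * Victims queued for the reaper form a simple singly-linked LIFO list,
 * chained through task_struct::oom_reaper_list and protected by
 * oom_reaper_lock: wake_oom_reaper() pushes, oom_reaper() pops.
 */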
490 
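/*
 * __oom_reap_task_mm() is non-static because the regular exit path
 * (exit_mmap()) also uses it to reap an oom victim's address space
 * before the mm is torn down.
 */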
491 bool __oom_reap_task_mm(struct mm_struct *mm)
492 {
493 	struct vm_area_struct *vma;
494 	bool ret = true;
495 
496 	/*
497 	 * Tell all users of get_user/copy_from_user etc... that the content
498 	 * is no longer stable. No barriers really needed because unmapping
499 	 * should imply barriers already and the reader would hit a page fault
500 	 * if it stumbled over reaped memory.
501 	 */
502 	set_bit(MMF_UNSTABLE, &mm->flags);
503 
504 	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
505 		if (!can_madv_dontneed_vma(vma))
506 			continue;
507 
508 		/*
509 		 * Only anonymous pages have a good chance to be dropped
510 		 * without additional steps which we cannot afford as we
511 		 * are OOM already.
512 		 *
513 		 * We do not even care about fs backed pages because all
514 		 * which are reclaimable have already been reclaimed and
515 		 * we do not want to block exit_mmap by keeping mm ref
516 		 * count elevated without a good reason.
517 		 */
518 		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
519 			const unsigned long start = vma->vm_start;
520 			const unsigned long end = vma->vm_end;
521 			struct mmu_gather tlb;
522 
523 			tlb_gather_mmu(&tlb, mm, start, end);
524 			if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
525 				ret = false;
526 				continue;
527 			}
528 			unmap_page_range(&tlb, vma, start, end, NULL);
529 			mmu_notifier_invalidate_range_end(mm, start, end);
530 			tlb_finish_mmu(&tlb, start, end);
531 		}
532 	}
533 
534 	return ret;
535 }
536 
537 /*
538  * Reaps the address space of the give task.
539  *
540  * Returns true on success and false if none or part of the address space
541  * has been reclaimed and the caller should retry later.
542  */
543 static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
544 {
545 	bool ret = true;
546 
547 	if (!down_read_trylock(&mm->mmap_sem)) {
548 		trace_skip_task_reaping(tsk->pid);
549 		return false;
550 	}
551 
552 	/*
553 	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
554 	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
555 	 * under mmap_sem for reading because it serializes against the
556 	 * down_write();up_write() cycle in exit_mmap().
557 	 */
558 	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
559 		trace_skip_task_reaping(tsk->pid);
560 		goto out_unlock;
561 	}
562 
563 	trace_start_task_reaping(tsk->pid);
564 
565 	/* failed to reap part of the address space. Try again later */
566 	ret = __oom_reap_task_mm(mm);
567 	if (!ret)
568 		goto out_finish;
569 
570 	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
571 			task_pid_nr(tsk), tsk->comm,
572 			K(get_mm_counter(mm, MM_ANONPAGES)),
573 			K(get_mm_counter(mm, MM_FILEPAGES)),
574 			K(get_mm_counter(mm, MM_SHMEMPAGES)));
575 out_finish:
576 	trace_finish_task_reaping(tsk->pid);
577 out_unlock:
578 	up_read(&mm->mmap_sem);
579 
580 	return ret;
581 }
582 
583 #define MAX_OOM_REAP_RETRIES 10
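/*
 * With schedule_timeout_idle(HZ/10) (~100ms) between attempts,
 * MAX_OOM_REAP_RETRIES bounds the reaping effort to roughly one second
 * before the mm is given up on and hidden via MMF_OOM_SKIP.
 */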
584 static void oom_reap_task(struct task_struct *tsk)
585 {
586 	int attempts = 0;
587 	struct mm_struct *mm = tsk->signal->oom_mm;
588 
589 	/* Retry the down_read_trylock(mmap_sem) a few times */
590 	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
591 		schedule_timeout_idle(HZ/10);
592 
593 	if (attempts <= MAX_OOM_REAP_RETRIES ||
594 	    test_bit(MMF_OOM_SKIP, &mm->flags))
595 		goto done;
596 
597 	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
598 		task_pid_nr(tsk), tsk->comm);
599 	debug_show_all_locks();
600 
601 done:
602 	tsk->oom_reaper_list = NULL;
603 
604 	/*
605 	 * Hide this mm from the OOM killer because it has either been reaped or
606 	 * a holder of mmap_sem could not call up_write().
607 	 */
608 	set_bit(MMF_OOM_SKIP, &mm->flags);
609 
610 	/* Drop a reference taken by wake_oom_reaper */
611 	put_task_struct(tsk);
612 }
613 
614 static int oom_reaper(void *unused)
615 {
616 	while (true) {
617 		struct task_struct *tsk = NULL;
618 
619 		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
620 		spin_lock(&oom_reaper_lock);
621 		if (oom_reaper_list != NULL) {
622 			tsk = oom_reaper_list;
623 			oom_reaper_list = tsk->oom_reaper_list;
624 		}
625 		spin_unlock(&oom_reaper_lock);
626 
627 		if (tsk)
628 			oom_reap_task(tsk);
629 	}
630 
631 	return 0;
632 }
633 
634 static void wake_oom_reaper(struct task_struct *tsk)
635 {
636 	/* tsk is already queued? */
637 	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
638 		return;
639 
640 	get_task_struct(tsk);
641 
642 	spin_lock(&oom_reaper_lock);
643 	tsk->oom_reaper_list = oom_reaper_list;
644 	oom_reaper_list = tsk;
645 	spin_unlock(&oom_reaper_lock);
646 	trace_wake_reaper(tsk->pid);
647 	wake_up(&oom_reaper_wait);
648 }
649 
650 static int __init oom_init(void)
651 {
652 	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
653 	return 0;
654 }
655 subsys_initcall(oom_init)
656 #else
657 static inline void wake_oom_reaper(struct task_struct *tsk)
658 {
659 }
660 #endif /* CONFIG_MMU */
661 
662 /**
663  * mark_oom_victim - mark the given task as OOM victim
664  * @tsk: task to mark
665  *
666  * Has to be called with oom_lock held and never after the oom killer
667  * has already been disabled.
668  *
669  * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
670  * (either under task_lock or by operating on current).
671  */
672 static void mark_oom_victim(struct task_struct *tsk)
673 {
674 	struct mm_struct *mm = tsk->mm;
675 
676 	WARN_ON(oom_killer_disabled);
677 	/* OOM killer might race with memcg OOM */
678 	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
679 		return;
680 
681 	/* oom_mm is bound to the signal struct lifetime. */
682 	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
683 		mmgrab(tsk->signal->oom_mm);
684 		set_bit(MMF_OOM_VICTIM, &mm->flags);
685 	}
686 
687 	/*
688 	 * Make sure that the task is woken up from uninterruptible sleep
689 	 * if it is frozen, because otherwise the OOM killer could not free
690 	 * any memory and would livelock. freezing_slow_path will tell the
691 	 * freezer that TIF_MEMDIE tasks should be ignored.
692 	 */
693 	__thaw_task(tsk);
694 	atomic_inc(&oom_victims);
695 	trace_mark_victim(tsk->pid);
696 }
697 
698 /**
699  * exit_oom_victim - note the exit of an OOM victim
700  */
701 void exit_oom_victim(void)
702 {
703 	clear_thread_flag(TIF_MEMDIE);
704 
705 	if (!atomic_dec_return(&oom_victims))
706 		wake_up_all(&oom_victims_wait);
707 }
708 
709 /**
710  * oom_killer_enable - enable OOM killer
711  */
712 void oom_killer_enable(void)
713 {
714 	oom_killer_disabled = false;
715 	pr_info("OOM killer enabled.\n");
716 }
717 
718 /**
719  * oom_killer_disable - disable OOM killer
720  * @timeout: maximum timeout to wait for oom victims in jiffies
721  *
722  * Forces all page allocations to fail rather than trigger OOM killer.
723  * Will block and wait until all OOM victims are killed or the given
724  * timeout expires.
725  *
726  * The function cannot be called when there are runnable user tasks because
727  * userspace would see unexpected allocation failures as a result. Any
728  * new use of this function should be discussed with the MM people.
729  *
730  * Returns true if successful and false if the OOM killer cannot be
731  * disabled.
732  */
733 bool oom_killer_disable(signed long timeout)
734 {
735 	signed long ret;
736 
737 	/*
738 	 * Make sure to not race with an ongoing OOM killer. Check that the
739 	 * current is not killed (possibly due to sharing the victim's memory).
740 	 */
741 	if (mutex_lock_killable(&oom_lock))
742 		return false;
743 	oom_killer_disabled = true;
744 	mutex_unlock(&oom_lock);
745 
746 	ret = wait_event_interruptible_timeout(oom_victims_wait,
747 			!atomic_read(&oom_victims), timeout);
748 	if (ret <= 0) {
749 		oom_killer_enable();
750 		return false;
751 	}
752 	pr_info("OOM killer disabled.\n");
753 
754 	return true;
755 }
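/*
 * Note: the main user of oom_killer_disable()/oom_killer_enable() is the
 * freezer during suspend/hibernation (freeze_processes()), which must not
 * see tasks oom-killed while userspace is frozen.
 */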
756 
757 static inline bool __task_will_free_mem(struct task_struct *task)
758 {
759 	struct signal_struct *sig = task->signal;
760 
761 	/*
762 	 * A coredumping process may sleep for an extended period in exit_mm(),
763 	 * so the oom killer cannot assume that the process will promptly exit
764 	 * and release memory.
765 	 */
766 	if (sig->flags & SIGNAL_GROUP_COREDUMP)
767 		return false;
768 
769 	if (sig->flags & SIGNAL_GROUP_EXIT)
770 		return true;
771 
772 	if (thread_group_empty(task) && (task->flags & PF_EXITING))
773 		return true;
774 
775 	return false;
776 }
777 
778 /*
779  * Checks whether the given task is dying or exiting and likely to
780  * release its address space. This means that all threads and processes
781  * sharing the same mm must be killed or already exiting.
782  * The caller has to make sure that task->mm is stable (hold task_lock or
783  * operate on current).
784  */
785 static bool task_will_free_mem(struct task_struct *task)
786 {
787 	struct mm_struct *mm = task->mm;
788 	struct task_struct *p;
789 	bool ret = true;
790 
791 	/*
792 	 * Skip tasks without an mm because they might have already passed exit_mm()
793 	 * and exit_oom_victim(). The oom_reaper could have rescued that, but do not
794 	 * rely on it for now. We could consider find_lock_task_mm() in the future.
795 	 */
796 	if (!mm)
797 		return false;
798 
799 	if (!__task_will_free_mem(task))
800 		return false;
801 
802 	/*
803 	 * This task has already been drained by the oom reaper so there are
804 	 * only a small chance it will free any more memory.
805 	 */
806 	if (test_bit(MMF_OOM_SKIP, &mm->flags))
807 		return false;
808 
809 	if (atomic_read(&mm->mm_users) <= 1)
810 		return true;
811 
812 	/*
813 	 * Make sure that all tasks which share the mm with the given task
814 	 * are also dying, so that a) nobody pins the mm and
815 	 * b) the task is also reapable by the oom reaper.
816 	 */
817 	rcu_read_lock();
818 	for_each_process(p) {
819 		if (!process_shares_mm(p, mm))
820 			continue;
821 		if (same_thread_group(task, p))
822 			continue;
823 		ret = __task_will_free_mem(p);
824 		if (!ret)
825 			break;
826 	}
827 	rcu_read_unlock();
828 
829 	return ret;
830 }
831 
832 static void __oom_kill_process(struct task_struct *victim)
833 {
834 	struct task_struct *p;
835 	struct mm_struct *mm;
836 	bool can_oom_reap = true;
837 
838 	p = find_lock_task_mm(victim);
839 	if (!p) {
840 		put_task_struct(victim);
841 		return;
842 	} else if (victim != p) {
843 		get_task_struct(p);
844 		put_task_struct(victim);
845 		victim = p;
846 	}
847 
848 	/* Get a reference to safely compare mm after task_unlock(victim) */
849 	mm = victim->mm;
850 	mmgrab(mm);
851 
852 	/* Raise event before sending signal: task reaper must see this */
853 	count_vm_event(OOM_KILL);
854 	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
855 
856 	/*
857 	 * We should send SIGKILL before granting access to memory reserves
858 	 * in order to prevent the OOM victim from depleting the memory
859 	 * reserves from the user space under its control.
860 	 */
861 	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, PIDTYPE_TGID);
862 	mark_oom_victim(victim);
863 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
864 		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
865 		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
866 		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
867 		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
868 	task_unlock(victim);
869 
870 	/*
871 	 * Kill all user processes sharing victim->mm in other thread groups, if
872 	 * any.  They don't get access to memory reserves, though, to avoid
873 	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
874 	 * oom killed thread cannot exit because it requires the semaphore and
875 	 * it's contended by another thread trying to allocate memory itself.
876 	 * That thread will now get access to memory reserves since it has a
877 	 * pending fatal signal.
878 	 */
879 	rcu_read_lock();
880 	for_each_process(p) {
881 		if (!process_shares_mm(p, mm))
882 			continue;
883 		if (same_thread_group(p, victim))
884 			continue;
885 		if (is_global_init(p)) {
886 			can_oom_reap = false;
887 			set_bit(MMF_OOM_SKIP, &mm->flags);
888 			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
889 					task_pid_nr(victim), victim->comm,
890 					task_pid_nr(p), p->comm);
891 			continue;
892 		}
893 		/*
894 		 * No use_mm() user needs to read from userspace, so it is
895 		 * safe to reap the mm.
896 		 */
897 		if (unlikely(p->flags & PF_KTHREAD))
898 			continue;
899 		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, PIDTYPE_TGID);
900 	}
901 	rcu_read_unlock();
902 
903 	if (can_oom_reap)
904 		wake_oom_reaper(victim);
905 
906 	mmdrop(mm);
907 	put_task_struct(victim);
908 }
909 #undef K
910 
911 /*
912  * Kill the provided task unless it is protected by setting
913  * oom_score_adj to OOM_SCORE_ADJ_MIN.
914  */
915 static int oom_kill_memcg_member(struct task_struct *task, void *unused)
916 {
917 	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
918 		get_task_struct(task);
919 		__oom_kill_process(task);
920 	}
921 	return 0;
922 }
923 
924 static void oom_kill_process(struct oom_control *oc, const char *message)
925 {
926 	struct task_struct *p = oc->chosen;
927 	unsigned int points = oc->chosen_points;
928 	struct task_struct *victim = p;
929 	struct task_struct *child;
930 	struct task_struct *t;
931 	struct mem_cgroup *oom_group;
932 	unsigned int victim_points = 0;
933 	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
934 					      DEFAULT_RATELIMIT_BURST);
935 
936 	/*
937 	 * If the task is already exiting, don't alarm the sysadmin or kill
938 	 * its children or threads, just give it access to memory reserves
939 	 * so it can die quickly
940 	 */
941 	task_lock(p);
942 	if (task_will_free_mem(p)) {
943 		mark_oom_victim(p);
944 		wake_oom_reaper(p);
945 		task_unlock(p);
946 		put_task_struct(p);
947 		return;
948 	}
949 	task_unlock(p);
950 
951 	if (__ratelimit(&oom_rs))
952 		dump_header(oc, p);
953 
954 	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
955 		message, task_pid_nr(p), p->comm, points);
956 
957 	/*
958 	 * If any of p's children has a different mm and is eligible for kill,
959 	 * the one with the highest oom_badness() score is sacrificed for its
960 	 * parent.  This attempts to lose the minimal amount of work done while
961 	 * still freeing memory.
962 	 */
963 	read_lock(&tasklist_lock);
964 	for_each_thread(p, t) {
965 		list_for_each_entry(child, &t->children, sibling) {
966 			unsigned int child_points;
967 
968 			if (process_shares_mm(child, p->mm))
969 				continue;
970 			/*
971 			 * oom_badness() returns 0 if the thread is unkillable
972 			 */
973 			child_points = oom_badness(child,
974 				oc->memcg, oc->nodemask, oc->totalpages);
975 			if (child_points > victim_points) {
976 				put_task_struct(victim);
977 				victim = child;
978 				victim_points = child_points;
979 				get_task_struct(victim);
980 			}
981 		}
982 	}
983 	read_unlock(&tasklist_lock);
984 
985 	/*
986 	 * Do we need to kill the entire memory cgroup?
987 	 * Or even one of the ancestor memory cgroups?
988 	 * Check this out before killing the victim task.
989 	 */
990 	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
991 
992 	__oom_kill_process(victim);
993 
994 	/*
995 	 * If necessary, kill all tasks in the selected memory cgroup.
996 	 */
997 	if (oom_group) {
998 		mem_cgroup_print_oom_group(oom_group);
999 		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member, NULL);
1000 		mem_cgroup_put(oom_group);
1001 	}
1002 }
1003 
1004 /*
1005  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
1006  */
1007 static void check_panic_on_oom(struct oom_control *oc,
1008 			       enum oom_constraint constraint)
1009 {
1010 	if (likely(!sysctl_panic_on_oom))
1011 		return;
1012 	if (sysctl_panic_on_oom != 2) {
1013 		/*
1014 		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
1015 		 * does not panic for cpuset, mempolicy, or memcg allocation
1016 		 * failures.
1017 		 */
1018 		if (constraint != CONSTRAINT_NONE)
1019 			return;
1020 	}
1021 	/* Do not panic for oom kills triggered by sysrq */
1022 	if (is_sysrq_oom(oc))
1023 		return;
1024 	dump_header(oc, NULL);
1025 	panic("Out of memory: %s panic_on_oom is enabled\n",
1026 		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
1027 }
1028 
1029 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
1030 
1031 int register_oom_notifier(struct notifier_block *nb)
1032 {
1033 	return blocking_notifier_chain_register(&oom_notify_list, nb);
1034 }
1035 EXPORT_SYMBOL_GPL(register_oom_notifier);
1036 
1037 int unregister_oom_notifier(struct notifier_block *nb)
1038 {
1039 	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
1040 }
1041 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
1042 
1043 /**
1044  * out_of_memory - kill the "best" process when we run out of memory
1045  * @oc: pointer to struct oom_control
1046  *
1047  * If we run out of memory, we have the choice between either
1048  * killing a random task (bad), letting the system crash (worse)
1049  * OR try to be smart about which process to kill. Note that we
1050  * don't have to be perfect here, we just have to be good.
1051  */
1052 bool out_of_memory(struct oom_control *oc)
1053 {
1054 	unsigned long freed = 0;
1055 	enum oom_constraint constraint = CONSTRAINT_NONE;
1056 
1057 	if (oom_killer_disabled)
1058 		return false;
1059 
1060 	if (!is_memcg_oom(oc)) {
1061 		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
1062 		if (freed > 0)
1063 			/* Got some memory back in the last second. */
1064 			return true;
1065 	}
1066 
1067 	/*
1068 	 * If current has a pending SIGKILL or is exiting, then automatically
1069 	 * select it.  The goal is to allow it to allocate so that it may
1070 	 * quickly exit and free its memory.
1071 	 */
1072 	if (task_will_free_mem(current)) {
1073 		mark_oom_victim(current);
1074 		wake_oom_reaper(current);
1075 		return true;
1076 	}
1077 
1078 	/*
1079 	 * The OOM killer does not compensate for IO-less reclaim.
1080 	 * pagefault_out_of_memory lost its gfp context so we have to
1081 	 * make sure to exclude the 0 mask - all other users should have at least
1082 	 * ___GFP_DIRECT_RECLAIM to get here.
1083 	 */
1084 	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
1085 		return true;
1086 
1087 	/*
1088 	 * Check if there were limitations on the allocation (only relevant for
1089 	 * NUMA and memcg) that may require different handling.
1090 	 */
1091 	constraint = constrained_alloc(oc);
1092 	if (constraint != CONSTRAINT_MEMORY_POLICY)
1093 		oc->nodemask = NULL;
1094 	check_panic_on_oom(oc, constraint);
1095 
1096 	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1097 	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
1098 	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1099 		get_task_struct(current);
1100 		oc->chosen = current;
1101 		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
1102 		return true;
1103 	}
1104 
1105 	select_bad_process(oc);
1106 	/* Found nothing?!?! Either we hang forever, or we panic. */
1107 	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
1108 		dump_header(oc, NULL);
1109 		panic("Out of memory and no killable processes...\n");
1110 	}
1111 	if (oc->chosen && oc->chosen != (void *)-1UL)
1112 		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
1113 				 "Memory cgroup out of memory");
1114 	return !!oc->chosen;
1115 }
1116 
1117 /*
1118  * The pagefault handler calls here because it is out of memory, so kill a
1119  * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
1120  * killing is already in progress so do nothing.
1121  */
1122 void pagefault_out_of_memory(void)
1123 {
1124 	struct oom_control oc = {
1125 		.zonelist = NULL,
1126 		.nodemask = NULL,
1127 		.memcg = NULL,
1128 		.gfp_mask = 0,
1129 		.order = 0,
1130 	};
1131 
1132 	if (mem_cgroup_oom_synchronize(true))
1133 		return;
1134 
1135 	if (!mutex_trylock(&oom_lock))
1136 		return;
1137 	out_of_memory(&oc);
1138 	mutex_unlock(&oom_lock);
1139 }
1140