// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

static int sysctl_panic_on_oom;
static int sysctl_oom_kill_allocating_task;
static int sysctl_oom_dump_tasks = 1;

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent over-eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of the task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_in_oom_domain(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
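/*
 * A minimal usage sketch (illustrative only; mirrors how callers such as
 * oom_badness() below use this helper): the returned task, if any, comes back
 * with task_lock() held, so the caller must drop the lock when done with ->mm:
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *
 *	if (t) {
 *		... inspect t->mm safely ...
 *		task_unlock(t);
 *	}
 */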
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* return true if the task is not suitable as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Check whether the amount of unreclaimable slab is greater than
 * all user memory (LRU pages).
 * dump_unreclaimable_slab() could help in the case that the oom is
 * due to too much unreclaimable slab used by the kernel.
 */
static bool should_dump_unreclaim_slab(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}
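
/*
 * A rough worked example of the heuristic above (illustrative numbers only):
 * on a system where totalpages = 1,000,000, a task whose rss + swap entries +
 * page tables add up to 50,000 pages and whose oom_score_adj is 300 scores
 *
 *	points = 50,000 + 300 * (1,000,000 / 1000) = 350,000
 *
 * while oom_score_adj = -300 would subtract 300,000 instead; each
 * oom_score_adj unit is therefore worth roughly 0.1% of totalpages.
 */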

static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
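/*
 * For orientation only (a summary of the checks below, not additional
 * policy): memcg charge failures report CONSTRAINT_MEMCG; non-NUMA builds,
 * missing zonelists (__GFP_NOFAIL callers) and __GFP_THISNODE allocations
 * fall back to CONSTRAINT_NONE; a nodemask narrower than all memory nodes
 * indicates CONSTRAINT_MEMORY_POLICY; an allocation walled in by cpuset mems
 * yields CONSTRAINT_CPUSET.  oc->totalpages is sized accordingly in each case.
 */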
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We only reach here when __GFP_NOFAIL is used, so we should avoid
	 * killing current.  We have to fall back to a random task kill in
	 * this case.  Ideally this would be CONSTRAINT_THISNODE, but there
	 * is no way to handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because the chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If the task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. If the scan was aborted, oc->chosen is set to -1.
 */
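/*
 * (Informational) oom_evaluate_task() above aborts the whole scan, leaving
 * oc->chosen set to -1, when it finds an existing OOM victim that has not yet
 * been reaped (MMF_OOM_SKIP not set) and the kill was not requested via
 * sysrq; in that case we wait for that victim to exit rather than picking
 * another task.
 */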
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}

static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * All of p's threads have already detached their mm's. There's
		 * no need to report them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}

static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
			current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		__show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask));
		if (should_dump_unreclaim_slab())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for_each_vma(vmi, vma) {
		if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * that are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb);
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or only part of the address space
 * could be reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* Failed to reap part of the address space. Try again later. */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by queue_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	set_freezable();

	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock_irq(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock_irq(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct timer_list *timer)
{
	struct task_struct *tsk = container_of(timer, struct task_struct,
			oom_reaper_timer);
	struct mm_struct *mm = tsk->signal->oom_mm;
	unsigned long flags;

	/* The victim managed to terminate on its own - see exit_mmap */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		put_task_struct(tsk);
		return;
	}

	spin_lock_irqsave(&oom_reaper_lock, flags);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock_irqrestore(&oom_reaper_lock, flags);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

/*
 * Give the OOM victim time to exit naturally before invoking the oom reaper.
 * The timer's timeout is arbitrary... the longer it is, the longer the worst
 * case scenario for the OOM can take. If it is too small, the oom_reaper can
 * get in the way and release resources needed by the process exit path,
 * e.g. the futex robust list can sit in Anon|Private memory that gets reaped
 * before the exit path is able to wake the futex waiters.
 */
#define OOM_REAPER_DELAY (2*HZ)
static void queue_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);
	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
	add_timer(&tsk->oom_reaper_timer);
}

#ifdef CONFIG_SYSCTL
static struct ctl_table vm_oom_kill_table[] = {
	{
		.procname	= "panic_on_oom",
		.data		= &sysctl_panic_on_oom,
		.maxlen		= sizeof(sysctl_panic_on_oom),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "oom_kill_allocating_task",
		.data		= &sysctl_oom_kill_allocating_task,
		.maxlen		= sizeof(sysctl_oom_kill_allocating_task),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "oom_dump_tasks",
		.data		= &sysctl_oom_dump_tasks,
		.maxlen		= sizeof(sysctl_oom_dump_tasks),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};
#endif
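
/*
 * The table above is registered under "vm" (see oom_init() below), so these
 * knobs appear as /proc/sys/vm/panic_on_oom, /proc/sys/vm/oom_kill_allocating_task
 * and /proc/sys/vm/oom_dump_tasks.  An illustrative shell session (not part of
 * this file):
 *
 *	# sysctl -w vm.oom_dump_tasks=1
 *	# echo 2 > /proc/sys/vm/panic_on_oom
 */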

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
#ifdef CONFIG_SYSCTL
	register_sysctl_init("vm", vm_oom_kill_table);
#endif
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void queue_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * the OOM killer has been disabled.
 *
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
		mmgrab(tsk->signal->oom_mm);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger the OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}
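
/*
 * A minimal usage sketch for the pair above (the suspend path is the assumed
 * caller here, and "timeout_msecs" is a made-up variable for illustration):
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(timeout_msecs)))
 *		return -EBUSY;	// OOM victims did not exit in time
 *	... do work with the OOM killer disabled ...
 *	oom_killer_enable();
 */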

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in
	 * coredump_task_exit(), so the oom killer cannot assume that
	 * the process will promptly exit and release memory.
	 */
	if (sig->core_state)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because they might have passed exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on it for now. We can consider using find_lock_task_mm() in the
	 * future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there is
	 * only a small chance that it will free any more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_lock livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from userspace, so
		 * we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		queue_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}

/*
 * Kill the provided task unless it is secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		queue_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
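	/*
	 * (Background note; a summary of the cgroup v2 memory.oom.group
	 * behaviour as implemented outside this file, not a guarantee of its
	 * exact semantics.)  mem_cgroup_get_oom_group() returns the
	 * highest-level ancestor memcg with memory.oom.group set, if any, so
	 * that the OOM kill can take down the whole workload as an
	 * indivisible unit instead of leaving it partially killed.
	 */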
10403d8b38ebSRoman Gushchin 	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
10413d8b38ebSRoman Gushchin 
1042bbbe4802SShakeel Butt 	__oom_kill_process(victim, message);
10433d8b38ebSRoman Gushchin 
10443d8b38ebSRoman Gushchin 	/*
10453d8b38ebSRoman Gushchin 	 * If necessary, kill all tasks in the selected memory cgroup.
10463d8b38ebSRoman Gushchin 	 */
10473d8b38ebSRoman Gushchin 	if (oom_group) {
1048b6bf9abbSDan Schatzberg 		memcg_memory_event(oom_group, MEMCG_OOM_GROUP_KILL);
10493d8b38ebSRoman Gushchin 		mem_cgroup_print_oom_group(oom_group);
1050bbbe4802SShakeel Butt 		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
1051bbbe4802SShakeel Butt 				      (void *)message);
10523d8b38ebSRoman Gushchin 		mem_cgroup_put(oom_group);
10533d8b38ebSRoman Gushchin 	}
10545989ad7bSRoman Gushchin }
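/*
 * The group-kill path above is normally driven by the cgroup v2
 * memory.oom.group knob: with it set to 1, an OOM kill of one member is
 * escalated to every task in the cgroup except those protected as in
 * oom_kill_memcg_member().  Illustrative sketch, assuming a cgroup v2 mount
 * at /sys/fs/cgroup and a hypothetical cgroup named "mygroup":
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/fs/cgroup/mygroup/memory.oom.group", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (write(fd, "1", 1) != 1) {
 *			close(fd);
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */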
10555989ad7bSRoman Gushchin 
1056309ed882SDavid Rientjes /*
1057309ed882SDavid Rientjes  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
1058309ed882SDavid Rientjes  */
1059432b1de0SYafang Shao static void check_panic_on_oom(struct oom_control *oc)
1060309ed882SDavid Rientjes {
1061309ed882SDavid Rientjes 	if (likely(!sysctl_panic_on_oom))
1062309ed882SDavid Rientjes 		return;
1063309ed882SDavid Rientjes 	if (sysctl_panic_on_oom != 2) {
1064309ed882SDavid Rientjes 		/*
1065309ed882SDavid Rientjes 		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
1066309ed882SDavid Rientjes 		 * does not panic for cpuset, mempolicy, or memcg allocation
1067309ed882SDavid Rientjes 		 * failures.
1068309ed882SDavid Rientjes 		 */
1069432b1de0SYafang Shao 		if (oc->constraint != CONSTRAINT_NONE)
1070309ed882SDavid Rientjes 			return;
1071309ed882SDavid Rientjes 	}
1072071a4befSDavid Rientjes 	/* Do not panic for oom kills triggered by sysrq */
1073db2a0dd7SYaowei Bai 	if (is_sysrq_oom(oc))
1074071a4befSDavid Rientjes 		return;
10752a966b77SVladimir Davydov 	dump_header(oc, NULL);
1076309ed882SDavid Rientjes 	panic("Out of memory: %s panic_on_oom is enabled\n",
1077309ed882SDavid Rientjes 		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
1078309ed882SDavid Rientjes }
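/*
 * panic_on_oom is exposed as the vm.panic_on_oom sysctl
 * (/proc/sys/vm/panic_on_oom): 0 leaves recovery to the OOM killer, 1 panics
 * only for unconstrained (CONSTRAINT_NONE) OOMs, 2 panics even for cpuset,
 * mempolicy and memcg constrained OOMs; sysrq-triggered kills never panic,
 * as checked above.  Minimal sketch for enabling the strict mode (needs
 * root):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/vm/panic_on_oom", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("2\n", f);
 *		fclose(f);
 *		return 0;
 *	}
 */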
1079309ed882SDavid Rientjes 
10808bc719d3SMartin Schwidefsky static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
10818bc719d3SMartin Schwidefsky 
10828bc719d3SMartin Schwidefsky int register_oom_notifier(struct notifier_block *nb)
10838bc719d3SMartin Schwidefsky {
10848bc719d3SMartin Schwidefsky 	return blocking_notifier_chain_register(&oom_notify_list, nb);
10858bc719d3SMartin Schwidefsky }
10868bc719d3SMartin Schwidefsky EXPORT_SYMBOL_GPL(register_oom_notifier);
10878bc719d3SMartin Schwidefsky 
10888bc719d3SMartin Schwidefsky int unregister_oom_notifier(struct notifier_block *nb)
10898bc719d3SMartin Schwidefsky {
10908bc719d3SMartin Schwidefsky 	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
10918bc719d3SMartin Schwidefsky }
10928bc719d3SMartin Schwidefsky EXPORT_SYMBOL_GPL(unregister_oom_notifier);
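/*
 * Minimal sketch of an oom_notify_list user (hypothetical module code): the
 * chain is only consulted for global, non-memcg OOMs, and the callback gets a
 * pointer to the "freed" counter that out_of_memory() below hands to
 * blocking_notifier_call_chain().  Reporting any freed pages there suppresses
 * the kill unless the OOM was triggered via sysrq.
 *
 *	#include <linux/notifier.h>
 *	#include <linux/oom.h>
 *
 *	static int example_oom_notify(struct notifier_block *nb,
 *				      unsigned long action, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += example_release_caches();	// hypothetical helper, returns pages freed
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_oom_nb = {
 *		.notifier_call = example_oom_notify,
 *	};
 *
 *	// module init:  register_oom_notifier(&example_oom_nb);
 *	// module exit:  unregister_oom_notifier(&example_oom_nb);
 */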
10938bc719d3SMartin Schwidefsky 
10941da177e4SLinus Torvalds /**
10956e0fc46dSDavid Rientjes  * out_of_memory - kill the "best" process when we run out of memory
10966e0fc46dSDavid Rientjes  * @oc: pointer to struct oom_control
10971da177e4SLinus Torvalds  *
10981da177e4SLinus Torvalds  * If we run out of memory, we have the choice between either
10991da177e4SLinus Torvalds  * killing a random task (bad), letting the system crash (worse)
11001da177e4SLinus Torvalds  * OR try to be smart about which process to kill. Note that we
11011da177e4SLinus Torvalds  * don't have to be perfect here, we just have to be good.
11021da177e4SLinus Torvalds  */
11036e0fc46dSDavid Rientjes bool out_of_memory(struct oom_control *oc)
11041da177e4SLinus Torvalds {
11058bc719d3SMartin Schwidefsky 	unsigned long freed = 0;
11068bc719d3SMartin Schwidefsky 
1107dc56401fSJohannes Weiner 	if (oom_killer_disabled)
1108dc56401fSJohannes Weiner 		return false;
1109dc56401fSJohannes Weiner 
11107c5f64f8SVladimir Davydov 	if (!is_memcg_oom(oc)) {
11118bc719d3SMartin Schwidefsky 		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
1112f530243aSJann Horn 		if (freed > 0 && !is_sysrq_oom(oc))
11138bc719d3SMartin Schwidefsky 			/* Got some memory back in the last second. */
111475e8f8b2SDavid Rientjes 			return true;
11157c5f64f8SVladimir Davydov 	}
11161da177e4SLinus Torvalds 
11177b98c2e4SDavid Rientjes 	/*
11189ff4868eSDavid Rientjes 	 * If current has a pending SIGKILL or is exiting, then automatically
11199ff4868eSDavid Rientjes 	 * select it.  The goal is to allow it to allocate so that it may
11209ff4868eSDavid Rientjes 	 * quickly exit and free its memory.
11217b98c2e4SDavid Rientjes 	 */
1122091f362cSMichal Hocko 	if (task_will_free_mem(current)) {
112316e95196SJohannes Weiner 		mark_oom_victim(current);
1124e4a38402SNico Pache 		queue_oom_reaper(current);
112575e8f8b2SDavid Rientjes 		return true;
11267b98c2e4SDavid Rientjes 	}
11277b98c2e4SDavid Rientjes 
11289b0f8b04SChristoph Lameter 	/*
11293da88fb3SMichal Hocko 	 * The OOM killer does not compensate for IO-less reclaim.
1130*4822acb1SHaifeng Xu 	 * But mem_cgroup_oom() has to invoke the OOM killer even
1131*4822acb1SHaifeng Xu 	 * if it is a GFP_NOFS allocation.
11323da88fb3SMichal Hocko 	 */
1133*4822acb1SHaifeng Xu 	if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
11343da88fb3SMichal Hocko 		return true;
11353da88fb3SMichal Hocko 
11363da88fb3SMichal Hocko 	/*
11379b0f8b04SChristoph Lameter 	 * Check if there were limitations on the allocation (only relevant for
11387c5f64f8SVladimir Davydov 	 * NUMA and memcg) that may require different handling.
11399b0f8b04SChristoph Lameter 	 */
1140432b1de0SYafang Shao 	oc->constraint = constrained_alloc(oc);
1141432b1de0SYafang Shao 	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
11426e0fc46dSDavid Rientjes 		oc->nodemask = NULL;
1143432b1de0SYafang Shao 	check_panic_on_oom(oc);
11440aad4b31SDavid Rientjes 
11457c5f64f8SVladimir Davydov 	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1146ac311a14SShakeel Butt 	    current->mm && !oom_unkillable_task(current) &&
1147ac311a14SShakeel Butt 	    oom_cpuset_eligible(current, oc) &&
1148121d1ba0SDavid Rientjes 	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
11496b0c81b3SDavid Rientjes 		get_task_struct(current);
11507c5f64f8SVladimir Davydov 		oc->chosen = current;
11517c5f64f8SVladimir Davydov 		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
115275e8f8b2SDavid Rientjes 		return true;
11530aad4b31SDavid Rientjes 	}
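	/*
	 * The branch above implements the vm.oom_kill_allocating_task sysctl:
	 * rather than scanning the whole task list it kills current (the
	 * allocating task), provided current is killable, eligible under the
	 * cpuset/mempolicy constraint and not protected by OOM_SCORE_ADJ_MIN.
	 * This is mainly meant to avoid expensive tasklist scans on very
	 * large systems.
	 */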
11540aad4b31SDavid Rientjes 
11557c5f64f8SVladimir Davydov 	select_bad_process(oc);
11563100dab2SJohannes Weiner 	/* Found nothing?!?! */
11573100dab2SJohannes Weiner 	if (!oc->chosen) {
11582a966b77SVladimir Davydov 		dump_header(oc, NULL);
11593100dab2SJohannes Weiner 		pr_warn("Out of memory and no killable processes...\n");
11603100dab2SJohannes Weiner 		/*
11613100dab2SJohannes Weiner 		 * If we got here due to an actual allocation at the
11623100dab2SJohannes Weiner 		 * system level, we cannot survive this and will enter
11633100dab2SJohannes Weiner 		 * an endless loop in the allocator. Bail out now.
11643100dab2SJohannes Weiner 		 */
11653100dab2SJohannes Weiner 		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
11663100dab2SJohannes Weiner 			panic("System is deadlocked on memory\n");
11670aad4b31SDavid Rientjes 	}
11689bfe5dedSMichal Hocko 	if (oc->chosen && oc->chosen != (void *)-1UL)
11697c5f64f8SVladimir Davydov 		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
11707c5f64f8SVladimir Davydov 				 "Memory cgroup out of memory");
11717c5f64f8SVladimir Davydov 	return !!oc->chosen;
1172c32b3cbeSMichal Hocko }
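/*
 * Summary of the short-circuit order above: a disabled OOM killer bails out
 * first; for global OOMs the notifier chain then gets a chance to free
 * memory; an already-exiting current is simply marked a victim and reaped;
 * IO-less (!__GFP_FS) global allocations are left to retry; panic_on_oom is
 * honoured; oom_kill_allocating_task bypasses victim selection; and only then
 * does select_bad_process() pick a victim, panicking if none can be found for
 * a genuine system-level allocation.
 */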
1173c32b3cbeSMichal Hocko 
1174e3658932SDavid Rientjes /*
117560e2793dSMichal Hocko  * The pagefault handler calls here because some allocation has failed. We have
117660e2793dSMichal Hocko  * to take care of the memcg OOM here because this is the only safe context without
117760e2793dSMichal Hocko  * any locks held but let the oom killer triggered from the allocation context care
117860e2793dSMichal Hocko  * about the global OOM.
1179e3658932SDavid Rientjes  */
1180e3658932SDavid Rientjes void pagefault_out_of_memory(void)
1181e3658932SDavid Rientjes {
118260e2793dSMichal Hocko 	static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL,
118360e2793dSMichal Hocko 				      DEFAULT_RATELIMIT_BURST);
11846e0fc46dSDavid Rientjes 
118549426420SJohannes Weiner 	if (mem_cgroup_oom_synchronize(true))
1186dc56401fSJohannes Weiner 		return;
11873812c8c8SJohannes Weiner 
11880b28179aSVasily Averin 	if (fatal_signal_pending(current))
1189dc56401fSJohannes Weiner 		return;
11900b28179aSVasily Averin 
119160e2793dSMichal Hocko 	if (__ratelimit(&pfoom_rs))
119260e2793dSMichal Hocko 		pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. Retrying PF\n");
1193e3658932SDavid Rientjes }
1194884a7e59SSuren Baghdasaryan 
1195884a7e59SSuren Baghdasaryan SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
1196884a7e59SSuren Baghdasaryan {
1197884a7e59SSuren Baghdasaryan #ifdef CONFIG_MMU
1198884a7e59SSuren Baghdasaryan 	struct mm_struct *mm = NULL;
1199884a7e59SSuren Baghdasaryan 	struct task_struct *task;
1200884a7e59SSuren Baghdasaryan 	struct task_struct *p;
1201884a7e59SSuren Baghdasaryan 	unsigned int f_flags;
1202337546e8SSuren Baghdasaryan 	bool reap = false;
1203884a7e59SSuren Baghdasaryan 	long ret = 0;
1204884a7e59SSuren Baghdasaryan 
1205884a7e59SSuren Baghdasaryan 	if (flags)
1206884a7e59SSuren Baghdasaryan 		return -EINVAL;
1207884a7e59SSuren Baghdasaryan 
1208ee9955d6SChristian Brauner 	task = pidfd_get_task(pidfd, &f_flags);
1209ee9955d6SChristian Brauner 	if (IS_ERR(task))
1210ee9955d6SChristian Brauner 		return PTR_ERR(task);
1211884a7e59SSuren Baghdasaryan 
1212884a7e59SSuren Baghdasaryan 	/*
1213884a7e59SSuren Baghdasaryan 	 * Make sure to choose a thread which still has a reference to mm
1214884a7e59SSuren Baghdasaryan 	 * during the group exit
1215884a7e59SSuren Baghdasaryan 	 */
1216884a7e59SSuren Baghdasaryan 	p = find_lock_task_mm(task);
1217884a7e59SSuren Baghdasaryan 	if (!p) {
1218884a7e59SSuren Baghdasaryan 		ret = -ESRCH;
1219884a7e59SSuren Baghdasaryan 		goto put_task;
1220884a7e59SSuren Baghdasaryan 	}
1221884a7e59SSuren Baghdasaryan 
1222884a7e59SSuren Baghdasaryan 	mm = p->mm;
1223ba535c1cSSuren Baghdasaryan 	mmgrab(mm);
1224ba535c1cSSuren Baghdasaryan 
1225337546e8SSuren Baghdasaryan 	if (task_will_free_mem(p))
1226337546e8SSuren Baghdasaryan 		reap = true;
1227337546e8SSuren Baghdasaryan 	else {
1228337546e8SSuren Baghdasaryan 		/* Error only if the work has not been done already */
1229337546e8SSuren Baghdasaryan 		if (!test_bit(MMF_OOM_SKIP, &mm->flags))
1230884a7e59SSuren Baghdasaryan 			ret = -EINVAL;
1231884a7e59SSuren Baghdasaryan 	}
1232884a7e59SSuren Baghdasaryan 	task_unlock(p);
1233884a7e59SSuren Baghdasaryan 
1234884a7e59SSuren Baghdasaryan 	if (!reap)
1235884a7e59SSuren Baghdasaryan 		goto drop_mm;
1236884a7e59SSuren Baghdasaryan 
1237884a7e59SSuren Baghdasaryan 	if (mmap_read_lock_killable(mm)) {
1238884a7e59SSuren Baghdasaryan 		ret = -EINTR;
1239884a7e59SSuren Baghdasaryan 		goto drop_mm;
1240884a7e59SSuren Baghdasaryan 	}
1241ba535c1cSSuren Baghdasaryan 	/*
1242ba535c1cSSuren Baghdasaryan 	 * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure
1243ba535c1cSSuren Baghdasaryan 	 * possible change in exit_mmap is seen
1244ba535c1cSSuren Baghdasaryan 	 */
1245ba535c1cSSuren Baghdasaryan 	if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm))
1246884a7e59SSuren Baghdasaryan 		ret = -EAGAIN;
1247884a7e59SSuren Baghdasaryan 	mmap_read_unlock(mm);
1248884a7e59SSuren Baghdasaryan 
1249884a7e59SSuren Baghdasaryan drop_mm:
1250ba535c1cSSuren Baghdasaryan 	mmdrop(mm);
1251884a7e59SSuren Baghdasaryan put_task:
1252884a7e59SSuren Baghdasaryan 	put_task_struct(task);
1253884a7e59SSuren Baghdasaryan 	return ret;
1254884a7e59SSuren Baghdasaryan #else
1255884a7e59SSuren Baghdasaryan 	return -ENOSYS;
1256884a7e59SSuren Baghdasaryan #endif /* CONFIG_MMU */
1257884a7e59SSuren Baghdasaryan }
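/*
 * Illustrative userspace sketch: process_mrelease() takes a pidfd of a
 * process that is already dying (typically after SIGKILL) and reaps its
 * address space without waiting for the victim to get scheduled and exit.
 * Assumes headers that define __NR_pidfd_open, __NR_pidfd_send_signal and
 * __NR_process_mrelease (Linux 5.15+); raw syscall() is used because libc
 * wrappers may be missing.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int kill_and_reap(pid_t pid)
 *	{
 *		int pidfd = syscall(__NR_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		if (syscall(__NR_pidfd_send_signal, pidfd, SIGKILL, NULL, 0) < 0 ||
 *		    syscall(__NR_process_mrelease, pidfd, 0) < 0) {
 *			close(pidfd);
 *			return -1;
 *		}
 *		close(pidfd);
 *		return 0;
 *	}
 */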
1258