/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
                                        const nodemask_t *mask)
{
        struct task_struct *tsk;
        bool ret = false;

        rcu_read_lock();
        for_each_thread(start, tsk) {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
                         * cpuset is irrelevant.  Only return true if its
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
                        ret = mempolicy_nodemask_intersects(tsk, mask);
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
                        ret = cpuset_mems_allowed_intersects(current, tsk);
                }
                if (ret)
                        break;
        }
        rcu_read_unlock();

        return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
        struct task_struct *t;

        rcu_read_lock();

        for_each_thread(p, t) {
                task_lock(t);
                if (likely(t->mm))
                        goto found;
                task_unlock(t);
        }
        t = NULL;
found:
        rcu_read_unlock();

        return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
        return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
        return oc->memcg != NULL;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        if (is_global_init(p))
                return true;
        if (p->flags & PF_KTHREAD)
                return true;

        /* When mem_cgroup_out_of_memory() and p is not member of the group */
        if (memcg && !task_in_mem_cgroup(p, memcg))
                return true;

        /* p may not have freeable memory in nodemask */
        if (!has_intersects_mems_allowed(p, nodemask))
                return true;

        return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                          const nodemask_t *nodemask, unsigned long totalpages)
{
        long points;
        long adj;

        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;

        p = find_lock_task_mm(p);
        if (!p)
                return 0;

        /*
         * Do not even consider tasks which are explicitly marked oom
         * unkillable, have already been oom reaped, or are in the middle
         * of vfork.
         */
        adj = (long)p->signal->oom_score_adj;
        if (adj == OOM_SCORE_ADJ_MIN ||
                        test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
                        in_vfork(p)) {
                task_unlock(p);
                return 0;
        }

        /*
         * The baseline for the badness score is the proportion of RAM that each
         * task's rss, pagetable and swap space use.
         */
        points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
                atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
        task_unlock(p);

        /*
         * Root processes get 3% bonus, just like the __vm_enough_memory()
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
                points -= (points * 3) / 100;

        /* Normalize to oom_score_adj units */
        adj *= totalpages / 1000;
        points += adj;

        /*
         * Never return 0 for an eligible task regardless of the root bonus and
         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
        return points > 0 ? points : 1;
}

enum oom_constraint {
        CONSTRAINT_NONE,
        CONSTRAINT_CPUSET,
        CONSTRAINT_MEMORY_POLICY,
        CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
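 * Returns one of the oom_constraint values defined above and, as a side
 * effect, sets oc->totalpages to the amount of memory usable under the
 * detected constraint.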
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
        struct zone *zone;
        struct zoneref *z;
        enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
        bool cpuset_limited = false;
        int nid;

        if (is_memcg_oom(oc)) {
                oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
                return CONSTRAINT_MEMCG;
        }

        /* Default to all available memory */
        oc->totalpages = totalram_pages + total_swap_pages;

        if (!IS_ENABLED(CONFIG_NUMA))
                return CONSTRAINT_NONE;

        if (!oc->zonelist)
                return CONSTRAINT_NONE;
        /*
         * This is reached only when __GFP_NOFAIL is used, so we should avoid
         * killing current; we have to fall back to a random task kill in this
         * case.  Ideally this would be CONSTRAINT_THISNODE, but there is no
         * way to handle it for now.
         */
        if (oc->gfp_mask & __GFP_THISNODE)
                return CONSTRAINT_NONE;

        /*
         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
         * the page allocator means a mempolicy is in effect.  Cpuset policy
         * is enforced in get_page_from_freelist().
         */
        if (oc->nodemask &&
            !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
                oc->totalpages = total_swap_pages;
                for_each_node_mask(nid, *oc->nodemask)
                        oc->totalpages += node_spanned_pages(nid);
                return CONSTRAINT_MEMORY_POLICY;
        }

        /* Check this allocation failure is caused by cpuset's wall function */
        for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
                        high_zoneidx, oc->nodemask)
                if (!cpuset_zone_allowed(zone, oc->gfp_mask))
                        cpuset_limited = true;

        if (cpuset_limited) {
                oc->totalpages = total_swap_pages;
                for_each_node_mask(nid, cpuset_current_mems_allowed)
                        oc->totalpages += node_spanned_pages(nid);
                return CONSTRAINT_CPUSET;
        }
        return CONSTRAINT_NONE;
}

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
        struct oom_control *oc = arg;
        unsigned long points;

        if (oom_unkillable_task(task, NULL, oc->nodemask))
                goto next;

        /*
         * This task already has access to memory reserves and is being killed.
         * Don't allow any other task to have access to the reserves unless
         * the task has MMF_OOM_SKIP because the chances that it would release
         * any memory are quite low.
         */
        if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
                if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
                        goto next;
                goto abort;
        }

        /*
         * If task is allocating a lot of memory and has been marked to be
         * killed first if it triggers an oom, then select it.
         */
        if (oom_task_origin(task)) {
                points = ULONG_MAX;
                goto select;
        }

        points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
        if (!points || points < oc->chosen_points)
                goto next;

        /* Prefer thread group leaders for display purposes */
        if (points == oc->chosen_points && thread_group_leader(oc->chosen))
                goto next;
select:
        if (oc->chosen)
                put_task_struct(oc->chosen);
        get_task_struct(task);
        oc->chosen = task;
        oc->chosen_points = points;
next:
        return 0;
abort:
        if (oc->chosen)
                put_task_struct(oc->chosen);
        oc->chosen = (void *)-1UL;
        return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
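 * (more precisely to (void *)-1UL, the sentinel set by oom_evaluate_task()
 * above).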
 */
static void select_bad_process(struct oom_control *oc)
{
        if (is_memcg_oom(oc))
                mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
        else {
                struct task_struct *p;

                rcu_read_lock();
                for_each_process(p)
                        if (oom_evaluate_task(p, oc))
                                break;
                rcu_read_unlock();
        }

        oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        struct task_struct *p;
        struct task_struct *task;

        pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
        rcu_read_lock();
        for_each_process(p) {
                if (oom_unkillable_task(p, memcg, nodemask))
                        continue;

                task = find_lock_task_mm(p);
                if (!task) {
                        /*
                         * This is a kthread or all of p's threads have already
                         * detached their mm's.  There's no need to report
                         * them; they can't be oom killed anyway.
                         */
                        continue;
                }

                pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
                        atomic_long_read(&task->mm->nr_ptes),
                        mm_nr_pmds(task->mm),
                        get_mm_counter(task->mm, MM_SWAPENTS),
                        task->signal->oom_score_adj, task->comm);
                task_unlock(task);
        }
        rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
        pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
                current->comm, oc->gfp_mask, &oc->gfp_mask);
        if (oc->nodemask)
                pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
        else
                pr_cont("(null)");
        pr_cont(", order=%d, oom_score_adj=%hd\n",
                oc->order, current->signal->oom_score_adj);
        if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
                pr_warn("COMPACTION is disabled!!!\n");

        cpuset_print_current_mems_allowed();
        dump_stack();
        if (oc->memcg)
                mem_cgroup_print_oom_info(oc->memcg, p);
        else
                show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
        if (sysctl_oom_dump_tasks)
                dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
        struct task_struct *t;

        for_each_thread(p, t) {
                struct mm_struct *t_mm = READ_ONCE(t->mm);
                if (t_mm)
                        return t_mm == mm;
        }
        return false;
}


#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
        struct mmu_gather tlb;
        struct vm_area_struct *vma;
        bool ret = true;

        /*
         * We have to make sure to not race with the victim exit path
         * and cause premature new oom victim selection:
         * __oom_reap_task_mm           exit_mm
         *   mmget_not_zero
         *                                mmput
         *                                  atomic_dec_and_test
         *                                exit_oom_victim
         *                              [...]
         *                              out_of_memory
         *                                select_bad_process
         *                                  # no TIF_MEMDIE task selects new victim
         *  unmap_page_range # frees some memory
         */
        mutex_lock(&oom_lock);

        if (!down_read_trylock(&mm->mmap_sem)) {
                ret = false;
                goto unlock_oom;
        }

        /*
         * Increase mm_users only after we know we will reap something so
         * that mmput_async is called only when we have reaped something
         * and the delayed __mmput doesn't matter that much.
         */
        if (!mmget_not_zero(mm)) {
                up_read(&mm->mmap_sem);
                goto unlock_oom;
        }

        /*
         * Tell all users of get_user/copy_from_user etc... that the content
         * is no longer stable. No barriers really needed because unmapping
         * should imply barriers already and the reader would hit a page fault
         * if it stumbled over reaped memory.
         */
        set_bit(MMF_UNSTABLE, &mm->flags);

        tlb_gather_mmu(&tlb, mm, 0, -1);
        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
                if (!can_madv_dontneed_vma(vma))
                        continue;

                /*
                 * Only anonymous pages have a good chance to be dropped
                 * without additional steps which we cannot afford as we
                 * are OOM already.
                 *
                 * We do not even care about fs backed pages because all
                 * which are reclaimable have already been reclaimed and
                 * we do not want to block exit_mmap by keeping mm ref
                 * count elevated without a good reason.
                 */
                if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
                        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
                                         NULL);
        }
        tlb_finish_mmu(&tlb, 0, -1);
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                        task_pid_nr(tsk), tsk->comm,
                        K(get_mm_counter(mm, MM_ANONPAGES)),
                        K(get_mm_counter(mm, MM_FILEPAGES)),
                        K(get_mm_counter(mm, MM_SHMEMPAGES)));
        up_read(&mm->mmap_sem);

        /*
         * Drop our reference but make sure the mmput slow path is called from a
         * different context because we shouldn't risk we get stuck there and
         * put the oom_reaper out of the way.
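         * (mmput_async() defers the final __mmput() to a workqueue instead of
         * running it in this context.)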
         */
        mmput_async(mm);
unlock_oom:
        mutex_unlock(&oom_lock);
        return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
        int attempts = 0;
        struct mm_struct *mm = tsk->signal->oom_mm;

        /* Retry the down_read_trylock(mmap_sem) a few times */
        while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
                schedule_timeout_idle(HZ/10);

        if (attempts <= MAX_OOM_REAP_RETRIES)
                goto done;


        pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
                task_pid_nr(tsk), tsk->comm);
        debug_show_all_locks();

done:
        tsk->oom_reaper_list = NULL;

        /*
         * Hide this mm from the OOM killer because it has either been reaped
         * or somebody can't call up_write(mmap_sem).
         */
        set_bit(MMF_OOM_SKIP, &mm->flags);

        /* Drop a reference taken by wake_oom_reaper */
        put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
        while (true) {
                struct task_struct *tsk = NULL;

                wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
                spin_lock(&oom_reaper_lock);
                if (oom_reaper_list != NULL) {
                        tsk = oom_reaper_list;
                        oom_reaper_list = tsk->oom_reaper_list;
                }
                spin_unlock(&oom_reaper_lock);

                if (tsk)
                        oom_reap_task(tsk);
        }

        return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
        if (!oom_reaper_th)
                return;

        /* tsk is already queued? */
        if (tsk == oom_reaper_list || tsk->oom_reaper_list)
                return;

        get_task_struct(tsk);

        spin_lock(&oom_reaper_lock);
        tsk->oom_reaper_list = oom_reaper_list;
        oom_reaper_list = tsk;
        spin_unlock(&oom_reaper_lock);
        wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
        oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
        if (IS_ERR(oom_reaper_th)) {
                pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
                                PTR_ERR(oom_reaper_th));
                oom_reaper_th = NULL;
        }
        return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or operate on the current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;

        WARN_ON(oom_killer_disabled);
        /* OOM killer might race with memcg OOM */
        if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
                return;

        /* oom_mm is bound to the signal struct life time. */
        if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
                mmgrab(tsk->signal->oom_mm);

        /*
         * Make sure that the task is woken up from uninterruptible sleep
         * if it is frozen, because the OOM killer wouldn't be able to free
         * any memory and would livelock. freezing_slow_path will tell the
         * freezer that TIF_MEMDIE tasks should be ignored.
         */
        __thaw_task(tsk);
        atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
        clear_thread_flag(TIF_MEMDIE);

        if (!atomic_dec_return(&oom_victims))
                wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
        oom_killer_disabled = false;
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
        signed long ret;

        /*
         * Make sure to not race with an ongoing OOM killer. Check that the
         * current is not killed (possibly due to sharing the victim's memory).
         */
        if (mutex_lock_killable(&oom_lock))
                return false;
        oom_killer_disabled = true;
        mutex_unlock(&oom_lock);

        ret = wait_event_interruptible_timeout(oom_victims_wait,
                        !atomic_read(&oom_victims), timeout);
        if (ret <= 0) {
                oom_killer_enable();
                return false;
        }

        return true;
}

static inline bool __task_will_free_mem(struct task_struct *task)
{
        struct signal_struct *sig = task->signal;

        /*
         * A coredumping process may sleep for an extended period in exit_mm(),
         * so the oom killer cannot assume that the process will promptly exit
         * and release memory.
         */
        if (sig->flags & SIGNAL_GROUP_COREDUMP)
                return false;

        if (sig->flags & SIGNAL_GROUP_EXIT)
                return true;

        if (thread_group_empty(task) && (task->flags & PF_EXITING))
                return true;

        return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * it operates on the current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
        struct mm_struct *mm = task->mm;
        struct task_struct *p;
        bool ret = true;

        /*
         * Skip tasks without mm because it might have passed its exit_mm and
         * exit_oom_victim. oom_reaper could have rescued that but do not rely
         * on that for now. We can consider find_lock_task_mm in future.
         */
        if (!mm)
                return false;

        if (!__task_will_free_mem(task))
                return false;

        /*
         * This task has already been drained by the oom reaper so there are
         * only small chances it will free some more
         */
        if (test_bit(MMF_OOM_SKIP, &mm->flags))
                return false;

        if (atomic_read(&mm->mm_users) <= 1)
                return true;

        /*
         * Make sure that all tasks which share the mm with the given task
         * are dying as well to make sure that a) nobody pins its mm and
         * b) the task is also reapable by the oom reaper.
         */
        rcu_read_lock();
        for_each_process(p) {
                if (!process_shares_mm(p, mm))
                        continue;
                if (same_thread_group(task, p))
                        continue;
                ret = __task_will_free_mem(p);
                if (!ret)
                        break;
        }
        rcu_read_unlock();

        return ret;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
        struct task_struct *p = oc->chosen;
        unsigned int points = oc->chosen_points;
        struct task_struct *victim = p;
        struct task_struct *child;
        struct task_struct *t;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                                              DEFAULT_RATELIMIT_BURST);
        bool can_oom_reap = true;

        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
         * its children or threads, just set TIF_MEMDIE so it can die quickly
         */
        task_lock(p);
        if (task_will_free_mem(p)) {
                mark_oom_victim(p);
                wake_oom_reaper(p);
                task_unlock(p);
                put_task_struct(p);
                return;
        }
        task_unlock(p);

        if (__ratelimit(&oom_rs))
                dump_header(oc, p);

        pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
                message, task_pid_nr(p), p->comm, points);

        /*
         * If any of p's children has a different mm and is eligible for kill,
         * the one with the highest oom_badness() score is sacrificed for its
         * parent.  This attempts to lose the minimal amount of work done while
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
        for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;

                        if (process_shares_mm(child, p->mm))
                                continue;
                        /*
                         * oom_badness() returns 0 if the thread is unkillable
                         */
                        child_points = oom_badness(child,
                                oc->memcg, oc->nodemask, oc->totalpages);
                        if (child_points > victim_points) {
                                put_task_struct(victim);
                                victim = child;
                                victim_points = child_points;
                                get_task_struct(victim);
                        }
                }
        }
        read_unlock(&tasklist_lock);

        p = find_lock_task_mm(victim);
        if (!p) {
                put_task_struct(victim);
                return;
        } else if (victim != p) {
                get_task_struct(p);
                put_task_struct(victim);
                victim = p;
        }

        /* Get a reference to safely compare mm after task_unlock(victim) */
        mm = victim->mm;
        mmgrab(mm);
        /*
         * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
         * the OOM victim from depleting the memory reserves from the user
         * space under its control.
         */
        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
        mark_oom_victim(victim);
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
                K(get_mm_counter(victim->mm, MM_ANONPAGES)),
                K(get_mm_counter(victim->mm, MM_FILEPAGES)),
                K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
        task_unlock(victim);

        /*
         * Kill all user processes sharing victim->mm in other thread groups, if
         * any.  They don't get access to memory reserves, though, to avoid
         * depletion of all memory.  This prevents mm->mmap_sem livelock when an
         * oom killed thread cannot exit because it requires the semaphore and
         * it's contended by another thread trying to allocate memory itself.
         * That thread will now get access to memory reserves since it has a
         * pending fatal signal.
         */
        rcu_read_lock();
        for_each_process(p) {
                if (!process_shares_mm(p, mm))
                        continue;
                if (same_thread_group(p, victim))
                        continue;
                if (is_global_init(p)) {
                        can_oom_reap = false;
                        set_bit(MMF_OOM_SKIP, &mm->flags);
                        pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
                                        task_pid_nr(victim), victim->comm,
                                        task_pid_nr(p), p->comm);
                        continue;
                }
                /*
                 * No use_mm() user needs to read from userspace, so we are
                 * ok to reap it.
                 */
                if (unlikely(p->flags & PF_KTHREAD))
                        continue;
                do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
        }
        rcu_read_unlock();

        if (can_oom_reap)
                wake_oom_reaper(victim);

        mmdrop(mm);
        put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
                               enum oom_constraint constraint)
{
        if (likely(!sysctl_panic_on_oom))
                return;
        if (sysctl_panic_on_oom != 2) {
                /*
                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
                 * does not panic for cpuset, mempolicy, or memcg allocation
                 * failures.
                 */
                if (constraint != CONSTRAINT_NONE)
                        return;
        }
        /* Do not panic for oom kills triggered by sysrq */
        if (is_sysrq_oom(oc))
                return;
        dump_header(oc, NULL);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
        unsigned long freed = 0;
        enum oom_constraint constraint = CONSTRAINT_NONE;

        if (oom_killer_disabled)
                return false;

        if (!is_memcg_oom(oc)) {
                blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
                if (freed > 0)
                        /* Got some memory back in the last second. */
                        return true;
        }

        /*
         * If current has a pending SIGKILL or is exiting, then automatically
         * select it.  The goal is to allow it to allocate so that it may
         * quickly exit and free its memory.
         */
        if (task_will_free_mem(current)) {
                mark_oom_victim(current);
                wake_oom_reaper(current);
                return true;
        }

        /*
         * The OOM killer does not compensate for IO-less reclaim.
         * pagefault_out_of_memory lost its gfp context so we have to
         * make sure to exclude the 0 mask - all other users should have at
         * least ___GFP_DIRECT_RECLAIM to get here.
         */
        if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
                return true;

        /*
         * Check if there were limitations on the allocation (only relevant for
         * NUMA and memcg) that may require different handling.
         */
        constraint = constrained_alloc(oc);
        if (constraint != CONSTRAINT_MEMORY_POLICY)
                oc->nodemask = NULL;
        check_panic_on_oom(oc, constraint);

        if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
            current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
            current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
                get_task_struct(current);
                oc->chosen = current;
                oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
                return true;
        }

        select_bad_process(oc);
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
                dump_header(oc, NULL);
                panic("Out of memory and no killable processes...\n");
        }
        if (oc->chosen && oc->chosen != (void *)-1UL) {
                oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
                                 "Memory cgroup out of memory");
                /*
                 * Give the killed process a good chance to exit before trying
                 * to allocate memory again.
                 */
                schedule_timeout_killable(1);
        }
        return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
        struct oom_control oc = {
                .zonelist = NULL,
                .nodemask = NULL,
                .memcg = NULL,
                .gfp_mask = 0,
                .order = 0,
        };

        if (mem_cgroup_oom_synchronize(true))
                return;

        if (!mutex_trylock(&oom_lock))
                return;
        out_of_memory(&oc);
        mutex_unlock(&oom_lock);
}