/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 * Copyright (C) 2010 Google, Inc.
 *	Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
                                        const nodemask_t *mask)
{
        struct task_struct *tsk;
        bool ret = false;

        rcu_read_lock();
        for_each_thread(start, tsk) {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
                         * cpuset is irrelevant. Only return true if its
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
                        ret = mempolicy_nodemask_intersects(tsk, mask);
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
                        ret = cpuset_mems_allowed_intersects(current, tsk);
                }
                if (ret)
                        break;
        }
        rcu_read_unlock();

        return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
        struct task_struct *t;

        rcu_read_lock();

        for_each_thread(p, t) {
                task_lock(t);
                if (likely(t->mm))
                        goto found;
                task_unlock(t);
        }
        t = NULL;
found:
        rcu_read_unlock();

        return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
        return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
        return oc->memcg != NULL;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        if (is_global_init(p))
                return true;
        if (p->flags & PF_KTHREAD)
                return true;

        /* When mem_cgroup_out_of_memory() and p is not member of the group */
        if (memcg && !task_in_mem_cgroup(p, memcg))
                return true;

        /* p may not have freeable memory in nodemask */
        if (!has_intersects_mems_allowed(p, nodemask))
                return true;

        return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                          const nodemask_t *nodemask, unsigned long totalpages)
{
        long points;
        long adj;

        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;

        p = find_lock_task_mm(p);
        if (!p)
                return 0;

        /*
         * Do not even consider tasks which are explicitly marked oom
         * unkillable, have already been oom reaped, or are in the middle
         * of vfork.
         */
        adj = (long)p->signal->oom_score_adj;
        if (adj == OOM_SCORE_ADJ_MIN ||
                        test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
                        in_vfork(p)) {
                task_unlock(p);
                return 0;
        }

        /*
         * The baseline for the badness score is the proportion of RAM that each
         * task's rss, pagetable and swap space use.
         */
        points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
                atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
        task_unlock(p);

        /*
         * Root processes get 3% bonus, just like the __vm_enough_memory()
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
                points -= (points * 3) / 100;

        /* Normalize to oom_score_adj units */
        adj *= totalpages / 1000;
        points += adj;

        /*
         * Never return 0 for an eligible task regardless of the root bonus and
         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
        return points > 0 ? points : 1;
}

enum oom_constraint {
        CONSTRAINT_NONE,
        CONSTRAINT_CPUSET,
        CONSTRAINT_MEMORY_POLICY,
        CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
        struct zone *zone;
        struct zoneref *z;
        enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
        bool cpuset_limited = false;
        int nid;

        if (is_memcg_oom(oc)) {
                oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
                return CONSTRAINT_MEMCG;
        }

        /* Default to all available memory */
        oc->totalpages = totalram_pages + total_swap_pages;

        if (!IS_ENABLED(CONFIG_NUMA))
                return CONSTRAINT_NONE;

        if (!oc->zonelist)
                return CONSTRAINT_NONE;
        /*
         * This is reached only when __GFP_NOFAIL is used, so we should avoid
         * killing current; a random task would have to be killed in this case.
         * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
         * handle it yet.
         */
        if (oc->gfp_mask & __GFP_THISNODE)
                return CONSTRAINT_NONE;

        /*
         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
         * the page allocator means a mempolicy is in effect. Cpuset policy
         * is enforced in get_page_from_freelist().
         */
        if (oc->nodemask &&
            !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
                oc->totalpages = total_swap_pages;
                for_each_node_mask(nid, *oc->nodemask)
                        oc->totalpages += node_spanned_pages(nid);
                return CONSTRAINT_MEMORY_POLICY;
        }

        /* Check this allocation failure is caused by cpuset's wall function */
        for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
                        high_zoneidx, oc->nodemask)
                if (!cpuset_zone_allowed(zone, oc->gfp_mask))
                        cpuset_limited = true;

        if (cpuset_limited) {
                oc->totalpages = total_swap_pages;
                for_each_node_mask(nid, cpuset_current_mems_allowed)
                        oc->totalpages += node_spanned_pages(nid);
                return CONSTRAINT_CPUSET;
        }
        return CONSTRAINT_NONE;
}

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
        struct oom_control *oc = arg;
        unsigned long points;

        if (oom_unkillable_task(task, NULL, oc->nodemask))
                goto next;

        /*
         * This task already has access to memory reserves and is being killed.
         * Don't allow any other task to have access to the reserves unless
         * the task has MMF_OOM_SKIP because chances that it would release
         * any memory are quite low.
         */
        if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
                if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
                        goto next;
                goto abort;
        }

        /*
         * If task is allocating a lot of memory and has been marked to be
         * killed first if it triggers an oom, then select it.
         */
        if (oom_task_origin(task)) {
                points = ULONG_MAX;
                goto select;
        }

        points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
        if (!points || points < oc->chosen_points)
                goto next;

        /* Prefer thread group leaders for display purposes */
        if (points == oc->chosen_points && thread_group_leader(oc->chosen))
                goto next;
select:
        if (oc->chosen)
                put_task_struct(oc->chosen);
        get_task_struct(task);
        oc->chosen = task;
        oc->chosen_points = points;
next:
        return 0;
abort:
        if (oc->chosen)
                put_task_struct(oc->chosen);
        oc->chosen = (void *)-1UL;
        return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
        if (is_memcg_oom(oc))
                mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
        else {
                struct task_struct *p;

                rcu_read_lock();
                for_each_process(p)
                        if (oom_evaluate_task(p, oc))
                                break;
                rcu_read_unlock();
        }

        oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        struct task_struct *p;
        struct task_struct *task;

        pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n");
        rcu_read_lock();
        for_each_process(p) {
                if (oom_unkillable_task(p, memcg, nodemask))
                        continue;

                task = find_lock_task_mm(p);
                if (!task) {
                        /*
                         * This is a kthread or all of p's threads have already
                         * detached their mm's. There's no need to report
                         * them; they can't be oom killed anyway.
                         */
                        continue;
                }

                pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
                        atomic_long_read(&task->mm->nr_ptes),
                        mm_nr_pmds(task->mm),
                        get_mm_counter(task->mm, MM_SWAPENTS),
                        task->signal->oom_score_adj, task->comm);
                task_unlock(task);
        }
        rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
        pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
                current->comm, oc->gfp_mask, &oc->gfp_mask);
        if (oc->nodemask)
                pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
        else
                pr_cont("(null)");
        pr_cont(", order=%d, oom_score_adj=%hd\n",
                oc->order, current->signal->oom_score_adj);
        if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
                pr_warn("COMPACTION is disabled!!!\n");

        cpuset_print_current_mems_allowed();
        dump_stack();
        if (oc->memcg)
                mem_cgroup_print_oom_info(oc->memcg, p);
        else
                show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
        if (sysctl_oom_dump_tasks)
                dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
        struct task_struct *t;

        for_each_thread(p, t) {
                struct mm_struct *t_mm = READ_ONCE(t->mm);
                if (t_mm)
                        return t_mm == mm;
        }
        return false;
}


#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
        struct mmu_gather tlb;
        struct vm_area_struct *vma;
        bool ret = true;

        /*
         * We have to make sure to not race with the victim exit path
         * and cause premature new oom victim selection:
         * __oom_reap_task_mm           exit_mm
         *   mmget_not_zero
         *                                mmput
         *                                  atomic_dec_and_test
         *                                exit_oom_victim
         *                              [...]
         *                              out_of_memory
         *                                select_bad_process
         *                                  # no TIF_MEMDIE task selects new victim
         *  unmap_page_range # frees some memory
         */
        mutex_lock(&oom_lock);

        if (!down_read_trylock(&mm->mmap_sem)) {
                ret = false;
                goto unlock_oom;
        }

        /*
         * Increase mm_users only after we know we will reap something, so
         * that mmput_async is called only when we have reaped something and
         * the delayed __mmput doesn't matter that much.
         */
        if (!mmget_not_zero(mm)) {
                up_read(&mm->mmap_sem);
                goto unlock_oom;
        }

        /*
         * Tell all users of get_user/copy_from_user etc... that the content
         * is no longer stable. No barriers really needed because unmapping
         * should imply barriers already and the reader would hit a page fault
         * if it stumbled over reaped memory.
         */
        set_bit(MMF_UNSTABLE, &mm->flags);

        tlb_gather_mmu(&tlb, mm, 0, -1);
        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
                if (!can_madv_dontneed_vma(vma))
                        continue;

                /*
                 * Only anonymous pages have a good chance to be dropped
                 * without additional steps which we cannot afford as we
                 * are OOM already.
                 *
                 * We do not even care about fs backed pages because all
                 * which are reclaimable have already been reclaimed and
                 * we do not want to block exit_mmap by keeping mm ref
                 * count elevated without a good reason.
                 */
                if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
                        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
                                         NULL);
        }
        tlb_finish_mmu(&tlb, 0, -1);
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                        task_pid_nr(tsk), tsk->comm,
                        K(get_mm_counter(mm, MM_ANONPAGES)),
                        K(get_mm_counter(mm, MM_FILEPAGES)),
                        K(get_mm_counter(mm, MM_SHMEMPAGES)));
        up_read(&mm->mmap_sem);

        /*
         * Drop our reference but make sure the mmput slow path is called from a
         * different context because we shouldn't risk getting stuck there and
         * putting the oom_reaper out of the way.
         */
        mmput_async(mm);
unlock_oom:
        mutex_unlock(&oom_lock);
        return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
        int attempts = 0;
        struct mm_struct *mm = tsk->signal->oom_mm;

        /* Retry the down_read_trylock(mmap_sem) a few times */
        while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
                schedule_timeout_idle(HZ/10);

        if (attempts <= MAX_OOM_REAP_RETRIES)
                goto done;


        pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
                task_pid_nr(tsk), tsk->comm);
        debug_show_all_locks();

done:
        tsk->oom_reaper_list = NULL;

        /*
         * Hide this mm from OOM killer because it has been either reaped or
         * somebody can't call up_write(mmap_sem).
         */
        set_bit(MMF_OOM_SKIP, &mm->flags);

        /* Drop a reference taken by wake_oom_reaper */
        put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
        while (true) {
                struct task_struct *tsk = NULL;

                wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
                spin_lock(&oom_reaper_lock);
                if (oom_reaper_list != NULL) {
                        tsk = oom_reaper_list;
                        oom_reaper_list = tsk->oom_reaper_list;
                }
                spin_unlock(&oom_reaper_lock);

                if (tsk)
                        oom_reap_task(tsk);
        }

        return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
        if (!oom_reaper_th)
                return;

        /* tsk is already queued? */
        if (tsk == oom_reaper_list || tsk->oom_reaper_list)
                return;

        get_task_struct(tsk);

        spin_lock(&oom_reaper_lock);
        tsk->oom_reaper_list = oom_reaper_list;
        oom_reaper_list = tsk;
        spin_unlock(&oom_reaper_lock);
        wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
        oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
        if (IS_ERR(oom_reaper_th)) {
                pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
                       PTR_ERR(oom_reaper_th));
                oom_reaper_th = NULL;
        }
        return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or operate on the current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;

        WARN_ON(oom_killer_disabled);
        /* OOM killer might race with memcg OOM */
        if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
                return;

        /* oom_mm is bound to the signal struct life time. */
        if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
                atomic_inc(&tsk->signal->oom_mm->mm_count);

        /*
         * Make sure that the task is woken up from uninterruptible sleep
         * if it is frozen because OOM killer wouldn't be able to free
         * any memory and livelock. freezing_slow_path will tell the freezer
         * that TIF_MEMDIE tasks should be ignored.
         */
        __thaw_task(tsk);
        atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
        clear_thread_flag(TIF_MEMDIE);

        if (!atomic_dec_return(&oom_victims))
                wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
        oom_killer_disabled = false;
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
        signed long ret;

        /*
         * Make sure to not race with an ongoing OOM killer. Check that the
         * current is not killed (possibly due to sharing the victim's memory).
         */
        if (mutex_lock_killable(&oom_lock))
                return false;
        oom_killer_disabled = true;
        mutex_unlock(&oom_lock);

        ret = wait_event_interruptible_timeout(oom_victims_wait,
                        !atomic_read(&oom_victims), timeout);
        if (ret <= 0) {
                oom_killer_enable();
                return false;
        }

        return true;
}

static inline bool __task_will_free_mem(struct task_struct *task)
{
        struct signal_struct *sig = task->signal;

        /*
         * A coredumping process may sleep for an extended period in exit_mm(),
         * so the oom killer cannot assume that the process will promptly exit
         * and release memory.
         */
        if (sig->flags & SIGNAL_GROUP_COREDUMP)
                return false;

        if (sig->flags & SIGNAL_GROUP_EXIT)
                return true;

        if (thread_group_empty(task) && (task->flags & PF_EXITING))
                return true;

        return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * it operates on the current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
        struct mm_struct *mm = task->mm;
        struct task_struct *p;
        bool ret = true;

        /*
         * Skip tasks without mm because it might have passed its exit_mm and
         * exit_oom_victim. oom_reaper could have rescued that but do not rely
         * on that for now. We can consider find_lock_task_mm in future.
         */
        if (!mm)
                return false;

        if (!__task_will_free_mem(task))
                return false;

        /*
         * This task has already been drained by the oom reaper so there are
         * only small chances it will free some more.
         */
        if (test_bit(MMF_OOM_SKIP, &mm->flags))
                return false;

        if (atomic_read(&mm->mm_users) <= 1)
                return true;

        /*
         * Make sure that all tasks which share the mm with the given task
         * are dying as well to make sure that a) nobody pins its mm and
         * b) the task is also reapable by the oom reaper.
         */
        rcu_read_lock();
        for_each_process(p) {
                if (!process_shares_mm(p, mm))
                        continue;
                if (same_thread_group(task, p))
                        continue;
                ret = __task_will_free_mem(p);
                if (!ret)
                        break;
        }
        rcu_read_unlock();

        return ret;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
        struct task_struct *p = oc->chosen;
        unsigned int points = oc->chosen_points;
        struct task_struct *victim = p;
        struct task_struct *child;
        struct task_struct *t;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        bool can_oom_reap = true;

        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
         * its children or threads, just set TIF_MEMDIE so it can die quickly.
         */
        task_lock(p);
        if (task_will_free_mem(p)) {
                mark_oom_victim(p);
                wake_oom_reaper(p);
                task_unlock(p);
                put_task_struct(p);
                return;
        }
        task_unlock(p);

        if (__ratelimit(&oom_rs))
                dump_header(oc, p);

        pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
                message, task_pid_nr(p), p->comm, points);

        /*
         * If any of p's children has a different mm and is eligible for kill,
         * the one with the highest oom_badness() score is sacrificed for its
         * parent. This attempts to lose the minimal amount of work done while
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
        for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;

                        if (process_shares_mm(child, p->mm))
                                continue;
                        /*
                         * oom_badness() returns 0 if the thread is unkillable
                         */
                        child_points = oom_badness(child,
                                oc->memcg, oc->nodemask, oc->totalpages);
                        if (child_points > victim_points) {
                                put_task_struct(victim);
                                victim = child;
                                victim_points = child_points;
                                get_task_struct(victim);
                        }
                }
        }
        read_unlock(&tasklist_lock);

        p = find_lock_task_mm(victim);
        if (!p) {
                put_task_struct(victim);
                return;
        } else if (victim != p) {
                get_task_struct(p);
                put_task_struct(victim);
                victim = p;
        }

        /* Get a reference to safely compare mm after task_unlock(victim) */
        mm = victim->mm;
        atomic_inc(&mm->mm_count);
        /*
         * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
         * the OOM victim from depleting the memory reserves from the user
         * space under its control.
         */
        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
        mark_oom_victim(victim);
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
                K(get_mm_counter(victim->mm, MM_ANONPAGES)),
                K(get_mm_counter(victim->mm, MM_FILEPAGES)),
                K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
        task_unlock(victim);

        /*
         * Kill all user processes sharing victim->mm in other thread groups, if
         * any. They don't get access to memory reserves, though, to avoid
         * depletion of all memory. This prevents mm->mmap_sem livelock when an
         * oom killed thread cannot exit because it requires the semaphore and
         * it's contended by another thread trying to allocate memory itself.
         * That thread will now get access to memory reserves since it has a
         * pending fatal signal.
         */
        rcu_read_lock();
        for_each_process(p) {
                if (!process_shares_mm(p, mm))
                        continue;
                if (same_thread_group(p, victim))
                        continue;
                if (is_global_init(p)) {
                        can_oom_reap = false;
                        set_bit(MMF_OOM_SKIP, &mm->flags);
                        pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
                                        task_pid_nr(victim), victim->comm,
                                        task_pid_nr(p), p->comm);
                        continue;
                }
                /*
                 * No use_mm() user needs to read from userspace, so it is
                 * OK to reap it.
                 */
                if (unlikely(p->flags & PF_KTHREAD))
                        continue;
                do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
        }
        rcu_read_unlock();

        if (can_oom_reap)
                wake_oom_reaper(victim);

        mmdrop(mm);
        put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
                               enum oom_constraint constraint)
{
        if (likely(!sysctl_panic_on_oom))
                return;
        if (sysctl_panic_on_oom != 2) {
                /*
                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
                 * does not panic for cpuset, mempolicy, or memcg allocation
                 * failures.
                 */
                if (constraint != CONSTRAINT_NONE)
                        return;
        }
        /* Do not panic for oom kills triggered by sysrq */
        if (is_sysrq_oom(oc))
                return;
        dump_header(oc, NULL);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
        unsigned long freed = 0;
        enum oom_constraint constraint = CONSTRAINT_NONE;

        if (oom_killer_disabled)
                return false;

        if (!is_memcg_oom(oc)) {
                blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
                if (freed > 0)
                        /* Got some memory back in the last second. */
                        return true;
        }

        /*
         * If current has a pending SIGKILL or is exiting, then automatically
         * select it. The goal is to allow it to allocate so that it may
         * quickly exit and free its memory.
         */
        if (task_will_free_mem(current)) {
                mark_oom_victim(current);
                wake_oom_reaper(current);
                return true;
        }

        /*
         * The OOM killer does not compensate for IO-less reclaim.
         * pagefault_out_of_memory lost its gfp context so we have to
         * make sure to exclude the 0 mask - all other users should have at
         * least ___GFP_DIRECT_RECLAIM to get here.
         */
        if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
                return true;

        /*
         * Check if there were limitations on the allocation (only relevant for
         * NUMA and memcg) that may require different handling.
         */
        constraint = constrained_alloc(oc);
        if (constraint != CONSTRAINT_MEMORY_POLICY)
                oc->nodemask = NULL;
        check_panic_on_oom(oc, constraint);

        if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
            current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
            current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
                get_task_struct(current);
                oc->chosen = current;
                oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
                return true;
        }

        select_bad_process(oc);
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
                dump_header(oc, NULL);
                panic("Out of memory and no killable processes...\n");
        }
        if (oc->chosen && oc->chosen != (void *)-1UL) {
                oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
                                 "Memory cgroup out of memory");
                /*
                 * Give the killed process a good chance to exit before trying
                 * to allocate memory again.
                 */
                schedule_timeout_killable(1);
        }
        return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
        struct oom_control oc = {
                .zonelist = NULL,
                .nodemask = NULL,
                .memcg = NULL,
                .gfp_mask = 0,
                .order = 0,
        };

        if (mem_cgroup_oom_synchronize(true))
                return;

        if (!mutex_trylock(&oom_lock))
                return;
        out_of_memory(&oc);
        mutex_unlock(&oom_lock);
}