/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of the first task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq; otherwise the order
 * is used only for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* Return true if the task is not an adequate candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When called from mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_REAPED, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that the
	 * task's rss, page table and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
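
/*
 * Illustrative worked example (not part of the original source): on a machine
 * with totalpages = 4194304 (16GB of RAM plus swap with 4kB pages), a non-root
 * task whose rss + swap entries + page tables add up to 1048576 pages and
 * whose oom_score_adj is 100 would score roughly:
 *
 *	points = 1048576
 *	adj    = 100 * (4194304 / 1000) = 100 * 4194 = 419400   (integer division)
 *	points + adj = 1467976
 *
 * i.e. each 1000 units of oom_score_adj shift the badness by about the whole
 * of usable memory, while OOM_SCORE_ADJ_MIN excludes the task entirely
 * (handled earlier in oom_badness()).
 */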
/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * This is reached only when __GFP_NOFAIL is used together with
	 * __GFP_THISNODE, so avoid killing current and fall back to killing
	 * some other task.  Ideally this would be CONSTRAINT_THISNODE, but
	 * there is no way to handle that constraint yet.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif

enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
					struct task_struct *task)
{
	if (oom_unkillable_task(task, NULL, oc->nodemask))
		return OOM_SCAN_CONTINUE;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_REAPED because the chances that it would
	 * release any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims)) {
		struct task_struct *p = find_lock_task_mm(task);
		enum oom_scan_t ret = OOM_SCAN_ABORT;

		if (p) {
			if (test_bit(MMF_OOM_REAPED, &p->mm->flags))
				ret = OOM_SCAN_CONTINUE;
			task_unlock(p);
		}

		return ret;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task))
		return OOM_SCAN_SELECT;

	return OOM_SCAN_OK;
}

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. Returns -1 on scan abort.
 */
static struct task_struct *select_bad_process(struct oom_control *oc,
		unsigned int *ppoints, unsigned long totalpages)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	for_each_process(p) {
		unsigned int points;

		switch (oom_scan_process_thread(oc, p)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			rcu_read_unlock();
			return (struct task_struct *)(-1UL);
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, NULL, oc->nodemask, totalpages);
		if (!points || points < chosen_points)
			continue;

		chosen = p;
		chosen_points = points;
	}
	if (chosen)
		get_task_struct(chosen);
	rcu_read_unlock();

	*ppoints = chosen_points * 1000 / totalpages;
	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (oc->memcg)
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
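
/*
 * Illustrative note (not in the original source): K() converts a page count
 * into kilobytes for printing.  Assuming the common PAGE_SHIFT of 12 (4kB
 * pages), K(x) is x << 2, so e.g. K(300) pages == 1200kB.
 */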
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task(struct task_struct *tsk)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct mm_struct *mm = NULL;
	struct task_struct *p;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	/*
	 * Make sure we find the associated mm_struct even when the particular
	 * thread has already terminated and cleared its mm.
	 * We might race with the exit path, so consider our work done if there
	 * is no mm.
	 */
	p = find_lock_task_mm(tsk);
	if (!p)
		goto unlock_oom;
	mm = p->mm;
	atomic_inc(&mm->mm_count);
	task_unlock(p);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto mm_drop;
	}

	/*
	 * Increase mm_users only after we know we will reap something so
	 * that mmput_async is called only when we have reaped something
	 * and the delayed __mmput doesn't matter that much.
	 */
	if (!mmget_not_zero(mm)) {
		up_read(&mm->mmap_sem);
		goto mm_drop;
	}

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * This task can be safely ignored because we cannot do much more
	 * to release its memory.
	 */
	set_bit(MMF_OOM_REAPED, &mm->flags);
	/*
	 * Drop our reference but make sure the mmput slow path is called from a
	 * different context because we should not risk getting stuck there and
	 * putting the oom_reaper out of the way.
	 */
	mmput_async(mm);
mm_drop:
	mmdrop(mm);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk))
		schedule_timeout_idle(HZ/10);

	if (attempts > MAX_OOM_REAP_RETRIES) {
		struct task_struct *p;

		pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
				task_pid_nr(tsk), tsk->comm);

		/*
		 * If we've already tried to reap this task in the past and
		 * failed, it probably doesn't make much sense to try yet again
		 * so hide the mm from the oom killer so that it can move on
		 * to another task with a different mm struct.
		 */
		p = find_lock_task_mm(tsk);
		if (p) {
			if (test_and_set_bit(MMF_OOM_NOT_REAPABLE, &p->mm->flags)) {
				pr_info("oom_reaper: giving up pid:%d (%s)\n",
						task_pid_nr(tsk), tsk->comm);
				set_bit(MMF_OOM_REAPED, &p->mm->flags);
			}
			task_unlock(p);
		}

		debug_show_all_locks();
	}

	/*
	 * Clear TIF_MEMDIE because the task shouldn't be sitting on
	 * reasonably reclaimable memory anymore, or it is not a good candidate
	 * for the oom victim right now because it cannot release its memory
	 * itself nor can it be reaped by the oom reaper.
	 */
	tsk->oom_reaper_list = NULL;
	exit_oom_victim(tsk);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	set_freezable();

	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#endif

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 */
void mark_oom_victim(struct task_struct *tsk)
{
	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_inc(&tsk->signal->oom_victims);
	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(struct task_struct *tsk)
{
	if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_dec(&tsk->signal->oom_victims);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_disable - disable OOM killer
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed.
 *
 * The function cannot be called when there are runnable user tasks because
 * userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(void)
{
	/*
	 * Make sure to not race with an ongoing OOM killer. Check that
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	wait_event(oom_victims_wait, !atomic_read(&oom_victims));

	return true;
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (by holding task_lock
 * or by operating on current).
 */
bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without an mm because the task might have already passed
	 * exit_mm and exit_oom_victim. The oom_reaper could have rescued that,
	 * but do not rely on it for now. We could consider find_lock_task_mm
	 * in the future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there is
	 * only a small chance it will free any more.
	 */
	if (test_bit(MMF_OOM_REAPED, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * This is really pessimistic, but we do not have any reliable way
	 * to check whether external processes share our mm.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct oom_control *oc, struct task_struct *p,
		      unsigned int points, unsigned long totalpages,
		      const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly.
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
					oc->memcg, oc->nodemask, totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p)) {
			/*
			 * We cannot use oom_reaper for the mm shared by this
			 * process because it wouldn't get killed and so the
			 * memory might still be in use. Hide the mm from the
			 * oom killer to guarantee OOM forward progress.
			 */
			can_oom_reap = false;
			set_bit(MMF_OOM_REAPED, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
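
/*
 * Illustrative example (not part of this file): a subsystem that can drop a
 * private cache under memory pressure could hook into the chain above.  The
 * callback receives a pointer to the 'freed' counter that out_of_memory()
 * passes to blocking_notifier_call_chain(); example_shrink_cache() below is
 * a made-up helper standing in for whatever the subsystem can actually free.
 *
 *	static int example_oom_notify(struct notifier_block *nb,
 *				      unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += example_shrink_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_oom_nb = {
 *		.notifier_call = example_oom_notify,
 *	};
 *
 *	register_oom_notifier(&example_oom_nb);
 */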
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int uninitialized_var(points);
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return true;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have
	 * at least ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(oc, &totalpages);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (sysctl_oom_kill_allocating_task && current->mm &&
	    !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oom_kill_process(oc, current, 0, totalpages,
				 "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	p = select_bad_process(oc, &points, totalpages);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p && !is_sysrq_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (p && p != (void *)-1UL) {
		oom_kill_process(oc, p, points, totalpages, "Out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return true;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;

	if (!out_of_memory(&oc)) {
		/*
		 * There shouldn't be any user tasks runnable while the
		 * OOM killer is disabled, so the current task has to
		 * be a racing OOM victim for which oom_killer_disable()
		 * is waiting.
		 */
		WARN_ON(test_thread_flag(TIF_MEMDIE));
	}

	mutex_unlock(&oom_lock);
}
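
/*
 * Illustrative example (not part of this file): other callers fill in struct
 * oom_control themselves before taking oom_lock and calling out_of_memory().
 * A memcg charge failure, for instance, would look roughly like the sketch
 * below; the exact setup lives in mm/memcontrol.c.
 *
 *	struct oom_control oc = {
 *		.zonelist = NULL,
 *		.nodemask = NULL,
 *		.memcg = memcg,
 *		.gfp_mask = gfp_mask,
 *		.order = order,
 *	};
 *
 *	mutex_lock(&oom_lock);
 *	out_of_memory(&oc);
 *	mutex_unlock(&oom_lock);
 */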