/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy-constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current's, otherwise it may
			 * be needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy-constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}
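/*
 * Usage sketch (illustrative only, not a caller in this file): the returned
 * thread is task_lock()ed, so every caller must pair a non-NULL result with
 * task_unlock() once it is done inspecting ->mm:
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *
 *	if (t) {
 *		... safe to dereference t->mm here ...
 *		task_unlock(t);
 *	}
 */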
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @memcg: memory controller for memcg-constrained ooms, NULL otherwise
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple
 * and predictable as possible.  The goal is to return the highest value for
 * the task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have been already oom reaped.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_REAPED, &p->mm->flags)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that
	 * each task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get a 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
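/*
 * Worked example of the oom_score_adj normalization above (illustrative
 * numbers only): with 4KiB pages and totalpages = 1048576 (4GiB of RAM plus
 * swap), one oom_score_adj unit is worth totalpages / 1000 = 1048 pages,
 * i.e. roughly 0.1% of allowed memory.  A task with oom_score_adj = 500
 * therefore gets 500 * 1048 = 524000 extra points, as if it used about half
 * of the allowed memory, while oom_score_adj = -500 subtracts the same
 * amount from its badness.
 */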
/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used.  So we should avoid
	 * killing current; we have to do a random task kill in this case.
	 * Hopefully this would be CONSTRAINT_THISNODE, but there is no way
	 * to handle it, for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by a cpuset constraint */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif

enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
			struct task_struct *task, unsigned long totalpages)
{
	if (oom_unkillable_task(task, NULL, oc->nodemask))
		return OOM_SCAN_CONTINUE;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves.
	 */
	if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims))
		return OOM_SCAN_ABORT;

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task))
		return OOM_SCAN_SELECT;

	return OOM_SCAN_OK;
}

/*
 * Simple selection loop.  We choose the process with the highest number of
 * 'points'.  Returns (struct task_struct *)(-1UL) on scan abort.
 */
static struct task_struct *select_bad_process(struct oom_control *oc,
		unsigned int *ppoints, unsigned long totalpages)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	for_each_process(p) {
		unsigned int points;

		switch (oom_scan_process_thread(oc, p, totalpages)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			rcu_read_unlock();
			return (struct task_struct *)(-1UL);
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, NULL, oc->nodemask, totalpages);
		if (!points || points < chosen_points)
			continue;

		chosen = p;
		chosen_points = points;
	}
	if (chosen)
		get_task_struct(chosen);
	rcu_read_unlock();

	*ppoints = chosen_points * 1000 / totalpages;
	return chosen;
}
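/*
 * Worked example for the *ppoints scaling above (illustrative numbers only):
 * chosen_points is in pages, so the division reports the victim's badness in
 * thousandths of totalpages.  With totalpages = 1048576 and a chosen task
 * whose rss + page tables + swap come to 314573 pages,
 * *ppoints = 314573 * 1000 / 1048576 = 300, i.e. the victim uses roughly
 * 30% of the allowed memory.
 */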
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the
 * same memcg, not in the same cpuset, or bound to a disjoint set of mempolicy
 * nodes are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p,
			struct mem_cgroup *memcg)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (memcg)
		mem_cgroup_print_oom_info(memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
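/*
 * K() converts a page count to kilobytes: each page is 2^PAGE_SHIFT bytes,
 * so x pages are x << (PAGE_SHIFT - 10) kB.  Illustrative numbers only:
 * with the common PAGE_SHIFT of 12 (4KiB pages), K(x) == x * 4,
 * e.g. K(300) == 1200kB.
 */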
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task(struct task_struct *tsk)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct mm_struct *mm = NULL;
	struct task_struct *p;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task		exit_mm
	 *   atomic_inc_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	/*
	 * Make sure we find the associated mm_struct even when the particular
	 * thread has already terminated and cleared its mm.
	 * We might race with the exit path, so consider our work done if
	 * there is no mm.
	 */
	p = find_lock_task_mm(tsk);
	if (!p)
		goto unlock_oom;
	mm = p->mm;
	atomic_inc(&mm->mm_users);
	task_unlock(p);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto unlock_oom;
	}

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs-backed pages because
		 * everything reclaimable has already been reclaimed and we
		 * do not want to block exit_mmap by keeping the mm refcount
		 * elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * This task can be safely ignored because we cannot do much more
	 * to release its memory.
	 */
	set_bit(MMF_OOM_REAPED, &mm->flags);
unlock_oom:
	mutex_unlock(&oom_lock);
	/*
	 * Drop our reference but make sure the mmput slow path is called from
	 * a different context because we shouldn't risk getting stuck there
	 * and putting the oom_reaper out of the way.
	 */
	if (mm)
		mmput_async(mm);
	return ret;
}
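/*
 * The VMA walk above only reaps memory that can be dropped without extra
 * work.  Expressed as a predicate (a summary of the checks above, not a
 * helper that exists in this file):
 *
 *	reapable(vma) := !is_vm_hugetlb_page(vma) &&
 *			 !(vma->vm_flags & VM_LOCKED) &&
 *			 (vma_is_anonymous(vma) ||
 *			  !(vma->vm_flags & VM_SHARED))
 */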
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk))
		schedule_timeout_idle(HZ/10);

	if (attempts > MAX_OOM_REAP_RETRIES) {
		pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
				task_pid_nr(tsk), tsk->comm);
		debug_show_all_locks();
	}

	/*
	 * Clear TIF_MEMDIE because the task should no longer be sitting on
	 * reasonably reclaimable memory, or it is not a good candidate for
	 * the oom victim right now because it cannot release its memory
	 * itself, nor can the oom reaper do it for it.
	 */
	tsk->oom_reaper_list = NULL;
	exit_oom_victim(tsk);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}
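/*
 * Retry budget of the loop above, with illustrative numbers: each failed
 * __oom_reap_task() attempt sleeps for HZ/10 jiffies (100ms), so with
 * MAX_OOM_REAP_RETRIES == 10 the reaper gives up on a victim after roughly
 * one second of contention on its mmap_sem.
 */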
static int oom_reaper(void *unused)
{
	set_freezable();

	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}

/*
 * Check if we can reap the given task.  This has to be called with a stable
 * tsk->mm.
 */
void try_oom_reaper(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct task_struct *p;

	if (!mm)
		return;

	/*
	 * There might be other threads/processes which are either not
	 * dying or even not killable.
	 */
	if (atomic_read(&mm->mm_users) > 1) {
		rcu_read_lock();
		for_each_process(p) {
			if (!process_shares_mm(p, mm))
				continue;
			if (fatal_signal_pending(p))
				continue;

			/*
			 * If the task is exiting, make sure the whole thread
			 * group is exiting and cannot access mm anymore.
			 */
			if (signal_group_exit(p->signal))
				continue;

			/* Give up */
			rcu_read_unlock();
			return;
		}
		rcu_read_unlock();
	}

	wake_oom_reaper(tsk);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 */
void mark_oom_victim(struct task_struct *tsk)
{
	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_inc(&tsk->signal->oom_victims);
	/*
	 * Make sure that the task is woken up from uninterruptible sleep if
	 * it is frozen, because the OOM killer would not be able to free any
	 * memory and would livelock otherwise.  freezing_slow_path will tell
	 * the freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(struct task_struct *tsk)
{
	if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_dec(&tsk->signal->oom_victims);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_disable - disable OOM killer
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result.  Any
 * new usage of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(void)
{
	/*
	 * Make sure to not race with an ongoing OOM killer.  Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	wait_event(oom_victims_wait, !atomic_read(&oom_victims));

	return true;
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}
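/*
 * Usage sketch (illustrative; the intended caller is the suspend/hibernation
 * path, details elided): disable the OOM killer before declaring user space
 * frozen and re-enable it on the way back:
 *
 *	if (!oom_killer_disable())
 *		return -EBUSY;	... a racing OOM victim killed us ...
 *	... freeze kernel threads, snapshot memory ...
 *	oom_killer_enable();
 */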
/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct oom_control *oc, struct task_struct *p,
		      unsigned int points, unsigned long totalpages,
		      struct mem_cgroup *memcg, const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly.
	 */
	task_lock(p);
	if (p->mm && task_will_free_mem(p)) {
		mark_oom_victim(p);
		try_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p, memcg);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, memcg, oc->nodemask,
								totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it is contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p) ||
		    p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
			/*
			 * We cannot use oom_reaper for the mm shared by this
			 * process because it wouldn't get killed and so the
			 * memory might be still used.
			 */
			can_oom_reap = false;
			continue;
		}
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint,
			struct mem_cgroup *memcg)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL, memcg);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
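/*
 * Registration sketch (illustrative; the example_* names are hypothetical,
 * not symbols in the tree): a subsystem that can drop caches on demand can
 * hook the chain above.  out_of_memory() invokes the chain with a pointer to
 * an unsigned long; the callback adds the number of pages it freed so the
 * OOM killer can bail out early:
 *
 *	static int example_oom_notify(struct notifier_block *nb,
 *				      unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += example_shrink_caches();	... hypothetical ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_oom_nb = {
 *		.notifier_call = example_oom_notify,
 *	};
 *
 *	register_oom_notifier(&example_oom_nb);
 */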
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR trying to be smart about which process to kill.  Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int uninitialized_var(points);
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return true;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 *
	 * But don't select if current has already released its mm and cleared
	 * TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur.
	 */
	if (current->mm &&
	    (fatal_signal_pending(current) || task_will_free_mem(current))) {
		mark_oom_victim(current);
		try_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to make
	 * sure to exclude a 0 mask - all other users should have at least
	 * ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(oc, &totalpages);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint, NULL);

	if (sysctl_oom_kill_allocating_task && current->mm &&
	    !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oom_kill_process(oc, current, 0, totalpages, NULL,
				 "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	p = select_bad_process(oc, &points, totalpages);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p && !is_sysrq_oom(oc)) {
		dump_header(oc, NULL, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (p && p != (void *)-1UL) {
		oom_kill_process(oc, p, points, totalpages, NULL,
				 "Out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return true;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If any populated zone has ZONE_OOM_LOCKED set, a
 * parallel oom killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;

	if (!out_of_memory(&oc)) {
		/*
		 * There shouldn't be any user tasks runnable while the
		 * OOM killer is disabled, so the current task has to
		 * be a racing OOM victim for which oom_killer_disable()
		 * is waiting.
		 */
		WARN_ON(test_thread_flag(TIF_MEMDIE));
	}

	mutex_unlock(&oom_lock);
}
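/*
 * Caller sketch (illustrative; the page allocator's __alloc_pages_may_oom()
 * follows roughly this shape, details elided): fill in an oom_control for
 * the failing allocation, serialize on oom_lock and let out_of_memory()
 * pick and kill a victim:
 *
 *	struct oom_control oc = {
 *		.zonelist = zonelist,
 *		.nodemask = nodemask,
 *		.gfp_mask = gfp_mask,
 *		.order = order,
 *	};
 *
 *	if (mutex_trylock(&oom_lock)) {
 *		out_of_memory(&oc);
 *		mutex_unlock(&oom_lock);
 *	}
 */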