/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 * Thanks go out to Claus Fischer for some serious inspiration and
 * for goading me into coding this file...
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks;
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */
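/*
 * The three sysctls above are exposed under /proc/sys/vm.  A minimal
 * usage sketch from userspace (the values are illustrative):
 *
 *	echo 1 > /proc/sys/vm/panic_on_oom              # panic instead of killing
 *	echo 1 > /proc/sys/vm/oom_kill_allocating_task  # kill the allocating task
 *	echo 0 > /proc/sys/vm/oom_dump_tasks            # skip the task dump on OOM
 */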
/*
 * Do the allowed memory nodes of any thread of the target process
 * overlap ours?
 */
static int has_intersects_mems_allowed(struct task_struct *tsk)
{
	struct task_struct *t;

	t = tsk;
	do {
		if (cpuset_mems_allowed_intersects(current, t))
			return 1;
		t = next_thread(t);
	} while (t != tsk);

	return 0;
}

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time;
	struct mm_struct *mm;
	struct task_struct *child;
	int oom_adj = p->signal->oom_adj;
	struct task_cputime task_time;
	unsigned long utime;
	unsigned long stime;

	if (oom_adj == OOM_DISABLE)
		return 0;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_OOM_ORIGIN)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless amount of children. In case a
	 * single child is eating the vast majority of memory, adding
	 * only half of it to the parent makes the child our kill
	 * candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	thread_group_cputime(p, &task_time);
	utime = cputime_to_jiffies(task_time.utime);
	stime = cputime_to_jiffies(task_time.stime);
	cpu_time = (utime + stime) >> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	if (cpu_time)
		points /= int_sqrt(cpu_time);
	if (run_time)
		points /= int_sqrt(int_sqrt(run_time));

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!has_intersects_mems_allowed(p))
		points /= 8;

	/*
	 * Adjust the score by oom_adj.
	 */
	if (oom_adj) {
		if (oom_adj > 0) {
			if (!points)
				points = 1;
			points <<= oom_adj;
		} else
			points >>= -(oom_adj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
		p->pid, p->comm, points);
#endif
	return points;
}
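/*
 * A worked example of the oom_adj shift at the end of badness() (the
 * numbers are made up).  oom_adj is set from userspace via
 * /proc/<pid>/oom_adj and ranges from -16 to +15, with -17
 * (OOM_DISABLE) exempting the task entirely:
 *
 *	echo -17 > /proc/1234/oom_adj	# never OOM kill pid 1234
 *	echo   8 > /proc/1234/oom_adj	# points <<= 8: 256x more likely
 *	echo  -4 > /proc/1234/oom_adj	# points >>= 4: 16x less likely
 *
 * So a task whose raw score came out to 4000 pages ends up at 1024000
 * points with oom_adj = 8, and at 250 points with oom_adj = -4.
 */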
/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);

	/*
	 * We only get here with __GFP_THISNODE when __GFP_NOFAIL is also
	 * used, so we should avoid killing current; a random task has to
	 * be killed in this case.  Ideally this would be
	 * CONSTRAINT_THISNODE, but there is no way to handle that yet.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * The nodemask here is the one passed to alloc_pages().  cpusets
	 * do not use this nodemask for their hardwall/softwall/hierarchy
	 * features; mempolicy is its only user here.  Check whether the
	 * mempolicy's nodemask covers all of N_HIGH_MEMORY.
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
		return CONSTRAINT_MEMORY_POLICY;

	/* Check whether this allocation failure was caused by a cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			return CONSTRAINT_CPUSET;

	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	return CONSTRAINT_NONE;
}
#endif

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. The caller must hold tasklist_lock.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
						struct mem_cgroup *mem)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	for_each_process(p) {
		unsigned long points;

		/*
		 * Skip kernel threads and tasks which have already
		 * released their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_global_init(p))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->signal->oom_adj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	}

	return chosen;
}
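/*
 * A sketch of how callers interpret select_bad_process()'s three
 * possible outcomes (this mirrors __out_of_memory() below):
 *
 *	p = select_bad_process(&points, NULL);
 *	if (PTR_ERR(p) == -1UL)
 *		return;			// a task is already dying; back off
 *	if (!p)
 *		panic(...);		// nothing killable is left
 *	oom_kill_process(p, ...);	// otherwise kill the chosen task
 */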
/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: target memory controller
 *
 * Dumps the current memory state of all system tasks, excluding kernel threads.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * score, and name.
 *
 * If @mem is non-NULL, only tasks that are members of that mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *g, *p;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
	do_each_thread(g, p) {
		struct mm_struct *mm;

		if (mem && !task_in_mem_cgroup(p, mem))
			continue;
		if (!thread_group_leader(p))
			continue;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			/*
			 * total_vm and rss sizes do not exist for tasks with no
			 * mm so there's no need to report them; they can't be
			 * oom killed anyway.
			 */
			task_unlock(p);
			continue;
		}
		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
		       p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
		       get_mm_rss(mm), (int)task_cpu(p), p->signal->oom_adj,
		       p->comm);
		task_unlock(p);
	} while_each_thread(g, p);
}
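/*
 * Example of what dump_tasks() prints (the values are illustrative,
 * not taken from a real system):
 *
 *	[ pid ]   uid  tgid total_vm      rss cpu oom_adj name
 *	[ 2471]  1000  2471    53221    10345   1   0 firefox
 *	[ 2602]     0  2602     2048      312   0 -17 udevd
 */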
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *mem)
{
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj);
	task_lock(current);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem();
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem);
}

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAWIO,
 * though it's unlikely that we select a process with CAP_SYS_RAWIO
 * set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_global_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	task_lock(p);
	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task %d (%s)!\n",
			task_pid_nr(p), p->comm);
		task_unlock(p);
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s) "
			"vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
			task_pid_nr(p), p->comm,
			K(p->mm->total_vm),
			K(get_mm_counter(p->mm, MM_ANONPAGES)),
			K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}

static int oom_kill_task(struct task_struct *p)
{
	/*
	 * WARNING: p->mm may not be dereferenced since we did not obtain
	 * its value from get_task_mm(p).  This is OK since all we need to
	 * do here is compare it against NULL.
	 *
	 * Furthermore, even if p->mm contains a non-NULL value, it may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */
	if (!p->mm || p->signal->oom_adj == OOM_DISABLE)
		return 1;

	__oom_kill_task(p, 1);

	return 0;
}

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *c;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
		message, task_pid_nr(p), p->comm, points);

	/* Try to kill a child first */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == p->mm)
			continue;
		if (mem && !task_in_mem_cgroup(c, mem))
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	if (sysctl_panic_on_oom == 2)
		panic("out of memory(memcg). panic_on_oom is selected.\n");
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem);
	if (PTR_ERR(p) == -1UL)
		goto out;

	if (!p)
		p = current;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
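/*
 * A sketch of how a subsystem might use the notifier hooks above to
 * release memory just before the OOM killer fires.  The callback
 * receives a pointer to a running total of freed pages (see the
 * blocking_notifier_call_chain() calls below); "my_cache_shrink" is a
 * hypothetical helper, not a real kernel function:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_cache_shrink();	// pages we gave back
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);	// e.g. from module init
 */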
/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zone_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}
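/*
 * The two functions above pair up around out_of_memory().  A sketch of
 * the caller's pattern, loosely modeled on the page allocator's slow
 * path in mm/page_alloc.c:
 *
 *	if (try_set_zone_oom(zonelist, gfp_mask)) {
 *		out_of_memory(zonelist, gfp_mask, order, nodemask);
 *		clear_zonelist_oom(zonelist, gfp_mask);
 *	}
 *	// else: another OOM kill covering these zones is in flight,
 *	// so back off and retry the allocation instead.
 */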
/*
 * Must be called with tasklist_lock held for read.
 */
static void __out_of_memory(gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points;

	if (sysctl_oom_kill_allocating_task)
		if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
				"Out of memory (oom_kill_allocating_task)"))
			return;
retry:
	/*
	 * Rambo mode: Shoot down a process and hope it solves whatever
	 * issues we may have.
	 */
	p = select_bad_process(&points, NULL);

	if (PTR_ERR(p) == -1UL)
		return;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		read_unlock(&tasklist_lock);
		dump_header(NULL, gfp_mask, order, NULL);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, NULL,
			     "Out of memory"))
		goto retry;
}

/*
 * The pagefault handler calls into here because it is out of memory but
 * doesn't know exactly how or why.
 */
void pagefault_out_of_memory(void)
{
	unsigned long freed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom)
		panic("out of memory from page fault. panic_on_oom is selected.\n");

	read_lock(&tasklist_lock);
	__out_of_memory(0, 0); /* unknown gfp_mask and order */
	read_unlock(&tasklist_lock);

	/*
	 * Give the killed task a good chance of exiting before we
	 * retry the memory allocation.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to the failing allocation
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom == 2) {
		dump_header(NULL, gfp_mask, order, NULL);
		panic("out of memory. Compulsory panic_on_oom is selected.\n");
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, gfp_mask, order, 0, NULL,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom) {
			dump_header(NULL, gfp_mask, order, NULL);
			panic("out of memory. panic_on_oom is selected\n");
		}
		/* Fall-through */
	case CONSTRAINT_CPUSET:
		__out_of_memory(gfp_mask, order);
		break;
	}

	read_unlock(&tasklist_lock);

	/*
	 * Give the killed task a good chance of exiting before we
	 * retry the memory allocation, unless that task is current.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}