// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows us to stop
 * using not-yet-corrupted but suspicious pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

static bool __page_handle_poison(struct page *page)
{
	bool ret;

	zone_pcp_disable(page_zone(page));
	ret = dissolve_free_huge_page(page);
	if (!ret)
		ret = take_page_off_buddy(page);
	zone_pcp_enable(page_zone(page));

	return ret;
}

static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since dissolve_free_huge_page
		 * returns 0 for non-hugetlb pages as well.
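		 * (__page_handle_poison() first tries dissolve_free_huge_page()
		 * and falls back to take_page_off_buddy(), so the same helper
		 * covers both the free hugetlb and the free buddy page case.)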
88 */ 89 if (!__page_handle_poison(page)) 90 /* 91 * We could fail to take off the target page from buddy 92 * for example due to racy page allocation, but that's 93 * acceptable because soft-offlined page is not broken 94 * and if someone really want to use it, they should 95 * take it. 96 */ 97 return false; 98 } 99 100 SetPageHWPoison(page); 101 if (release) 102 put_page(page); 103 page_ref_inc(page); 104 num_poisoned_pages_inc(); 105 106 return true; 107 } 108 109 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE) 110 111 u32 hwpoison_filter_enable = 0; 112 u32 hwpoison_filter_dev_major = ~0U; 113 u32 hwpoison_filter_dev_minor = ~0U; 114 u64 hwpoison_filter_flags_mask; 115 u64 hwpoison_filter_flags_value; 116 EXPORT_SYMBOL_GPL(hwpoison_filter_enable); 117 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); 118 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); 119 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); 120 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); 121 122 static int hwpoison_filter_dev(struct page *p) 123 { 124 struct address_space *mapping; 125 dev_t dev; 126 127 if (hwpoison_filter_dev_major == ~0U && 128 hwpoison_filter_dev_minor == ~0U) 129 return 0; 130 131 /* 132 * page_mapping() does not accept slab pages. 133 */ 134 if (PageSlab(p)) 135 return -EINVAL; 136 137 mapping = page_mapping(p); 138 if (mapping == NULL || mapping->host == NULL) 139 return -EINVAL; 140 141 dev = mapping->host->i_sb->s_dev; 142 if (hwpoison_filter_dev_major != ~0U && 143 hwpoison_filter_dev_major != MAJOR(dev)) 144 return -EINVAL; 145 if (hwpoison_filter_dev_minor != ~0U && 146 hwpoison_filter_dev_minor != MINOR(dev)) 147 return -EINVAL; 148 149 return 0; 150 } 151 152 static int hwpoison_filter_flags(struct page *p) 153 { 154 if (!hwpoison_filter_flags_mask) 155 return 0; 156 157 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == 158 hwpoison_filter_flags_value) 159 return 0; 160 else 161 return -EINVAL; 162 } 163 164 /* 165 * This allows stress tests to limit test scope to a collection of tasks 166 * by putting them under some memcg. This prevents killing unrelated/important 167 * processes such as /sbin/init. Note that the target task may share clean 168 * pages with init (eg. libc text), which is harmless. If the target task 169 * share _dirty_ pages with another task B, the test scheme must make sure B 170 * is also included in the memcg. At last, due to race conditions this filter 171 * can only guarantee that the page either belongs to the memcg tasks, or is 172 * a freed page. 173 */ 174 #ifdef CONFIG_MEMCG 175 u64 hwpoison_filter_memcg; 176 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); 177 static int hwpoison_filter_task(struct page *p) 178 { 179 if (!hwpoison_filter_memcg) 180 return 0; 181 182 if (page_cgroup_ino(p) != hwpoison_filter_memcg) 183 return -EINVAL; 184 185 return 0; 186 } 187 #else 188 static int hwpoison_filter_task(struct page *p) { return 0; } 189 #endif 190 191 int hwpoison_filter(struct page *p) 192 { 193 if (!hwpoison_filter_enable) 194 return 0; 195 196 if (hwpoison_filter_dev(p)) 197 return -EINVAL; 198 199 if (hwpoison_filter_flags(p)) 200 return -EINVAL; 201 202 if (hwpoison_filter_task(p)) 203 return -EINVAL; 204 205 return 0; 206 } 207 #else 208 int hwpoison_filter(struct page *p) 209 { 210 return 0; 211 } 212 #endif 213 214 EXPORT_SYMBOL_GPL(hwpoison_filter); 215 216 /* 217 * Kill all processes that have a poisoned page mapped and then isolate 218 * the page. 
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	short size_shift;
};

/*
 * Send a signal to all processes that have the page mapped. The signal is
 * ``action optional'' if they are not immediately affected by the error, or
 * ``action required'' if the error happened in the current execution context.
 */
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;
	int ret = 0;

	pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
		pfn, t->comm, t->pid);

	if (flags & MF_ACTION_REQUIRED) {
		if (t == current)
			ret = force_sig_mceerr(BUS_MCEERR_AR,
					(void __user *)tk->addr, addr_lsb);
		else
			/* Signal other processes sharing the page if they have PF_MCE_EARLY set. */
			ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
					addr_lsb, t);
	} else {
		/*
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				addr_lsb, t);	/* synchronous? */
	}
	if (ret < 0)
		pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}

/*
 * Unknown page type encountered. Try to check whether it can be turned into
 * an LRU page by lru_add_drain_all(), or into a free page by reclaiming
 * slabs when possible.
 */
void shake_page(struct page *p, int access)
{
	if (PageHuge(p))
		return;

	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call shrink_node_slabs here (which would also shrink
	 * other caches) if access is not potentially fatal.
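	 * Note that what is actually called below is drop_slab_node() on the
	 * page's node; hugetlb pages were already skipped at the top of this
	 * function.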
301 */ 302 if (access) 303 drop_slab_node(page_to_nid(p)); 304 } 305 EXPORT_SYMBOL_GPL(shake_page); 306 307 static unsigned long dev_pagemap_mapping_shift(struct page *page, 308 struct vm_area_struct *vma) 309 { 310 unsigned long address = vma_address(page, vma); 311 pgd_t *pgd; 312 p4d_t *p4d; 313 pud_t *pud; 314 pmd_t *pmd; 315 pte_t *pte; 316 317 pgd = pgd_offset(vma->vm_mm, address); 318 if (!pgd_present(*pgd)) 319 return 0; 320 p4d = p4d_offset(pgd, address); 321 if (!p4d_present(*p4d)) 322 return 0; 323 pud = pud_offset(p4d, address); 324 if (!pud_present(*pud)) 325 return 0; 326 if (pud_devmap(*pud)) 327 return PUD_SHIFT; 328 pmd = pmd_offset(pud, address); 329 if (!pmd_present(*pmd)) 330 return 0; 331 if (pmd_devmap(*pmd)) 332 return PMD_SHIFT; 333 pte = pte_offset_map(pmd, address); 334 if (!pte_present(*pte)) 335 return 0; 336 if (pte_devmap(*pte)) 337 return PAGE_SHIFT; 338 return 0; 339 } 340 341 /* 342 * Failure handling: if we can't find or can't kill a process there's 343 * not much we can do. We just print a message and ignore otherwise. 344 */ 345 346 /* 347 * Schedule a process for later kill. 348 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. 349 */ 350 static void add_to_kill(struct task_struct *tsk, struct page *p, 351 struct vm_area_struct *vma, 352 struct list_head *to_kill) 353 { 354 struct to_kill *tk; 355 356 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); 357 if (!tk) { 358 pr_err("Memory failure: Out of memory while machine check handling\n"); 359 return; 360 } 361 362 tk->addr = page_address_in_vma(p, vma); 363 if (is_zone_device_page(p)) 364 tk->size_shift = dev_pagemap_mapping_shift(p, vma); 365 else 366 tk->size_shift = page_shift(compound_head(p)); 367 368 /* 369 * Send SIGKILL if "tk->addr == -EFAULT". Also, as 370 * "tk->size_shift" is always non-zero for !is_zone_device_page(), 371 * so "tk->size_shift == 0" effectively checks no mapping on 372 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times 373 * to a process' address space, it's possible not all N VMAs 374 * contain mappings for the page, but at least one VMA does. 375 * Only deliver SIGBUS with payload derived from the VMA that 376 * has a mapping for the page. 377 */ 378 if (tk->addr == -EFAULT) { 379 pr_info("Memory failure: Unable to find user space address %lx in %s\n", 380 page_to_pfn(p), tsk->comm); 381 } else if (tk->size_shift == 0) { 382 kfree(tk); 383 return; 384 } 385 386 get_task_struct(tsk); 387 tk->tsk = tsk; 388 list_add_tail(&tk->nd, to_kill); 389 } 390 391 /* 392 * Kill the processes that have been collected earlier. 393 * 394 * Only do anything when DOIT is set, otherwise just free the list 395 * (this is used for clean pages which do not need killing) 396 * Also when FAIL is set do a force kill because something went 397 * wrong earlier. 398 */ 399 static void kill_procs(struct list_head *to_kill, int forcekill, bool fail, 400 unsigned long pfn, int flags) 401 { 402 struct to_kill *tk, *next; 403 404 list_for_each_entry_safe (tk, next, to_kill, nd) { 405 if (forcekill) { 406 /* 407 * In case something went wrong with munmapping 408 * make sure the process doesn't catch the 409 * signal and then access the memory. Just kill it. 
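			 * The forced path below sends SIGKILL via
			 * do_send_sig_info(), while the advisory path goes
			 * through kill_proc() and delivers SIGBUS instead.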
			 */
			if (fail || tk->addr == -EFAULT) {
				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process, i.e. one
 * that expects to be signaled when a page mapped by the process gets
 * hwpoisoned. Return the task_struct of the dedicated thread (main thread
 * unless explicitly specified) if the process is "early kill", and NULL
 * otherwise.
 *
 * Note that the above holds for the Action Optional case. In the Action
 * Required case the SIGBUS is only meaningful to the current thread; for the
 * other, non-current processes sharing the same error page the error is
 * Action Optional, and if such a process is "early kill" the task_struct of
 * its dedicated thread is returned as well.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			       int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = page_lock_anon_vma_read(page);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}

/*
 * Collect processes when the error hit a file mapped page.
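 * Like collect_procs_anon(), but this walks the mapping's i_mmap interval
 * tree under i_mmap_lock_read() and tasklist_lock instead of the anon_vma
 * chain.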
522 */ 523 static void collect_procs_file(struct page *page, struct list_head *to_kill, 524 int force_early) 525 { 526 struct vm_area_struct *vma; 527 struct task_struct *tsk; 528 struct address_space *mapping = page->mapping; 529 pgoff_t pgoff; 530 531 i_mmap_lock_read(mapping); 532 read_lock(&tasklist_lock); 533 pgoff = page_to_pgoff(page); 534 for_each_process(tsk) { 535 struct task_struct *t = task_early_kill(tsk, force_early); 536 537 if (!t) 538 continue; 539 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, 540 pgoff) { 541 /* 542 * Send early kill signal to tasks where a vma covers 543 * the page but the corrupted page is not necessarily 544 * mapped it in its pte. 545 * Assume applications who requested early kill want 546 * to be informed of all such data corruptions. 547 */ 548 if (vma->vm_mm == t->mm) 549 add_to_kill(t, page, vma, to_kill); 550 } 551 } 552 read_unlock(&tasklist_lock); 553 i_mmap_unlock_read(mapping); 554 } 555 556 /* 557 * Collect the processes who have the corrupted page mapped to kill. 558 */ 559 static void collect_procs(struct page *page, struct list_head *tokill, 560 int force_early) 561 { 562 if (!page->mapping) 563 return; 564 565 if (PageAnon(page)) 566 collect_procs_anon(page, tokill, force_early); 567 else 568 collect_procs_file(page, tokill, force_early); 569 } 570 571 struct hwp_walk { 572 struct to_kill tk; 573 unsigned long pfn; 574 int flags; 575 }; 576 577 static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift) 578 { 579 tk->addr = addr; 580 tk->size_shift = shift; 581 } 582 583 static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, 584 unsigned long poisoned_pfn, struct to_kill *tk) 585 { 586 unsigned long pfn = 0; 587 588 if (pte_present(pte)) { 589 pfn = pte_pfn(pte); 590 } else { 591 swp_entry_t swp = pte_to_swp_entry(pte); 592 593 if (is_hwpoison_entry(swp)) 594 pfn = hwpoison_entry_to_pfn(swp); 595 } 596 597 if (!pfn || pfn != poisoned_pfn) 598 return 0; 599 600 set_to_kill(tk, addr, shift); 601 return 1; 602 } 603 604 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 605 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, 606 struct hwp_walk *hwp) 607 { 608 pmd_t pmd = *pmdp; 609 unsigned long pfn; 610 unsigned long hwpoison_vaddr; 611 612 if (!pmd_present(pmd)) 613 return 0; 614 pfn = pmd_pfn(pmd); 615 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) { 616 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT); 617 set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT); 618 return 1; 619 } 620 return 0; 621 } 622 #else 623 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, 624 struct hwp_walk *hwp) 625 { 626 return 0; 627 } 628 #endif 629 630 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, 631 unsigned long end, struct mm_walk *walk) 632 { 633 struct hwp_walk *hwp = (struct hwp_walk *)walk->private; 634 int ret = 0; 635 pte_t *ptep; 636 spinlock_t *ptl; 637 638 ptl = pmd_trans_huge_lock(pmdp, walk->vma); 639 if (ptl) { 640 ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp); 641 spin_unlock(ptl); 642 goto out; 643 } 644 645 if (pmd_trans_unstable(pmdp)) 646 goto out; 647 648 ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, addr, &ptl); 649 for (; addr != end; ptep++, addr += PAGE_SIZE) { 650 ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT, 651 hwp->pfn, &hwp->tk); 652 if (ret == 1) 653 break; 654 } 655 pte_unmap_unlock(ptep - 1, ptl); 656 out: 657 cond_resched(); 658 return ret; 659 } 660 661 #ifdef CONFIG_HUGETLB_PAGE 662 static int 
hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, 663 unsigned long addr, unsigned long end, 664 struct mm_walk *walk) 665 { 666 struct hwp_walk *hwp = (struct hwp_walk *)walk->private; 667 pte_t pte = huge_ptep_get(ptep); 668 struct hstate *h = hstate_vma(walk->vma); 669 670 return check_hwpoisoned_entry(pte, addr, huge_page_shift(h), 671 hwp->pfn, &hwp->tk); 672 } 673 #else 674 #define hwpoison_hugetlb_range NULL 675 #endif 676 677 static struct mm_walk_ops hwp_walk_ops = { 678 .pmd_entry = hwpoison_pte_range, 679 .hugetlb_entry = hwpoison_hugetlb_range, 680 }; 681 682 /* 683 * Sends SIGBUS to the current process with error info. 684 * 685 * This function is intended to handle "Action Required" MCEs on already 686 * hardware poisoned pages. They could happen, for example, when 687 * memory_failure() failed to unmap the error page at the first call, or 688 * when multiple local machine checks happened on different CPUs. 689 * 690 * MCE handler currently has no easy access to the error virtual address, 691 * so this function walks page table to find it. The returned virtual address 692 * is proper in most cases, but it could be wrong when the application 693 * process has multiple entries mapping the error page. 694 */ 695 static int kill_accessing_process(struct task_struct *p, unsigned long pfn, 696 int flags) 697 { 698 int ret; 699 struct hwp_walk priv = { 700 .pfn = pfn, 701 }; 702 priv.tk.tsk = p; 703 704 mmap_read_lock(p->mm); 705 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops, 706 (void *)&priv); 707 if (ret == 1 && priv.tk.addr) 708 kill_proc(&priv.tk, pfn, flags); 709 mmap_read_unlock(p->mm); 710 return ret ? -EFAULT : -EHWPOISON; 711 } 712 713 static const char *action_name[] = { 714 [MF_IGNORED] = "Ignored", 715 [MF_FAILED] = "Failed", 716 [MF_DELAYED] = "Delayed", 717 [MF_RECOVERED] = "Recovered", 718 }; 719 720 static const char * const action_page_types[] = { 721 [MF_MSG_KERNEL] = "reserved kernel page", 722 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page", 723 [MF_MSG_SLAB] = "kernel slab page", 724 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking", 725 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned", 726 [MF_MSG_HUGE] = "huge page", 727 [MF_MSG_FREE_HUGE] = "free huge page", 728 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page", 729 [MF_MSG_UNMAP_FAILED] = "unmapping failed page", 730 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page", 731 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page", 732 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page", 733 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page", 734 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page", 735 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page", 736 [MF_MSG_DIRTY_LRU] = "dirty LRU page", 737 [MF_MSG_CLEAN_LRU] = "clean LRU page", 738 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page", 739 [MF_MSG_BUDDY] = "free buddy page", 740 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)", 741 [MF_MSG_DAX] = "dax page", 742 [MF_MSG_UNSPLIT_THP] = "unsplit thp", 743 [MF_MSG_UNKNOWN] = "unknown page", 744 }; 745 746 /* 747 * XXX: It is possible that a page is isolated from LRU cache, 748 * and then kept in swap cache or failed to remove from page cache. 749 * The page count will stop it from being freed by unpoison. 750 * Stress tests should be aware of this memory leak problem. 
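 * Note that delete_from_lru_cache() below also uncharges the page from its
 * memcg, precisely because a poisoned page may never drop its refcount to
 * zero.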
751 */ 752 static int delete_from_lru_cache(struct page *p) 753 { 754 if (!isolate_lru_page(p)) { 755 /* 756 * Clear sensible page flags, so that the buddy system won't 757 * complain when the page is unpoison-and-freed. 758 */ 759 ClearPageActive(p); 760 ClearPageUnevictable(p); 761 762 /* 763 * Poisoned page might never drop its ref count to 0 so we have 764 * to uncharge it manually from its memcg. 765 */ 766 mem_cgroup_uncharge(p); 767 768 /* 769 * drop the page count elevated by isolate_lru_page() 770 */ 771 put_page(p); 772 return 0; 773 } 774 return -EIO; 775 } 776 777 static int truncate_error_page(struct page *p, unsigned long pfn, 778 struct address_space *mapping) 779 { 780 int ret = MF_FAILED; 781 782 if (mapping->a_ops->error_remove_page) { 783 int err = mapping->a_ops->error_remove_page(mapping, p); 784 785 if (err != 0) { 786 pr_info("Memory failure: %#lx: Failed to punch page: %d\n", 787 pfn, err); 788 } else if (page_has_private(p) && 789 !try_to_release_page(p, GFP_NOIO)) { 790 pr_info("Memory failure: %#lx: failed to release buffers\n", 791 pfn); 792 } else { 793 ret = MF_RECOVERED; 794 } 795 } else { 796 /* 797 * If the file system doesn't support it just invalidate 798 * This fails on dirty or anything with private pages 799 */ 800 if (invalidate_inode_page(p)) 801 ret = MF_RECOVERED; 802 else 803 pr_info("Memory failure: %#lx: Failed to invalidate\n", 804 pfn); 805 } 806 807 return ret; 808 } 809 810 /* 811 * Error hit kernel page. 812 * Do nothing, try to be lucky and not touch this instead. For a few cases we 813 * could be more sophisticated. 814 */ 815 static int me_kernel(struct page *p, unsigned long pfn) 816 { 817 unlock_page(p); 818 return MF_IGNORED; 819 } 820 821 /* 822 * Page in unknown state. Do nothing. 823 */ 824 static int me_unknown(struct page *p, unsigned long pfn) 825 { 826 pr_err("Memory failure: %#lx: Unknown page state\n", pfn); 827 unlock_page(p); 828 return MF_FAILED; 829 } 830 831 /* 832 * Clean (or cleaned) page cache page. 833 */ 834 static int me_pagecache_clean(struct page *p, unsigned long pfn) 835 { 836 int ret; 837 struct address_space *mapping; 838 839 delete_from_lru_cache(p); 840 841 /* 842 * For anonymous pages we're done the only reference left 843 * should be the one m_f() holds. 844 */ 845 if (PageAnon(p)) { 846 ret = MF_RECOVERED; 847 goto out; 848 } 849 850 /* 851 * Now truncate the page in the page cache. This is really 852 * more like a "temporary hole punch" 853 * Don't do this for block devices when someone else 854 * has a reference, because it could be file system metadata 855 * and that's not safe to truncate. 856 */ 857 mapping = page_mapping(p); 858 if (!mapping) { 859 /* 860 * Page has been teared down in the meanwhile 861 */ 862 ret = MF_FAILED; 863 goto out; 864 } 865 866 /* 867 * Truncation is a bit tricky. Enable it per file system for now. 868 * 869 * Open: to take i_mutex or not for this? Right now we don't. 870 */ 871 ret = truncate_error_page(p, pfn, mapping); 872 out: 873 unlock_page(p); 874 return ret; 875 } 876 877 /* 878 * Dirty pagecache page 879 * Issues: when the error hit a hole page the error is not properly 880 * propagated. 881 */ 882 static int me_pagecache_dirty(struct page *p, unsigned long pfn) 883 { 884 struct address_space *mapping = page_mapping(p); 885 886 SetPageError(p); 887 /* TBD: print more information about the file. */ 888 if (mapping) { 889 /* 890 * IO error will be reported by write(), fsync(), etc. 891 * who check the mapping. 
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will only be reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass an IO
		 * error: first through the AS_EIO flag in the address
		 * space and then through the PageError flag on the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism left to use is AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get the error on
		 * fsync, but does other operations on the fd before,
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page was freshly swapped in). So it could be
 * referenced concurrently by two types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	int ret;

	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
	unlock_page(p);
	return ret;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	int ret;

	delete_from_swap_cache(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
	unlock_page(p);
	return ret;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit).
 *   To narrow down the kill region to one page, we need to break up the pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res;
	struct page *hpage = compound_head(p);
	struct address_space *mapping;

	if (!PageHuge(hpage))
		return MF_DELAYED;

	mapping = page_mapping(hpage);
	if (mapping) {
		res = truncate_error_page(hpage, pfn, mapping);
		unlock_page(hpage);
	} else {
		res = MF_FAILED;
		unlock_page(hpage);
		/*
		 * The migration entry prevents later access to the error
		 * anonymous hugepage, so we can free and dissolve it into
		 * buddy to save healthy subpages.
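		 * (For anonymous hugepages the reference is dropped below,
		 * and __page_handle_poison() then tries to dissolve the
		 * hugepage so its healthy subpages can go back to the buddy
		 * allocator.)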
998 */ 999 if (PageAnon(hpage)) 1000 put_page(hpage); 1001 if (__page_handle_poison(p)) { 1002 page_ref_inc(p); 1003 res = MF_RECOVERED; 1004 } 1005 } 1006 1007 return res; 1008 } 1009 1010 /* 1011 * Various page states we can handle. 1012 * 1013 * A page state is defined by its current page->flags bits. 1014 * The table matches them in order and calls the right handler. 1015 * 1016 * This is quite tricky because we can access page at any time 1017 * in its live cycle, so all accesses have to be extremely careful. 1018 * 1019 * This is not complete. More states could be added. 1020 * For any missing state don't attempt recovery. 1021 */ 1022 1023 #define dirty (1UL << PG_dirty) 1024 #define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked)) 1025 #define unevict (1UL << PG_unevictable) 1026 #define mlock (1UL << PG_mlocked) 1027 #define lru (1UL << PG_lru) 1028 #define head (1UL << PG_head) 1029 #define slab (1UL << PG_slab) 1030 #define reserved (1UL << PG_reserved) 1031 1032 static struct page_state { 1033 unsigned long mask; 1034 unsigned long res; 1035 enum mf_action_page_type type; 1036 1037 /* Callback ->action() has to unlock the relevant page inside it. */ 1038 int (*action)(struct page *p, unsigned long pfn); 1039 } error_states[] = { 1040 { reserved, reserved, MF_MSG_KERNEL, me_kernel }, 1041 /* 1042 * free pages are specially detected outside this table: 1043 * PG_buddy pages only make a small fraction of all free pages. 1044 */ 1045 1046 /* 1047 * Could in theory check if slab page is free or if we can drop 1048 * currently unused objects without touching them. But just 1049 * treat it as standard kernel for now. 1050 */ 1051 { slab, slab, MF_MSG_SLAB, me_kernel }, 1052 1053 { head, head, MF_MSG_HUGE, me_huge_page }, 1054 1055 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty }, 1056 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean }, 1057 1058 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty }, 1059 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean }, 1060 1061 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty }, 1062 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean }, 1063 1064 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty }, 1065 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean }, 1066 1067 /* 1068 * Catchall entry: must be at end. 1069 */ 1070 { 0, 0, MF_MSG_UNKNOWN, me_unknown }, 1071 }; 1072 1073 #undef dirty 1074 #undef sc 1075 #undef unevict 1076 #undef mlock 1077 #undef lru 1078 #undef head 1079 #undef slab 1080 #undef reserved 1081 1082 /* 1083 * "Dirty/Clean" indication is not 100% accurate due to the possibility of 1084 * setting PG_dirty outside page lock. See also comment above set_page_dirty(). 1085 */ 1086 static void action_result(unsigned long pfn, enum mf_action_page_type type, 1087 enum mf_result result) 1088 { 1089 trace_memory_failure_event(pfn, type, result); 1090 1091 pr_err("Memory failure: %#lx: recovery action for %s: %s\n", 1092 pfn, action_page_types[type], action_name[result]); 1093 } 1094 1095 static int page_action(struct page_state *ps, struct page *p, 1096 unsigned long pfn) 1097 { 1098 int result; 1099 int count; 1100 1101 /* page p should be unlocked after returning from ps->action(). 
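	 * The dirty swapcache case intentionally leaves the page in the swap
	 * cache, which is why the refcount check below subtracts one extra
	 * reference for it.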
*/ 1102 result = ps->action(p, pfn); 1103 1104 count = page_count(p) - 1; 1105 if (ps->action == me_swapcache_dirty && result == MF_DELAYED) 1106 count--; 1107 if (count > 0) { 1108 pr_err("Memory failure: %#lx: %s still referenced by %d users\n", 1109 pfn, action_page_types[ps->type], count); 1110 result = MF_FAILED; 1111 } 1112 action_result(pfn, ps->type, result); 1113 1114 /* Could do more checks here if page looks ok */ 1115 /* 1116 * Could adjust zone counters here to correct for the missing page. 1117 */ 1118 1119 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY; 1120 } 1121 1122 /* 1123 * Return true if a page type of a given page is supported by hwpoison 1124 * mechanism (while handling could fail), otherwise false. This function 1125 * does not return true for hugetlb or device memory pages, so it's assumed 1126 * to be called only in the context where we never have such pages. 1127 */ 1128 static inline bool HWPoisonHandlable(struct page *page) 1129 { 1130 return PageLRU(page) || __PageMovable(page); 1131 } 1132 1133 static int __get_hwpoison_page(struct page *page) 1134 { 1135 struct page *head = compound_head(page); 1136 int ret = 0; 1137 bool hugetlb = false; 1138 1139 ret = get_hwpoison_huge_page(head, &hugetlb); 1140 if (hugetlb) 1141 return ret; 1142 1143 /* 1144 * This check prevents from calling get_hwpoison_unless_zero() 1145 * for any unsupported type of page in order to reduce the risk of 1146 * unexpected races caused by taking a page refcount. 1147 */ 1148 if (!HWPoisonHandlable(head)) 1149 return -EBUSY; 1150 1151 if (PageTransHuge(head)) { 1152 /* 1153 * Non anonymous thp exists only in allocation/free time. We 1154 * can't handle such a case correctly, so let's give it up. 1155 * This should be better than triggering BUG_ON when kernel 1156 * tries to touch the "partially handled" page. 1157 */ 1158 if (!PageAnon(head)) { 1159 pr_err("Memory failure: %#lx: non anonymous thp\n", 1160 page_to_pfn(page)); 1161 return 0; 1162 } 1163 } 1164 1165 if (get_page_unless_zero(head)) { 1166 if (head == compound_head(page)) 1167 return 1; 1168 1169 pr_info("Memory failure: %#lx cannot catch tail\n", 1170 page_to_pfn(page)); 1171 put_page(head); 1172 } 1173 1174 return 0; 1175 } 1176 1177 static int get_any_page(struct page *p, unsigned long flags) 1178 { 1179 int ret = 0, pass = 0; 1180 bool count_increased = false; 1181 1182 if (flags & MF_COUNT_INCREASED) 1183 count_increased = true; 1184 1185 try_again: 1186 if (!count_increased) { 1187 ret = __get_hwpoison_page(p); 1188 if (!ret) { 1189 if (page_count(p)) { 1190 /* We raced with an allocation, retry. */ 1191 if (pass++ < 3) 1192 goto try_again; 1193 ret = -EBUSY; 1194 } else if (!PageHuge(p) && !is_free_buddy_page(p)) { 1195 /* We raced with put_page, retry. */ 1196 if (pass++ < 3) 1197 goto try_again; 1198 ret = -EIO; 1199 } 1200 goto out; 1201 } else if (ret == -EBUSY) { 1202 /* 1203 * We raced with (possibly temporary) unhandlable 1204 * page, retry. 1205 */ 1206 if (pass++ < 3) { 1207 shake_page(p, 1); 1208 goto try_again; 1209 } 1210 ret = -EIO; 1211 goto out; 1212 } 1213 } 1214 1215 if (PageHuge(p) || HWPoisonHandlable(p)) { 1216 ret = 1; 1217 } else { 1218 /* 1219 * A page we cannot handle. Check whether we can turn 1220 * it into something we can handle. 
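		 * shake_page() drains the per-CPU LRU caches, which may move
		 * the page onto the LRU so that the retry below can succeed.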
		 */
		if (pass++ < 3) {
			put_page(p);
			shake_page(p, 1);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	return ret;
}

/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @p:		Raw error page (hit by memory error)
 * @flags:	Flags controlling behavior of error handling
 *
 * get_hwpoison_page() takes a refcount on an error page in order to handle
 * the memory error on it, after checking that the error page is in a
 * well-defined state (defined as a page type for which we can successfully
 * handle the memory error, such as an LRU page or a hugetlb page).
 *
 * Memory error handling could be triggered at any time on any type of page,
 * so it's prone to race with the typical memory management lifecycle (like
 * allocation and free). To avoid such races, get_hwpoison_page() takes
 * extra care with the error page's state (as done in __get_hwpoison_page()),
 * and has some retry logic in get_any_page().
 *
 * Return: 0 on failure,
 *         1 on success for in-use pages in a well-defined state,
 *         -EIO for pages on which we cannot handle memory errors,
 *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
 *         operations like allocation and free.
 */
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	zone_pcp_disable(page_zone(p));
	ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
				   int flags, struct page **hpagep)
{
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	bool unmap_success;
	int kill = 1, forcekill;
	struct page *hpage = *hpagep;
	bool mlocked = PageMlocked(hpage);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return true;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return true;

	if (PageKsm(p)) {
		pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
		return false;
	}

	if (PageSwapCache(p)) {
		pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
			pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
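	 * If page_mkclean() below finds a dirty pte we mark the page dirty
	 * and keep the kill; otherwise the page is simply dropped and no
	 * SIGBUS is sent for it.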
1315 */ 1316 mapping = page_mapping(hpage); 1317 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && 1318 mapping_can_writeback(mapping)) { 1319 if (page_mkclean(hpage)) { 1320 SetPageDirty(hpage); 1321 } else { 1322 kill = 0; 1323 ttu |= TTU_IGNORE_HWPOISON; 1324 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n", 1325 pfn); 1326 } 1327 } 1328 1329 /* 1330 * First collect all the processes that have the page 1331 * mapped in dirty form. This has to be done before try_to_unmap, 1332 * because ttu takes the rmap data structures down. 1333 * 1334 * Error handling: We ignore errors here because 1335 * there's nothing that can be done. 1336 */ 1337 if (kill) 1338 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); 1339 1340 if (!PageHuge(hpage)) { 1341 try_to_unmap(hpage, ttu); 1342 } else { 1343 if (!PageAnon(hpage)) { 1344 /* 1345 * For hugetlb pages in shared mappings, try_to_unmap 1346 * could potentially call huge_pmd_unshare. Because of 1347 * this, take semaphore in write mode here and set 1348 * TTU_RMAP_LOCKED to indicate we have taken the lock 1349 * at this higher level. 1350 */ 1351 mapping = hugetlb_page_mapping_lock_write(hpage); 1352 if (mapping) { 1353 try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED); 1354 i_mmap_unlock_write(mapping); 1355 } else 1356 pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn); 1357 } else { 1358 try_to_unmap(hpage, ttu); 1359 } 1360 } 1361 1362 unmap_success = !page_mapped(hpage); 1363 if (!unmap_success) 1364 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n", 1365 pfn, page_mapcount(hpage)); 1366 1367 /* 1368 * try_to_unmap() might put mlocked page in lru cache, so call 1369 * shake_page() again to ensure that it's flushed. 1370 */ 1371 if (mlocked) 1372 shake_page(hpage, 0); 1373 1374 /* 1375 * Now that the dirty bit has been propagated to the 1376 * struct page and all unmaps done we can decide if 1377 * killing is needed or not. Only kill when the page 1378 * was dirty or the process is not restartable, 1379 * otherwise the tokill list is merely 1380 * freed. When there was a problem unmapping earlier 1381 * use a more force-full uncatchable kill to prevent 1382 * any accesses to the poisoned memory. 1383 */ 1384 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL); 1385 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags); 1386 1387 return unmap_success; 1388 } 1389 1390 static int identify_page_state(unsigned long pfn, struct page *p, 1391 unsigned long page_flags) 1392 { 1393 struct page_state *ps; 1394 1395 /* 1396 * The first check uses the current page flags which may not have any 1397 * relevant information. The second check with the saved page flags is 1398 * carried out only if the first check can't determine the page status. 
1399 */ 1400 for (ps = error_states;; ps++) 1401 if ((p->flags & ps->mask) == ps->res) 1402 break; 1403 1404 page_flags |= (p->flags & (1UL << PG_dirty)); 1405 1406 if (!ps->mask) 1407 for (ps = error_states;; ps++) 1408 if ((page_flags & ps->mask) == ps->res) 1409 break; 1410 return page_action(ps, p, pfn); 1411 } 1412 1413 static int try_to_split_thp_page(struct page *page, const char *msg) 1414 { 1415 lock_page(page); 1416 if (!PageAnon(page) || unlikely(split_huge_page(page))) { 1417 unsigned long pfn = page_to_pfn(page); 1418 1419 unlock_page(page); 1420 if (!PageAnon(page)) 1421 pr_info("%s: %#lx: non anonymous thp\n", msg, pfn); 1422 else 1423 pr_info("%s: %#lx: thp split failed\n", msg, pfn); 1424 put_page(page); 1425 return -EBUSY; 1426 } 1427 unlock_page(page); 1428 1429 return 0; 1430 } 1431 1432 static int memory_failure_hugetlb(unsigned long pfn, int flags) 1433 { 1434 struct page *p = pfn_to_page(pfn); 1435 struct page *head = compound_head(p); 1436 int res; 1437 unsigned long page_flags; 1438 1439 if (TestSetPageHWPoison(head)) { 1440 pr_err("Memory failure: %#lx: already hardware poisoned\n", 1441 pfn); 1442 res = -EHWPOISON; 1443 if (flags & MF_ACTION_REQUIRED) 1444 res = kill_accessing_process(current, page_to_pfn(head), flags); 1445 return res; 1446 } 1447 1448 num_poisoned_pages_inc(); 1449 1450 if (!(flags & MF_COUNT_INCREASED)) { 1451 res = get_hwpoison_page(p, flags); 1452 if (!res) { 1453 /* 1454 * Check "filter hit" and "race with other subpage." 1455 */ 1456 lock_page(head); 1457 if (PageHWPoison(head)) { 1458 if ((hwpoison_filter(p) && TestClearPageHWPoison(p)) 1459 || (p != head && TestSetPageHWPoison(head))) { 1460 num_poisoned_pages_dec(); 1461 unlock_page(head); 1462 return 0; 1463 } 1464 } 1465 unlock_page(head); 1466 res = MF_FAILED; 1467 if (__page_handle_poison(p)) { 1468 page_ref_inc(p); 1469 res = MF_RECOVERED; 1470 } 1471 action_result(pfn, MF_MSG_FREE_HUGE, res); 1472 return res == MF_RECOVERED ? 0 : -EBUSY; 1473 } else if (res < 0) { 1474 action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); 1475 return -EBUSY; 1476 } 1477 } 1478 1479 lock_page(head); 1480 page_flags = head->flags; 1481 1482 if (!PageHWPoison(head)) { 1483 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn); 1484 num_poisoned_pages_dec(); 1485 unlock_page(head); 1486 put_page(head); 1487 return 0; 1488 } 1489 1490 /* 1491 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so 1492 * simply disable it. In order to make it work properly, we need 1493 * make sure that: 1494 * - conversion of a pud that maps an error hugetlb into hwpoison 1495 * entry properly works, and 1496 * - other mm code walking over page table is aware of pud-aligned 1497 * hwpoison entries. 
1498 */ 1499 if (huge_page_size(page_hstate(head)) > PMD_SIZE) { 1500 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED); 1501 res = -EBUSY; 1502 goto out; 1503 } 1504 1505 if (!hwpoison_user_mappings(p, pfn, flags, &head)) { 1506 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); 1507 res = -EBUSY; 1508 goto out; 1509 } 1510 1511 return identify_page_state(pfn, p, page_flags); 1512 out: 1513 unlock_page(head); 1514 return res; 1515 } 1516 1517 static int memory_failure_dev_pagemap(unsigned long pfn, int flags, 1518 struct dev_pagemap *pgmap) 1519 { 1520 struct page *page = pfn_to_page(pfn); 1521 const bool unmap_success = true; 1522 unsigned long size = 0; 1523 struct to_kill *tk; 1524 LIST_HEAD(tokill); 1525 int rc = -EBUSY; 1526 loff_t start; 1527 dax_entry_t cookie; 1528 1529 if (flags & MF_COUNT_INCREASED) 1530 /* 1531 * Drop the extra refcount in case we come from madvise(). 1532 */ 1533 put_page(page); 1534 1535 /* device metadata space is not recoverable */ 1536 if (!pgmap_pfn_valid(pgmap, pfn)) { 1537 rc = -ENXIO; 1538 goto out; 1539 } 1540 1541 /* 1542 * Prevent the inode from being freed while we are interrogating 1543 * the address_space, typically this would be handled by 1544 * lock_page(), but dax pages do not use the page lock. This 1545 * also prevents changes to the mapping of this pfn until 1546 * poison signaling is complete. 1547 */ 1548 cookie = dax_lock_page(page); 1549 if (!cookie) 1550 goto out; 1551 1552 if (hwpoison_filter(page)) { 1553 rc = 0; 1554 goto unlock; 1555 } 1556 1557 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { 1558 /* 1559 * TODO: Handle HMM pages which may need coordination 1560 * with device-side memory. 1561 */ 1562 goto unlock; 1563 } 1564 1565 /* 1566 * Use this flag as an indication that the dax page has been 1567 * remapped UC to prevent speculative consumption of poison. 1568 */ 1569 SetPageHWPoison(page); 1570 1571 /* 1572 * Unlike System-RAM there is no possibility to swap in a 1573 * different physical page at a given virtual address, so all 1574 * userspace consumption of ZONE_DEVICE memory necessitates 1575 * SIGBUS (i.e. MF_MUST_KILL) 1576 */ 1577 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; 1578 collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED); 1579 1580 list_for_each_entry(tk, &tokill, nd) 1581 if (tk->size_shift) 1582 size = max(size, 1UL << tk->size_shift); 1583 if (size) { 1584 /* 1585 * Unmap the largest mapping to avoid breaking up 1586 * device-dax mappings which are constant size. The 1587 * actual size of the mapping being torn down is 1588 * communicated in siginfo, see kill_proc() 1589 */ 1590 start = (page->index << PAGE_SHIFT) & ~(size - 1); 1591 unmap_mapping_range(page->mapping, start, size, 0); 1592 } 1593 kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags); 1594 rc = 0; 1595 unlock: 1596 dax_unlock_page(page, cookie); 1597 out: 1598 /* drop pgmap ref acquired in caller */ 1599 put_dev_pagemap(pgmap); 1600 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED); 1601 return rc; 1602 } 1603 1604 /** 1605 * memory_failure - Handle memory failure of a page. 1606 * @pfn: Page Number of the corrupted page 1607 * @flags: fine tune action taken 1608 * 1609 * This function is called by the low level machine check code 1610 * of an architecture when it detects hardware memory corruption 1611 * of a page. It tries its best to recover, which includes 1612 * dropping pages, killing processes etc. 
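 *
 * Returns 0 for successfully handled pages, otherwise a negative error
 * code (for example -EHWPOISON if the page was already poisoned, or
 * -EBUSY if recovery failed).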
1613 * 1614 * The function is primarily of use for corruptions that 1615 * happen outside the current execution context (e.g. when 1616 * detected by a background scrubber) 1617 * 1618 * Must run in process context (e.g. a work queue) with interrupts 1619 * enabled and no spinlocks hold. 1620 */ 1621 int memory_failure(unsigned long pfn, int flags) 1622 { 1623 struct page *p; 1624 struct page *hpage; 1625 struct page *orig_head; 1626 struct dev_pagemap *pgmap; 1627 int res = 0; 1628 unsigned long page_flags; 1629 bool retry = true; 1630 static DEFINE_MUTEX(mf_mutex); 1631 1632 if (!sysctl_memory_failure_recovery) 1633 panic("Memory failure on page %lx", pfn); 1634 1635 p = pfn_to_online_page(pfn); 1636 if (!p) { 1637 if (pfn_valid(pfn)) { 1638 pgmap = get_dev_pagemap(pfn, NULL); 1639 if (pgmap) 1640 return memory_failure_dev_pagemap(pfn, flags, 1641 pgmap); 1642 } 1643 pr_err("Memory failure: %#lx: memory outside kernel control\n", 1644 pfn); 1645 return -ENXIO; 1646 } 1647 1648 mutex_lock(&mf_mutex); 1649 1650 try_again: 1651 if (PageHuge(p)) { 1652 res = memory_failure_hugetlb(pfn, flags); 1653 goto unlock_mutex; 1654 } 1655 1656 if (TestSetPageHWPoison(p)) { 1657 pr_err("Memory failure: %#lx: already hardware poisoned\n", 1658 pfn); 1659 res = -EHWPOISON; 1660 if (flags & MF_ACTION_REQUIRED) 1661 res = kill_accessing_process(current, pfn, flags); 1662 goto unlock_mutex; 1663 } 1664 1665 orig_head = hpage = compound_head(p); 1666 num_poisoned_pages_inc(); 1667 1668 /* 1669 * We need/can do nothing about count=0 pages. 1670 * 1) it's a free page, and therefore in safe hand: 1671 * prep_new_page() will be the gate keeper. 1672 * 2) it's part of a non-compound high order page. 1673 * Implies some kernel user: cannot stop them from 1674 * R/W the page; let's pray that the page has been 1675 * used and will be freed some time later. 1676 * In fact it's dangerous to directly bump up page count from 0, 1677 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch. 1678 */ 1679 if (!(flags & MF_COUNT_INCREASED)) { 1680 res = get_hwpoison_page(p, flags); 1681 if (!res) { 1682 if (is_free_buddy_page(p)) { 1683 if (take_page_off_buddy(p)) { 1684 page_ref_inc(p); 1685 res = MF_RECOVERED; 1686 } else { 1687 /* We lost the race, try again */ 1688 if (retry) { 1689 ClearPageHWPoison(p); 1690 num_poisoned_pages_dec(); 1691 retry = false; 1692 goto try_again; 1693 } 1694 res = MF_FAILED; 1695 } 1696 action_result(pfn, MF_MSG_BUDDY, res); 1697 res = res == MF_RECOVERED ? 0 : -EBUSY; 1698 } else { 1699 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED); 1700 res = -EBUSY; 1701 } 1702 goto unlock_mutex; 1703 } else if (res < 0) { 1704 action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); 1705 res = -EBUSY; 1706 goto unlock_mutex; 1707 } 1708 } 1709 1710 if (PageTransHuge(hpage)) { 1711 if (try_to_split_thp_page(p, "Memory Failure") < 0) { 1712 action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED); 1713 res = -EBUSY; 1714 goto unlock_mutex; 1715 } 1716 VM_BUG_ON_PAGE(!page_count(p), p); 1717 } 1718 1719 /* 1720 * We ignore non-LRU pages for good reasons. 1721 * - PG_locked is only well defined for LRU pages and a few others 1722 * - to avoid races with __SetPageLocked() 1723 * - to avoid races with __SetPageSlab*() (and more non-atomic ops) 1724 * The check (unnecessarily) ignores LRU pages being isolated and 1725 * walked by the page reclaim code, however that's not a big loss. 
1726 */ 1727 shake_page(p, 0); 1728 1729 lock_page(p); 1730 1731 /* 1732 * The page could have changed compound pages during the locking. 1733 * If this happens just bail out. 1734 */ 1735 if (PageCompound(p) && compound_head(p) != orig_head) { 1736 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED); 1737 res = -EBUSY; 1738 goto unlock_page; 1739 } 1740 1741 /* 1742 * We use page flags to determine what action should be taken, but 1743 * the flags can be modified by the error containment action. One 1744 * example is an mlocked page, where PG_mlocked is cleared by 1745 * page_remove_rmap() in try_to_unmap_one(). So to determine page status 1746 * correctly, we save a copy of the page flags at this time. 1747 */ 1748 page_flags = p->flags; 1749 1750 /* 1751 * unpoison always clear PG_hwpoison inside page lock 1752 */ 1753 if (!PageHWPoison(p)) { 1754 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn); 1755 num_poisoned_pages_dec(); 1756 unlock_page(p); 1757 put_page(p); 1758 goto unlock_mutex; 1759 } 1760 if (hwpoison_filter(p)) { 1761 if (TestClearPageHWPoison(p)) 1762 num_poisoned_pages_dec(); 1763 unlock_page(p); 1764 put_page(p); 1765 goto unlock_mutex; 1766 } 1767 1768 /* 1769 * __munlock_pagevec may clear a writeback page's LRU flag without 1770 * page_lock. We need wait writeback completion for this page or it 1771 * may trigger vfs BUG while evict inode. 1772 */ 1773 if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p)) 1774 goto identify_page_state; 1775 1776 /* 1777 * It's very difficult to mess with pages currently under IO 1778 * and in many cases impossible, so we just avoid it here. 1779 */ 1780 wait_on_page_writeback(p); 1781 1782 /* 1783 * Now take care of user space mappings. 1784 * Abort on fail: __delete_from_page_cache() assumes unmapped page. 1785 */ 1786 if (!hwpoison_user_mappings(p, pfn, flags, &p)) { 1787 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); 1788 res = -EBUSY; 1789 goto unlock_page; 1790 } 1791 1792 /* 1793 * Torn down by someone else? 1794 */ 1795 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { 1796 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED); 1797 res = -EBUSY; 1798 goto unlock_page; 1799 } 1800 1801 identify_page_state: 1802 res = identify_page_state(pfn, p, page_flags); 1803 mutex_unlock(&mf_mutex); 1804 return res; 1805 unlock_page: 1806 unlock_page(p); 1807 unlock_mutex: 1808 mutex_unlock(&mf_mutex); 1809 return res; 1810 } 1811 EXPORT_SYMBOL_GPL(memory_failure); 1812 1813 #define MEMORY_FAILURE_FIFO_ORDER 4 1814 #define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER) 1815 1816 struct memory_failure_entry { 1817 unsigned long pfn; 1818 int flags; 1819 }; 1820 1821 struct memory_failure_cpu { 1822 DECLARE_KFIFO(fifo, struct memory_failure_entry, 1823 MEMORY_FAILURE_FIFO_SIZE); 1824 spinlock_t lock; 1825 struct work_struct work; 1826 }; 1827 1828 static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu); 1829 1830 /** 1831 * memory_failure_queue - Schedule handling memory failure of a page. 1832 * @pfn: Page Number of the corrupted page 1833 * @flags: Flags for memory failure handling 1834 * 1835 * This function is called by the low level hardware error handler 1836 * when it detects hardware memory corruption of a page. It schedules 1837 * the recovering of error page, including dropping pages, killing 1838 * processes etc. 1839 * 1840 * The function is primarily of use for corruptions that 1841 * happen outside the current execution context (e.g. 
when 1842 * detected by a background scrubber) 1843 * 1844 * Can run in IRQ context. 1845 */ 1846 void memory_failure_queue(unsigned long pfn, int flags) 1847 { 1848 struct memory_failure_cpu *mf_cpu; 1849 unsigned long proc_flags; 1850 struct memory_failure_entry entry = { 1851 .pfn = pfn, 1852 .flags = flags, 1853 }; 1854 1855 mf_cpu = &get_cpu_var(memory_failure_cpu); 1856 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 1857 if (kfifo_put(&mf_cpu->fifo, entry)) 1858 schedule_work_on(smp_processor_id(), &mf_cpu->work); 1859 else 1860 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n", 1861 pfn); 1862 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 1863 put_cpu_var(memory_failure_cpu); 1864 } 1865 EXPORT_SYMBOL_GPL(memory_failure_queue); 1866 1867 static void memory_failure_work_func(struct work_struct *work) 1868 { 1869 struct memory_failure_cpu *mf_cpu; 1870 struct memory_failure_entry entry = { 0, }; 1871 unsigned long proc_flags; 1872 int gotten; 1873 1874 mf_cpu = container_of(work, struct memory_failure_cpu, work); 1875 for (;;) { 1876 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 1877 gotten = kfifo_get(&mf_cpu->fifo, &entry); 1878 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 1879 if (!gotten) 1880 break; 1881 if (entry.flags & MF_SOFT_OFFLINE) 1882 soft_offline_page(entry.pfn, entry.flags); 1883 else 1884 memory_failure(entry.pfn, entry.flags); 1885 } 1886 } 1887 1888 /* 1889 * Process memory_failure work queued on the specified CPU. 1890 * Used to avoid return-to-userspace racing with the memory_failure workqueue. 1891 */ 1892 void memory_failure_queue_kick(int cpu) 1893 { 1894 struct memory_failure_cpu *mf_cpu; 1895 1896 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 1897 cancel_work_sync(&mf_cpu->work); 1898 memory_failure_work_func(&mf_cpu->work); 1899 } 1900 1901 static int __init memory_failure_init(void) 1902 { 1903 struct memory_failure_cpu *mf_cpu; 1904 int cpu; 1905 1906 for_each_possible_cpu(cpu) { 1907 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 1908 spin_lock_init(&mf_cpu->lock); 1909 INIT_KFIFO(mf_cpu->fifo); 1910 INIT_WORK(&mf_cpu->work, memory_failure_work_func); 1911 } 1912 1913 return 0; 1914 } 1915 core_initcall(memory_failure_init); 1916 1917 #define unpoison_pr_info(fmt, pfn, rs) \ 1918 ({ \ 1919 if (__ratelimit(rs)) \ 1920 pr_info(fmt, pfn); \ 1921 }) 1922 1923 /** 1924 * unpoison_memory - Unpoison a previously poisoned page 1925 * @pfn: Page number of the to be unpoisoned page 1926 * 1927 * Software-unpoison a page that has been poisoned by 1928 * memory_failure() earlier. 1929 * 1930 * This is only done on the software-level, so it only works 1931 * for linux injected failures, not real hardware failures 1932 * 1933 * Returns 0 for success, otherwise -errno. 
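 *
 * Unpoisoning is refused (with a ratelimited message) when the page is
 * still referenced, mapped, or has a mapping; see the checks below.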
/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the page to be unpoisoned
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned long flags = 0;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	/*
	 * unpoison_memory() can encounter a thp only when the thp is being
	 * handled by memory_failure() and the page lock is not held yet.
	 * In that case, we yield to memory_failure() and make unpoison fail.
	 */
	if (!PageHuge(page) && PageTransHuge(page)) {
		unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (!get_hwpoison_page(p, flags)) {
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	lock_page(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of the page
	 * lock. That's acceptable because it won't trigger a kernel panic.
	 * Instead, the PG_hwpoison page will be caught and isolated on the
	 * entrance to the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 pfn, &unpoison_rs);
		num_poisoned_pages_dec();
		freeit = 1;
	}
	unlock_page(page);

	put_page(page);
	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);

static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;
	bool lru = PageLRU(page);

	if (PageHuge(page)) {
		isolated = isolate_huge_page(page, pagelist);
	} else {
		if (lru)
			isolated = !isolate_lru_page(page);
		else
			isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);

		if (isolated)
			list_add(&page->lru, pagelist);
	}

	if (isolated && lru)
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_lru(page));

	/*
	 * If we succeeded in isolating the page, we grabbed another refcount
	 * on the page, so we can safely drop the one we got from
	 * get_any_pages(). If we failed to isolate the page, it means that
	 * we cannot go further and we will return an error, so drop the
	 * reference we got from get_any_pages() as well.
	 */
	put_page(page);
	return isolated;
}
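/*
 * Overview of the soft offline path below: soft_offline_page() grabs a
 * reference to the target page and, if the page is currently in use, goes
 * through soft_offline_in_use_page() (splitting a THP first if needed)
 * into __soft_offline_page(), which either invalidates a clean page cache
 * page or migrates the contents away; free pages are handled by
 * soft_offline_free_page(), which marks them and takes them off the free
 * lists via page_handle_poison().
 */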
/*
 * __soft_offline_page handles hugetlb pages and non-hugetlb pages.
 * If the page is a non-dirty, unmapped page-cache page, it simply
 * invalidates it. If the page is mapped, it migrates the contents over.
 */
static int __soft_offline_page(struct page *page)
{
	int ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = PageHuge(page);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	/*
	 * Check PageHWPoison again inside the page lock because PageHWPoison
	 * is set by memory_failure() outside the page lock. Note that
	 * memory_failure() also double-checks PageHWPoison inside the page
	 * lock, so there's no race between soft_offline_page() and
	 * memory_failure().
	 */
	lock_page(page);
	if (!PageHuge(page))
		wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!PageHuge(page))
		/*
		 * Try to invalidate first. This should work for
		 * non-dirty unmapped page cache pages.
		 */
		ret = invalidate_inode_page(page);
	unlock_page(page);

	/*
	 * RED-PEN: it would be better to keep it isolated here, but we
	 * would need to fix the isolation locking first.
	 */
	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (isolate_page(hpage, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n",
				pfn, msg_page[huge], ret, page->flags, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %lx (%pGp)\n",
			pfn, msg_page[huge], page_count(page), page->flags, &page->flags);
		ret = -EBUSY;
	}
	return ret;
}

static int soft_offline_in_use_page(struct page *page)
{
	struct page *hpage = compound_head(page);

	if (!PageHuge(page) && PageTransHuge(hpage))
		if (try_to_split_thp_page(page, "soft offline") < 0)
			return -EBUSY;
	return __soft_offline_page(page);
}

static int soft_offline_free_page(struct page *page)
{
	int rc = 0;

	if (!page_handle_poison(page, true, false))
		rc = -EBUSY;

	return rc;
}

static void put_ref_page(struct page *page)
{
	if (page)
		put_page(page);
}
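/*
 * soft_offline_page() below is the common entry point: it can be called
 * directly, or deferred from (possibly IRQ) context by queueing the pfn
 * with memory_failure_queue(pfn, MF_SOFT_OFFLINE), in which case
 * memory_failure_work_func() above ends up calling it from process
 * context.
 */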
/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss;
 * however, it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page, *ref_page = NULL;

	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));

	if (!pfn_valid(pfn))
		return -ENXIO;
	if (flags & MF_COUNT_INCREASED)
		ref_page = pfn_to_page(pfn);

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(ref_page);
		return -EIO;
	}

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(ref_page);
		return 0;
	}

retry:
	get_online_mems();
	ret = get_hwpoison_page(page, flags);
	put_online_mems();

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (soft_offline_free_page(page) && try_again) {
			try_again = false;
			goto retry;
		}
	} else if (ret == -EIO) {
		pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
			__func__, pfn, page->flags, &page->flags);
	}

	return ret;
}
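/*
 * Illustrative only: one user space path into soft_offline_page() is
 * madvise(MADV_SOFT_OFFLINE), available to privileged tasks on kernels
 * built with CONFIG_MEMORY_FAILURE, roughly:
 *
 *	if (madvise(addr, getpagesize(), MADV_SOFT_OFFLINE))
 *		perror("madvise(MADV_SOFT_OFFLINE)");
 *
 * The kernel resolves addr to a pfn and ends up in soft_offline_page()
 * without killing the calling task.
 */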