// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows taking
 * suspicious (but not yet corrupted) pages out of use without killing
 * anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/mm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */

#define pr_fmt(fmt) "Memory failure: " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/sysctl.h>
#include "swap.h"
#include "internal.h"
#include "ras/ras_event.h"

static int sysctl_memory_failure_early_kill __read_mostly;

static int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

static bool hw_memory_failure __read_mostly = false;

inline void num_poisoned_pages_inc(unsigned long pfn)
{
	atomic_long_inc(&num_poisoned_pages);
	memblk_nr_poison_inc(pfn);
}

inline void num_poisoned_pages_sub(unsigned long pfn, long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
	if (pfn != -1UL)
		memblk_nr_poison_sub(pfn, i);
}

/**
 * MF_ATTR_RO - Create a sysfs entry for each memory failure statistic.
 * @_name: name of the file in the per NUMA sysfs directory.
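 *
 * For example, MF_ATTR_RO(total) below expands (roughly) to:
 *
 *	static ssize_t total_show(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%lu\n", NODE_DATA(dev->id)->mf_stats.total);
 *	}
 *	static DEVICE_ATTR_RO(total);
 *
 * and the resulting file is exposed through memory_failure_attr_group,
 * typically as /sys/devices/system/node/node<nid>/memory_failure/total.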
94 */ 95 #define MF_ATTR_RO(_name) \ 96 static ssize_t _name##_show(struct device *dev, \ 97 struct device_attribute *attr, \ 98 char *buf) \ 99 { \ 100 struct memory_failure_stats *mf_stats = \ 101 &NODE_DATA(dev->id)->mf_stats; \ 102 return sprintf(buf, "%lu\n", mf_stats->_name); \ 103 } \ 104 static DEVICE_ATTR_RO(_name) 105 106 MF_ATTR_RO(total); 107 MF_ATTR_RO(ignored); 108 MF_ATTR_RO(failed); 109 MF_ATTR_RO(delayed); 110 MF_ATTR_RO(recovered); 111 112 static struct attribute *memory_failure_attr[] = { 113 &dev_attr_total.attr, 114 &dev_attr_ignored.attr, 115 &dev_attr_failed.attr, 116 &dev_attr_delayed.attr, 117 &dev_attr_recovered.attr, 118 NULL, 119 }; 120 121 const struct attribute_group memory_failure_attr_group = { 122 .name = "memory_failure", 123 .attrs = memory_failure_attr, 124 }; 125 126 static struct ctl_table memory_failure_table[] = { 127 { 128 .procname = "memory_failure_early_kill", 129 .data = &sysctl_memory_failure_early_kill, 130 .maxlen = sizeof(sysctl_memory_failure_early_kill), 131 .mode = 0644, 132 .proc_handler = proc_dointvec_minmax, 133 .extra1 = SYSCTL_ZERO, 134 .extra2 = SYSCTL_ONE, 135 }, 136 { 137 .procname = "memory_failure_recovery", 138 .data = &sysctl_memory_failure_recovery, 139 .maxlen = sizeof(sysctl_memory_failure_recovery), 140 .mode = 0644, 141 .proc_handler = proc_dointvec_minmax, 142 .extra1 = SYSCTL_ZERO, 143 .extra2 = SYSCTL_ONE, 144 }, 145 { } 146 }; 147 148 /* 149 * Return values: 150 * 1: the page is dissolved (if needed) and taken off from buddy, 151 * 0: the page is dissolved (if needed) and not taken off from buddy, 152 * < 0: failed to dissolve. 153 */ 154 static int __page_handle_poison(struct page *page) 155 { 156 int ret; 157 158 zone_pcp_disable(page_zone(page)); 159 ret = dissolve_free_huge_page(page); 160 if (!ret) 161 ret = take_page_off_buddy(page); 162 zone_pcp_enable(page_zone(page)); 163 164 return ret; 165 } 166 167 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release) 168 { 169 if (hugepage_or_freepage) { 170 /* 171 * Doing this check for free pages is also fine since dissolve_free_huge_page 172 * returns 0 for non-hugetlb pages as well. 173 */ 174 if (__page_handle_poison(page) <= 0) 175 /* 176 * We could fail to take off the target page from buddy 177 * for example due to racy page allocation, but that's 178 * acceptable because soft-offlined page is not broken 179 * and if someone really want to use it, they should 180 * take it. 
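 *
 * A rough caller-side sketch (illustrative only, not the exact
 * soft-offline code used later in this file): the caller can simply
 * report the page as busy and let the operation be retried, e.g.
 *
 *	if (!page_handle_poison(page, true, false))
 *		return -EBUSY;	/* lost the race, caller may retry */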
181 */ 182 return false; 183 } 184 185 SetPageHWPoison(page); 186 if (release) 187 put_page(page); 188 page_ref_inc(page); 189 num_poisoned_pages_inc(page_to_pfn(page)); 190 191 return true; 192 } 193 194 #if IS_ENABLED(CONFIG_HWPOISON_INJECT) 195 196 u32 hwpoison_filter_enable = 0; 197 u32 hwpoison_filter_dev_major = ~0U; 198 u32 hwpoison_filter_dev_minor = ~0U; 199 u64 hwpoison_filter_flags_mask; 200 u64 hwpoison_filter_flags_value; 201 EXPORT_SYMBOL_GPL(hwpoison_filter_enable); 202 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); 203 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); 204 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); 205 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); 206 207 static int hwpoison_filter_dev(struct page *p) 208 { 209 struct address_space *mapping; 210 dev_t dev; 211 212 if (hwpoison_filter_dev_major == ~0U && 213 hwpoison_filter_dev_minor == ~0U) 214 return 0; 215 216 mapping = page_mapping(p); 217 if (mapping == NULL || mapping->host == NULL) 218 return -EINVAL; 219 220 dev = mapping->host->i_sb->s_dev; 221 if (hwpoison_filter_dev_major != ~0U && 222 hwpoison_filter_dev_major != MAJOR(dev)) 223 return -EINVAL; 224 if (hwpoison_filter_dev_minor != ~0U && 225 hwpoison_filter_dev_minor != MINOR(dev)) 226 return -EINVAL; 227 228 return 0; 229 } 230 231 static int hwpoison_filter_flags(struct page *p) 232 { 233 if (!hwpoison_filter_flags_mask) 234 return 0; 235 236 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == 237 hwpoison_filter_flags_value) 238 return 0; 239 else 240 return -EINVAL; 241 } 242 243 /* 244 * This allows stress tests to limit test scope to a collection of tasks 245 * by putting them under some memcg. This prevents killing unrelated/important 246 * processes such as /sbin/init. Note that the target task may share clean 247 * pages with init (eg. libc text), which is harmless. If the target task 248 * share _dirty_ pages with another task B, the test scheme must make sure B 249 * is also included in the memcg. At last, due to race conditions this filter 250 * can only guarantee that the page either belongs to the memcg tasks, or is 251 * a freed page. 252 */ 253 #ifdef CONFIG_MEMCG 254 u64 hwpoison_filter_memcg; 255 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); 256 static int hwpoison_filter_task(struct page *p) 257 { 258 if (!hwpoison_filter_memcg) 259 return 0; 260 261 if (page_cgroup_ino(p) != hwpoison_filter_memcg) 262 return -EINVAL; 263 264 return 0; 265 } 266 #else 267 static int hwpoison_filter_task(struct page *p) { return 0; } 268 #endif 269 270 int hwpoison_filter(struct page *p) 271 { 272 if (!hwpoison_filter_enable) 273 return 0; 274 275 if (hwpoison_filter_dev(p)) 276 return -EINVAL; 277 278 if (hwpoison_filter_flags(p)) 279 return -EINVAL; 280 281 if (hwpoison_filter_task(p)) 282 return -EINVAL; 283 284 return 0; 285 } 286 #else 287 int hwpoison_filter(struct page *p) 288 { 289 return 0; 290 } 291 #endif 292 293 EXPORT_SYMBOL_GPL(hwpoison_filter); 294 295 /* 296 * Kill all processes that have a poisoned page mapped and then isolate 297 * the page. 298 * 299 * General strategy: 300 * Find all processes having the page mapped and kill them. 301 * But we keep a page reference around so that the page is not 302 * actually freed yet. 303 * Then stash the page away 304 * 305 * There's no convenient way to get back to mapped processes 306 * from the VMAs. So do a brute-force search over all 307 * running processes. 
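 *
 * In outline (a simplified sketch of what collect_procs_anon() and
 * collect_procs_file() below actually do), the brute-force pass is:
 *
 *	read_lock(&tasklist_lock);
 *	for_each_process(tsk) {
 *		if (!task_early_kill(tsk, force_early))
 *			continue;
 *		for each VMA of tsk->mm that maps the page
 *			record a struct to_kill entry for (tsk, vaddr);
 *	}
 *	read_unlock(&tasklist_lock);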
308 * 309 * Remember that machine checks are not common (or rather 310 * if they are common you have other problems), so this shouldn't 311 * be a performance issue. 312 * 313 * Also there are some races possible while we get from the 314 * error detection to actually handle it. 315 */ 316 317 struct to_kill { 318 struct list_head nd; 319 struct task_struct *tsk; 320 unsigned long addr; 321 short size_shift; 322 }; 323 324 /* 325 * Send all the processes who have the page mapped a signal. 326 * ``action optional'' if they are not immediately affected by the error 327 * ``action required'' if error happened in current execution context 328 */ 329 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) 330 { 331 struct task_struct *t = tk->tsk; 332 short addr_lsb = tk->size_shift; 333 int ret = 0; 334 335 pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n", 336 pfn, t->comm, t->pid); 337 338 if ((flags & MF_ACTION_REQUIRED) && (t == current)) 339 ret = force_sig_mceerr(BUS_MCEERR_AR, 340 (void __user *)tk->addr, addr_lsb); 341 else 342 /* 343 * Signal other processes sharing the page if they have 344 * PF_MCE_EARLY set. 345 * Don't use force here, it's convenient if the signal 346 * can be temporarily blocked. 347 * This could cause a loop when the user sets SIGBUS 348 * to SIG_IGN, but hopefully no one will do that? 349 */ 350 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr, 351 addr_lsb, t); 352 if (ret < 0) 353 pr_info("Error sending signal to %s:%d: %d\n", 354 t->comm, t->pid, ret); 355 return ret; 356 } 357 358 /* 359 * Unknown page type encountered. Try to check whether it can turn PageLRU by 360 * lru_add_drain_all. 361 */ 362 void shake_page(struct page *p) 363 { 364 if (PageHuge(p)) 365 return; 366 367 if (!PageSlab(p)) { 368 lru_add_drain_all(); 369 if (PageLRU(p) || is_free_buddy_page(p)) 370 return; 371 } 372 373 /* 374 * TODO: Could shrink slab caches here if a lightweight range-based 375 * shrinker will be available. 376 */ 377 } 378 EXPORT_SYMBOL_GPL(shake_page); 379 380 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, 381 unsigned long address) 382 { 383 unsigned long ret = 0; 384 pgd_t *pgd; 385 p4d_t *p4d; 386 pud_t *pud; 387 pmd_t *pmd; 388 pte_t *pte; 389 pte_t ptent; 390 391 VM_BUG_ON_VMA(address == -EFAULT, vma); 392 pgd = pgd_offset(vma->vm_mm, address); 393 if (!pgd_present(*pgd)) 394 return 0; 395 p4d = p4d_offset(pgd, address); 396 if (!p4d_present(*p4d)) 397 return 0; 398 pud = pud_offset(p4d, address); 399 if (!pud_present(*pud)) 400 return 0; 401 if (pud_devmap(*pud)) 402 return PUD_SHIFT; 403 pmd = pmd_offset(pud, address); 404 if (!pmd_present(*pmd)) 405 return 0; 406 if (pmd_devmap(*pmd)) 407 return PMD_SHIFT; 408 pte = pte_offset_map(pmd, address); 409 if (!pte) 410 return 0; 411 ptent = ptep_get(pte); 412 if (pte_present(ptent) && pte_devmap(ptent)) 413 ret = PAGE_SHIFT; 414 pte_unmap(pte); 415 return ret; 416 } 417 418 /* 419 * Failure handling: if we can't find or can't kill a process there's 420 * not much we can do. We just print a message and ignore otherwise. 421 */ 422 423 #define FSDAX_INVALID_PGOFF ULONG_MAX 424 425 /* 426 * Schedule a process for later kill. 427 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. 428 * 429 * Note: @fsdax_pgoff is used only when @p is a fsdax page and a 430 * filesystem with a memory failure handler has claimed the 431 * memory_failure event. 
In all other cases, page->index and 432 * page->mapping are sufficient for mapping the page back to its 433 * corresponding user virtual address. 434 */ 435 static void __add_to_kill(struct task_struct *tsk, struct page *p, 436 struct vm_area_struct *vma, struct list_head *to_kill, 437 unsigned long ksm_addr, pgoff_t fsdax_pgoff) 438 { 439 struct to_kill *tk; 440 441 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); 442 if (!tk) { 443 pr_err("Out of memory while machine check handling\n"); 444 return; 445 } 446 447 tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma); 448 if (is_zone_device_page(p)) { 449 if (fsdax_pgoff != FSDAX_INVALID_PGOFF) 450 tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma); 451 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr); 452 } else 453 tk->size_shift = page_shift(compound_head(p)); 454 455 /* 456 * Send SIGKILL if "tk->addr == -EFAULT". Also, as 457 * "tk->size_shift" is always non-zero for !is_zone_device_page(), 458 * so "tk->size_shift == 0" effectively checks no mapping on 459 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times 460 * to a process' address space, it's possible not all N VMAs 461 * contain mappings for the page, but at least one VMA does. 462 * Only deliver SIGBUS with payload derived from the VMA that 463 * has a mapping for the page. 464 */ 465 if (tk->addr == -EFAULT) { 466 pr_info("Unable to find user space address %lx in %s\n", 467 page_to_pfn(p), tsk->comm); 468 } else if (tk->size_shift == 0) { 469 kfree(tk); 470 return; 471 } 472 473 get_task_struct(tsk); 474 tk->tsk = tsk; 475 list_add_tail(&tk->nd, to_kill); 476 } 477 478 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p, 479 struct vm_area_struct *vma, 480 struct list_head *to_kill) 481 { 482 __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF); 483 } 484 485 #ifdef CONFIG_KSM 486 static bool task_in_to_kill_list(struct list_head *to_kill, 487 struct task_struct *tsk) 488 { 489 struct to_kill *tk, *next; 490 491 list_for_each_entry_safe(tk, next, to_kill, nd) { 492 if (tk->tsk == tsk) 493 return true; 494 } 495 496 return false; 497 } 498 void add_to_kill_ksm(struct task_struct *tsk, struct page *p, 499 struct vm_area_struct *vma, struct list_head *to_kill, 500 unsigned long ksm_addr) 501 { 502 if (!task_in_to_kill_list(to_kill, tsk)) 503 __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF); 504 } 505 #endif 506 /* 507 * Kill the processes that have been collected earlier. 508 * 509 * Only do anything when FORCEKILL is set, otherwise just free the 510 * list (this is used for clean pages which do not need killing) 511 * Also when FAIL is set do a force kill because something went 512 * wrong earlier. 513 */ 514 static void kill_procs(struct list_head *to_kill, int forcekill, bool fail, 515 unsigned long pfn, int flags) 516 { 517 struct to_kill *tk, *next; 518 519 list_for_each_entry_safe(tk, next, to_kill, nd) { 520 if (forcekill) { 521 /* 522 * In case something went wrong with munmapping 523 * make sure the process doesn't catch the 524 * signal and then access the memory. Just kill it. 525 */ 526 if (fail || tk->addr == -EFAULT) { 527 pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", 528 pfn, tk->tsk->comm, tk->tsk->pid); 529 do_send_sig_info(SIGKILL, SEND_SIG_PRIV, 530 tk->tsk, PIDTYPE_PID); 531 } 532 533 /* 534 * In theory the process could have mapped 535 * something else on the address in-between. 
We could 536 * check for that, but we need to tell the 537 * process anyways. 538 */ 539 else if (kill_proc(tk, pfn, flags) < 0) 540 pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n", 541 pfn, tk->tsk->comm, tk->tsk->pid); 542 } 543 list_del(&tk->nd); 544 put_task_struct(tk->tsk); 545 kfree(tk); 546 } 547 } 548 549 /* 550 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO) 551 * on behalf of the thread group. Return task_struct of the (first found) 552 * dedicated thread if found, and return NULL otherwise. 553 * 554 * We already hold read_lock(&tasklist_lock) in the caller, so we don't 555 * have to call rcu_read_lock/unlock() in this function. 556 */ 557 static struct task_struct *find_early_kill_thread(struct task_struct *tsk) 558 { 559 struct task_struct *t; 560 561 for_each_thread(tsk, t) { 562 if (t->flags & PF_MCE_PROCESS) { 563 if (t->flags & PF_MCE_EARLY) 564 return t; 565 } else { 566 if (sysctl_memory_failure_early_kill) 567 return t; 568 } 569 } 570 return NULL; 571 } 572 573 /* 574 * Determine whether a given process is "early kill" process which expects 575 * to be signaled when some page under the process is hwpoisoned. 576 * Return task_struct of the dedicated thread (main thread unless explicitly 577 * specified) if the process is "early kill" and otherwise returns NULL. 578 * 579 * Note that the above is true for Action Optional case. For Action Required 580 * case, it's only meaningful to the current thread which need to be signaled 581 * with SIGBUS, this error is Action Optional for other non current 582 * processes sharing the same error page,if the process is "early kill", the 583 * task_struct of the dedicated thread will also be returned. 584 */ 585 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early) 586 { 587 if (!tsk->mm) 588 return NULL; 589 /* 590 * Comparing ->mm here because current task might represent 591 * a subthread, while tsk always points to the main thread. 592 */ 593 if (force_early && tsk->mm == current->mm) 594 return current; 595 596 return find_early_kill_thread(tsk); 597 } 598 599 /* 600 * Collect processes when the error hit an anonymous page. 601 */ 602 static void collect_procs_anon(struct page *page, struct list_head *to_kill, 603 int force_early) 604 { 605 struct folio *folio = page_folio(page); 606 struct vm_area_struct *vma; 607 struct task_struct *tsk; 608 struct anon_vma *av; 609 pgoff_t pgoff; 610 611 av = folio_lock_anon_vma_read(folio, NULL); 612 if (av == NULL) /* Not actually mapped anymore */ 613 return; 614 615 pgoff = page_to_pgoff(page); 616 read_lock(&tasklist_lock); 617 for_each_process (tsk) { 618 struct anon_vma_chain *vmac; 619 struct task_struct *t = task_early_kill(tsk, force_early); 620 621 if (!t) 622 continue; 623 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 624 pgoff, pgoff) { 625 vma = vmac->vma; 626 if (vma->vm_mm != t->mm) 627 continue; 628 if (!page_mapped_in_vma(page, vma)) 629 continue; 630 add_to_kill_anon_file(t, page, vma, to_kill); 631 } 632 } 633 read_unlock(&tasklist_lock); 634 anon_vma_unlock_read(av); 635 } 636 637 /* 638 * Collect processes when the error hit a file mapped page. 
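 *
 * (The user virtual address for each such mapping is recovered from the
 * file offset; in essence, ignoring the range checks done by
 * page_address_in_vma():
 *
 *	vaddr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 *
 * and that address is what ends up in the SIGBUS siginfo.)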
639 */ 640 static void collect_procs_file(struct page *page, struct list_head *to_kill, 641 int force_early) 642 { 643 struct vm_area_struct *vma; 644 struct task_struct *tsk; 645 struct address_space *mapping = page->mapping; 646 pgoff_t pgoff; 647 648 i_mmap_lock_read(mapping); 649 read_lock(&tasklist_lock); 650 pgoff = page_to_pgoff(page); 651 for_each_process(tsk) { 652 struct task_struct *t = task_early_kill(tsk, force_early); 653 654 if (!t) 655 continue; 656 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, 657 pgoff) { 658 /* 659 * Send early kill signal to tasks where a vma covers 660 * the page but the corrupted page is not necessarily 661 * mapped it in its pte. 662 * Assume applications who requested early kill want 663 * to be informed of all such data corruptions. 664 */ 665 if (vma->vm_mm == t->mm) 666 add_to_kill_anon_file(t, page, vma, to_kill); 667 } 668 } 669 read_unlock(&tasklist_lock); 670 i_mmap_unlock_read(mapping); 671 } 672 673 #ifdef CONFIG_FS_DAX 674 static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p, 675 struct vm_area_struct *vma, 676 struct list_head *to_kill, pgoff_t pgoff) 677 { 678 __add_to_kill(tsk, p, vma, to_kill, 0, pgoff); 679 } 680 681 /* 682 * Collect processes when the error hit a fsdax page. 683 */ 684 static void collect_procs_fsdax(struct page *page, 685 struct address_space *mapping, pgoff_t pgoff, 686 struct list_head *to_kill) 687 { 688 struct vm_area_struct *vma; 689 struct task_struct *tsk; 690 691 i_mmap_lock_read(mapping); 692 read_lock(&tasklist_lock); 693 for_each_process(tsk) { 694 struct task_struct *t = task_early_kill(tsk, true); 695 696 if (!t) 697 continue; 698 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 699 if (vma->vm_mm == t->mm) 700 add_to_kill_fsdax(t, page, vma, to_kill, pgoff); 701 } 702 } 703 read_unlock(&tasklist_lock); 704 i_mmap_unlock_read(mapping); 705 } 706 #endif /* CONFIG_FS_DAX */ 707 708 /* 709 * Collect the processes who have the corrupted page mapped to kill. 
710 */ 711 static void collect_procs(struct page *page, struct list_head *tokill, 712 int force_early) 713 { 714 if (!page->mapping) 715 return; 716 if (unlikely(PageKsm(page))) 717 collect_procs_ksm(page, tokill, force_early); 718 else if (PageAnon(page)) 719 collect_procs_anon(page, tokill, force_early); 720 else 721 collect_procs_file(page, tokill, force_early); 722 } 723 724 struct hwp_walk { 725 struct to_kill tk; 726 unsigned long pfn; 727 int flags; 728 }; 729 730 static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift) 731 { 732 tk->addr = addr; 733 tk->size_shift = shift; 734 } 735 736 static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, 737 unsigned long poisoned_pfn, struct to_kill *tk) 738 { 739 unsigned long pfn = 0; 740 741 if (pte_present(pte)) { 742 pfn = pte_pfn(pte); 743 } else { 744 swp_entry_t swp = pte_to_swp_entry(pte); 745 746 if (is_hwpoison_entry(swp)) 747 pfn = swp_offset_pfn(swp); 748 } 749 750 if (!pfn || pfn != poisoned_pfn) 751 return 0; 752 753 set_to_kill(tk, addr, shift); 754 return 1; 755 } 756 757 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 758 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, 759 struct hwp_walk *hwp) 760 { 761 pmd_t pmd = *pmdp; 762 unsigned long pfn; 763 unsigned long hwpoison_vaddr; 764 765 if (!pmd_present(pmd)) 766 return 0; 767 pfn = pmd_pfn(pmd); 768 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) { 769 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT); 770 set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT); 771 return 1; 772 } 773 return 0; 774 } 775 #else 776 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, 777 struct hwp_walk *hwp) 778 { 779 return 0; 780 } 781 #endif 782 783 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, 784 unsigned long end, struct mm_walk *walk) 785 { 786 struct hwp_walk *hwp = walk->private; 787 int ret = 0; 788 pte_t *ptep, *mapped_pte; 789 spinlock_t *ptl; 790 791 ptl = pmd_trans_huge_lock(pmdp, walk->vma); 792 if (ptl) { 793 ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp); 794 spin_unlock(ptl); 795 goto out; 796 } 797 798 mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, 799 addr, &ptl); 800 if (!ptep) 801 goto out; 802 803 for (; addr != end; ptep++, addr += PAGE_SIZE) { 804 ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT, 805 hwp->pfn, &hwp->tk); 806 if (ret == 1) 807 break; 808 } 809 pte_unmap_unlock(mapped_pte, ptl); 810 out: 811 cond_resched(); 812 return ret; 813 } 814 815 #ifdef CONFIG_HUGETLB_PAGE 816 static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, 817 unsigned long addr, unsigned long end, 818 struct mm_walk *walk) 819 { 820 struct hwp_walk *hwp = walk->private; 821 pte_t pte = huge_ptep_get(ptep); 822 struct hstate *h = hstate_vma(walk->vma); 823 824 return check_hwpoisoned_entry(pte, addr, huge_page_shift(h), 825 hwp->pfn, &hwp->tk); 826 } 827 #else 828 #define hwpoison_hugetlb_range NULL 829 #endif 830 831 static const struct mm_walk_ops hwp_walk_ops = { 832 .pmd_entry = hwpoison_pte_range, 833 .hugetlb_entry = hwpoison_hugetlb_range, 834 .walk_lock = PGWALK_RDLOCK, 835 }; 836 837 /* 838 * Sends SIGBUS to the current process with error info. 839 * 840 * This function is intended to handle "Action Required" MCEs on already 841 * hardware poisoned pages. They could happen, for example, when 842 * memory_failure() failed to unmap the error page at the first call, or 843 * when multiple local machine checks happened on different CPUs. 
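 *
 * For reference, the (simplified) way memory_failure() below reaches this
 * path when it finds the page already poisoned is roughly:
 *
 *	if (TestSetPageHWPoison(p)) {
 *		pr_err("%#lx: already hardware poisoned\n", pfn);
 *		res = -EHWPOISON;
 *		if (flags & MF_ACTION_REQUIRED)
 *			res = kill_accessing_process(current, pfn, flags);
 *		...
 *	}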
844 * 845 * MCE handler currently has no easy access to the error virtual address, 846 * so this function walks page table to find it. The returned virtual address 847 * is proper in most cases, but it could be wrong when the application 848 * process has multiple entries mapping the error page. 849 */ 850 static int kill_accessing_process(struct task_struct *p, unsigned long pfn, 851 int flags) 852 { 853 int ret; 854 struct hwp_walk priv = { 855 .pfn = pfn, 856 }; 857 priv.tk.tsk = p; 858 859 if (!p->mm) 860 return -EFAULT; 861 862 mmap_read_lock(p->mm); 863 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops, 864 (void *)&priv); 865 if (ret == 1 && priv.tk.addr) 866 kill_proc(&priv.tk, pfn, flags); 867 else 868 ret = 0; 869 mmap_read_unlock(p->mm); 870 return ret > 0 ? -EHWPOISON : -EFAULT; 871 } 872 873 static const char *action_name[] = { 874 [MF_IGNORED] = "Ignored", 875 [MF_FAILED] = "Failed", 876 [MF_DELAYED] = "Delayed", 877 [MF_RECOVERED] = "Recovered", 878 }; 879 880 static const char * const action_page_types[] = { 881 [MF_MSG_KERNEL] = "reserved kernel page", 882 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page", 883 [MF_MSG_SLAB] = "kernel slab page", 884 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking", 885 [MF_MSG_HUGE] = "huge page", 886 [MF_MSG_FREE_HUGE] = "free huge page", 887 [MF_MSG_UNMAP_FAILED] = "unmapping failed page", 888 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page", 889 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page", 890 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page", 891 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page", 892 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page", 893 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page", 894 [MF_MSG_DIRTY_LRU] = "dirty LRU page", 895 [MF_MSG_CLEAN_LRU] = "clean LRU page", 896 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page", 897 [MF_MSG_BUDDY] = "free buddy page", 898 [MF_MSG_DAX] = "dax page", 899 [MF_MSG_UNSPLIT_THP] = "unsplit thp", 900 [MF_MSG_UNKNOWN] = "unknown page", 901 }; 902 903 /* 904 * XXX: It is possible that a page is isolated from LRU cache, 905 * and then kept in swap cache or failed to remove from page cache. 906 * The page count will stop it from being freed by unpoison. 907 * Stress tests should be aware of this memory leak problem. 908 */ 909 static int delete_from_lru_cache(struct page *p) 910 { 911 if (isolate_lru_page(p)) { 912 /* 913 * Clear sensible page flags, so that the buddy system won't 914 * complain when the page is unpoison-and-freed. 915 */ 916 ClearPageActive(p); 917 ClearPageUnevictable(p); 918 919 /* 920 * Poisoned page might never drop its ref count to 0 so we have 921 * to uncharge it manually from its memcg. 
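 *
 * (Reasoning: memory_failure() keeps a reference and the HWPoison flag
 * keeps the page from ever being freed, so the usual uncharge-on-final-put
 * path may never run; without the explicit uncharge the memcg charge would
 * simply leak.)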
922 */ 923 mem_cgroup_uncharge(page_folio(p)); 924 925 /* 926 * drop the page count elevated by isolate_lru_page() 927 */ 928 put_page(p); 929 return 0; 930 } 931 return -EIO; 932 } 933 934 static int truncate_error_page(struct page *p, unsigned long pfn, 935 struct address_space *mapping) 936 { 937 int ret = MF_FAILED; 938 939 if (mapping->a_ops->error_remove_page) { 940 struct folio *folio = page_folio(p); 941 int err = mapping->a_ops->error_remove_page(mapping, p); 942 943 if (err != 0) { 944 pr_info("%#lx: Failed to punch page: %d\n", pfn, err); 945 } else if (folio_has_private(folio) && 946 !filemap_release_folio(folio, GFP_NOIO)) { 947 pr_info("%#lx: failed to release buffers\n", pfn); 948 } else { 949 ret = MF_RECOVERED; 950 } 951 } else { 952 /* 953 * If the file system doesn't support it just invalidate 954 * This fails on dirty or anything with private pages 955 */ 956 if (invalidate_inode_page(p)) 957 ret = MF_RECOVERED; 958 else 959 pr_info("%#lx: Failed to invalidate\n", pfn); 960 } 961 962 return ret; 963 } 964 965 struct page_state { 966 unsigned long mask; 967 unsigned long res; 968 enum mf_action_page_type type; 969 970 /* Callback ->action() has to unlock the relevant page inside it. */ 971 int (*action)(struct page_state *ps, struct page *p); 972 }; 973 974 /* 975 * Return true if page is still referenced by others, otherwise return 976 * false. 977 * 978 * The extra_pins is true when one extra refcount is expected. 979 */ 980 static bool has_extra_refcount(struct page_state *ps, struct page *p, 981 bool extra_pins) 982 { 983 int count = page_count(p) - 1; 984 985 if (extra_pins) 986 count -= 1; 987 988 if (count > 0) { 989 pr_err("%#lx: %s still referenced by %d users\n", 990 page_to_pfn(p), action_page_types[ps->type], count); 991 return true; 992 } 993 994 return false; 995 } 996 997 /* 998 * Error hit kernel page. 999 * Do nothing, try to be lucky and not touch this instead. For a few cases we 1000 * could be more sophisticated. 1001 */ 1002 static int me_kernel(struct page_state *ps, struct page *p) 1003 { 1004 unlock_page(p); 1005 return MF_IGNORED; 1006 } 1007 1008 /* 1009 * Page in unknown state. Do nothing. 1010 */ 1011 static int me_unknown(struct page_state *ps, struct page *p) 1012 { 1013 pr_err("%#lx: Unknown page state\n", page_to_pfn(p)); 1014 unlock_page(p); 1015 return MF_FAILED; 1016 } 1017 1018 /* 1019 * Clean (or cleaned) page cache page. 1020 */ 1021 static int me_pagecache_clean(struct page_state *ps, struct page *p) 1022 { 1023 int ret; 1024 struct address_space *mapping; 1025 bool extra_pins; 1026 1027 delete_from_lru_cache(p); 1028 1029 /* 1030 * For anonymous pages we're done the only reference left 1031 * should be the one m_f() holds. 1032 */ 1033 if (PageAnon(p)) { 1034 ret = MF_RECOVERED; 1035 goto out; 1036 } 1037 1038 /* 1039 * Now truncate the page in the page cache. This is really 1040 * more like a "temporary hole punch" 1041 * Don't do this for block devices when someone else 1042 * has a reference, because it could be file system metadata 1043 * and that's not safe to truncate. 1044 */ 1045 mapping = page_mapping(p); 1046 if (!mapping) { 1047 /* 1048 * Page has been teared down in the meanwhile 1049 */ 1050 ret = MF_FAILED; 1051 goto out; 1052 } 1053 1054 /* 1055 * The shmem page is kept in page cache instead of truncating 1056 * so is expected to have an extra refcount after error-handling. 1057 */ 1058 extra_pins = shmem_mapping(mapping); 1059 1060 /* 1061 * Truncation is a bit tricky. Enable it per file system for now. 
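 *
 * "Per file system" means the filesystem opts in by providing
 * ->error_remove_page in its address_space_operations, which
 * truncate_error_page() above checks for; a minimal sketch using the
 * generic helper (foo_aops is just a placeholder name):
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 *
 * Filesystems that don't provide it only get the weaker
 * invalidate_inode_page() fallback.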
1062 * 1063 * Open: to take i_rwsem or not for this? Right now we don't. 1064 */ 1065 ret = truncate_error_page(p, page_to_pfn(p), mapping); 1066 if (has_extra_refcount(ps, p, extra_pins)) 1067 ret = MF_FAILED; 1068 1069 out: 1070 unlock_page(p); 1071 1072 return ret; 1073 } 1074 1075 /* 1076 * Dirty pagecache page 1077 * Issues: when the error hit a hole page the error is not properly 1078 * propagated. 1079 */ 1080 static int me_pagecache_dirty(struct page_state *ps, struct page *p) 1081 { 1082 struct address_space *mapping = page_mapping(p); 1083 1084 SetPageError(p); 1085 /* TBD: print more information about the file. */ 1086 if (mapping) { 1087 /* 1088 * IO error will be reported by write(), fsync(), etc. 1089 * who check the mapping. 1090 * This way the application knows that something went 1091 * wrong with its dirty file data. 1092 * 1093 * There's one open issue: 1094 * 1095 * The EIO will be only reported on the next IO 1096 * operation and then cleared through the IO map. 1097 * Normally Linux has two mechanisms to pass IO error 1098 * first through the AS_EIO flag in the address space 1099 * and then through the PageError flag in the page. 1100 * Since we drop pages on memory failure handling the 1101 * only mechanism open to use is through AS_AIO. 1102 * 1103 * This has the disadvantage that it gets cleared on 1104 * the first operation that returns an error, while 1105 * the PageError bit is more sticky and only cleared 1106 * when the page is reread or dropped. If an 1107 * application assumes it will always get error on 1108 * fsync, but does other operations on the fd before 1109 * and the page is dropped between then the error 1110 * will not be properly reported. 1111 * 1112 * This can already happen even without hwpoisoned 1113 * pages: first on metadata IO errors (which only 1114 * report through AS_EIO) or when the page is dropped 1115 * at the wrong time. 1116 * 1117 * So right now we assume that the application DTRT on 1118 * the first EIO, but we're not worse than other parts 1119 * of the kernel. 1120 */ 1121 mapping_set_error(mapping, -EIO); 1122 } 1123 1124 return me_pagecache_clean(ps, p); 1125 } 1126 1127 /* 1128 * Clean and dirty swap cache. 1129 * 1130 * Dirty swap cache page is tricky to handle. The page could live both in page 1131 * cache and swap cache(ie. page is freshly swapped in). So it could be 1132 * referenced concurrently by 2 types of PTEs: 1133 * normal PTEs and swap PTEs. We try to handle them consistently by calling 1134 * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs, 1135 * and then 1136 * - clear dirty bit to prevent IO 1137 * - remove from LRU 1138 * - but keep in the swap cache, so that when we return to it on 1139 * a later page fault, we know the application is accessing 1140 * corrupted data and shall be killed (we installed simple 1141 * interception code in do_swap_page to catch it). 1142 * 1143 * Clean swap cache pages can be directly isolated. A later page fault will 1144 * bring in the known good data from disk. 1145 */ 1146 static int me_swapcache_dirty(struct page_state *ps, struct page *p) 1147 { 1148 int ret; 1149 bool extra_pins = false; 1150 1151 ClearPageDirty(p); 1152 /* Trigger EIO in shmem: */ 1153 ClearPageUptodate(p); 1154 1155 ret = delete_from_lru_cache(p) ? 
MF_FAILED : MF_DELAYED; 1156 unlock_page(p); 1157 1158 if (ret == MF_DELAYED) 1159 extra_pins = true; 1160 1161 if (has_extra_refcount(ps, p, extra_pins)) 1162 ret = MF_FAILED; 1163 1164 return ret; 1165 } 1166 1167 static int me_swapcache_clean(struct page_state *ps, struct page *p) 1168 { 1169 struct folio *folio = page_folio(p); 1170 int ret; 1171 1172 delete_from_swap_cache(folio); 1173 1174 ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED; 1175 folio_unlock(folio); 1176 1177 if (has_extra_refcount(ps, p, false)) 1178 ret = MF_FAILED; 1179 1180 return ret; 1181 } 1182 1183 /* 1184 * Huge pages. Needs work. 1185 * Issues: 1186 * - Error on hugepage is contained in hugepage unit (not in raw page unit.) 1187 * To narrow down kill region to one page, we need to break up pmd. 1188 */ 1189 static int me_huge_page(struct page_state *ps, struct page *p) 1190 { 1191 int res; 1192 struct page *hpage = compound_head(p); 1193 struct address_space *mapping; 1194 bool extra_pins = false; 1195 1196 if (!PageHuge(hpage)) 1197 return MF_DELAYED; 1198 1199 mapping = page_mapping(hpage); 1200 if (mapping) { 1201 res = truncate_error_page(hpage, page_to_pfn(p), mapping); 1202 /* The page is kept in page cache. */ 1203 extra_pins = true; 1204 unlock_page(hpage); 1205 } else { 1206 unlock_page(hpage); 1207 /* 1208 * migration entry prevents later access on error hugepage, 1209 * so we can free and dissolve it into buddy to save healthy 1210 * subpages. 1211 */ 1212 put_page(hpage); 1213 if (__page_handle_poison(p) >= 0) { 1214 page_ref_inc(p); 1215 res = MF_RECOVERED; 1216 } else { 1217 res = MF_FAILED; 1218 } 1219 } 1220 1221 if (has_extra_refcount(ps, p, extra_pins)) 1222 res = MF_FAILED; 1223 1224 return res; 1225 } 1226 1227 /* 1228 * Various page states we can handle. 1229 * 1230 * A page state is defined by its current page->flags bits. 1231 * The table matches them in order and calls the right handler. 1232 * 1233 * This is quite tricky because we can access page at any time 1234 * in its live cycle, so all accesses have to be extremely careful. 1235 * 1236 * This is not complete. More states could be added. 1237 * For any missing state don't attempt recovery. 1238 */ 1239 1240 #define dirty (1UL << PG_dirty) 1241 #define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked)) 1242 #define unevict (1UL << PG_unevictable) 1243 #define mlock (1UL << PG_mlocked) 1244 #define lru (1UL << PG_lru) 1245 #define head (1UL << PG_head) 1246 #define slab (1UL << PG_slab) 1247 #define reserved (1UL << PG_reserved) 1248 1249 static struct page_state error_states[] = { 1250 { reserved, reserved, MF_MSG_KERNEL, me_kernel }, 1251 /* 1252 * free pages are specially detected outside this table: 1253 * PG_buddy pages only make a small fraction of all free pages. 1254 */ 1255 1256 /* 1257 * Could in theory check if slab page is free or if we can drop 1258 * currently unused objects without touching them. But just 1259 * treat it as standard kernel for now. 
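 *
 * (For reference, identify_page_state() below walks this table and picks
 * the first entry for which (p->flags & mask) == res; for example
 *
 *	{ sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
 *
 * matches a page that is in the swap cache but has PG_dirty clear.)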
1260 */ 1261 { slab, slab, MF_MSG_SLAB, me_kernel }, 1262 1263 { head, head, MF_MSG_HUGE, me_huge_page }, 1264 1265 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty }, 1266 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean }, 1267 1268 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty }, 1269 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean }, 1270 1271 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty }, 1272 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean }, 1273 1274 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty }, 1275 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean }, 1276 1277 /* 1278 * Catchall entry: must be at end. 1279 */ 1280 { 0, 0, MF_MSG_UNKNOWN, me_unknown }, 1281 }; 1282 1283 #undef dirty 1284 #undef sc 1285 #undef unevict 1286 #undef mlock 1287 #undef lru 1288 #undef head 1289 #undef slab 1290 #undef reserved 1291 1292 static void update_per_node_mf_stats(unsigned long pfn, 1293 enum mf_result result) 1294 { 1295 int nid = MAX_NUMNODES; 1296 struct memory_failure_stats *mf_stats = NULL; 1297 1298 nid = pfn_to_nid(pfn); 1299 if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) { 1300 WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid); 1301 return; 1302 } 1303 1304 mf_stats = &NODE_DATA(nid)->mf_stats; 1305 switch (result) { 1306 case MF_IGNORED: 1307 ++mf_stats->ignored; 1308 break; 1309 case MF_FAILED: 1310 ++mf_stats->failed; 1311 break; 1312 case MF_DELAYED: 1313 ++mf_stats->delayed; 1314 break; 1315 case MF_RECOVERED: 1316 ++mf_stats->recovered; 1317 break; 1318 default: 1319 WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result); 1320 break; 1321 } 1322 ++mf_stats->total; 1323 } 1324 1325 /* 1326 * "Dirty/Clean" indication is not 100% accurate due to the possibility of 1327 * setting PG_dirty outside page lock. See also comment above set_page_dirty(). 1328 */ 1329 static int action_result(unsigned long pfn, enum mf_action_page_type type, 1330 enum mf_result result) 1331 { 1332 trace_memory_failure_event(pfn, type, result); 1333 1334 num_poisoned_pages_inc(pfn); 1335 1336 update_per_node_mf_stats(pfn, result); 1337 1338 pr_err("%#lx: recovery action for %s: %s\n", 1339 pfn, action_page_types[type], action_name[result]); 1340 1341 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY; 1342 } 1343 1344 static int page_action(struct page_state *ps, struct page *p, 1345 unsigned long pfn) 1346 { 1347 int result; 1348 1349 /* page p should be unlocked after returning from ps->action(). */ 1350 result = ps->action(ps, p); 1351 1352 /* Could do more checks here if page looks ok */ 1353 /* 1354 * Could adjust zone counters here to correct for the missing page. 1355 */ 1356 1357 return action_result(pfn, ps->type, result); 1358 } 1359 1360 static inline bool PageHWPoisonTakenOff(struct page *page) 1361 { 1362 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON; 1363 } 1364 1365 void SetPageHWPoisonTakenOff(struct page *page) 1366 { 1367 set_page_private(page, MAGIC_HWPOISON); 1368 } 1369 1370 void ClearPageHWPoisonTakenOff(struct page *page) 1371 { 1372 if (PageHWPoison(page)) 1373 set_page_private(page, 0); 1374 } 1375 1376 /* 1377 * Return true if a page type of a given page is supported by hwpoison 1378 * mechanism (while handling could fail), otherwise false. 
This function 1379 * does not return true for hugetlb or device memory pages, so it's assumed 1380 * to be called only in the context where we never have such pages. 1381 */ 1382 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) 1383 { 1384 /* Soft offline could migrate non-LRU movable pages */ 1385 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) 1386 return true; 1387 1388 return PageLRU(page) || is_free_buddy_page(page); 1389 } 1390 1391 static int __get_hwpoison_page(struct page *page, unsigned long flags) 1392 { 1393 struct folio *folio = page_folio(page); 1394 int ret = 0; 1395 bool hugetlb = false; 1396 1397 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false); 1398 if (hugetlb) 1399 return ret; 1400 1401 /* 1402 * This check prevents from calling folio_try_get() for any 1403 * unsupported type of folio in order to reduce the risk of unexpected 1404 * races caused by taking a folio refcount. 1405 */ 1406 if (!HWPoisonHandlable(&folio->page, flags)) 1407 return -EBUSY; 1408 1409 if (folio_try_get(folio)) { 1410 if (folio == page_folio(page)) 1411 return 1; 1412 1413 pr_info("%#lx cannot catch tail\n", page_to_pfn(page)); 1414 folio_put(folio); 1415 } 1416 1417 return 0; 1418 } 1419 1420 static int get_any_page(struct page *p, unsigned long flags) 1421 { 1422 int ret = 0, pass = 0; 1423 bool count_increased = false; 1424 1425 if (flags & MF_COUNT_INCREASED) 1426 count_increased = true; 1427 1428 try_again: 1429 if (!count_increased) { 1430 ret = __get_hwpoison_page(p, flags); 1431 if (!ret) { 1432 if (page_count(p)) { 1433 /* We raced with an allocation, retry. */ 1434 if (pass++ < 3) 1435 goto try_again; 1436 ret = -EBUSY; 1437 } else if (!PageHuge(p) && !is_free_buddy_page(p)) { 1438 /* We raced with put_page, retry. */ 1439 if (pass++ < 3) 1440 goto try_again; 1441 ret = -EIO; 1442 } 1443 goto out; 1444 } else if (ret == -EBUSY) { 1445 /* 1446 * We raced with (possibly temporary) unhandlable 1447 * page, retry. 1448 */ 1449 if (pass++ < 3) { 1450 shake_page(p); 1451 goto try_again; 1452 } 1453 ret = -EIO; 1454 goto out; 1455 } 1456 } 1457 1458 if (PageHuge(p) || HWPoisonHandlable(p, flags)) { 1459 ret = 1; 1460 } else { 1461 /* 1462 * A page we cannot handle. Check whether we can turn 1463 * it into something we can handle. 1464 */ 1465 if (pass++ < 3) { 1466 put_page(p); 1467 shake_page(p); 1468 count_increased = false; 1469 goto try_again; 1470 } 1471 put_page(p); 1472 ret = -EIO; 1473 } 1474 out: 1475 if (ret == -EIO) 1476 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p)); 1477 1478 return ret; 1479 } 1480 1481 static int __get_unpoison_page(struct page *page) 1482 { 1483 struct folio *folio = page_folio(page); 1484 int ret = 0; 1485 bool hugetlb = false; 1486 1487 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true); 1488 if (hugetlb) 1489 return ret; 1490 1491 /* 1492 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison, 1493 * but also isolated from buddy freelist, so need to identify the 1494 * state and have to cancel both operations to unpoison. 1495 */ 1496 if (PageHWPoisonTakenOff(page)) 1497 return -EHWPOISON; 1498 1499 return get_page_unless_zero(page) ? 
1 : 0; 1500 } 1501 1502 /** 1503 * get_hwpoison_page() - Get refcount for memory error handling 1504 * @p: Raw error page (hit by memory error) 1505 * @flags: Flags controlling behavior of error handling 1506 * 1507 * get_hwpoison_page() takes a page refcount of an error page to handle memory 1508 * error on it, after checking that the error page is in a well-defined state 1509 * (defined as a page-type we can successfully handle the memory error on it, 1510 * such as LRU page and hugetlb page). 1511 * 1512 * Memory error handling could be triggered at any time on any type of page, 1513 * so it's prone to race with typical memory management lifecycle (like 1514 * allocation and free). So to avoid such races, get_hwpoison_page() takes 1515 * extra care for the error page's state (as done in __get_hwpoison_page()), 1516 * and has some retry logic in get_any_page(). 1517 * 1518 * When called from unpoison_memory(), the caller should already ensure that 1519 * the given page has PG_hwpoison. So it's never reused for other page 1520 * allocations, and __get_unpoison_page() never races with them. 1521 * 1522 * Return: 0 on failure, 1523 * 1 on success for in-use pages in a well-defined state, 1524 * -EIO for pages on which we can not handle memory errors, 1525 * -EBUSY when get_hwpoison_page() has raced with page lifecycle 1526 * operations like allocation and free, 1527 * -EHWPOISON when the page is hwpoisoned and taken off from buddy. 1528 */ 1529 static int get_hwpoison_page(struct page *p, unsigned long flags) 1530 { 1531 int ret; 1532 1533 zone_pcp_disable(page_zone(p)); 1534 if (flags & MF_UNPOISON) 1535 ret = __get_unpoison_page(p); 1536 else 1537 ret = get_any_page(p, flags); 1538 zone_pcp_enable(page_zone(p)); 1539 1540 return ret; 1541 } 1542 1543 /* 1544 * Do all that is necessary to remove user space mappings. Unmap 1545 * the pages and send SIGBUS to the processes if the data was dirty. 1546 */ 1547 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, 1548 int flags, struct page *hpage) 1549 { 1550 struct folio *folio = page_folio(hpage); 1551 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; 1552 struct address_space *mapping; 1553 LIST_HEAD(tokill); 1554 bool unmap_success; 1555 int forcekill; 1556 bool mlocked = PageMlocked(hpage); 1557 1558 /* 1559 * Here we are interested only in user-mapped pages, so skip any 1560 * other types of pages. 1561 */ 1562 if (PageReserved(p) || PageSlab(p) || PageTable(p)) 1563 return true; 1564 if (!(PageLRU(hpage) || PageHuge(p))) 1565 return true; 1566 1567 /* 1568 * This check implies we don't kill processes if their pages 1569 * are in the swap cache early. Those are always late kills. 1570 */ 1571 if (!page_mapped(hpage)) 1572 return true; 1573 1574 if (PageSwapCache(p)) { 1575 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); 1576 ttu &= ~TTU_HWPOISON; 1577 } 1578 1579 /* 1580 * Propagate the dirty bit from PTEs to struct page first, because we 1581 * need this to decide if we should kill or just drop the page. 1582 * XXX: the dirty test could be racy: set_page_dirty() may not always 1583 * be called inside page lock (it's recommended but not enforced). 
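 *
 * (Summary of the decision made just below, for a mapped, writeback-capable,
 * not-already-dirty page without MF_MUST_KILL:
 *
 *	page_mkclean() found dirty PTEs -> SetPageDirty(), keep TTU_HWPOISON;
 *					   dirty data is lost, so kill users
 *	no dirty PTEs, page truly clean -> clear TTU_HWPOISON and just drop
 *					   the page, no signals needed
 *
 * The XXX above means this classification can occasionally be wrong.)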
1584 */ 1585 mapping = page_mapping(hpage); 1586 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && 1587 mapping_can_writeback(mapping)) { 1588 if (page_mkclean(hpage)) { 1589 SetPageDirty(hpage); 1590 } else { 1591 ttu &= ~TTU_HWPOISON; 1592 pr_info("%#lx: corrupted page was clean: dropped without side effects\n", 1593 pfn); 1594 } 1595 } 1596 1597 /* 1598 * First collect all the processes that have the page 1599 * mapped in dirty form. This has to be done before try_to_unmap, 1600 * because ttu takes the rmap data structures down. 1601 */ 1602 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); 1603 1604 if (PageHuge(hpage) && !PageAnon(hpage)) { 1605 /* 1606 * For hugetlb pages in shared mappings, try_to_unmap 1607 * could potentially call huge_pmd_unshare. Because of 1608 * this, take semaphore in write mode here and set 1609 * TTU_RMAP_LOCKED to indicate we have taken the lock 1610 * at this higher level. 1611 */ 1612 mapping = hugetlb_page_mapping_lock_write(hpage); 1613 if (mapping) { 1614 try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); 1615 i_mmap_unlock_write(mapping); 1616 } else 1617 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn); 1618 } else { 1619 try_to_unmap(folio, ttu); 1620 } 1621 1622 unmap_success = !page_mapped(hpage); 1623 if (!unmap_success) 1624 pr_err("%#lx: failed to unmap page (mapcount=%d)\n", 1625 pfn, page_mapcount(hpage)); 1626 1627 /* 1628 * try_to_unmap() might put mlocked page in lru cache, so call 1629 * shake_page() again to ensure that it's flushed. 1630 */ 1631 if (mlocked) 1632 shake_page(hpage); 1633 1634 /* 1635 * Now that the dirty bit has been propagated to the 1636 * struct page and all unmaps done we can decide if 1637 * killing is needed or not. Only kill when the page 1638 * was dirty or the process is not restartable, 1639 * otherwise the tokill list is merely 1640 * freed. When there was a problem unmapping earlier 1641 * use a more force-full uncatchable kill to prevent 1642 * any accesses to the poisoned memory. 1643 */ 1644 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) || 1645 !unmap_success; 1646 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags); 1647 1648 return unmap_success; 1649 } 1650 1651 static int identify_page_state(unsigned long pfn, struct page *p, 1652 unsigned long page_flags) 1653 { 1654 struct page_state *ps; 1655 1656 /* 1657 * The first check uses the current page flags which may not have any 1658 * relevant information. The second check with the saved page flags is 1659 * carried out only if the first check can't determine the page status. 
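 *
 * (For instance, if the page was isolated from the LRU or truncated while
 * being unmapped, the live p->flags may only match the catch-all entry;
 * the @page_flags snapshot taken before unmapping then still allows
 * classifying it as an LRU page.)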
1660 */ 1661 for (ps = error_states;; ps++) 1662 if ((p->flags & ps->mask) == ps->res) 1663 break; 1664 1665 page_flags |= (p->flags & (1UL << PG_dirty)); 1666 1667 if (!ps->mask) 1668 for (ps = error_states;; ps++) 1669 if ((page_flags & ps->mask) == ps->res) 1670 break; 1671 return page_action(ps, p, pfn); 1672 } 1673 1674 static int try_to_split_thp_page(struct page *page) 1675 { 1676 int ret; 1677 1678 lock_page(page); 1679 ret = split_huge_page(page); 1680 unlock_page(page); 1681 1682 if (unlikely(ret)) 1683 put_page(page); 1684 1685 return ret; 1686 } 1687 1688 static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn, 1689 struct address_space *mapping, pgoff_t index, int flags) 1690 { 1691 struct to_kill *tk; 1692 unsigned long size = 0; 1693 1694 list_for_each_entry(tk, to_kill, nd) 1695 if (tk->size_shift) 1696 size = max(size, 1UL << tk->size_shift); 1697 1698 if (size) { 1699 /* 1700 * Unmap the largest mapping to avoid breaking up device-dax 1701 * mappings which are constant size. The actual size of the 1702 * mapping being torn down is communicated in siginfo, see 1703 * kill_proc() 1704 */ 1705 loff_t start = (index << PAGE_SHIFT) & ~(size - 1); 1706 1707 unmap_mapping_range(mapping, start, size, 0); 1708 } 1709 1710 kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags); 1711 } 1712 1713 static int mf_generic_kill_procs(unsigned long long pfn, int flags, 1714 struct dev_pagemap *pgmap) 1715 { 1716 struct page *page = pfn_to_page(pfn); 1717 LIST_HEAD(to_kill); 1718 dax_entry_t cookie; 1719 int rc = 0; 1720 1721 /* 1722 * Pages instantiated by device-dax (not filesystem-dax) 1723 * may be compound pages. 1724 */ 1725 page = compound_head(page); 1726 1727 /* 1728 * Prevent the inode from being freed while we are interrogating 1729 * the address_space, typically this would be handled by 1730 * lock_page(), but dax pages do not use the page lock. This 1731 * also prevents changes to the mapping of this pfn until 1732 * poison signaling is complete. 1733 */ 1734 cookie = dax_lock_page(page); 1735 if (!cookie) 1736 return -EBUSY; 1737 1738 if (hwpoison_filter(page)) { 1739 rc = -EOPNOTSUPP; 1740 goto unlock; 1741 } 1742 1743 switch (pgmap->type) { 1744 case MEMORY_DEVICE_PRIVATE: 1745 case MEMORY_DEVICE_COHERENT: 1746 /* 1747 * TODO: Handle device pages which may need coordination 1748 * with device-side memory. 1749 */ 1750 rc = -ENXIO; 1751 goto unlock; 1752 default: 1753 break; 1754 } 1755 1756 /* 1757 * Use this flag as an indication that the dax page has been 1758 * remapped UC to prevent speculative consumption of poison. 1759 */ 1760 SetPageHWPoison(page); 1761 1762 /* 1763 * Unlike System-RAM there is no possibility to swap in a 1764 * different physical page at a given virtual address, so all 1765 * userspace consumption of ZONE_DEVICE memory necessitates 1766 * SIGBUS (i.e. 
MF_MUST_KILL) 1767 */ 1768 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; 1769 collect_procs(page, &to_kill, true); 1770 1771 unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags); 1772 unlock: 1773 dax_unlock_page(page, cookie); 1774 return rc; 1775 } 1776 1777 #ifdef CONFIG_FS_DAX 1778 /** 1779 * mf_dax_kill_procs - Collect and kill processes who are using this file range 1780 * @mapping: address_space of the file in use 1781 * @index: start pgoff of the range within the file 1782 * @count: length of the range, in unit of PAGE_SIZE 1783 * @mf_flags: memory failure flags 1784 */ 1785 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, 1786 unsigned long count, int mf_flags) 1787 { 1788 LIST_HEAD(to_kill); 1789 dax_entry_t cookie; 1790 struct page *page; 1791 size_t end = index + count; 1792 1793 mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; 1794 1795 for (; index < end; index++) { 1796 page = NULL; 1797 cookie = dax_lock_mapping_entry(mapping, index, &page); 1798 if (!cookie) 1799 return -EBUSY; 1800 if (!page) 1801 goto unlock; 1802 1803 SetPageHWPoison(page); 1804 1805 collect_procs_fsdax(page, mapping, index, &to_kill); 1806 unmap_and_kill(&to_kill, page_to_pfn(page), mapping, 1807 index, mf_flags); 1808 unlock: 1809 dax_unlock_mapping_entry(mapping, index, cookie); 1810 } 1811 return 0; 1812 } 1813 EXPORT_SYMBOL_GPL(mf_dax_kill_procs); 1814 #endif /* CONFIG_FS_DAX */ 1815 1816 #ifdef CONFIG_HUGETLB_PAGE 1817 /* 1818 * Struct raw_hwp_page represents information about "raw error page", 1819 * constructing singly linked list from ->_hugetlb_hwpoison field of folio. 1820 */ 1821 struct raw_hwp_page { 1822 struct llist_node node; 1823 struct page *page; 1824 }; 1825 1826 static inline struct llist_head *raw_hwp_list_head(struct folio *folio) 1827 { 1828 return (struct llist_head *)&folio->_hugetlb_hwpoison; 1829 } 1830 1831 static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag) 1832 { 1833 struct llist_head *head; 1834 struct llist_node *t, *tnode; 1835 unsigned long count = 0; 1836 1837 head = raw_hwp_list_head(folio); 1838 llist_for_each_safe(tnode, t, head->first) { 1839 struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); 1840 1841 if (move_flag) 1842 SetPageHWPoison(p->page); 1843 else 1844 num_poisoned_pages_sub(page_to_pfn(p->page), 1); 1845 kfree(p); 1846 count++; 1847 } 1848 llist_del_all(head); 1849 return count; 1850 } 1851 1852 static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) 1853 { 1854 struct llist_head *head; 1855 struct raw_hwp_page *raw_hwp; 1856 struct llist_node *t, *tnode; 1857 int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0; 1858 1859 /* 1860 * Once the hwpoison hugepage has lost reliable raw error info, 1861 * there is little meaning to keep additional error info precisely, 1862 * so skip to add additional raw error info. 1863 */ 1864 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) 1865 return -EHWPOISON; 1866 head = raw_hwp_list_head(folio); 1867 llist_for_each_safe(tnode, t, head->first) { 1868 struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); 1869 1870 if (p->page == page) 1871 return -EHWPOISON; 1872 } 1873 1874 raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC); 1875 if (raw_hwp) { 1876 raw_hwp->page = page; 1877 llist_add(&raw_hwp->node, head); 1878 /* the first error event will be counted in action_result(). 
*/ 1879 if (ret) 1880 num_poisoned_pages_inc(page_to_pfn(page)); 1881 } else { 1882 /* 1883 * Failed to save raw error info. We no longer trace all 1884 * hwpoisoned subpages, and we need refuse to free/dissolve 1885 * this hwpoisoned hugepage. 1886 */ 1887 folio_set_hugetlb_raw_hwp_unreliable(folio); 1888 /* 1889 * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not 1890 * used any more, so free it. 1891 */ 1892 __folio_free_raw_hwp(folio, false); 1893 } 1894 return ret; 1895 } 1896 1897 static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag) 1898 { 1899 /* 1900 * hugetlb_vmemmap_optimized hugepages can't be freed because struct 1901 * pages for tail pages are required but they don't exist. 1902 */ 1903 if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio)) 1904 return 0; 1905 1906 /* 1907 * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by 1908 * definition. 1909 */ 1910 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) 1911 return 0; 1912 1913 return __folio_free_raw_hwp(folio, move_flag); 1914 } 1915 1916 void folio_clear_hugetlb_hwpoison(struct folio *folio) 1917 { 1918 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) 1919 return; 1920 folio_clear_hwpoison(folio); 1921 folio_free_raw_hwp(folio, true); 1922 } 1923 1924 /* 1925 * Called from hugetlb code with hugetlb_lock held. 1926 * 1927 * Return values: 1928 * 0 - free hugepage 1929 * 1 - in-use hugepage 1930 * 2 - not a hugepage 1931 * -EBUSY - the hugepage is busy (try to retry) 1932 * -EHWPOISON - the hugepage is already hwpoisoned 1933 */ 1934 int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, 1935 bool *migratable_cleared) 1936 { 1937 struct page *page = pfn_to_page(pfn); 1938 struct folio *folio = page_folio(page); 1939 int ret = 2; /* fallback to normal page handling */ 1940 bool count_increased = false; 1941 1942 if (!folio_test_hugetlb(folio)) 1943 goto out; 1944 1945 if (flags & MF_COUNT_INCREASED) { 1946 ret = 1; 1947 count_increased = true; 1948 } else if (folio_test_hugetlb_freed(folio)) { 1949 ret = 0; 1950 } else if (folio_test_hugetlb_migratable(folio)) { 1951 ret = folio_try_get(folio); 1952 if (ret) 1953 count_increased = true; 1954 } else { 1955 ret = -EBUSY; 1956 if (!(flags & MF_NO_RETRY)) 1957 goto out; 1958 } 1959 1960 if (folio_set_hugetlb_hwpoison(folio, page)) { 1961 ret = -EHWPOISON; 1962 goto out; 1963 } 1964 1965 /* 1966 * Clearing hugetlb_migratable for hwpoisoned hugepages to prevent them 1967 * from being migrated by memory hotremove. 1968 */ 1969 if (count_increased && folio_test_hugetlb_migratable(folio)) { 1970 folio_clear_hugetlb_migratable(folio); 1971 *migratable_cleared = true; 1972 } 1973 1974 return ret; 1975 out: 1976 if (count_increased) 1977 folio_put(folio); 1978 return ret; 1979 } 1980 1981 /* 1982 * Taking refcount of hugetlb pages needs extra care about race conditions 1983 * with basic operations like hugepage allocation/free/demotion. 1984 * So some of prechecks for hwpoison (pinning, and testing/setting 1985 * PageHWPoison) should be done in single hugetlb_lock range. 
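 *
 * The lock itself is taken by a small wrapper on the hugetlb side; a sketch
 * of the expected calling convention (the real helper lives in mm/hugetlb.c,
 * shown here only for orientation):
 *
 *	int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
 *				       bool *migratable_cleared)
 *	{
 *		int ret;
 *
 *		spin_lock_irq(&hugetlb_lock);
 *		ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
 *		spin_unlock_irq(&hugetlb_lock);
 *		return ret;
 *	}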

/*
 * Called from hugetlb code with hugetlb_lock held.
 *
 * Return values:
 *   0          - free hugepage
 *   1          - in-use hugepage
 *   2          - not a hugepage
 *   -EBUSY     - the hugepage is busy (try to retry)
 *   -EHWPOISON - the hugepage is already hwpoisoned
 */
int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				 bool *migratable_cleared)
{
	struct page *page = pfn_to_page(pfn);
	struct folio *folio = page_folio(page);
	int ret = 2;	/* fallback to normal page handling */
	bool count_increased = false;

	if (!folio_test_hugetlb(folio))
		goto out;

	if (flags & MF_COUNT_INCREASED) {
		ret = 1;
		count_increased = true;
	} else if (folio_test_hugetlb_freed(folio)) {
		ret = 0;
	} else if (folio_test_hugetlb_migratable(folio)) {
		ret = folio_try_get(folio);
		if (ret)
			count_increased = true;
	} else {
		ret = -EBUSY;
		if (!(flags & MF_NO_RETRY))
			goto out;
	}

	if (folio_set_hugetlb_hwpoison(folio, page)) {
		ret = -EHWPOISON;
		goto out;
	}

	/*
	 * Clear hugetlb_migratable for hwpoisoned hugepages to prevent them
	 * from being migrated by memory hotremove.
	 */
	if (count_increased && folio_test_hugetlb_migratable(folio)) {
		folio_clear_hugetlb_migratable(folio);
		*migratable_cleared = true;
	}

	return ret;
out:
	if (count_increased)
		folio_put(folio);
	return ret;
}

/*
 * Taking a refcount on hugetlb pages needs extra care about races with
 * basic operations like hugepage allocation/free/demotion, so some of the
 * prechecks for hwpoison (pinning, and testing/setting PageHWPoison) should
 * be done within a single hugetlb_lock section.
 */
static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	int res;
	struct page *p = pfn_to_page(pfn);
	struct folio *folio;
	unsigned long page_flags;
	bool migratable_cleared = false;

	*hugetlb = 1;
retry:
	res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
	if (res == 2) { /* fallback to normal page handling */
		*hugetlb = 0;
		return 0;
	} else if (res == -EHWPOISON) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		if (flags & MF_ACTION_REQUIRED) {
			folio = page_folio(p);
			res = kill_accessing_process(current, folio_pfn(folio), flags);
		}
		return res;
	} else if (res == -EBUSY) {
		if (!(flags & MF_NO_RETRY)) {
			flags |= MF_NO_RETRY;
			goto retry;
		}
		return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
	}

	folio = page_folio(p);
	folio_lock(folio);

	if (hwpoison_filter(p)) {
		folio_clear_hugetlb_hwpoison(folio);
		if (migratable_cleared)
			folio_set_hugetlb_migratable(folio);
		folio_unlock(folio);
		if (res == 1)
			folio_put(folio);
		return -EOPNOTSUPP;
	}

	/*
	 * Handle a free hugepage.  The possible race with hugepage
	 * allocation or demotion is prevented by the PageHWPoison flag.
	 */
	if (res == 0) {
		folio_unlock(folio);
		if (__page_handle_poison(p) >= 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
		return action_result(pfn, MF_MSG_FREE_HUGE, res);
	}

	page_flags = folio->flags;

	if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
		folio_unlock(folio);
		return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
	}

	return identify_page_state(pfn, p, page_flags);
}

#else
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	return 0;
}

static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
{
	return 0;
}
#endif	/* CONFIG_HUGETLB_PAGE */

/* Drop the extra refcount in case we come from madvise() */
static void put_ref_page(unsigned long pfn, int flags)
{
	struct page *page;

	if (!(flags & MF_COUNT_INCREASED))
		return;

	page = pfn_to_page(pfn);
	if (page)
		put_page(page);
}

static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	int rc = -ENXIO;

	put_ref_page(pfn, flags);

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn))
		goto out;

	/*
	 * Call the driver's implementation to handle the memory failure,
	 * otherwise fall back to the generic handler.
	 */
	if (pgmap_has_memory_failure(pgmap)) {
		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
		/*
		 * Fall back to the generic handler too if the operation is
		 * not supported inside the driver/device/filesystem.
		 */
		if (rc != -EOPNOTSUPP)
			goto out;
	}

	rc = mf_generic_kill_procs(pfn, flags, pgmap);
out:
	/* drop the pgmap ref acquired in the caller */
	put_dev_pagemap(pgmap);
	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}
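
/*
 * mf_mutex serializes the entry points below that act on a poisoned pfn
 * (memory_failure(), unpoison_memory() and soft_offline_page()), so their
 * PageHWPoison checks and containment actions cannot interleave.
 */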
static DEFINE_MUTEX(mf_mutex);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 *
 * Return: 0 if the memory error was successfully handled,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 (except -EOPNOTSUPP) on failure.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	int hugetlb = 0;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	mutex_lock(&mf_mutex);

	if (!(flags & MF_SW_SIMULATED))
		hw_memory_failure = true;

	p = pfn_to_online_page(pfn);
	if (!p) {
		res = arch_memory_failure(pfn, flags);
		if (res == 0)
			goto unlock_mutex;

		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			if (pgmap) {
				res = memory_failure_dev_pagemap(pfn, flags,
								 pgmap);
				goto unlock_mutex;
			}
		}
		pr_err("%#lx: memory outside kernel control\n", pfn);
		res = -ENXIO;
		goto unlock_mutex;
	}

try_again:
	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
	if (hugetlb)
		goto unlock_mutex;

	if (TestSetPageHWPoison(p)) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		if (flags & MF_COUNT_INCREASED)
			put_page(p);
		goto unlock_mutex;
	}

	hpage = compound_head(p);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    check_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up the page count from 0,
	 * as that may cause a page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				res = action_result(pfn, MF_MSG_BUDDY, res);
			} else {
				res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
			}
			goto unlock_mutex;
		} else if (res < 0) {
			res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			goto unlock_mutex;
		}
	}

	if (PageTransHuge(hpage)) {
		/*
		 * The flag must be set after the refcount is bumped,
		 * otherwise it may race with THP split.
		 * And the flag can't be set in get_hwpoison_page() since
		 * it is called by soft offline too and it is just called
		 * for !MF_COUNT_INCREASED.  So here seems to be the best
		 * place.
		 *
		 * Don't worry about the above error handling paths for
		 * get_hwpoison_page(): they handle either a free page or
		 * an unhandlable page.  The refcount is bumped iff the
		 * page is a valid handlable page.
		 */
		SetPageHasHWPoisoned(hpage);
		if (try_to_split_thp_page(p) < 0) {
			res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p);

	lock_page(p);

	/*
	 * We only intend to deal with non-compound pages here.  However,
	 * the page could have become compound due to a race window.  If
	 * this happens, we can try again in the hope of handling the page
	 * next round.
	 */
	if (PageCompound(p)) {
		if (retry) {
			ClearPageHWPoison(p);
			unlock_page(p);
			put_page(p);
			flags &= ~MF_COUNT_INCREASED;
			retry = false;
			goto try_again;
		}
		res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action.  One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one().  So to determine the
	 * page status correctly, we save a copy of the page flags at this
	 * time.
	 */
	page_flags = p->flags;

	if (hwpoison_filter(p)) {
		ClearPageHWPoison(p);
		unlock_page(p);
		put_page(p);
		res = -EOPNOTSUPP;
		goto unlock_mutex;
	}
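
	/*
	 * Note: hwpoison_filter() above is a testing aid.  When filter
	 * parameters are configured (e.g. through the hwpoison-inject
	 * debugfs interface, mm/hwpoison-inject.c), error events that do
	 * not match the filter are dropped here and reported back as
	 * -EOPNOTSUPP.
	 */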
2322 */ 2323 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { 2324 res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED); 2325 goto unlock_page; 2326 } 2327 2328 identify_page_state: 2329 res = identify_page_state(pfn, p, page_flags); 2330 mutex_unlock(&mf_mutex); 2331 return res; 2332 unlock_page: 2333 unlock_page(p); 2334 unlock_mutex: 2335 mutex_unlock(&mf_mutex); 2336 return res; 2337 } 2338 EXPORT_SYMBOL_GPL(memory_failure); 2339 2340 #define MEMORY_FAILURE_FIFO_ORDER 4 2341 #define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER) 2342 2343 struct memory_failure_entry { 2344 unsigned long pfn; 2345 int flags; 2346 }; 2347 2348 struct memory_failure_cpu { 2349 DECLARE_KFIFO(fifo, struct memory_failure_entry, 2350 MEMORY_FAILURE_FIFO_SIZE); 2351 spinlock_t lock; 2352 struct work_struct work; 2353 }; 2354 2355 static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu); 2356 2357 /** 2358 * memory_failure_queue - Schedule handling memory failure of a page. 2359 * @pfn: Page Number of the corrupted page 2360 * @flags: Flags for memory failure handling 2361 * 2362 * This function is called by the low level hardware error handler 2363 * when it detects hardware memory corruption of a page. It schedules 2364 * the recovering of error page, including dropping pages, killing 2365 * processes etc. 2366 * 2367 * The function is primarily of use for corruptions that 2368 * happen outside the current execution context (e.g. when 2369 * detected by a background scrubber) 2370 * 2371 * Can run in IRQ context. 2372 */ 2373 void memory_failure_queue(unsigned long pfn, int flags) 2374 { 2375 struct memory_failure_cpu *mf_cpu; 2376 unsigned long proc_flags; 2377 struct memory_failure_entry entry = { 2378 .pfn = pfn, 2379 .flags = flags, 2380 }; 2381 2382 mf_cpu = &get_cpu_var(memory_failure_cpu); 2383 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 2384 if (kfifo_put(&mf_cpu->fifo, entry)) 2385 schedule_work_on(smp_processor_id(), &mf_cpu->work); 2386 else 2387 pr_err("buffer overflow when queuing memory failure at %#lx\n", 2388 pfn); 2389 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 2390 put_cpu_var(memory_failure_cpu); 2391 } 2392 EXPORT_SYMBOL_GPL(memory_failure_queue); 2393 2394 static void memory_failure_work_func(struct work_struct *work) 2395 { 2396 struct memory_failure_cpu *mf_cpu; 2397 struct memory_failure_entry entry = { 0, }; 2398 unsigned long proc_flags; 2399 int gotten; 2400 2401 mf_cpu = container_of(work, struct memory_failure_cpu, work); 2402 for (;;) { 2403 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 2404 gotten = kfifo_get(&mf_cpu->fifo, &entry); 2405 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 2406 if (!gotten) 2407 break; 2408 if (entry.flags & MF_SOFT_OFFLINE) 2409 soft_offline_page(entry.pfn, entry.flags); 2410 else 2411 memory_failure(entry.pfn, entry.flags); 2412 } 2413 } 2414 2415 /* 2416 * Process memory_failure work queued on the specified CPU. 2417 * Used to avoid return-to-userspace racing with the memory_failure workqueue. 
2418 */ 2419 void memory_failure_queue_kick(int cpu) 2420 { 2421 struct memory_failure_cpu *mf_cpu; 2422 2423 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 2424 cancel_work_sync(&mf_cpu->work); 2425 memory_failure_work_func(&mf_cpu->work); 2426 } 2427 2428 static int __init memory_failure_init(void) 2429 { 2430 struct memory_failure_cpu *mf_cpu; 2431 int cpu; 2432 2433 for_each_possible_cpu(cpu) { 2434 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 2435 spin_lock_init(&mf_cpu->lock); 2436 INIT_KFIFO(mf_cpu->fifo); 2437 INIT_WORK(&mf_cpu->work, memory_failure_work_func); 2438 } 2439 2440 register_sysctl_init("vm", memory_failure_table); 2441 2442 return 0; 2443 } 2444 core_initcall(memory_failure_init); 2445 2446 #undef pr_fmt 2447 #define pr_fmt(fmt) "" fmt 2448 #define unpoison_pr_info(fmt, pfn, rs) \ 2449 ({ \ 2450 if (__ratelimit(rs)) \ 2451 pr_info(fmt, pfn); \ 2452 }) 2453 2454 /** 2455 * unpoison_memory - Unpoison a previously poisoned page 2456 * @pfn: Page number of the to be unpoisoned page 2457 * 2458 * Software-unpoison a page that has been poisoned by 2459 * memory_failure() earlier. 2460 * 2461 * This is only done on the software-level, so it only works 2462 * for linux injected failures, not real hardware failures 2463 * 2464 * Returns 0 for success, otherwise -errno. 2465 */ 2466 int unpoison_memory(unsigned long pfn) 2467 { 2468 struct folio *folio; 2469 struct page *p; 2470 int ret = -EBUSY, ghp; 2471 unsigned long count = 1; 2472 bool huge = false; 2473 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL, 2474 DEFAULT_RATELIMIT_BURST); 2475 2476 if (!pfn_valid(pfn)) 2477 return -ENXIO; 2478 2479 p = pfn_to_page(pfn); 2480 folio = page_folio(p); 2481 2482 mutex_lock(&mf_mutex); 2483 2484 if (hw_memory_failure) { 2485 unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n", 2486 pfn, &unpoison_rs); 2487 ret = -EOPNOTSUPP; 2488 goto unlock_mutex; 2489 } 2490 2491 if (!PageHWPoison(p)) { 2492 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n", 2493 pfn, &unpoison_rs); 2494 goto unlock_mutex; 2495 } 2496 2497 if (folio_ref_count(folio) > 1) { 2498 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n", 2499 pfn, &unpoison_rs); 2500 goto unlock_mutex; 2501 } 2502 2503 if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio)) 2504 goto unlock_mutex; 2505 2506 /* 2507 * Note that folio->_mapcount is overloaded in SLAB, so the simple test 2508 * in folio_mapped() has to be done after folio_test_slab() is checked. 2509 */ 2510 if (folio_mapped(folio)) { 2511 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n", 2512 pfn, &unpoison_rs); 2513 goto unlock_mutex; 2514 } 2515 2516 if (folio_mapping(folio)) { 2517 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n", 2518 pfn, &unpoison_rs); 2519 goto unlock_mutex; 2520 } 2521 2522 ghp = get_hwpoison_page(p, MF_UNPOISON); 2523 if (!ghp) { 2524 if (PageHuge(p)) { 2525 huge = true; 2526 count = folio_free_raw_hwp(folio, false); 2527 if (count == 0) 2528 goto unlock_mutex; 2529 } 2530 ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY; 2531 } else if (ghp < 0) { 2532 if (ghp == -EHWPOISON) { 2533 ret = put_page_back_buddy(p) ? 

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the page to be unpoisoned
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct folio *folio;
	struct page *p;
	int ret = -EBUSY, ghp;
	unsigned long count = 1;
	bool huge = false;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	folio = page_folio(p);

	mutex_lock(&mf_mutex);

	if (hw_memory_failure) {
		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_ref_count(folio) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio))
		goto unlock_mutex;

	/*
	 * Note that folio->_mapcount is overloaded in SLAB, so the simple
	 * test in folio_mapped() has to be done after folio_test_slab()
	 * is checked.
	 */
	if (folio_mapped(folio)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_mapping(folio)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	ghp = get_hwpoison_page(p, MF_UNPOISON);
	if (!ghp) {
		if (PageHuge(p)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0)
				goto unlock_mutex;
		}
		ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
	} else if (ghp < 0) {
		if (ghp == -EHWPOISON) {
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
		} else {
			ret = ghp;
			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
					 pfn, &unpoison_rs);
		}
	} else {
		if (PageHuge(p)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0) {
				folio_put(folio);
				goto unlock_mutex;
			}
		}

		folio_put(folio);
		if (TestClearPageHWPoison(p)) {
			folio_put(folio);
			ret = 0;
		}
	}

unlock_mutex:
	mutex_unlock(&mf_mutex);
	if (!ret) {
		if (!huge)
			num_poisoned_pages_sub(pfn, 1);
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 page_to_pfn(p), &unpoison_rs);
	}
	return ret;
}
EXPORT_SYMBOL(unpoison_memory);
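
/*
 * Illustrative note: unpoison_memory() is typically driven from the
 * hwpoison-inject debugfs interface (writing a pfn to
 * /sys/kernel/debug/hwpoison/unpoison-pfn) to undo software-injected
 * poisoning in tests; it intentionally refuses to run once a real
 * hardware error has been seen (hw_memory_failure above).
 */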
2642 */ 2643 ret = invalidate_inode_page(page); 2644 unlock_page(page); 2645 2646 if (ret) { 2647 pr_info("soft_offline: %#lx: invalidated\n", pfn); 2648 page_handle_poison(page, false, true); 2649 return 0; 2650 } 2651 2652 if (isolate_page(hpage, &pagelist)) { 2653 ret = migrate_pages(&pagelist, alloc_migration_target, NULL, 2654 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL); 2655 if (!ret) { 2656 bool release = !huge; 2657 2658 if (!page_handle_poison(page, huge, release)) 2659 ret = -EBUSY; 2660 } else { 2661 if (!list_empty(&pagelist)) 2662 putback_movable_pages(&pagelist); 2663 2664 pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n", 2665 pfn, msg_page[huge], ret, &page->flags); 2666 if (ret > 0) 2667 ret = -EBUSY; 2668 } 2669 } else { 2670 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n", 2671 pfn, msg_page[huge], page_count(page), &page->flags); 2672 ret = -EBUSY; 2673 } 2674 return ret; 2675 } 2676 2677 /** 2678 * soft_offline_page - Soft offline a page. 2679 * @pfn: pfn to soft-offline 2680 * @flags: flags. Same as memory_failure(). 2681 * 2682 * Returns 0 on success 2683 * -EOPNOTSUPP for hwpoison_filter() filtered the error event 2684 * < 0 otherwise negated errno. 2685 * 2686 * Soft offline a page, by migration or invalidation, 2687 * without killing anything. This is for the case when 2688 * a page is not corrupted yet (so it's still valid to access), 2689 * but has had a number of corrected errors and is better taken 2690 * out. 2691 * 2692 * The actual policy on when to do that is maintained by 2693 * user space. 2694 * 2695 * This should never impact any application or cause data loss, 2696 * however it might take some time. 2697 * 2698 * This is not a 100% solution for all memory, but tries to be 2699 * ``good enough'' for the majority of memory. 2700 */ 2701 int soft_offline_page(unsigned long pfn, int flags) 2702 { 2703 int ret; 2704 bool try_again = true; 2705 struct page *page; 2706 2707 if (!pfn_valid(pfn)) { 2708 WARN_ON_ONCE(flags & MF_COUNT_INCREASED); 2709 return -ENXIO; 2710 } 2711 2712 /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */ 2713 page = pfn_to_online_page(pfn); 2714 if (!page) { 2715 put_ref_page(pfn, flags); 2716 return -EIO; 2717 } 2718 2719 mutex_lock(&mf_mutex); 2720 2721 if (PageHWPoison(page)) { 2722 pr_info("%s: %#lx page already poisoned\n", __func__, pfn); 2723 put_ref_page(pfn, flags); 2724 mutex_unlock(&mf_mutex); 2725 return 0; 2726 } 2727 2728 retry: 2729 get_online_mems(); 2730 ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE); 2731 put_online_mems(); 2732 2733 if (hwpoison_filter(page)) { 2734 if (ret > 0) 2735 put_page(page); 2736 2737 mutex_unlock(&mf_mutex); 2738 return -EOPNOTSUPP; 2739 } 2740 2741 if (ret > 0) { 2742 ret = soft_offline_in_use_page(page); 2743 } else if (ret == 0) { 2744 if (!page_handle_poison(page, true, false)) { 2745 if (try_again) { 2746 try_again = false; 2747 flags &= ~MF_COUNT_INCREASED; 2748 goto retry; 2749 } 2750 ret = -EBUSY; 2751 } 2752 } 2753 2754 mutex_unlock(&mf_mutex); 2755 2756 return ret; 2757 } 2758