/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a 2-bit ECC memory or cache
 * failure.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere, possibly violating some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * The operation to map back from RMAP chains to processes has to walk
 * the complete process list and has non-linear complexity in the number
 * of mappings. In short it can be quite slow. But since memory corruptions
 * are rare we hope to get away with this.
 */

/*
 * Notebook:
 * - hugetlb needs more code
 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
 * - pass bad pages to kdump next kernel
 */
#define DEBUG 1		/* remove me in 2.6.34 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include "internal.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;
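
/*
 * Note (informative, not from the original file): both knobs are exposed
 * under /proc/sys/vm/ as memory_failure_early_kill and
 * memory_failure_recovery, so the policy can be changed at run time, e.g.:
 *
 *	sysctl -w vm.memory_failure_early_kill=1
 */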

atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef	CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	struct mem_cgroup *mem;
	struct cgroup_subsys_state *css;
	unsigned long ino;

	if (!hwpoison_filter_memcg)
		return 0;

	mem = try_get_mem_cgroup_from_page(p);
	if (!mem)
		return -EINVAL;

	css = mem_cgroup_css(mem);
	/* root_mem_cgroup has NULL dentries */
	if (!css->cgroup->dentry) {
		css_put(css);	/* drop the reference taken above */
		return -EINVAL;
	}

	ino = css->cgroup->dentry->d_inode->i_ino;
	css_put(css);

	if (ino != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);
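
/*
 * Illustrative sketch (an assumption, modeled loosely on
 * mm/hwpoison-inject.c, not code from this file): an error injector is
 * expected to consult hwpoison_filter() before poisoning a page, so that
 * stress tests only hit the pages they configured the filters for.
 */
#if 0	/* example only, not built */
static int hwpoison_inject_pfn(unsigned long pfn)
{
	struct page *p;

	if (!pfn_valid(pfn))
		return -ENXIO;
	p = pfn_to_page(pfn);
	/* Skip pages the configured device/flags/memcg filters reject. */
	if (hwpoison_filter(p))
		return 0;
	return __memory_failure(pfn, 18, 0);	/* 18 == x86 MCE vector */
}
#endif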

/*
 * Send an ``action optional'' signal to all processes that have the
 * page mapped.
 */
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn)
{
	struct siginfo si;
	int ret;

	printk(KERN_ERR
		"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_MCEERR_AO;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = PAGE_SHIFT;
	/*
	 * Don't use force here, it's convenient if the signal
	 * can be temporarily blocked.
	 * This could cause a loop when the user sets SIGBUS
	 * to SIG_IGN, but hopefully no one will do that?
	 */
	ret = send_sig_info(SIGBUS, &si, t);  /* synchronous? */
	if (ret < 0)
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
	return ret;
}
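
/*
 * Illustrative sketch (not part of this file): how a userspace process
 * can observe the BUS_MCEERR_AO signal sent above.  It installs a SIGBUS
 * handler with SA_SIGINFO; si_addr then holds the poisoned address and
 * si_addr_lsb its granularity (PAGE_SHIFT here).
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <signal.h>
#include <unistd.h>

static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
	if (si->si_code == BUS_MCEERR_AO) {
		/* si->si_addr is the start of the poisoned region;
		 * only async-signal-safe calls belong here. */
		static const char msg[] = "memory error signalled\n";
		write(2, msg, sizeof(msg) - 1);
	}
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigbus_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGBUS, &sa, NULL);
	pause();	/* wait for a machine check event */
	return 0;
}
#endif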

/*
 * When an unknown page type is encountered, drain as many buffers as
 * possible in the hope of turning the page into an LRU or free page,
 * which we can handle.
 */
void shake_page(struct page *p, int access)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call shrink_slab here (which would also
	 * shrink other caches) if access is not potentially fatal.
	 */
	if (access) {
		int nr;
		do {
			nr = shrink_slab(1000, GFP_KERNEL, 1000);
			if (page_count(p) == 0)
				break;
		} while (nr > 10);
	}
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handle it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	unsigned addr_valid:1;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and otherwise ignore it.
 */

/*
 * Schedule a process for a later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
		       struct vm_area_struct *vma,
		       struct list_head *to_kill,
		       struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			printk(KERN_ERR
		"MCE: Out of memory while machine check handling\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could also be a mremap. Since that's
	 * likely very rare, kill anyway just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	 */
	if (tk->addr == -EFAULT) {
		pr_debug("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
			  int fail, unsigned long pfn)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (doit) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				printk(KERN_ERR
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyway.
			 */
			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
					      pfn) < 0)
				printk(KERN_ERR
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

static int task_early_kill(struct task_struct *tsk)
{
	if (!tsk->mm)
		return 0;
	if (tsk->flags & PF_MCE_PROCESS)
		return !!(tsk->flags & PF_MCE_EARLY);
	return sysctl_memory_failure_early_kill;
}
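
/*
 * Informative note (not from the original file): PF_MCE_PROCESS and
 * PF_MCE_EARLY are set per task with prctl(PR_MCE_KILL).  A process that
 * wants early kill regardless of the global sysctl can opt in like this:
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <sys/prctl.h>

static void opt_in_to_early_kill(void)
{
	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#endif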

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;

	read_lock(&tasklist_lock);
	av = page_lock_anon_vma(page);
	if (av == NULL)	/* Not actually mapped anymore */
		goto out;
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;

		if (!task_early_kill(tsk))
			continue;
		list_for_each_entry(vmac, &av->head, same_anon_vma) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	page_unlock_anon_vma(av);
out:
	read_unlock(&tasklist_lock);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct prio_tree_iter iter;
	struct address_space *mapping = page->mapping;

	/*
	 * A note on the locking order between the two locks.
	 * We don't rely on this particular order.
	 * If you have some other code that needs a different order
	 * feel free to switch them around. Or add a reverse link
	 * from mm_struct to task_struct, then this could be all
	 * done without taking tasklist_lock and looping over all tasks.
	 */

	read_lock(&tasklist_lock);
	spin_lock(&mapping->i_mmap_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

		if (!task_early_kill(tsk))
			continue;

		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send an early kill signal to tasks where a vma
			 * covers the page, even if the corrupted page is
			 * not necessarily mapped in their ptes.
			 * Assume applications that requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	read_unlock(&tasklist_lock);
}

/*
 * Collect the processes that have the corrupted page mapped, to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk);
	else
		collect_procs_file(page, tokill, &tk);
	kfree(tk);
}

/*
 * Error handlers for various types of pages.
 */

enum outcome {
	IGNORED,	/* Error: cannot be handled */
	FAILED,		/* Error: handling failed */
	DELAYED,	/* Will be handled later */
	RECOVERED,	/* Successfully recovered */
};

static const char *action_name[] = {
	[IGNORED] = "Ignored",
	[FAILED] = "Failed",
	[DELAYED] = "Delayed",
	[RECOVERED] = "Recovered",
};

/*
 * XXX: It is possible that a page is isolated from the LRU cache,
 * and then kept in the swap cache, or fails to be removed from the
 * page cache. The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoisoned and freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);
		/*
		 * Drop the page count elevated by isolate_lru_page().
		 */
		page_cache_release(p);
		return 0;
	}
	return -EIO;
}

/*
 * Error hit a kernel page.
 * Do nothing; try to be lucky and just not touch it. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	return IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
	return FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one memory_failure() holds.
	 */
	if (PageAnon(p))
		return RECOVERED;

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meantime.
		 */
		return FAILED;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
					pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_debug("MCE %#lx: failed to release buffers\n", pfn);
		} else {
			ret = RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it, just invalidate.
		 * This fails on dirty pages or anything with private buffers.
		 */
		if (invalidate_inode_page(p))
			ret = RECOVERED;
		else
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
				pfn);
	}
	return ret;
}

/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO errors will be reported by write(), fsync(), etc.,
		 * which check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors:
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped.  If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache pages are tricky to handle. The page could live both in
 * the page cache and the swap cache (i.e. the page was freshly swapped in).
 * So it could be referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear the dirty bit to prevent IO
 *      - remove from the LRU
 *      - but keep it in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return DELAYED;
	else
		return FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return RECOVERED;
	else
		return FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * No rmap support, so we cannot find the original mapper. In theory we could
 * walk all MMs and look for the mappings, but that would be non-atomic and
 * racy. We need rmap for hugepages for this. Alternatively we could employ
 * a heuristic, like just walking the current process and hoping it has the
 * page mapped (that should usually be true for the common "shared database
 * cache" case).
 * Should handle free huge pages and dequeue them too, but this needs to
 * handle huge page accounting correctly.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	return FAILED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access a page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define tail		(1UL << PG_tail)
#define compound	(1UL << PG_compound)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	char *msg;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	"reserved kernel",	me_kernel },
	/*
	 * Free pages are specially detected outside this table:
	 * PG_buddy pages only make up a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if the slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		"kernel slab",	me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{ head,		head,		"huge",		me_huge_page },
	{ tail,		tail,		"huge",		me_huge_page },
#else
	{ compound,	compound,	"huge",		me_huge_page },
#endif

	{ sc|dirty,	sc|dirty,	"swapcache",	me_swapcache_dirty },
	{ sc|dirty,	sc,		"swapcache",	me_swapcache_clean },

	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty},
	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean},

	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		"unknown page state",	me_unknown },
};
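
/*
 * Worked example (informative, not from the original file): a dirty
 * page-cache page on the LRU has PG_lru and PG_dirty set, so
 * (p->flags & (lru|dirty)) == (lru|dirty) and the
 * { lru|dirty, lru|dirty, ... } entry selects me_pagecache_dirty().
 * A clean LRU page fails that test but matches the following
 * { lru|dirty, lru, ... } entry, because masking with lru|dirty leaves
 * only PG_lru set.  Anything that falls through matches the all-zero
 * catchall and is reported as an unknown page state.
 */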

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved

static void action_result(unsigned long pfn, char *msg, int result)
{
	struct page *page = pfn_to_page(pfn);

	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
		pfn,
		PageDirty(page) ? "dirty " : "",
		msg, action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	result = ps->action(p, pfn);
	action_result(pfn, ps->msg, result);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == DELAYED)
		count--;
	if (count != 0) {
		printk(KERN_ERR
		       "MCE %#lx: %s page still referenced by %d users\n",
		       pfn, ps->msg, count);
		result = FAILED;
	}

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}

#define N_UNMAP_TRIES 5

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int i;
	int kill = 1;

	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(p))
		return SWAP_SUCCESS;

	if (PageCompound(p) || PageKsm(p))
		return SWAP_FAIL;

	if (PageSwapCache(p)) {
		printk(KERN_ERR
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside the page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(p);
	if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(p)) {
			SetPageDirty(p);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			printk(KERN_INFO
	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form.  This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(p, &tokill);

	/*
	 * try_to_unmap can fail temporarily due to races.
	 * Try a few times (RED-PEN better strategy?).
	 */
	for (i = 0; i < N_UNMAP_TRIES; i++) {
		ret = try_to_unmap(p, ttu);
		if (ret == SWAP_SUCCESS)
			break;
		pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn,  ret);
	}

	if (ret != SWAP_SUCCESS)
		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
				pfn, page_mapcount(p));

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done, we can decide if
	 * killing is needed or not.  Only kill when the page
	 * was dirty, otherwise the tokill list is merely
	 * freed.  When there was a problem unmapping earlier,
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	kill_procs_ao(&tokill, !!PageDirty(p), trapno,
		      ret != SWAP_SUCCESS, pfn);

	return ret;
}

int __memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	int res;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		printk(KERN_ERR
		       "MCE %#lx: memory outside kernel control\n",
		       pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	if (TestSetPageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
		return 0;
	}

	atomic_long_add(1, &mce_bad_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up the page count from 0,
	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED) &&
		!get_page_unless_zero(compound_head(p))) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy", DELAYED);
			return 0;
		} else {
			action_result(pfn, "high order kernel", IGNORED);
			return -EBUSY;
		}
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __set_page_locked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	if (!PageLRU(p))
		shake_page(p, 0);
	if (!PageLRU(p)) {
		/*
		 * shake_page could have turned it free.
		 */
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy, 2nd try", DELAYED);
			return 0;
		}
		action_result(pfn, "non LRU", IGNORED);
		put_page(p);
		return -EBUSY;
	}

	/*
	 * Lock the page and wait for writeback to finish.
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	lock_page_nosync(p);

	/*
	 * unpoison always clears PG_hwpoison inside the page lock
	 */
	if (!PageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
		res = 0;
		goto out;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&mce_bad_pages);
		unlock_page(p);
		put_page(p);
		return 0;
	}

	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __remove_from_page_cache() assumes an unmapped page.
	 */
	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
		res = -EBUSY;
		goto out;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, "already truncated LRU", IGNORED);
		res = -EBUSY;
		goto out;
	}

	res = -EBUSY;
	for (ps = error_states;; ps++) {
		if ((p->flags & ps->mask) == ps->res) {
			res = page_action(ps, p, pfn);
			break;
		}
	}
out:
	unlock_page(p);
	return res;
}
EXPORT_SYMBOL_GPL(__memory_failure);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
void memory_failure(unsigned long pfn, int trapno)
{
	__memory_failure(pfn, trapno, 0);
}
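
/*
 * Illustrative sketch (an assumption, modeled loosely on the x86 MCE
 * code; mce_ring_get()/mce_ring_add() are hypothetical names here): the
 * #MC handler itself cannot call memory_failure() because it runs in an
 * atomic exception context, so it records the bad pfn and defers the
 * actual handling to process context:
 */
#if 0	/* example only, not built */
static void mce_process_work(struct work_struct *dummy)
{
	unsigned long pfn;

	while (mce_ring_get(&pfn))		/* drain recorded bad pfns */
		memory_failure(pfn, 18);	/* 18 == x86 MCE vector */
}
static DECLARE_WORK(mce_work, mce_process_work);

/* from the machine check handler:
 *	mce_ring_add(pfn);
 *	...later, once back in a safe context: schedule_work(&mce_work);
 */
#endif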

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the page to be unpoisoned
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
		return 0;
	}

	if (!get_page_unless_zero(page)) {
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&mce_bad_pages);
		pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
		return 0;
	}

	lock_page_nosync(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of the page
	 * lock. That's acceptable because it won't trigger a kernel panic.
	 * Instead, the PG_hwpoison page will be caught and isolated on the
	 * entrance to the free buddy page pool.
	 */
	if (TestClearPageHWPoison(p)) {
		pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
		atomic_long_dec(&mce_bad_pages);
		freeit = 1;
	}
	unlock_page(page);

	put_page(page);
	if (freeit)
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);
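
/*
 * Example use (informative): with the hwpoison-inject module loaded,
 * poisoning and unpoisoning can be driven from debugfs, e.g.:
 *
 *	echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 *	echo $pfn > /sys/kernel/debug/hwpoison/unpoison-pfn
 *
 * (those files are provided by mm/hwpoison-inject.c, not this file)
 */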

static struct page *new_page(struct page *p, unsigned long private, int **x)
{
	int nid = page_to_nid(p);
	return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Safely get the reference count of an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with an increased page count, otherwise not.
 */
static int get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * The lock_system_sleep prevents a race with memory hotplug,
	 * because the isolation assumes there's only a single user.
	 * This is a big hammer; a better solution would be nicer.
	 */
	lock_system_sleep();

	/*
	 * Isolate the page, so that it doesn't get reallocated if it
	 * was free.
	 */
	set_migratetype_isolate(p);
	if (!get_page_unless_zero(compound_head(p))) {
		if (is_free_buddy_page(p)) {
			pr_debug("get_any_page: %#lx free buddy page\n", pfn);
			/* Set hwpoison bit while page is still isolated */
			SetPageHWPoison(p);
			ret = 0;
		} else {
			pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
				pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* Not a free page */
		ret = 1;
	}
	unset_migratetype_isolate(p);
	unlock_system_sleep();
	return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
		return ret;
	if (ret == 0)
		goto done;

	/*
	 * Page cache page we can handle?
	 */
	if (!PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = get_any_page(page, pfn, 0);
		if (ret < 0)
			return ret;
		if (ret == 0)
			goto done;
	}
	if (!PageLRU(page)) {
		pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
				pfn, page->flags);
		return -EIO;
	}

	lock_page(page);
	wait_on_page_writeback(page);

	/*
	 * Synchronized using the page lock with memory_failure()
	 */
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_debug("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}

	/*
	 * Try to invalidate first. This should work for
	 * non-dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);

	/*
	 * Drop the count because page migration doesn't like raised
	 * counts. The page could get re-allocated, but if it becomes
	 * LRU the isolation will just fail.
	 * RED-PEN would be better to keep it isolated here, but we
	 * would need to fix the isolation locking first.
	 */
	put_page(page);
	if (ret == 1) {
		ret = 0;
		pr_debug("soft_offline: %#lx: invalidated\n", pfn);
		goto done;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);

		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
		if (ret) {
			pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
				pfn, ret, page_count(page), page->flags);
	}
	if (ret)
		return ret;

done:
	atomic_long_add(1, &mce_bad_pages);
	SetPageHWPoison(page);
	/* keep elevated page count for bad page */
	return ret;
}
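
/*
 * Illustrative sketch (an assumption, modeled on the sysfs trigger in
 * drivers/base/memory.c; the exact upstream code may differ): user-space
 * policy typically reaches soft_offline_page() by writing a physical
 * address to /sys/devices/system/memory/soft_offline_page:
 */
#if 0	/* example only, not built */
static ssize_t store_soft_offline_page(struct class *class,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (strict_strtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;		/* the file takes a physical address */
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}
#endif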