xref: /openbmc/linux/mm/memory-failure.c (revision 9d56dd3b083a3bec56e9da35ce07baca81030b03)
1 /*
2  * Copyright (C) 2008, 2009 Intel Corporation
3  * Authors: Andi Kleen, Fengguang Wu
4  *
5  * This software may be redistributed and/or modified under the terms of
6  * the GNU General Public License ("GPL") version 2 only as published by the
7  * Free Software Foundation.
8  *
9  * High level machine check handler. Handles pages reported by the
10  * hardware as being corrupted, usually due to a 2-bit ECC memory or cache
11  * failure.
12  *
13  * Handles page cache pages in various states.	The tricky part
14  * here is that we can access any page asynchronously to other VM
15  * users, because memory failures could happen anytime and anywhere,
16  * possibly violating some of their assumptions. This is why this code
17  * has to be extremely careful. Generally it tries to use normal locking
18  * rules, i.e. it takes the standard locks, even if that means the
19  * error handling potentially takes a long time.
20  *
21  * The operation to map back from RMAP chains to processes has to walk
22  * the complete process list and has non-linear complexity in the number of
23  * mappings. In short, it can be quite slow. But since memory corruptions
24  * are rare we hope to get away with this.
25  */
26 
27 /*
28  * Notebook:
29  * - hugetlb needs more code
30  * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
31  * - pass bad pages to kdump next kernel
32  */
33 #define DEBUG 1		/* remove me in 2.6.34 */
34 #include <linux/kernel.h>
35 #include <linux/mm.h>
36 #include <linux/page-flags.h>
37 #include <linux/kernel-page-flags.h>
38 #include <linux/sched.h>
39 #include <linux/ksm.h>
40 #include <linux/rmap.h>
41 #include <linux/pagemap.h>
42 #include <linux/swap.h>
43 #include <linux/backing-dev.h>
44 #include <linux/migrate.h>
45 #include <linux/page-isolation.h>
46 #include <linux/suspend.h>
47 #include "internal.h"
48 
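/*
 * Policy knobs. These are exposed through /proc/sys/vm/ as
 * memory_failure_early_kill and memory_failure_recovery; a test setup would
 * typically enable them with something like (a sketch, assuming procfs is
 * mounted in the usual place):
 *
 *	echo 1 > /proc/sys/vm/memory_failure_early_kill
 *	echo 1 > /proc/sys/vm/memory_failure_recovery
 *
 * early_kill selects whether mapping processes are signalled as soon as a
 * corruption is found (SIGBUS with BUS_MCEERR_AO) instead of only when they
 * actually touch the bad page; recovery=0 turns every failure into a panic.
 */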
49 int sysctl_memory_failure_early_kill __read_mostly = 0;
50 
51 int sysctl_memory_failure_recovery __read_mostly = 1;
52 
53 atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
54 
55 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
56 
57 u32 hwpoison_filter_enable = 0;
58 u32 hwpoison_filter_dev_major = ~0U;
59 u32 hwpoison_filter_dev_minor = ~0U;
60 u64 hwpoison_filter_flags_mask;
61 u64 hwpoison_filter_flags_value;
62 EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
63 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
64 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
65 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
66 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
67 
68 static int hwpoison_filter_dev(struct page *p)
69 {
70 	struct address_space *mapping;
71 	dev_t dev;
72 
73 	if (hwpoison_filter_dev_major == ~0U &&
74 	    hwpoison_filter_dev_minor == ~0U)
75 		return 0;
76 
77 	/*
78 	 * page_mapping() does not accept slab pages
79 	 */
80 	if (PageSlab(p))
81 		return -EINVAL;
82 
83 	mapping = page_mapping(p);
84 	if (mapping == NULL || mapping->host == NULL)
85 		return -EINVAL;
86 
87 	dev = mapping->host->i_sb->s_dev;
88 	if (hwpoison_filter_dev_major != ~0U &&
89 	    hwpoison_filter_dev_major != MAJOR(dev))
90 		return -EINVAL;
91 	if (hwpoison_filter_dev_minor != ~0U &&
92 	    hwpoison_filter_dev_minor != MINOR(dev))
93 		return -EINVAL;
94 
95 	return 0;
96 }
97 
98 static int hwpoison_filter_flags(struct page *p)
99 {
100 	if (!hwpoison_filter_flags_mask)
101 		return 0;
102 
103 	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
104 				    hwpoison_filter_flags_value)
105 		return 0;
106 	else
107 		return -EINVAL;
108 }
109 
110 /*
111  * This allows stress tests to limit test scope to a collection of tasks
112  * by putting them under some memcg. This prevents killing unrelated/important
113  * processes such as /sbin/init. Note that the target task may share clean
114  * pages with init (e.g. libc text), which is harmless. If the target task
115  * shares _dirty_ pages with another task B, the test scheme must make sure B
116  * is also included in the memcg. Finally, due to race conditions this filter
117  * can only guarantee that the page either belongs to the memcg tasks, or is
118  * a freed page.
119  */
120 #ifdef	CONFIG_CGROUP_MEM_RES_CTLR_SWAP
121 u64 hwpoison_filter_memcg;
122 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
123 static int hwpoison_filter_task(struct page *p)
124 {
125 	struct mem_cgroup *mem;
126 	struct cgroup_subsys_state *css;
127 	unsigned long ino;
128 
129 	if (!hwpoison_filter_memcg)
130 		return 0;
131 
132 	mem = try_get_mem_cgroup_from_page(p);
133 	if (!mem)
134 		return -EINVAL;
135 
136 	css = mem_cgroup_css(mem);
137 	/* root_mem_cgroup has NULL dentries */
138 	if (!css->cgroup->dentry)
139 		return -EINVAL;
140 
141 	ino = css->cgroup->dentry->d_inode->i_ino;
142 	css_put(css);
143 
144 	if (ino != hwpoison_filter_memcg)
145 		return -EINVAL;
146 
147 	return 0;
148 }
149 #else
150 static int hwpoison_filter_task(struct page *p) { return 0; }
151 #endif
152 
153 int hwpoison_filter(struct page *p)
154 {
155 	if (!hwpoison_filter_enable)
156 		return 0;
157 
158 	if (hwpoison_filter_dev(p))
159 		return -EINVAL;
160 
161 	if (hwpoison_filter_flags(p))
162 		return -EINVAL;
163 
164 	if (hwpoison_filter_task(p))
165 		return -EINVAL;
166 
167 	return 0;
168 }
169 #else
170 int hwpoison_filter(struct page *p)
171 {
172 	return 0;
173 }
174 #endif
175 
176 EXPORT_SYMBOL_GPL(hwpoison_filter);
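/*
 * The filter knobs above are normally driven from user space via the hwpoison
 * injector (mm/hwpoison-inject.c), which exposes them through debugfs,
 * conventionally under /sys/kernel/debug/hwpoison/. A stress run might, as a
 * rough sketch (exact file names depend on the injector module), restrict
 * injection to a single block device:
 *
 *	echo 1    > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 *	echo 8    > /sys/kernel/debug/hwpoison/corrupt-filter-dev-major
 *	echo 0    > /sys/kernel/debug/hwpoison/corrupt-filter-dev-minor
 *	echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 */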
177 
178 /*
179  * Send all the processes that have the page mapped an ``action optional''
180  * signal.
181  */
182 static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
183 			unsigned long pfn)
184 {
185 	struct siginfo si;
186 	int ret;
187 
188 	printk(KERN_ERR
189 		"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
190 		pfn, t->comm, t->pid);
191 	si.si_signo = SIGBUS;
192 	si.si_errno = 0;
193 	si.si_code = BUS_MCEERR_AO;
194 	si.si_addr = (void *)addr;
195 #ifdef __ARCH_SI_TRAPNO
196 	si.si_trapno = trapno;
197 #endif
198 	si.si_addr_lsb = PAGE_SHIFT;
199 	/*
200 	 * Don't use force here, it's convenient if the signal
201 	 * can be temporarily blocked.
202 	 * This could cause a loop when the user sets SIGBUS
203 	 * to SIG_IGN, but hopefully no one will do that?
204 	 */
205 	ret = send_sig_info(SIGBUS, &si, t);  /* synchronous? */
206 	if (ret < 0)
207 		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
208 		       t->comm, t->pid, ret);
209 	return ret;
210 }
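/*
 * On the receiving side, a process that wants to survive an ``action
 * optional'' machine check installs a SA_SIGINFO handler and inspects
 * si_code/si_addr. A minimal user-space sketch (the si_addr_lsb field may or
 * may not be exposed by a given libc):
 *
 *	static void mce_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO) {
 *			size_t len = (size_t)1 << si->si_addr_lsb;
 *			// the range [si->si_addr, si->si_addr + len) is gone;
 *			// drop or re-fetch that data before touching it again
 *		}
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = mce_handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGBUS, &sa, NULL);
 */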
211 
212 /*
213  * When an unknown page type is encountered, drain as many buffers as possible
214  * in the hope of turning the page into an LRU or free page, which we can handle.
215  */
216 void shake_page(struct page *p, int access)
217 {
218 	if (!PageSlab(p)) {
219 		lru_add_drain_all();
220 		if (PageLRU(p))
221 			return;
222 		drain_all_pages();
223 		if (PageLRU(p) || is_free_buddy_page(p))
224 			return;
225 	}
226 
227 	/*
228 	 * Only call shrink_slab here (which would also
229 	 * shrink other caches) if the access is not potentially fatal.
230 	 */
231 	if (access) {
232 		int nr;
233 		do {
234 			nr = shrink_slab(1000, GFP_KERNEL, 1000);
235 			if (page_count(p) == 0)
236 				break;
237 		} while (nr > 10);
238 	}
239 }
240 EXPORT_SYMBOL_GPL(shake_page);
241 
242 /*
243  * Kill all processes that have a poisoned page mapped and then isolate
244  * the page.
245  *
246  * General strategy:
247  * Find all processes having the page mapped and kill them.
248  * But we keep a page reference around so that the page is not
249  * actually freed yet.
250  * Then stash the page away
251  *
252  * There's no convenient way to get back to mapped processes
253  * from the VMAs. So do a brute-force search over all
254  * running processes.
255  *
256  * Remember that machine checks are not common (or rather
257  * if they are common you have other problems), so this shouldn't
258  * be a performance issue.
259  *
260  * Also there are some races possible between detecting the
261  * error and actually handling it.
262  */
263 
264 struct to_kill {
265 	struct list_head nd;
266 	struct task_struct *tsk;
267 	unsigned long addr;
268 	unsigned addr_valid:1;
269 };
270 
271 /*
272  * Failure handling: if we can't find or can't kill a process there's
273  * not much we can do. We just print a message and otherwise ignore it.
274  */
275 
276 /*
277  * Schedule a process for later kill.
278  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
279  * TBD would GFP_NOIO be enough?
280  */
281 static void add_to_kill(struct task_struct *tsk, struct page *p,
282 		       struct vm_area_struct *vma,
283 		       struct list_head *to_kill,
284 		       struct to_kill **tkc)
285 {
286 	struct to_kill *tk;
287 
288 	if (*tkc) {
289 		tk = *tkc;
290 		*tkc = NULL;
291 	} else {
292 		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
293 		if (!tk) {
294 			printk(KERN_ERR
295 		"MCE: Out of memory while handling machine check\n");
296 			return;
297 		}
298 	}
299 	tk->addr = page_address_in_vma(p, vma);
300 	tk->addr_valid = 1;
301 
302 	/*
303 	 * In theory we don't have to kill when the page was
304 	 * munmapped. But it could also be a mremap. Since that's
305 	 * likely very rare, kill anyway just out of paranoia, and use
306 	 * SIGKILL because the error is not contained anymore.
307 	 */
308 	if (tk->addr == -EFAULT) {
309 		pr_debug("MCE: Unable to find user space address %lx in %s\n",
310 			page_to_pfn(p), tsk->comm);
311 		tk->addr_valid = 0;
312 	}
313 	get_task_struct(tsk);
314 	tk->tsk = tsk;
315 	list_add_tail(&tk->nd, to_kill);
316 }
317 
318 /*
319  * Kill the processes that have been collected earlier.
320  *
321  * Only do anything when DOIT is set; otherwise just free the list
322  * (this is used for clean pages which do not need killing).
323  * Also, when FAIL is set, do a forced kill because something went
324  * wrong earlier.
325  */
326 static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
327 			  int fail, unsigned long pfn)
328 {
329 	struct to_kill *tk, *next;
330 
331 	list_for_each_entry_safe (tk, next, to_kill, nd) {
332 		if (doit) {
333 			/*
334 			 * In case something went wrong with munmapping
335 			 * make sure the process doesn't catch the
336 			 * signal and then access the memory. Just kill it.
337 			 */
338 			if (fail || tk->addr_valid == 0) {
339 				printk(KERN_ERR
340 		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
341 					pfn, tk->tsk->comm, tk->tsk->pid);
342 				force_sig(SIGKILL, tk->tsk);
343 			}
344 
345 			/*
346 			 * In theory the process could have mapped
347 			 * something else on the address in-between. We could
348 			 * check for that, but we need to tell the
349 			 * process anyways.
350 			 */
351 			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
352 					      pfn) < 0)
353 				printk(KERN_ERR
354 		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
355 					pfn, tk->tsk->comm, tk->tsk->pid);
356 		}
357 		put_task_struct(tk->tsk);
358 		kfree(tk);
359 	}
360 }
361 
362 static int task_early_kill(struct task_struct *tsk)
363 {
364 	if (!tsk->mm)
365 		return 0;
366 	if (tsk->flags & PF_MCE_PROCESS)
367 		return !!(tsk->flags & PF_MCE_EARLY);
368 	return sysctl_memory_failure_early_kill;
369 }
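/*
 * PF_MCE_PROCESS and PF_MCE_EARLY are set per task through prctl(PR_MCE_KILL),
 * letting individual programs override the global sysctl. A user-space sketch
 * of opting in to early kill (constants come from <linux/prctl.h>):
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0) < 0)
 *		perror("PR_MCE_KILL");
 *
 * PR_MCE_KILL_LATE and PR_MCE_KILL_DEFAULT select late kill or fall back to
 * the sysctl policy, respectively.
 */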
370 
371 /*
372  * Collect processes when the error hit an anonymous page.
373  */
374 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
375 			      struct to_kill **tkc)
376 {
377 	struct vm_area_struct *vma;
378 	struct task_struct *tsk;
379 	struct anon_vma *av;
380 
381 	read_lock(&tasklist_lock);
382 	av = page_lock_anon_vma(page);
383 	if (av == NULL)	/* Not actually mapped anymore */
384 		goto out;
385 	for_each_process (tsk) {
386 		if (!task_early_kill(tsk))
387 			continue;
388 		list_for_each_entry (vma, &av->head, anon_vma_node) {
389 			if (!page_mapped_in_vma(page, vma))
390 				continue;
391 			if (vma->vm_mm == tsk->mm)
392 				add_to_kill(tsk, page, vma, to_kill, tkc);
393 		}
394 	}
395 	page_unlock_anon_vma(av);
396 out:
397 	read_unlock(&tasklist_lock);
398 }
399 
400 /*
401  * Collect processes when the error hit a file mapped page.
402  */
403 static void collect_procs_file(struct page *page, struct list_head *to_kill,
404 			      struct to_kill **tkc)
405 {
406 	struct vm_area_struct *vma;
407 	struct task_struct *tsk;
408 	struct prio_tree_iter iter;
409 	struct address_space *mapping = page->mapping;
410 
411 	/*
412 	 * A note on the locking order between the two locks.
413 	 * We don't rely on this particular order.
414 	 * If you have some other code that needs a different order
415 	 * feel free to switch them around. Or add a reverse link
416 	 * from mm_struct to task_struct, then this could be all
417 	 * done without taking tasklist_lock and looping over all tasks.
418 	 */
419 
420 	read_lock(&tasklist_lock);
421 	spin_lock(&mapping->i_mmap_lock);
422 	for_each_process(tsk) {
423 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
424 
425 		if (!task_early_kill(tsk))
426 			continue;
427 
428 		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
429 				      pgoff) {
430 			/*
431 			 * Send early kill signal to tasks where a vma covers
432 			 * the page but the corrupted page is not necessarily
433 			 * mapped in its pte.
434 			 * Assume applications that requested early kill want
435 			 * to be informed of all such data corruptions.
436 			 */
437 			if (vma->vm_mm == tsk->mm)
438 				add_to_kill(tsk, page, vma, to_kill, tkc);
439 		}
440 	}
441 	spin_unlock(&mapping->i_mmap_lock);
442 	read_unlock(&tasklist_lock);
443 }
444 
445 /*
446  * Collect the processes who have the corrupted page mapped to kill.
447  * This is done in two steps for locking reasons.
448  * First preallocate one tokill structure outside the spin locks,
449  * so that we can kill at least one process reasonably reliably.
450  */
451 static void collect_procs(struct page *page, struct list_head *tokill)
452 {
453 	struct to_kill *tk;
454 
455 	if (!page->mapping)
456 		return;
457 
458 	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
459 	if (!tk)
460 		return;
461 	if (PageAnon(page))
462 		collect_procs_anon(page, tokill, &tk);
463 	else
464 		collect_procs_file(page, tokill, &tk);
465 	kfree(tk);
466 }
467 
468 /*
469  * Error handlers for various types of pages.
470  */
471 
472 enum outcome {
473 	IGNORED,	/* Error: cannot be handled */
474 	FAILED,		/* Error: handling failed */
475 	DELAYED,	/* Will be handled later */
476 	RECOVERED,	/* Successfully recovered */
477 };
478 
479 static const char *action_name[] = {
480 	[IGNORED] = "Ignored",
481 	[FAILED] = "Failed",
482 	[DELAYED] = "Delayed",
483 	[RECOVERED] = "Recovered",
484 };
485 
486 /*
487  * XXX: It is possible that a page is isolated from LRU cache,
488  * and then kept in swap cache, or fails to be removed from page cache.
489  * The page count will stop it from being freed by unpoison.
490  * Stress tests should be aware of this memory leak problem.
491  */
492 static int delete_from_lru_cache(struct page *p)
493 {
494 	if (!isolate_lru_page(p)) {
495 		/*
496 		 * Clear sensible page flags, so that the buddy system won't
497 		 * complain when the page is unpoison-and-freed.
498 		 */
499 		ClearPageActive(p);
500 		ClearPageUnevictable(p);
501 		/*
502 		 * drop the page count elevated by isolate_lru_page()
503 		 */
504 		page_cache_release(p);
505 		return 0;
506 	}
507 	return -EIO;
508 }
509 
510 /*
511  * Error hit kernel page.
512  * Do nothing; try to be lucky and not touch it. For a few cases we
513  * could be more sophisticated.
514  */
515 static int me_kernel(struct page *p, unsigned long pfn)
516 {
517 	return IGNORED;
518 }
519 
520 /*
521  * Page in unknown state. Do nothing.
522  */
523 static int me_unknown(struct page *p, unsigned long pfn)
524 {
525 	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
526 	return FAILED;
527 }
528 
529 /*
530  * Clean (or cleaned) page cache page.
531  */
532 static int me_pagecache_clean(struct page *p, unsigned long pfn)
533 {
534 	int err;
535 	int ret = FAILED;
536 	struct address_space *mapping;
537 
538 	delete_from_lru_cache(p);
539 
540 	/*
541 	 * For anonymous pages we're done: the only reference left
542 	 * should be the one m_f() holds.
543 	 */
544 	if (PageAnon(p))
545 		return RECOVERED;
546 
547 	/*
548 	 * Now truncate the page in the page cache. This is really
549 	 * more like a "temporary hole punch".
550 	 * Don't do this for block devices when someone else
551 	 * has a reference, because it could be file system metadata
552 	 * and that's not safe to truncate.
553 	 */
554 	mapping = page_mapping(p);
555 	if (!mapping) {
556 		/*
557 		 * Page has been torn down in the meantime
558 		 */
559 		return FAILED;
560 	}
561 
562 	/*
563 	 * Truncation is a bit tricky. Enable it per file system for now.
564 	 *
565 	 * Open: to take i_mutex or not for this? Right now we don't.
566 	 */
567 	if (mapping->a_ops->error_remove_page) {
568 		err = mapping->a_ops->error_remove_page(mapping, p);
569 		if (err != 0) {
570 			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
571 					pfn, err);
572 		} else if (page_has_private(p) &&
573 				!try_to_release_page(p, GFP_NOIO)) {
574 			pr_debug("MCE %#lx: failed to release buffers\n", pfn);
575 		} else {
576 			ret = RECOVERED;
577 		}
578 	} else {
579 		/*
580 		 * If the file system doesn't support it, just invalidate.
581 		 * This fails on dirty pages or anything with private data.
582 		 */
583 		if (invalidate_inode_page(p))
584 			ret = RECOVERED;
585 		else
586 			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
587 				pfn);
588 	}
589 	return ret;
590 }
591 
592 /*
593  * Dirty page cache page.
594  * Issues: when the error hits a hole page the error is not properly
595  * propagated.
596  */
597 static int me_pagecache_dirty(struct page *p, unsigned long pfn)
598 {
599 	struct address_space *mapping = page_mapping(p);
600 
601 	SetPageError(p);
602 	/* TBD: print more information about the file. */
603 	if (mapping) {
604 		/*
605 		 * IO error will be reported by write(), fsync(), etc.
606 		 * that check the mapping.
607 		 * This way the application knows that something went
608 		 * wrong with its dirty file data.
609 		 *
610 		 * There's one open issue:
611 		 *
612 		 * The EIO will be only reported on the next IO
613 		 * operation and then cleared through the IO map.
614 		 * Normally Linux has two mechanisms to pass IO error
615 		 * first through the AS_EIO flag in the address space
616 		 * and then through the PageError flag in the page.
617 		 * Since we drop pages on memory failure handling the
618 		 * only mechanism open to use is through AS_EIO.
619 		 *
620 		 * This has the disadvantage that it gets cleared on
621 		 * the first operation that returns an error, while
622 		 * the PageError bit is more sticky and only cleared
623 		 * when the page is reread or dropped.  If an
624 		 * application assumes it will always get error on
625 		 * fsync, but does other operations on the fd before
626 		 * and the page is dropped in between, then the error
627 		 * will not be properly reported.
628 		 *
629 		 * This can already happen even without hwpoisoned
630 		 * pages: first on metadata IO errors (which only
631 		 * report through AS_EIO) or when the page is dropped
632 		 * at the wrong time.
633 		 *
634 		 * So right now we assume that the application DTRT on
635 		 * the first EIO, but we're not worse than other parts
636 		 * of the kernel.
637 		 */
638 		mapping_set_error(mapping, EIO);
639 	}
640 
641 	return me_pagecache_clean(p, pfn);
642 }
643 
644 /*
645  * Clean and dirty swap cache.
646  *
647  * Dirty swap cache page is tricky to handle. The page could live both in page
648  * cache and swap cache (i.e. the page was freshly swapped in). So it could be
649  * referenced concurrently by 2 types of PTEs:
650  * normal PTEs and swap PTEs. We try to handle them consistently by calling
651  * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
652  * and then
653  *      - clear dirty bit to prevent IO
654  *      - remove from LRU
655  *      - but keep in the swap cache, so that when we return to it on
656  *        a later page fault, we know the application is accessing
657  *        corrupted data and shall be killed (we installed simple
658  *        interception code in do_swap_page to catch it).
659  *
660  * Clean swap cache pages can be directly isolated. A later page fault will
661  * bring in the known good data from disk.
662  */
663 static int me_swapcache_dirty(struct page *p, unsigned long pfn)
664 {
665 	ClearPageDirty(p);
666 	/* Trigger EIO in shmem: */
667 	ClearPageUptodate(p);
668 
669 	if (!delete_from_lru_cache(p))
670 		return DELAYED;
671 	else
672 		return FAILED;
673 }
674 
675 static int me_swapcache_clean(struct page *p, unsigned long pfn)
676 {
677 	delete_from_swap_cache(p);
678 
679 	if (!delete_from_lru_cache(p))
680 		return RECOVERED;
681 	else
682 		return FAILED;
683 }
684 
685 /*
686  * Huge pages. Needs work.
687  * Issues:
688  * No rmap support, so we cannot find the original mapper. In theory we could
689  * walk all MMs and look for the mappings, but that would be non-atomic and racy.
690  * Need rmap for hugepages for this. Alternatively we could employ a heuristic,
691  * like just walking the current process and hoping it has it mapped (that
692  * should usually be true for the common "shared database cache" case).
693  * Should handle free huge pages and dequeue them too, but this needs to
694  * handle huge page accounting correctly.
695  */
696 static int me_huge_page(struct page *p, unsigned long pfn)
697 {
698 	return FAILED;
699 }
700 
701 /*
702  * Various page states we can handle.
703  *
704  * A page state is defined by its current page->flags bits.
705  * The table matches them in order and calls the right handler.
706  *
707  * This is quite tricky because we can access the page at any time
708  * in its life cycle, so all accesses have to be extremely careful.
709  *
710  * This is not complete. More states could be added.
711  * For any missing state don't attempt recovery.
712  */
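/*
 * For example, a dirty swap cache page has both PG_swapcache and PG_dirty set,
 * so it matches the "sc|dirty" entry before it could reach the plain "sc" one;
 * the ordering of the entries below is what disambiguates such overlaps.
 */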
713 
714 #define dirty		(1UL << PG_dirty)
715 #define sc		(1UL << PG_swapcache)
716 #define unevict		(1UL << PG_unevictable)
717 #define mlock		(1UL << PG_mlocked)
718 #define writeback	(1UL << PG_writeback)
719 #define lru		(1UL << PG_lru)
720 #define swapbacked	(1UL << PG_swapbacked)
721 #define head		(1UL << PG_head)
722 #define tail		(1UL << PG_tail)
723 #define compound	(1UL << PG_compound)
724 #define slab		(1UL << PG_slab)
725 #define reserved	(1UL << PG_reserved)
726 
727 static struct page_state {
728 	unsigned long mask;
729 	unsigned long res;
730 	char *msg;
731 	int (*action)(struct page *p, unsigned long pfn);
732 } error_states[] = {
733 	{ reserved,	reserved,	"reserved kernel",	me_kernel },
734 	/*
735 	 * free pages are specially detected outside this table:
736 	 * PG_buddy pages make up only a small fraction of all free pages.
737 	 */
738 
739 	/*
740 	 * Could in theory check if slab page is free or if we can drop
741 	 * currently unused objects without touching them. But just
742 	 * treat it as standard kernel for now.
743 	 */
744 	{ slab,		slab,		"kernel slab",	me_kernel },
745 
746 #ifdef CONFIG_PAGEFLAGS_EXTENDED
747 	{ head,		head,		"huge",		me_huge_page },
748 	{ tail,		tail,		"huge",		me_huge_page },
749 #else
750 	{ compound,	compound,	"huge",		me_huge_page },
751 #endif
752 
753 	{ sc|dirty,	sc|dirty,	"swapcache",	me_swapcache_dirty },
754 	{ sc|dirty,	sc,		"swapcache",	me_swapcache_clean },
755 
756 	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty},
757 	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean},
758 
759 	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
760 	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },
761 
762 	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
763 	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },
764 
765 	/*
766 	 * Catchall entry: must be at end.
767 	 */
768 	{ 0,		0,		"unknown page state",	me_unknown },
769 };
770 
771 #undef dirty
772 #undef sc
773 #undef unevict
774 #undef mlock
775 #undef writeback
776 #undef lru
777 #undef swapbacked
778 #undef head
779 #undef tail
780 #undef compound
781 #undef slab
782 #undef reserved
783 
784 static void action_result(unsigned long pfn, char *msg, int result)
785 {
786 	struct page *page = pfn_to_page(pfn);
787 
788 	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
789 		pfn,
790 		PageDirty(page) ? "dirty " : "",
791 		msg, action_name[result]);
792 }
793 
794 static int page_action(struct page_state *ps, struct page *p,
795 			unsigned long pfn)
796 {
797 	int result;
798 	int count;
799 
800 	result = ps->action(p, pfn);
801 	action_result(pfn, ps->msg, result);
802 
803 	count = page_count(p) - 1;
804 	if (ps->action == me_swapcache_dirty && result == DELAYED)
805 		count--;
806 	if (count != 0) {
807 		printk(KERN_ERR
808 		       "MCE %#lx: %s page still referenced by %d users\n",
809 		       pfn, ps->msg, count);
810 		result = FAILED;
811 	}
812 
813 	/* Could do more checks here if page looks ok */
814 	/*
815 	 * Could adjust zone counters here to correct for the missing page.
816 	 */
817 
818 	return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
819 }
820 
821 #define N_UNMAP_TRIES 5
822 
823 /*
824  * Do all that is necessary to remove user space mappings. Unmap
825  * the pages and send SIGBUS to the processes if the data was dirty.
826  */
827 static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
828 				  int trapno)
829 {
830 	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
831 	struct address_space *mapping;
832 	LIST_HEAD(tokill);
833 	int ret;
834 	int i;
835 	int kill = 1;
836 
837 	if (PageReserved(p) || PageSlab(p))
838 		return SWAP_SUCCESS;
839 
840 	/*
841 	 * This check implies we don't kill processes if their pages
842 	 * are in the swap cache early. Those are always late kills.
843 	 */
844 	if (!page_mapped(p))
845 		return SWAP_SUCCESS;
846 
847 	if (PageCompound(p) || PageKsm(p))
848 		return SWAP_FAIL;
849 
850 	if (PageSwapCache(p)) {
851 		printk(KERN_ERR
852 		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
853 		ttu |= TTU_IGNORE_HWPOISON;
854 	}
855 
856 	/*
857 	 * Propagate the dirty bit from PTEs to struct page first, because we
858 	 * need this to decide if we should kill or just drop the page.
859 	 * XXX: the dirty test could be racy: set_page_dirty() may not always
860 	 * be called inside page lock (it's recommended but not enforced).
861 	 */
862 	mapping = page_mapping(p);
863 	if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
864 		if (page_mkclean(p)) {
865 			SetPageDirty(p);
866 		} else {
867 			kill = 0;
868 			ttu |= TTU_IGNORE_HWPOISON;
869 			printk(KERN_INFO
870 	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
871 				pfn);
872 		}
873 	}
874 
875 	/*
876 	 * First collect all the processes that have the page
877 	 * mapped in dirty form.  This has to be done before try_to_unmap,
878 	 * because ttu takes the rmap data structures down.
879 	 *
880 	 * Error handling: We ignore errors here because
881 	 * there's nothing that can be done.
882 	 */
883 	if (kill)
884 		collect_procs(p, &tokill);
885 
886 	/*
887 	 * try_to_unmap can fail temporarily due to races.
888 	 * Try a few times (RED-PEN better strategy?)
889 	 */
890 	for (i = 0; i < N_UNMAP_TRIES; i++) {
891 		ret = try_to_unmap(p, ttu);
892 		if (ret == SWAP_SUCCESS)
893 			break;
894 		pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn,  ret);
895 	}
896 
897 	if (ret != SWAP_SUCCESS)
898 		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
899 				pfn, page_mapcount(p));
900 
901 	/*
902 	 * Now that the dirty bit has been propagated to the
903 	 * struct page and all unmaps done we can decide if
904 	 * killing is needed or not.  Only kill when the page
905 	 * was dirty, otherwise the tokill list is merely
906 	 * freed.  When there was a problem unmapping earlier
907 	 * use a more forceful, uncatchable kill to prevent
908 	 * any accesses to the poisoned memory.
909 	 */
910 	kill_procs_ao(&tokill, !!PageDirty(p), trapno,
911 		      ret != SWAP_SUCCESS, pfn);
912 
913 	return ret;
914 }
915 
916 int __memory_failure(unsigned long pfn, int trapno, int flags)
917 {
918 	struct page_state *ps;
919 	struct page *p;
920 	int res;
921 
922 	if (!sysctl_memory_failure_recovery)
923 		panic("Memory failure from trap %d on page %lx", trapno, pfn);
924 
925 	if (!pfn_valid(pfn)) {
926 		printk(KERN_ERR
927 		       "MCE %#lx: memory outside kernel control\n",
928 		       pfn);
929 		return -ENXIO;
930 	}
931 
932 	p = pfn_to_page(pfn);
933 	if (TestSetPageHWPoison(p)) {
934 		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
935 		return 0;
936 	}
937 
938 	atomic_long_add(1, &mce_bad_pages);
939 
940 	/*
941 	 * There is nothing we need to (or can) do about count=0 pages.
942 	 * 1) it's a free page, and therefore in safe hands:
943 	 *    prep_new_page() will be the gate keeper.
944 	 * 2) it's part of a non-compound high order page.
945 	 *    Implies some kernel user: cannot stop them from
946 	 *    R/W the page; let's pray that the page has been
947 	 *    used and will be freed some time later.
948 	 * In fact it's dangerous to directly bump up page count from 0,
949 	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
950 	 */
951 	if (!(flags & MF_COUNT_INCREASED) &&
952 		!get_page_unless_zero(compound_head(p))) {
953 		if (is_free_buddy_page(p)) {
954 			action_result(pfn, "free buddy", DELAYED);
955 			return 0;
956 		} else {
957 			action_result(pfn, "high order kernel", IGNORED);
958 			return -EBUSY;
959 		}
960 	}
961 
962 	/*
963 	 * We ignore non-LRU pages for good reasons.
964 	 * - PG_locked is only well defined for LRU pages and a few others
965 	 * - to avoid races with __set_page_locked()
966 	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
967 	 * The check (unnecessarily) ignores LRU pages being isolated and
968 	 * walked by the page reclaim code, however that's not a big loss.
969 	 */
970 	if (!PageLRU(p))
971 		shake_page(p, 0);
972 	if (!PageLRU(p)) {
973 		/*
974 		 * shake_page() could have freed it.
975 		 */
976 		if (is_free_buddy_page(p)) {
977 			action_result(pfn, "free buddy, 2nd try", DELAYED);
978 			return 0;
979 		}
980 		action_result(pfn, "non LRU", IGNORED);
981 		put_page(p);
982 		return -EBUSY;
983 	}
984 
985 	/*
986 	 * Lock the page and wait for writeback to finish.
987 	 * It's very difficult to mess with pages currently under IO
988 	 * and in many cases impossible, so we just avoid it here.
989 	 */
990 	lock_page_nosync(p);
991 
992 	/*
993 	 * unpoison always clears PG_hwpoison inside the page lock
994 	 */
995 	if (!PageHWPoison(p)) {
996 		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
997 		res = 0;
998 		goto out;
999 	}
1000 	if (hwpoison_filter(p)) {
1001 		if (TestClearPageHWPoison(p))
1002 			atomic_long_dec(&mce_bad_pages);
1003 		unlock_page(p);
1004 		put_page(p);
1005 		return 0;
1006 	}
1007 
1008 	wait_on_page_writeback(p);
1009 
1010 	/*
1011 	 * Now take care of user space mappings.
1012 	 * Abort on fail: __remove_from_page_cache() assumes unmapped page.
1013 	 */
1014 	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
1015 		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
1016 		res = -EBUSY;
1017 		goto out;
1018 	}
1019 
1020 	/*
1021 	 * Torn down by someone else?
1022 	 */
1023 	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
1024 		action_result(pfn, "already truncated LRU", IGNORED);
1025 		res = -EBUSY;
1026 		goto out;
1027 	}
1028 
1029 	res = -EBUSY;
1030 	for (ps = error_states;; ps++) {
1031 		if ((p->flags & ps->mask) == ps->res) {
1032 			res = page_action(ps, p, pfn);
1033 			break;
1034 		}
1035 	}
1036 out:
1037 	unlock_page(p);
1038 	return res;
1039 }
1040 EXPORT_SYMBOL_GPL(__memory_failure);
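/*
 * Besides the machine check path, __memory_failure() can be reached from user
 * space for testing: madvise(MADV_HWPOISON) (CAP_SYS_ADMIN only) poisons a
 * page the caller has mapped and passes MF_COUNT_INCREASED, and the hwpoison
 * injector pokes arbitrary pfns. A rough user-space sketch of the former:
 *
 *	#include <sys/mman.h>
 *	#include <string.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(p, 0, 4096);			// make sure the page is populated
 *	if (madvise(p, 4096, MADV_HWPOISON))	// needs CONFIG_MEMORY_FAILURE
 *		perror("MADV_HWPOISON");
 */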
1041 
1042 /**
1043  * memory_failure - Handle memory failure of a page.
1044  * @pfn: Page Number of the corrupted page
1045  * @trapno: Trap number reported in the signal to user space.
1046  *
1047  * This function is called by the low level machine check code
1048  * of an architecture when it detects hardware memory corruption
1049  * of a page. It tries its best to recover, which includes
1050  * dropping pages, killing processes etc.
1051  *
1052  * The function is primarily of use for corruptions that
1053  * happen outside the current execution context (e.g. when
1054  * detected by a background scrubber).
1055  *
1056  * Must run in process context (e.g. a work queue) with interrupts
1057  * enabled and no spinlocks held.
1058  */
1059 void memory_failure(unsigned long pfn, int trapno)
1060 {
1061 	__memory_failure(pfn, trapno, 0);
1062 }
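/*
 * Because of the process-context requirement, interrupt-level machine check
 * code cannot call memory_failure() directly; it records the pfn and defers.
 * A minimal sketch of such a deferral (not the actual architecture code):
 *
 *	static unsigned long mce_bad_pfn;
 *
 *	static void mce_memory_failure_work(struct work_struct *work)
 *	{
 *		memory_failure(mce_bad_pfn, 0);
 *	}
 *	static DECLARE_WORK(mce_mf_work, mce_memory_failure_work);
 *
 *	// from the MCE handler, once the pfn is known:
 *	//	mce_bad_pfn = pfn;
 *	//	schedule_work(&mce_mf_work);
 */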
1063 
1064 /**
1065  * unpoison_memory - Unpoison a previously poisoned page
1066  * @pfn: Page number of the to be unpoisoned page
1067  *
1068  * Software-unpoison a page that has been poisoned by
1069  * memory_failure() earlier.
1070  *
1071  * This is only done at the software level, so it only works
1072  * for Linux-injected failures, not real hardware failures.
1073  *
1074  * Returns 0 for success, otherwise -errno.
1075  */
1076 int unpoison_memory(unsigned long pfn)
1077 {
1078 	struct page *page;
1079 	struct page *p;
1080 	int freeit = 0;
1081 
1082 	if (!pfn_valid(pfn))
1083 		return -ENXIO;
1084 
1085 	p = pfn_to_page(pfn);
1086 	page = compound_head(p);
1087 
1088 	if (!PageHWPoison(p)) {
1089 		pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
1090 		return 0;
1091 	}
1092 
1093 	if (!get_page_unless_zero(page)) {
1094 		if (TestClearPageHWPoison(p))
1095 			atomic_long_dec(&mce_bad_pages);
1096 		pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
1097 		return 0;
1098 	}
1099 
1100 	lock_page_nosync(page);
1101 	/*
1102 	 * This test is racy because PG_hwpoison is set outside of page lock.
1103 	 * That's acceptable because that won't trigger kernel panic. Instead,
1104 	 * the PG_hwpoison page will be caught and isolated on the entrance to
1105 	 * the free buddy page pool.
1106 	 */
1107 	if (TestClearPageHWPoison(p)) {
1108 		pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
1109 		atomic_long_dec(&mce_bad_pages);
1110 		freeit = 1;
1111 	}
1112 	unlock_page(page);
1113 
1114 	put_page(page);
1115 	if (freeit)
1116 		put_page(page);
1117 
1118 	return 0;
1119 }
1120 EXPORT_SYMBOL(unpoison_memory);
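/*
 * unpoison_memory() is normally reached through the hwpoison injector's
 * debugfs interface, roughly (file name as provided by mm/hwpoison-inject.c,
 * assuming debugfs is mounted):
 *
 *	echo $pfn > /sys/kernel/debug/hwpoison/unpoison-pfn
 *
 * Pages poisoned by real hardware events must not be unpoisoned this way.
 */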
1121 
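/*
 * Allocation callback handed to migrate_pages() in soft_offline_page():
 * allocate the replacement page on the same node as the bad page.
 */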
1122 static struct page *new_page(struct page *p, unsigned long private, int **x)
1123 {
1124 	int nid = page_to_nid(p);
1125 	return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
1126 }
1127 
1128 /*
1129  * Safely get reference count of an arbitrary page.
1130  * Returns 0 for a free page, -EIO for a zero refcount page
1131  * that is not free, and 1 for any other page type.
1132  * For 1 the page is returned with increased page count, otherwise not.
1133  */
1134 static int get_any_page(struct page *p, unsigned long pfn, int flags)
1135 {
1136 	int ret;
1137 
1138 	if (flags & MF_COUNT_INCREASED)
1139 		return 1;
1140 
1141 	/*
1142 	 * The lock_system_sleep prevents a race with memory hotplug,
1143 	 * because the isolation assumes there's only a single user.
1144 	 * This is a big hammer; a finer-grained approach would be nicer.
1145 	 */
1146 	lock_system_sleep();
1147 
1148 	/*
1149 	 * Isolate the page, so that it doesn't get reallocated if it
1150 	 * was free.
1151 	 */
1152 	set_migratetype_isolate(p);
1153 	if (!get_page_unless_zero(compound_head(p))) {
1154 		if (is_free_buddy_page(p)) {
1155 			pr_debug("get_any_page: %#lx free buddy page\n", pfn);
1156 			/* Set hwpoison bit while page is still isolated */
1157 			SetPageHWPoison(p);
1158 			ret = 0;
1159 		} else {
1160 			pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
1161 				pfn, p->flags);
1162 			ret = -EIO;
1163 		}
1164 	} else {
1165 		/* Not a free page */
1166 		ret = 1;
1167 	}
1168 	unset_migratetype_isolate(p);
1169 	unlock_system_sleep();
1170 	return ret;
1171 }
1172 
1173 /**
1174  * soft_offline_page - Soft offline a page.
1175  * @page: page to offline
1176  * @flags: flags. Same as memory_failure().
1177  *
1178  * Returns 0 on success, otherwise negated errno.
1179  *
1180  * Soft offline a page, by migration or invalidation,
1181  * without killing anything. This is for the case when
1182  * a page is not corrupted yet (so it's still valid to access),
1183  * but has had a number of corrected errors and is better taken
1184  * out.
1185  *
1186  * The actual policy on when to do that is maintained by
1187  * user space.
1188  *
1189  * This should never impact any application or cause data loss,
1190  * however it might take some time.
1191  *
1192  * This is not a 100% solution for all memory, but tries to be
1193  * ``good enough'' for the majority of memory.
1194  */
1195 int soft_offline_page(struct page *page, int flags)
1196 {
1197 	int ret;
1198 	unsigned long pfn = page_to_pfn(page);
1199 
1200 	ret = get_any_page(page, pfn, flags);
1201 	if (ret < 0)
1202 		return ret;
1203 	if (ret == 0)
1204 		goto done;
1205 
1206 	/*
1207 	 * Page cache page we can handle?
1208 	 */
1209 	if (!PageLRU(page)) {
1210 		/*
1211 		 * Try to free it.
1212 		 */
1213 		put_page(page);
1214 		shake_page(page, 1);
1215 
1216 		/*
1217 		 * Did it turn free?
1218 		 */
1219 		ret = get_any_page(page, pfn, 0);
1220 		if (ret < 0)
1221 			return ret;
1222 		if (ret == 0)
1223 			goto done;
1224 	}
1225 	if (!PageLRU(page)) {
1226 		pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
1227 				pfn, page->flags);
1228 		return -EIO;
1229 	}
1230 
1231 	lock_page(page);
1232 	wait_on_page_writeback(page);
1233 
1234 	/*
1235 	 * Synchronized using the page lock with memory_failure()
1236 	 */
1237 	if (PageHWPoison(page)) {
1238 		unlock_page(page);
1239 		put_page(page);
1240 		pr_debug("soft offline: %#lx page already poisoned\n", pfn);
1241 		return -EBUSY;
1242 	}
1243 
1244 	/*
1245 	 * Try to invalidate first. This should work for
1246 	 * non dirty unmapped page cache pages.
1247 	 */
1248 	ret = invalidate_inode_page(page);
1249 	unlock_page(page);
1250 
1251 	/*
1252 	 * Drop count because page migration doesn't like raised
1253 	 * counts. The page could get re-allocated, but if it becomes
1254 	 * LRU the isolation will just fail.
1255 	 * RED-PEN would be better to keep it isolated here, but we
1256 	 * would need to fix isolation locking first.
1257 	 */
1258 	put_page(page);
1259 	if (ret == 1) {
1260 		ret = 0;
1261 		pr_debug("soft_offline: %#lx: invalidated\n", pfn);
1262 		goto done;
1263 	}
1264 
1265 	/*
1266 	 * Simple invalidation didn't work.
1267 	 * Try to migrate to a new page instead. migrate.c
1268 	 * handles a large number of cases for us.
1269 	 */
1270 	ret = isolate_lru_page(page);
1271 	if (!ret) {
1272 		LIST_HEAD(pagelist);
1273 
1274 		list_add(&page->lru, &pagelist);
1275 		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
1276 		if (ret) {
1277 			pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
1278 				pfn, ret, page->flags);
1279 			if (ret > 0)
1280 				ret = -EIO;
1281 		}
1282 	} else {
1283 		pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
1284 				pfn, ret, page_count(page), page->flags);
1285 	}
1286 	if (ret)
1287 		return ret;
1288 
1289 done:
1290 	atomic_long_add(1, &mce_bad_pages);
1291 	SetPageHWPoison(page);
1292 	/* keep elevated page count for bad page */
1293 	return ret;
1294 }
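/*
 * Typical callers of soft_offline_page(): the memory sysfs attribute
 * (/sys/devices/system/memory/soft_offline_page, which takes a physical
 * address) and madvise(MADV_SOFT_OFFLINE) for testing. A rough sketch of
 * driving it from a corrected-error policy daemon, given the physical address
 * paddr of a flaky page (interface details depend on the kernel config):
 *
 *	char buf[32];
 *	int fd = open("/sys/devices/system/memory/soft_offline_page", O_WRONLY);
 *	snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)paddr);
 *	write(fd, buf, strlen(buf));
 *	close(fd);
 */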
1295