xref: /openbmc/linux/arch/x86/mm/fault.c (revision bc000245)
1 /*
2  *  Copyright (C) 1995  Linus Torvalds
3  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
4  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
5  */
6 #include <linux/magic.h>		/* STACK_END_MAGIC		*/
7 #include <linux/sched.h>		/* test_thread_flag(), ...	*/
8 #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
9 #include <linux/module.h>		/* search_exception_table	*/
10 #include <linux/bootmem.h>		/* max_low_pfn			*/
11 #include <linux/kprobes.h>		/* __kprobes, ...		*/
12 #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
13 #include <linux/perf_event.h>		/* perf_sw_event		*/
14 #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
15 #include <linux/prefetch.h>		/* prefetchw			*/
16 #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
17 
18 #include <asm/traps.h>			/* dotraplinkage, ...		*/
19 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
20 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
21 #include <asm/fixmap.h>			/* VSYSCALL_START		*/
22 
23 #define CREATE_TRACE_POINTS
24 #include <asm/trace/exceptions.h>
25 
26 /*
27  * Page fault error code bits:
28  *
29  *   bit 0 ==	 0: no page found	1: protection fault
30  *   bit 1 ==	 0: read access		1: write access
31  *   bit 2 ==	 0: kernel-mode access	1: user-mode access
32  *   bit 3 ==				1: use of reserved bit detected
33  *   bit 4 ==				1: fault was an instruction fetch
34  */
35 enum x86_pf_error_code {
36 
37 	PF_PROT		=		1 << 0,
38 	PF_WRITE	=		1 << 1,
39 	PF_USER		=		1 << 2,
40 	PF_RSVD		=		1 << 3,
41 	PF_INSTR	=		1 << 4,
42 };
43 
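/*
 * Example (illustrative): an error code of PF_USER|PF_WRITE|PF_PROT
 * means a user-mode write hit a page that is present but not
 * writable - the common copy-on-write case.  PF_INSTR on its own
 * means a kernel-mode instruction fetch from a not-present page.
 */
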
44 /*
45  * Returns -1 if mmiotrace handled the fault; returns 0 if mmiotrace
46  * is disabled or the fault is not handled by mmiotrace:
47  */
48 static inline int __kprobes
49 kmmio_fault(struct pt_regs *regs, unsigned long addr)
50 {
51 	if (unlikely(is_kmmio_active()))
52 		if (kmmio_handler(regs, addr) == 1)
53 			return -1;
54 	return 0;
55 }
56 
57 static inline int __kprobes kprobes_fault(struct pt_regs *regs)
58 {
59 	int ret = 0;
60 
61 	/* kprobe_running() needs smp_processor_id() */
62 	if (kprobes_built_in() && !user_mode_vm(regs)) {
63 		preempt_disable();
64 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
65 			ret = 1;
66 		preempt_enable();
67 	}
68 
69 	return ret;
70 }
71 
72 /*
73  * Prefetch quirks:
74  *
75  * 32-bit mode:
76  *
77  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
78  *   Check that here and ignore it.
79  *
80  * 64-bit mode:
81  *
82  *   Sometimes the CPU reports invalid exceptions on prefetch.
83  *   Check that here and ignore it.
84  *
85  * Opcode checker based on code by Richard Brunner.
86  */
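/*
 * For reference (illustrative): "prefetchnta (%rax)" encodes as
 * 0x0f 0x18 0x00 and the AMD 3DNow! "prefetch (%rax)" as 0x0f 0x0d
 * 0x00, which is why the 0x00 case below re-reads the byte following
 * a 0x0F escape and accepts opcodes 0x0D and 0x18.
 */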
87 static inline int
88 check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
89 		      unsigned char opcode, int *prefetch)
90 {
91 	unsigned char instr_hi = opcode & 0xf0;
92 	unsigned char instr_lo = opcode & 0x0f;
93 
94 	switch (instr_hi) {
95 	case 0x20:
96 	case 0x30:
97 		/*
98 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
99 		 * In X86_64 long mode, the CPU will signal invalid
100 		 * opcode if some of these prefixes are present, so
101 		 * X86_64 will never get here anyway.
102 		 */
103 		return ((instr_lo & 7) == 0x6);
104 #ifdef CONFIG_X86_64
105 	case 0x40:
106 		/*
107 		 * In AMD64 long mode, 0x40..0x4F are valid REX prefixes.
108 		 * We need to figure out under what instruction mode the
109 		 * instruction was issued. We could check the LDT for lm,
110 		 * but for now it's good enough to assume that long
111 		 * mode only uses well-known segments or the kernel.
112 		 */
113 		return (!user_mode(regs) || user_64bit_mode(regs));
114 #endif
115 	case 0x60:
116 		/* 0x64 thru 0x67 are valid prefixes in all modes. */
117 		return (instr_lo & 0xC) == 0x4;
118 	case 0xF0:
119 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
120 		return !instr_lo || (instr_lo>>1) == 1;
121 	case 0x00:
122 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
123 		if (probe_kernel_address(instr, opcode))
124 			return 0;
125 
126 		*prefetch = (instr_lo == 0xF) &&
127 			(opcode == 0x0D || opcode == 0x18);
128 		return 0;
129 	default:
130 		return 0;
131 	}
132 }
133 
134 static int
135 is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
136 {
137 	unsigned char *max_instr;
138 	unsigned char *instr;
139 	int prefetch = 0;
140 
141 	/*
142 	 * If it was an exec (instruction fetch) fault on an NX page, then
143 	 * do not ignore the fault:
144 	 */
145 	if (error_code & PF_INSTR)
146 		return 0;
147 
148 	instr = (void *)convert_ip_to_linear(current, regs);
149 	max_instr = instr + 15;
150 
151 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
152 		return 0;
153 
154 	while (instr < max_instr) {
155 		unsigned char opcode;
156 
157 		if (probe_kernel_address(instr, opcode))
158 			break;
159 
160 		instr++;
161 
162 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
163 			break;
164 	}
165 	return prefetch;
166 }
167 
168 static void
169 force_sig_info_fault(int si_signo, int si_code, unsigned long address,
170 		     struct task_struct *tsk, int fault)
171 {
172 	unsigned lsb = 0;
173 	siginfo_t info;
174 
175 	info.si_signo	= si_signo;
176 	info.si_errno	= 0;
177 	info.si_code	= si_code;
178 	info.si_addr	= (void __user *)address;
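	/*
	 * For hardware poison faults, si_addr_lsb tells the SIGBUS
	 * handler how much memory around si_addr is affected: the
	 * huge page shift for HWPOISON_LARGE, otherwise PAGE_SHIFT.
	 */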
179 	if (fault & VM_FAULT_HWPOISON_LARGE)
180 		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
181 	if (fault & VM_FAULT_HWPOISON)
182 		lsb = PAGE_SHIFT;
183 	info.si_addr_lsb = lsb;
184 
185 	force_sig_info(si_signo, &info, tsk);
186 }
187 
188 DEFINE_SPINLOCK(pgd_lock);
189 LIST_HEAD(pgd_list);
190 
191 #ifdef CONFIG_X86_32
192 static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
193 {
194 	unsigned index = pgd_index(address);
195 	pgd_t *pgd_k;
196 	pud_t *pud, *pud_k;
197 	pmd_t *pmd, *pmd_k;
198 
199 	pgd += index;
200 	pgd_k = init_mm.pgd + index;
201 
202 	if (!pgd_present(*pgd_k))
203 		return NULL;
204 
205 	/*
206 	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
207 	 * and redundant with the set_pmd() on non-PAE. As would
208 	 * set_pud.
209 	 */
210 	pud = pud_offset(pgd, address);
211 	pud_k = pud_offset(pgd_k, address);
212 	if (!pud_present(*pud_k))
213 		return NULL;
214 
215 	pmd = pmd_offset(pud, address);
216 	pmd_k = pmd_offset(pud_k, address);
217 	if (!pmd_present(*pmd_k))
218 		return NULL;
219 
220 	if (!pmd_present(*pmd))
221 		set_pmd(pmd, *pmd_k);
222 	else
223 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
224 
225 	return pmd_k;
226 }
227 
228 void vmalloc_sync_all(void)
229 {
230 	unsigned long address;
231 
232 	if (SHARED_KERNEL_PMD)
233 		return;
234 
235 	for (address = VMALLOC_START & PMD_MASK;
236 	     address >= TASK_SIZE && address < FIXADDR_TOP;
237 	     address += PMD_SIZE) {
238 		struct page *page;
239 
240 		spin_lock(&pgd_lock);
241 		list_for_each_entry(page, &pgd_list, lru) {
242 			spinlock_t *pgt_lock;
243 			pmd_t *ret;
244 
245 			/* the pgt_lock is only needed for Xen */
246 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
247 
248 			spin_lock(pgt_lock);
249 			ret = vmalloc_sync_one(page_address(page), address);
250 			spin_unlock(pgt_lock);
251 
252 			if (!ret)
253 				break;
254 		}
255 		spin_unlock(&pgd_lock);
256 	}
257 }
258 
259 /*
260  * 32-bit:
261  *
262  *   Handle a fault on the vmalloc or module mapping area
263  */
264 static noinline __kprobes int vmalloc_fault(unsigned long address)
265 {
266 	unsigned long pgd_paddr;
267 	pmd_t *pmd_k;
268 	pte_t *pte_k;
269 
270 	/* Make sure we are in vmalloc area: */
271 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
272 		return -1;
273 
274 	WARN_ON_ONCE(in_nmi());
275 
276 	/*
277 	 * Synchronize this task's top level page-table
278 	 * with the 'reference' page table.
279 	 *
280 	 * Do _not_ use "current" here. We might be inside
281 	 * an interrupt in the middle of a task switch..
282 	 */
283 	pgd_paddr = read_cr3();
284 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
285 	if (!pmd_k)
286 		return -1;
287 
288 	pte_k = pte_offset_kernel(pmd_k, address);
289 	if (!pte_present(*pte_k))
290 		return -1;
291 
292 	return 0;
293 }
294 
295 /*
296  * Did it hit the DOS screen memory VA from vm86 mode?
297  */
298 static inline void
299 check_v8086_mode(struct pt_regs *regs, unsigned long address,
300 		 struct task_struct *tsk)
301 {
302 	unsigned long bit;
303 
304 	if (!v8086_mode(regs))
305 		return;
306 
307 	bit = (address - 0xA0000) >> PAGE_SHIFT;
308 	if (bit < 32)
309 		tsk->thread.screen_bitmap |= 1 << bit;
310 }
311 
312 static bool low_pfn(unsigned long pfn)
313 {
314 	return pfn < max_low_pfn;
315 }
316 
317 static void dump_pagetable(unsigned long address)
318 {
319 	pgd_t *base = __va(read_cr3());
320 	pgd_t *pgd = &base[pgd_index(address)];
321 	pmd_t *pmd;
322 	pte_t *pte;
323 
324 #ifdef CONFIG_X86_PAE
325 	printk("*pdpt = %016Lx ", pgd_val(*pgd));
326 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
327 		goto out;
328 #endif
329 	pmd = pmd_offset(pud_offset(pgd, address), address);
330 	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
331 
332 	/*
333 	 * We must not directly access the pte in the highpte
334 	 * case if the page table is located in highmem.
335 	 * And let's rather not kmap-atomic the pte, just in case
336 	 * it's allocated already:
337 	 */
338 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
339 		goto out;
340 
341 	pte = pte_offset_kernel(pmd, address);
342 	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
343 out:
344 	printk("\n");
345 }
346 
347 #else /* CONFIG_X86_64: */
348 
349 void vmalloc_sync_all(void)
350 {
351 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
352 }
353 
354 /*
355  * 64-bit:
356  *
357  *   Handle a fault on the vmalloc area
358  *
359  * This assumes no large pages in there.
360  */
361 static noinline __kprobes int vmalloc_fault(unsigned long address)
362 {
363 	pgd_t *pgd, *pgd_ref;
364 	pud_t *pud, *pud_ref;
365 	pmd_t *pmd, *pmd_ref;
366 	pte_t *pte, *pte_ref;
367 
368 	/* Make sure we are in vmalloc area: */
369 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
370 		return -1;
371 
372 	WARN_ON_ONCE(in_nmi());
373 
374 	/*
375 	 * Copy kernel mappings over when needed. This can also
376 	 * happen due to a race in a page table update; in the latter
377 	 * case just flush:
378 	 */
379 	pgd = pgd_offset(current->active_mm, address);
380 	pgd_ref = pgd_offset_k(address);
381 	if (pgd_none(*pgd_ref))
382 		return -1;
383 
384 	if (pgd_none(*pgd)) {
385 		set_pgd(pgd, *pgd_ref);
386 		arch_flush_lazy_mmu_mode();
387 	} else {
388 		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
389 	}
390 
391 	/*
392 	 * Below here mismatches are bugs because these lower tables
393 	 * are shared:
394 	 */
395 
396 	pud = pud_offset(pgd, address);
397 	pud_ref = pud_offset(pgd_ref, address);
398 	if (pud_none(*pud_ref))
399 		return -1;
400 
401 	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
402 		BUG();
403 
404 	pmd = pmd_offset(pud, address);
405 	pmd_ref = pmd_offset(pud_ref, address);
406 	if (pmd_none(*pmd_ref))
407 		return -1;
408 
409 	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
410 		BUG();
411 
412 	pte_ref = pte_offset_kernel(pmd_ref, address);
413 	if (!pte_present(*pte_ref))
414 		return -1;
415 
416 	pte = pte_offset_kernel(pmd, address);
417 
418 	/*
419 	 * Don't use pte_page here, because the mappings can point
420 	 * outside mem_map, and the NUMA hash lookup cannot handle
421 	 * that:
422 	 */
423 	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
424 		BUG();
425 
426 	return 0;
427 }
428 
429 #ifdef CONFIG_CPU_SUP_AMD
430 static const char errata93_warning[] =
431 KERN_ERR
432 "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
433 "******* Working around it, but it may cause SEGVs or burn power.\n"
434 "******* Please consider a BIOS update.\n"
435 "******* Disabling USB legacy in the BIOS may also help.\n";
436 #endif
437 
438 /*
439  * No vm86 mode in 64-bit mode:
440  */
441 static inline void
442 check_v8086_mode(struct pt_regs *regs, unsigned long address,
443 		 struct task_struct *tsk)
444 {
445 }
446 
447 static int bad_address(void *p)
448 {
449 	unsigned long dummy;
450 
451 	return probe_kernel_address((unsigned long *)p, dummy);
452 }
453 
454 static void dump_pagetable(unsigned long address)
455 {
456 	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
457 	pgd_t *pgd = base + pgd_index(address);
458 	pud_t *pud;
459 	pmd_t *pmd;
460 	pte_t *pte;
461 
462 	if (bad_address(pgd))
463 		goto bad;
464 
465 	printk("PGD %lx ", pgd_val(*pgd));
466 
467 	if (!pgd_present(*pgd))
468 		goto out;
469 
470 	pud = pud_offset(pgd, address);
471 	if (bad_address(pud))
472 		goto bad;
473 
474 	printk("PUD %lx ", pud_val(*pud));
475 	if (!pud_present(*pud) || pud_large(*pud))
476 		goto out;
477 
478 	pmd = pmd_offset(pud, address);
479 	if (bad_address(pmd))
480 		goto bad;
481 
482 	printk("PMD %lx ", pmd_val(*pmd));
483 	if (!pmd_present(*pmd) || pmd_large(*pmd))
484 		goto out;
485 
486 	pte = pte_offset_kernel(pmd, address);
487 	if (bad_address(pte))
488 		goto bad;
489 
490 	printk("PTE %lx", pte_val(*pte));
491 out:
492 	printk("\n");
493 	return;
494 bad:
495 	printk("BAD\n");
496 }
497 
498 #endif /* CONFIG_X86_64 */
499 
500 /*
501  * Workaround for K8 erratum #93 & buggy BIOS.
502  *
503  * BIOS SMM functions are required to use a specific workaround
504  * to avoid corruption of the 64-bit RIP register on C stepping K8.
505  *
506  * A lot of BIOSes that didn't get tested properly miss this.
507  *
508  * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
509  * Try to work around it here.
510  *
511  * Note that we only handle faults in the kernel here.
512  * Does nothing on 32-bit.
513  */
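/*
 * Example (illustrative): a branch to module code at
 * 0xffffffffa0123456 can fault with both the reported address and
 * RIP equal to 0x00000000a0123456; sign-extending the address back
 * into the kernel text/module range recovers the intended target.
 */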
514 static int is_errata93(struct pt_regs *regs, unsigned long address)
515 {
516 #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
517 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
518 	    || boot_cpu_data.x86 != 0xf)
519 		return 0;
520 
521 	if (address != regs->ip)
522 		return 0;
523 
524 	if ((address >> 32) != 0)
525 		return 0;
526 
527 	address |= 0xffffffffUL << 32;
528 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
529 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
530 		printk_once(errata93_warning);
531 		regs->ip = address;
532 		return 1;
533 	}
534 #endif
535 	return 0;
536 }
537 
538 /*
539  * Work around K8 erratum #100: K8 in compat mode occasionally jumps
540  * to illegal addresses >4GB.
541  *
542  * We catch this in the page fault handler because these addresses
543  * are not reachable. Just detect this case and return.  Any code
544  * segment in the LDT is treated as compatibility mode.
545  */
546 static int is_errata100(struct pt_regs *regs, unsigned long address)
547 {
548 #ifdef CONFIG_X86_64
549 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
550 		return 1;
551 #endif
552 	return 0;
553 }
554 
555 static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
556 {
557 #ifdef CONFIG_X86_F00F_BUG
558 	unsigned long nr;
559 
560 	/*
561 	 * Pentium F0 0F C7 C8 bug workaround:
562 	 */
563 	if (boot_cpu_has_bug(X86_BUG_F00F)) {
564 		nr = (address - idt_descr.address) >> 3;
565 
566 		if (nr == 6) {
567 			do_invalid_op(regs, 0);
568 			return 1;
569 		}
570 	}
571 #endif
572 	return 0;
573 }
574 
575 static const char nx_warning[] = KERN_CRIT
576 "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
577 
578 static void
579 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
580 		unsigned long address)
581 {
582 	if (!oops_may_print())
583 		return;
584 
585 	if (error_code & PF_INSTR) {
586 		unsigned int level;
587 
588 		pte_t *pte = lookup_address(address, &level);
589 
590 		if (pte && pte_present(*pte) && !pte_exec(*pte))
591 			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
592 	}
593 
594 	printk(KERN_ALERT "BUG: unable to handle kernel ");
595 	if (address < PAGE_SIZE)
596 		printk(KERN_CONT "NULL pointer dereference");
597 	else
598 		printk(KERN_CONT "paging request");
599 
600 	printk(KERN_CONT " at %p\n", (void *) address);
601 	printk(KERN_ALERT "IP:");
602 	printk_address(regs->ip);
603 
604 	dump_pagetable(address);
605 }
606 
607 static noinline void
608 pgtable_bad(struct pt_regs *regs, unsigned long error_code,
609 	    unsigned long address)
610 {
611 	struct task_struct *tsk;
612 	unsigned long flags;
613 	int sig;
614 
615 	flags = oops_begin();
616 	tsk = current;
617 	sig = SIGKILL;
618 
619 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
620 	       tsk->comm, address);
621 	dump_pagetable(address);
622 
623 	tsk->thread.cr2		= address;
624 	tsk->thread.trap_nr	= X86_TRAP_PF;
625 	tsk->thread.error_code	= error_code;
626 
627 	if (__die("Bad pagetable", regs, error_code))
628 		sig = 0;
629 
630 	oops_end(flags, regs, sig);
631 }
632 
633 static noinline void
634 no_context(struct pt_regs *regs, unsigned long error_code,
635 	   unsigned long address, int signal, int si_code)
636 {
637 	struct task_struct *tsk = current;
638 	unsigned long *stackend;
639 	unsigned long flags;
640 	int sig;
641 
642 	/* Are we prepared to handle this kernel fault? */
643 	if (fixup_exception(regs)) {
644 		if (current_thread_info()->sig_on_uaccess_error && signal) {
645 			tsk->thread.trap_nr = X86_TRAP_PF;
646 			tsk->thread.error_code = error_code | PF_USER;
647 			tsk->thread.cr2 = address;
648 
649 			/* XXX: hwpoison faults will set the wrong code. */
650 			force_sig_info_fault(signal, si_code, address, tsk, 0);
651 		}
652 		return;
653 	}
654 
655 	/*
656 	 * 32-bit:
657 	 *
658 	 *   Valid to do another page fault here, because if this fault
659 	 *   had been triggered by is_prefetch(), fixup_exception() would have
660 	 *   handled it.
661 	 *
662 	 * 64-bit:
663 	 *
664 	 *   Hall of shame of CPU/BIOS bugs.
665 	 */
666 	if (is_prefetch(regs, error_code, address))
667 		return;
668 
669 	if (is_errata93(regs, address))
670 		return;
671 
672 	/*
673 	 * Oops. The kernel tried to access some bad page. We'll have to
674 	 * terminate things with extreme prejudice:
675 	 */
676 	flags = oops_begin();
677 
678 	show_fault_oops(regs, error_code, address);
679 
680 	stackend = end_of_stack(tsk);
681 	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
682 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
683 
684 	tsk->thread.cr2		= address;
685 	tsk->thread.trap_nr	= X86_TRAP_PF;
686 	tsk->thread.error_code	= error_code;
687 
688 	sig = SIGKILL;
689 	if (__die("Oops", regs, error_code))
690 		sig = 0;
691 
692 	/* Executive summary in case the body of the oops scrolled away */
693 	printk(KERN_DEFAULT "CR2: %016lx\n", address);
694 
695 	oops_end(flags, regs, sig);
696 }
697 
698 /*
699  * Print out info about fatal segfaults, if the show_unhandled_signals
700  * sysctl is set:
701  */
702 static inline void
703 show_signal_msg(struct pt_regs *regs, unsigned long error_code,
704 		unsigned long address, struct task_struct *tsk)
705 {
706 	if (!unhandled_signal(tsk, SIGSEGV))
707 		return;
708 
709 	if (!printk_ratelimit())
710 		return;
711 
712 	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
713 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
714 		tsk->comm, task_pid_nr(tsk), address,
715 		(void *)regs->ip, (void *)regs->sp, error_code);
716 
717 	print_vma_addr(KERN_CONT " in ", regs->ip);
718 
719 	printk(KERN_CONT "\n");
720 }
721 
722 static void
723 __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
724 		       unsigned long address, int si_code)
725 {
726 	struct task_struct *tsk = current;
727 
728 	/* User mode accesses just cause a SIGSEGV */
729 	if (error_code & PF_USER) {
730 		/*
731 		 * It's possible to have interrupts off here:
732 		 */
733 		local_irq_enable();
734 
735 		/*
736 		 * Valid to do another page fault here because this one came
737 		 * from user space:
738 		 */
739 		if (is_prefetch(regs, error_code, address))
740 			return;
741 
742 		if (is_errata100(regs, address))
743 			return;
744 
745 #ifdef CONFIG_X86_64
746 		/*
747 		 * Instruction fetch faults in the vsyscall page might need
748 		 * emulation.
749 		 */
750 		if (unlikely((error_code & PF_INSTR) &&
751 			     ((address & ~0xfff) == VSYSCALL_START))) {
752 			if (emulate_vsyscall(regs, address))
753 				return;
754 		}
755 #endif
756 		/* Kernel addresses are always protection faults: */
757 		if (address >= TASK_SIZE)
758 			error_code |= PF_PROT;
759 
760 		if (likely(show_unhandled_signals))
761 			show_signal_msg(regs, error_code, address, tsk);
762 
763 		tsk->thread.cr2		= address;
764 		tsk->thread.error_code	= error_code;
765 		tsk->thread.trap_nr	= X86_TRAP_PF;
766 
767 		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
768 
769 		return;
770 	}
771 
772 	if (is_f00f_bug(regs, address))
773 		return;
774 
775 	no_context(regs, error_code, address, SIGSEGV, si_code);
776 }
777 
778 static noinline void
779 bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
780 		     unsigned long address)
781 {
782 	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
783 }
784 
785 static void
786 __bad_area(struct pt_regs *regs, unsigned long error_code,
787 	   unsigned long address, int si_code)
788 {
789 	struct mm_struct *mm = current->mm;
790 
791 	/*
792 	 * Something tried to access memory that isn't in our memory map.
793 	 * Fix it, but check if it's kernel or user first.
794 	 */
795 	up_read(&mm->mmap_sem);
796 
797 	__bad_area_nosemaphore(regs, error_code, address, si_code);
798 }
799 
800 static noinline void
801 bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
802 {
803 	__bad_area(regs, error_code, address, SEGV_MAPERR);
804 }
805 
806 static noinline void
807 bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
808 		      unsigned long address)
809 {
810 	__bad_area(regs, error_code, address, SEGV_ACCERR);
811 }
812 
813 static void
814 do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
815 	  unsigned int fault)
816 {
817 	struct task_struct *tsk = current;
818 	struct mm_struct *mm = tsk->mm;
819 	int code = BUS_ADRERR;
820 
821 	up_read(&mm->mmap_sem);
822 
823 	/* Kernel mode? Handle exceptions or die: */
824 	if (!(error_code & PF_USER)) {
825 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
826 		return;
827 	}
828 
829 	/* User-space => ok to do another page fault: */
830 	if (is_prefetch(regs, error_code, address))
831 		return;
832 
833 	tsk->thread.cr2		= address;
834 	tsk->thread.error_code	= error_code;
835 	tsk->thread.trap_nr	= X86_TRAP_PF;
836 
837 #ifdef CONFIG_MEMORY_FAILURE
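	/*
	 * BUS_MCEERR_AR marks this as an "action required" machine
	 * check SIGBUS: the poisoned memory was consumed synchronously
	 * by this access rather than merely reported in the background.
	 */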
838 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
839 		printk(KERN_ERR
840 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
841 			tsk->comm, tsk->pid, address);
842 		code = BUS_MCEERR_AR;
843 	}
844 #endif
845 	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
846 }
847 
848 static noinline void
849 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
850 	       unsigned long address, unsigned int fault)
851 {
852 	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
853 		up_read(&current->mm->mmap_sem);
854 		no_context(regs, error_code, address, 0, 0);
855 		return;
856 	}
857 
858 	if (fault & VM_FAULT_OOM) {
859 		/* Kernel mode? Handle exceptions or die: */
860 		if (!(error_code & PF_USER)) {
861 			up_read(&current->mm->mmap_sem);
862 			no_context(regs, error_code, address,
863 				   SIGSEGV, SEGV_MAPERR);
864 			return;
865 		}
866 
867 		up_read(&current->mm->mmap_sem);
868 
869 		/*
870 		 * We ran out of memory, call the OOM killer, and return to
871 		 * userspace (which will retry the fault, or kill us if we got
872 		 * oom-killed):
873 		 */
874 		pagefault_out_of_memory();
875 	} else {
876 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
877 			     VM_FAULT_HWPOISON_LARGE))
878 			do_sigbus(regs, error_code, address, fault);
879 		else
880 			BUG();
881 	}
882 }
883 
884 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
885 {
886 	if ((error_code & PF_WRITE) && !pte_write(*pte))
887 		return 0;
888 
889 	if ((error_code & PF_INSTR) && !pte_exec(*pte))
890 		return 0;
891 
892 	return 1;
893 }
894 
895 /*
896  * Handle a spurious fault caused by a stale TLB entry.
897  *
898  * This allows us to lazily refresh the TLB when increasing the
899  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
900  * eagerly is very expensive since that implies doing a full
901  * cross-processor TLB flush, even if no stale TLB entries exist
902  * on other processors.
903  *
904  * There are no security implications to leaving a stale TLB when
905  * increasing the permissions on a page.
906  */
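/*
 * For instance, set_memory_rw() on one CPU may make a kernel page
 * writable while another CPU still holds a read-only TLB entry for
 * it; the first write on that CPU faults, we see that the page
 * tables already permit the write, and simply return so the access
 * can be retried.
 */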
907 static noinline __kprobes int
908 spurious_fault(unsigned long error_code, unsigned long address)
909 {
910 	pgd_t *pgd;
911 	pud_t *pud;
912 	pmd_t *pmd;
913 	pte_t *pte;
914 	int ret;
915 
916 	/* Reserved-bit violation or user access to kernel space? */
917 	if (error_code & (PF_USER | PF_RSVD))
918 		return 0;
919 
920 	pgd = init_mm.pgd + pgd_index(address);
921 	if (!pgd_present(*pgd))
922 		return 0;
923 
924 	pud = pud_offset(pgd, address);
925 	if (!pud_present(*pud))
926 		return 0;
927 
928 	if (pud_large(*pud))
929 		return spurious_fault_check(error_code, (pte_t *) pud);
930 
931 	pmd = pmd_offset(pud, address);
932 	if (!pmd_present(*pmd))
933 		return 0;
934 
935 	if (pmd_large(*pmd))
936 		return spurious_fault_check(error_code, (pte_t *) pmd);
937 
938 	pte = pte_offset_kernel(pmd, address);
939 	if (!pte_present(*pte))
940 		return 0;
941 
942 	ret = spurious_fault_check(error_code, pte);
943 	if (!ret)
944 		return 0;
945 
946 	/*
947 	 * Make sure we have permissions in PMD.
948 	 * If not, then there's a bug in the page tables:
949 	 */
950 	ret = spurious_fault_check(error_code, (pte_t *) pmd);
951 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
952 
953 	return ret;
954 }
955 
956 int show_unhandled_signals = 1;
957 
958 static inline int
959 access_error(unsigned long error_code, struct vm_area_struct *vma)
960 {
961 	if (error_code & PF_WRITE) {
962 		/* write, present and write, not present: */
963 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
964 			return 1;
965 		return 0;
966 	}
967 
968 	/* read, present: */
969 	if (unlikely(error_code & PF_PROT))
970 		return 1;
971 
972 	/* read, not present: */
973 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
974 		return 1;
975 
976 	return 0;
977 }
978 
979 static int fault_in_kernel_space(unsigned long address)
980 {
981 	return address >= TASK_SIZE_MAX;
982 }
983 
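/*
 * With SMAP enabled, a kernel-mode access to a user address is only
 * legitimate while EFLAGS.AC is set (inside a stac()/clac() region);
 * anything else is reported as a violation.
 */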
984 static inline bool smap_violation(int error_code, struct pt_regs *regs)
985 {
986 	if (error_code & PF_USER)
987 		return false;
988 
989 	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
990 		return false;
991 
992 	return true;
993 }
994 
995 /*
996  * This routine handles page faults.  It determines the address,
997  * and the problem, and then passes it off to one of the appropriate
998  * routines.
999  */
1000 static void __kprobes
1001 __do_page_fault(struct pt_regs *regs, unsigned long error_code)
1002 {
1003 	struct vm_area_struct *vma;
1004 	struct task_struct *tsk;
1005 	unsigned long address;
1006 	struct mm_struct *mm;
1007 	int fault;
1008 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1009 
1010 	tsk = current;
1011 	mm = tsk->mm;
1012 
1013 	/* Get the faulting address: */
1014 	address = read_cr2();
1015 
1016 	/*
1017 	 * Detect and handle instructions that would cause a page fault for
1018 	 * both a tracked kernel page and a userspace page.
1019 	 */
1020 	if (kmemcheck_active(regs))
1021 		kmemcheck_hide(regs);
1022 	prefetchw(&mm->mmap_sem);
1023 
1024 	if (unlikely(kmmio_fault(regs, address)))
1025 		return;
1026 
1027 	/*
1028 	 * We fault-in kernel-space virtual memory on-demand. The
1029 	 * 'reference' page table is init_mm.pgd.
1030 	 *
1031 	 * NOTE! We MUST NOT take any locks for this case. We may
1032 	 * be in an interrupt or a critical region, and should
1033 	 * only copy the information from the master page table,
1034 	 * nothing more.
1035 	 *
1036 	 * This verifies that the fault happens in kernel space
1037 	 * (error_code & 4) == 0, i.e. !PF_USER, and that the fault was not a
1038 	 * protection error, (error_code & 9) == 0, i.e. neither PF_PROT nor PF_RSVD.
1039 	 */
1040 	if (unlikely(fault_in_kernel_space(address))) {
1041 		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
1042 			if (vmalloc_fault(address) >= 0)
1043 				return;
1044 
1045 			if (kmemcheck_fault(regs, address, error_code))
1046 				return;
1047 		}
1048 
1049 		/* Can handle a stale RO->RW TLB: */
1050 		if (spurious_fault(error_code, address))
1051 			return;
1052 
1053 		/* kprobes don't want to hook the spurious faults: */
1054 		if (kprobes_fault(regs))
1055 			return;
1056 		/*
1057 		 * Don't take the mm semaphore here. If we fix up a prefetch
1058 		 * fault we could otherwise deadlock:
1059 		 */
1060 		bad_area_nosemaphore(regs, error_code, address);
1061 
1062 		return;
1063 	}
1064 
1065 	/* kprobes don't want to hook the spurious faults: */
1066 	if (unlikely(kprobes_fault(regs)))
1067 		return;
1068 
1069 	if (unlikely(error_code & PF_RSVD))
1070 		pgtable_bad(regs, error_code, address);
1071 
1072 	if (static_cpu_has(X86_FEATURE_SMAP)) {
1073 		if (unlikely(smap_violation(error_code, regs))) {
1074 			bad_area_nosemaphore(regs, error_code, address);
1075 			return;
1076 		}
1077 	}
1078 
1079 	/*
1080 	 * If we're in an interrupt, have no user context or are running
1081 	 * in an atomic region then we must not take the fault:
1082 	 */
1083 	if (unlikely(in_atomic() || !mm)) {
1084 		bad_area_nosemaphore(regs, error_code, address);
1085 		return;
1086 	}
1087 
1088 	/*
1089 	 * It's safe to allow irq's after cr2 has been saved and the
1090 	 * vmalloc fault has been handled.
1091 	 *
1092 	 * User-mode registers count as a user access even for any
1093 	 * potential system fault or CPU buglet:
1094 	 */
1095 	if (user_mode_vm(regs)) {
1096 		local_irq_enable();
1097 		error_code |= PF_USER;
1098 		flags |= FAULT_FLAG_USER;
1099 	} else {
1100 		if (regs->flags & X86_EFLAGS_IF)
1101 			local_irq_enable();
1102 	}
1103 
1104 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1105 
1106 	if (error_code & PF_WRITE)
1107 		flags |= FAULT_FLAG_WRITE;
1108 
1109 	/*
1110 	 * When running in the kernel we expect faults to occur only to
1111 	 * addresses in user space.  All other faults represent errors in
1112 	 * the kernel and should generate an OOPS.  Unfortunately, in the
1113 	 * case of an erroneous fault occurring in a code path which already
1114 	 * holds mmap_sem we will deadlock attempting to validate the fault
1115 	 * against the address space.  Luckily the kernel only validly
1116 	 * references user space from well defined areas of code, which are
1117 	 * listed in the exceptions table.
1118 	 *
1119 	 * As the vast majority of faults will be valid we will only perform
1120 	 * the source reference check when there is a possibility of a
1121 	 * deadlock. Attempt to lock the address space, if we cannot we then
1122 	 * validate the source. If this is invalid we can skip the address
1123 	 * space check, thus avoiding the deadlock:
1124 	 */
1125 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1126 		if ((error_code & PF_USER) == 0 &&
1127 		    !search_exception_tables(regs->ip)) {
1128 			bad_area_nosemaphore(regs, error_code, address);
1129 			return;
1130 		}
1131 retry:
1132 		down_read(&mm->mmap_sem);
1133 	} else {
1134 		/*
1135 		 * The above down_read_trylock() might have succeeded in
1136 		 * which case we'll have missed the might_sleep() from
1137 		 * down_read():
1138 		 */
1139 		might_sleep();
1140 	}
1141 
1142 	vma = find_vma(mm, address);
1143 	if (unlikely(!vma)) {
1144 		bad_area(regs, error_code, address);
1145 		return;
1146 	}
1147 	if (likely(vma->vm_start <= address))
1148 		goto good_area;
1149 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
1150 		bad_area(regs, error_code, address);
1151 		return;
1152 	}
1153 	if (error_code & PF_USER) {
1154 		/*
1155 		 * Accessing the stack below %sp is always a bug.
1156 		 * The large cushion allows instructions like enter
1157 		 * and pusha to work. ("enter $65535, $31" pushes
1158 		 * 32 pointers and then decrements %sp by 65535.)
1159 		 */
1160 		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
1161 			bad_area(regs, error_code, address);
1162 			return;
1163 		}
1164 	}
1165 	if (unlikely(expand_stack(vma, address))) {
1166 		bad_area(regs, error_code, address);
1167 		return;
1168 	}
1169 
1170 	/*
1171 	 * Ok, we have a good vm_area for this memory access, so
1172 	 * we can handle it..
1173 	 */
1174 good_area:
1175 	if (unlikely(access_error(error_code, vma))) {
1176 		bad_area_access_error(regs, error_code, address);
1177 		return;
1178 	}
1179 
1180 	/*
1181 	 * If for any reason at all we couldn't handle the fault,
1182 	 * make sure we exit gracefully rather than endlessly redo
1183 	 * the fault:
1184 	 */
1185 	fault = handle_mm_fault(mm, vma, address, flags);
1186 
1187 	/*
1188 	 * If we need to retry but a fatal signal is pending, handle the
1189 	 * signal first. We do not need to release the mmap_sem because it
1190 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
1191 	 */
1192 	if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)))
1193 		return;
1194 
1195 	if (unlikely(fault & VM_FAULT_ERROR)) {
1196 		mm_fault_error(regs, error_code, address, fault);
1197 		return;
1198 	}
1199 
1200 	/*
1201 	 * Major/minor page fault accounting is only done on the
1202 	 * initial attempt. If we go through a retry, it is extremely
1203 	 * likely that the page will be found in page cache at that point.
1204 	 */
1205 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
1206 		if (fault & VM_FAULT_MAJOR) {
1207 			tsk->maj_flt++;
1208 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
1209 				      regs, address);
1210 		} else {
1211 			tsk->min_flt++;
1212 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
1213 				      regs, address);
1214 		}
1215 		if (fault & VM_FAULT_RETRY) {
1216 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
1217 			 * of starvation. */
1218 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
1219 			flags |= FAULT_FLAG_TRIED;
1220 			goto retry;
1221 		}
1222 	}
1223 
1224 	check_v8086_mode(regs, address, tsk);
1225 
1226 	up_read(&mm->mmap_sem);
1227 }
1228 
1229 dotraplinkage void __kprobes
1230 do_page_fault(struct pt_regs *regs, unsigned long error_code)
1231 {
1232 	enum ctx_state prev_state;
1233 
1234 	prev_state = exception_enter();
1235 	__do_page_fault(regs, error_code);
1236 	exception_exit(prev_state);
1237 }
1238 
1239 static void trace_page_fault_entries(struct pt_regs *regs,
1240 				     unsigned long error_code)
1241 {
1242 	if (user_mode(regs))
1243 		trace_page_fault_user(read_cr2(), regs, error_code);
1244 	else
1245 		trace_page_fault_kernel(read_cr2(), regs, error_code);
1246 }
1247 
1248 dotraplinkage void __kprobes
1249 trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
1250 {
1251 	enum ctx_state prev_state;
1252 
1253 	prev_state = exception_enter();
1254 	trace_page_fault_entries(regs, error_code);
1255 	__do_page_fault(regs, error_code);
1256 	exception_exit(prev_state);
1257 }
1258