xref: /openbmc/linux/arch/x86/mm/fault.c (revision 56d06fa2)
1 /*
2  *  Copyright (C) 1995  Linus Torvalds
3  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
4  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
5  */
6 #include <linux/sched.h>		/* test_thread_flag(), ...	*/
7 #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
8 #include <linux/module.h>		/* search_exception_table	*/
9 #include <linux/bootmem.h>		/* max_low_pfn			*/
10 #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
11 #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
12 #include <linux/perf_event.h>		/* perf_sw_event		*/
13 #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
14 #include <linux/prefetch.h>		/* prefetchw			*/
15 #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
16 #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
17 
18 #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
19 #include <asm/traps.h>			/* dotraplinkage, ...		*/
20 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
21 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
22 #include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
23 #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
24 #include <asm/vm86.h>			/* struct vm86			*/
25 #include <asm/mmu_context.h>		/* vma_pkey()			*/
26 
27 #define CREATE_TRACE_POINTS
28 #include <asm/trace/exceptions.h>
29 
30 /*
31  * Page fault error code bits:
32  *
33  *   bit 0 ==	 0: no page found	1: protection fault
34  *   bit 1 ==	 0: read access		1: write access
35  *   bit 2 ==	 0: kernel-mode access	1: user-mode access
36  *   bit 3 ==				1: use of reserved bit detected
37  *   bit 4 ==				1: fault was an instruction fetch
38  *   bit 5 ==				1: protection keys block access
39  */
40 enum x86_pf_error_code {
41 
42 	PF_PROT		=		1 << 0,
43 	PF_WRITE	=		1 << 1,
44 	PF_USER		=		1 << 2,
45 	PF_RSVD		=		1 << 3,
46 	PF_INSTR	=		1 << 4,
47 	PF_PK		=		1 << 5,
48 };
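
/*
 * Editorial note (not part of the original file): the CPU pushes this
 * error code when it raises #PF.  As an illustration, an error code of
 * 0x7 == (PF_PROT | PF_WRITE | PF_USER) describes a user-mode write to
 * a present page whose PTE forbids the write, while 0x0 describes a
 * kernel-mode read of a not-present page.
 */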
49 
50 /*
51  * Returns 0 if mmiotrace is disabled, or if the fault is not
52  * handled by mmiotrace:
53  */
54 static nokprobe_inline int
55 kmmio_fault(struct pt_regs *regs, unsigned long addr)
56 {
57 	if (unlikely(is_kmmio_active()))
58 		if (kmmio_handler(regs, addr) == 1)
59 			return -1;
60 	return 0;
61 }
62 
63 static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
64 {
65 	int ret = 0;
66 
67 	/* kprobe_running() needs smp_processor_id() */
68 	if (kprobes_built_in() && !user_mode(regs)) {
69 		preempt_disable();
70 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
71 			ret = 1;
72 		preempt_enable();
73 	}
74 
75 	return ret;
76 }
77 
78 /*
79  * Prefetch quirks:
80  *
81  * 32-bit mode:
82  *
83  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
84  *   Check that here and ignore it.
85  *
86  * 64-bit mode:
87  *
88  *   Sometimes the CPU reports invalid exceptions on prefetch.
89  *   Check that here and ignore it.
90  *
91  * Opcode checker based on code by Richard Brunner.
92  */
93 static inline int
94 check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
95 		      unsigned char opcode, int *prefetch)
96 {
97 	unsigned char instr_hi = opcode & 0xf0;
98 	unsigned char instr_lo = opcode & 0x0f;
99 
100 	switch (instr_hi) {
101 	case 0x20:
102 	case 0x30:
103 		/*
104 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
105 		 * In X86_64 long mode, the CPU will signal invalid
106 		 * opcode if some of these prefixes are present, so
107 		 * X86_64 will never get here anyway.
108 		 */
109 		return ((instr_lo & 7) == 0x6);
110 #ifdef CONFIG_X86_64
111 	case 0x40:
112 		/*
113 		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
114 		 * We need to figure out under what instruction mode the
115 		 * instruction was issued. We could check the LDT for lm,
116 		 * but for now it's good enough to assume that long
117 		 * mode only uses well-known segments or the kernel.
118 		 */
119 		return (!user_mode(regs) || user_64bit_mode(regs));
120 #endif
121 	case 0x60:
122 		/* 0x64 thru 0x67 are valid prefixes in all modes. */
123 		return (instr_lo & 0xC) == 0x4;
124 	case 0xF0:
125 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
126 		return !instr_lo || (instr_lo>>1) == 1;
127 	case 0x00:
128 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
129 		if (probe_kernel_address(instr, opcode))
130 			return 0;
131 
132 		*prefetch = (instr_lo == 0xF) &&
133 			(opcode == 0x0D || opcode == 0x18);
134 		return 0;
135 	default:
136 		return 0;
137 	}
138 }
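
/*
 * Editorial note (not part of the original file): the 0x0F 0x0D and
 * 0x0F 0x18 byte sequences tested for above are the AMD
 * PREFETCH/PREFETCHW and the SSE PREFETCHh instruction encodings,
 * i.e. the prefetch instructions whose bogus faults this quirk
 * handling is meant to swallow.
 */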
139 
140 static int
141 is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
142 {
143 	unsigned char *max_instr;
144 	unsigned char *instr;
145 	int prefetch = 0;
146 
147 	/*
148 	 * If it was an exec (instruction fetch) fault on an NX page, then
149 	 * do not ignore the fault:
150 	 */
151 	if (error_code & PF_INSTR)
152 		return 0;
153 
154 	instr = (void *)convert_ip_to_linear(current, regs);
155 	max_instr = instr + 15;
156 
157 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
158 		return 0;
159 
160 	while (instr < max_instr) {
161 		unsigned char opcode;
162 
163 		if (probe_kernel_address(instr, opcode))
164 			break;
165 
166 		instr++;
167 
168 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
169 			break;
170 	}
171 	return prefetch;
172 }
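
/*
 * Editorial note (not part of the original file): the "instr + 15"
 * scan limit above comes from the architectural maximum x86
 * instruction length of 15 bytes, so the prefix walk can never run
 * past the end of the faulting instruction.
 */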
173 
174 /*
175  * A protection key fault means that the PKRU value did not allow
176  * access to some PTE.  Userspace can figure out what PKRU was
177  * from the XSAVE state, and this function fills out a field in
178  * siginfo so userspace can discover which protection key was set
179  * on the PTE.
180  *
181  * If we get here, we know that the hardware signaled a PF_PK
182  * fault and that there was a VMA once we got in the fault
183  * handler.  It does *not* guarantee that the VMA we find here
184  * was the one that we faulted on.
185  *
186  * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
187  * 2. T1   : set PKRU to deny access to pkey=4, touches page
188  * 3. T1   : faults...
189  * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
190  * 5. T1   : enters fault handler, takes mmap_sem, etc...
191  * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
192  *	     faulted on a pte with its pkey=4.
193  */
194 static void fill_sig_info_pkey(int si_code, siginfo_t *info,
195 		struct vm_area_struct *vma)
196 {
197 	/* This is effectively an #ifdef */
198 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
199 		return;
200 
201 	/* Fault not from Protection Keys: nothing to do */
202 	if (si_code != SEGV_PKUERR)
203 		return;
204 	/*
205 	 * force_sig_info_fault() is called from a number of
206 	 * contexts, some of which have a VMA and some of which
207 	 * do not.  The PF_PK handling happens after we have a
208 	 * valid VMA, so we should never reach this without a
209 	 * valid VMA.
210 	 */
211 	if (!vma) {
212 		WARN_ONCE(1, "PKU fault with no VMA passed in");
213 		info->si_pkey = 0;
214 		return;
215 	}
216 	/*
217 	 * si_pkey should be thought of as a strong hint, but not
218 	 * absolutely guaranteed to be 100% accurate because of
219 	 * the race explained above.
220 	 */
221 	info->si_pkey = vma_pkey(vma);
222 }
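
/*
 * Editorial note (not part of the original file): userspace pairs
 * si_pkey with the PKRU image saved in the signal frame's XSAVE area;
 * the two bits (PKRU >> (2 * si_pkey)) & 3 are the access-disable and
 * write-disable bits for the key reported here.
 */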
223 
224 static void
225 force_sig_info_fault(int si_signo, int si_code, unsigned long address,
226 		     struct task_struct *tsk, struct vm_area_struct *vma,
227 		     int fault)
228 {
229 	unsigned lsb = 0;
230 	siginfo_t info;
231 
232 	info.si_signo	= si_signo;
233 	info.si_errno	= 0;
234 	info.si_code	= si_code;
235 	info.si_addr	= (void __user *)address;
236 	if (fault & VM_FAULT_HWPOISON_LARGE)
237 		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
238 	if (fault & VM_FAULT_HWPOISON)
239 		lsb = PAGE_SHIFT;
240 	info.si_addr_lsb = lsb;
241 
242 	fill_sig_info_pkey(si_code, &info, vma);
243 
244 	force_sig_info(si_signo, &info, tsk);
245 }
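
/*
 * Editorial note (not part of the original file): si_addr_lsb reports
 * the granularity of the poisoned mapping to the signal handler:
 * PAGE_SHIFT for a normal page, or the matching huge page shift when
 * the hardware poison was found in a huge page.
 */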
246 
247 DEFINE_SPINLOCK(pgd_lock);
248 LIST_HEAD(pgd_list);
249 
250 #ifdef CONFIG_X86_32
251 static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
252 {
253 	unsigned index = pgd_index(address);
254 	pgd_t *pgd_k;
255 	pud_t *pud, *pud_k;
256 	pmd_t *pmd, *pmd_k;
257 
258 	pgd += index;
259 	pgd_k = init_mm.pgd + index;
260 
261 	if (!pgd_present(*pgd_k))
262 		return NULL;
263 
264 	/*
265 	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
266 	 * and redundant with the set_pmd() on non-PAE. As would
267 	 * set_pud.
268 	 */
269 	pud = pud_offset(pgd, address);
270 	pud_k = pud_offset(pgd_k, address);
271 	if (!pud_present(*pud_k))
272 		return NULL;
273 
274 	pmd = pmd_offset(pud, address);
275 	pmd_k = pmd_offset(pud_k, address);
276 	if (!pmd_present(*pmd_k))
277 		return NULL;
278 
279 	if (!pmd_present(*pmd))
280 		set_pmd(pmd, *pmd_k);
281 	else
282 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
283 
284 	return pmd_k;
285 }
286 
287 void vmalloc_sync_all(void)
288 {
289 	unsigned long address;
290 
291 	if (SHARED_KERNEL_PMD)
292 		return;
293 
294 	for (address = VMALLOC_START & PMD_MASK;
295 	     address >= TASK_SIZE && address < FIXADDR_TOP;
296 	     address += PMD_SIZE) {
297 		struct page *page;
298 
299 		spin_lock(&pgd_lock);
300 		list_for_each_entry(page, &pgd_list, lru) {
301 			spinlock_t *pgt_lock;
302 			pmd_t *ret;
303 
304 			/* the pgt_lock only for Xen */
305 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
306 
307 			spin_lock(pgt_lock);
308 			ret = vmalloc_sync_one(page_address(page), address);
309 			spin_unlock(pgt_lock);
310 
311 			if (!ret)
312 				break;
313 		}
314 		spin_unlock(&pgd_lock);
315 	}
316 }
317 
318 /*
319  * 32-bit:
320  *
321  *   Handle a fault on the vmalloc or module mapping area
322  */
323 static noinline int vmalloc_fault(unsigned long address)
324 {
325 	unsigned long pgd_paddr;
326 	pmd_t *pmd_k;
327 	pte_t *pte_k;
328 
329 	/* Make sure we are in vmalloc area: */
330 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
331 		return -1;
332 
333 	WARN_ON_ONCE(in_nmi());
334 
335 	/*
336 	 * Synchronize this task's top level page-table
337 	 * with the 'reference' page table.
338 	 *
339 	 * Do _not_ use "current" here. We might be inside
340 	 * an interrupt in the middle of a task switch..
341 	 */
342 	pgd_paddr = read_cr3();
343 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
344 	if (!pmd_k)
345 		return -1;
346 
347 	if (pmd_huge(*pmd_k))
348 		return 0;
349 
350 	pte_k = pte_offset_kernel(pmd_k, address);
351 	if (!pte_present(*pte_k))
352 		return -1;
353 
354 	return 0;
355 }
356 NOKPROBE_SYMBOL(vmalloc_fault);
357 
358 /*
359  * Did it hit the DOS screen memory VA from vm86 mode?
360  */
361 static inline void
362 check_v8086_mode(struct pt_regs *regs, unsigned long address,
363 		 struct task_struct *tsk)
364 {
365 #ifdef CONFIG_VM86
366 	unsigned long bit;
367 
368 	if (!v8086_mode(regs) || !tsk->thread.vm86)
369 		return;
370 
371 	bit = (address - 0xA0000) >> PAGE_SHIFT;
372 	if (bit < 32)
373 		tsk->thread.vm86->screen_bitmap |= 1 << bit;
374 #endif
375 }
376 
377 static bool low_pfn(unsigned long pfn)
378 {
379 	return pfn < max_low_pfn;
380 }
381 
382 static void dump_pagetable(unsigned long address)
383 {
384 	pgd_t *base = __va(read_cr3());
385 	pgd_t *pgd = &base[pgd_index(address)];
386 	pmd_t *pmd;
387 	pte_t *pte;
388 
389 #ifdef CONFIG_X86_PAE
390 	printk("*pdpt = %016Lx ", pgd_val(*pgd));
391 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
392 		goto out;
393 #endif
394 	pmd = pmd_offset(pud_offset(pgd, address), address);
395 	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
396 
397 	/*
398 	 * We must not directly access the pte in the highpte
399 	 * case if the page table is located in highmem.
400 	 * And let's rather not kmap-atomic the pte, just in case
401 	 * it's allocated already:
402 	 */
403 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
404 		goto out;
405 
406 	pte = pte_offset_kernel(pmd, address);
407 	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
408 out:
409 	printk("\n");
410 }
411 
412 #else /* CONFIG_X86_64: */
413 
414 void vmalloc_sync_all(void)
415 {
416 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
417 }
418 
419 /*
420  * 64-bit:
421  *
422  *   Handle a fault on the vmalloc area
423  */
424 static noinline int vmalloc_fault(unsigned long address)
425 {
426 	pgd_t *pgd, *pgd_ref;
427 	pud_t *pud, *pud_ref;
428 	pmd_t *pmd, *pmd_ref;
429 	pte_t *pte, *pte_ref;
430 
431 	/* Make sure we are in vmalloc area: */
432 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
433 		return -1;
434 
435 	WARN_ON_ONCE(in_nmi());
436 
437 	/*
438 	 * Copy kernel mappings over when needed. This can also
439 	 * happen due to a race in a page table update. In the latter
440 	 * case, just flush:
441 	 */
442 	pgd = pgd_offset(current->active_mm, address);
443 	pgd_ref = pgd_offset_k(address);
444 	if (pgd_none(*pgd_ref))
445 		return -1;
446 
447 	if (pgd_none(*pgd)) {
448 		set_pgd(pgd, *pgd_ref);
449 		arch_flush_lazy_mmu_mode();
450 	} else {
451 		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
452 	}
453 
454 	/*
455 	 * Below here mismatches are bugs because these lower tables
456 	 * are shared:
457 	 */
458 
459 	pud = pud_offset(pgd, address);
460 	pud_ref = pud_offset(pgd_ref, address);
461 	if (pud_none(*pud_ref))
462 		return -1;
463 
464 	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
465 		BUG();
466 
467 	if (pud_huge(*pud))
468 		return 0;
469 
470 	pmd = pmd_offset(pud, address);
471 	pmd_ref = pmd_offset(pud_ref, address);
472 	if (pmd_none(*pmd_ref))
473 		return -1;
474 
475 	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
476 		BUG();
477 
478 	if (pmd_huge(*pmd))
479 		return 0;
480 
481 	pte_ref = pte_offset_kernel(pmd_ref, address);
482 	if (!pte_present(*pte_ref))
483 		return -1;
484 
485 	pte = pte_offset_kernel(pmd, address);
486 
487 	/*
488 	 * Don't use pte_page here, because the mappings can point
489 	 * outside mem_map, and the NUMA hash lookup cannot handle
490 	 * that:
491 	 */
492 	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
493 		BUG();
494 
495 	return 0;
496 }
497 NOKPROBE_SYMBOL(vmalloc_fault);
498 
499 #ifdef CONFIG_CPU_SUP_AMD
500 static const char errata93_warning[] =
501 KERN_ERR
502 "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
503 "******* Working around it, but it may cause SEGVs or burn power.\n"
504 "******* Please consider a BIOS update.\n"
505 "******* Disabling USB legacy in the BIOS may also help.\n";
506 #endif
507 
508 /*
509  * No vm86 mode in 64-bit mode:
510  */
511 static inline void
512 check_v8086_mode(struct pt_regs *regs, unsigned long address,
513 		 struct task_struct *tsk)
514 {
515 }
516 
517 static int bad_address(void *p)
518 {
519 	unsigned long dummy;
520 
521 	return probe_kernel_address((unsigned long *)p, dummy);
522 }
523 
524 static void dump_pagetable(unsigned long address)
525 {
526 	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
527 	pgd_t *pgd = base + pgd_index(address);
528 	pud_t *pud;
529 	pmd_t *pmd;
530 	pte_t *pte;
531 
532 	if (bad_address(pgd))
533 		goto bad;
534 
535 	printk("PGD %lx ", pgd_val(*pgd));
536 
537 	if (!pgd_present(*pgd))
538 		goto out;
539 
540 	pud = pud_offset(pgd, address);
541 	if (bad_address(pud))
542 		goto bad;
543 
544 	printk("PUD %lx ", pud_val(*pud));
545 	if (!pud_present(*pud) || pud_large(*pud))
546 		goto out;
547 
548 	pmd = pmd_offset(pud, address);
549 	if (bad_address(pmd))
550 		goto bad;
551 
552 	printk("PMD %lx ", pmd_val(*pmd));
553 	if (!pmd_present(*pmd) || pmd_large(*pmd))
554 		goto out;
555 
556 	pte = pte_offset_kernel(pmd, address);
557 	if (bad_address(pte))
558 		goto bad;
559 
560 	printk("PTE %lx", pte_val(*pte));
561 out:
562 	printk("\n");
563 	return;
564 bad:
565 	printk("BAD\n");
566 }
567 
568 #endif /* CONFIG_X86_64 */
569 
570 /*
571  * Workaround for K8 erratum #93 & buggy BIOS.
572  *
573  * BIOS SMM functions are required to use a specific workaround
574  * to avoid corruption of the 64bit RIP register on C stepping K8.
575  *
576  * A lot of BIOSes that weren't tested properly miss this.
577  *
578  * The OS sees this as a page fault with the upper 32bits of RIP cleared.
579  * Try to work around it here.
580  *
581  * Note we only handle faults in kernel here.
582  * Does nothing on 32-bit.
583  */
584 static int is_errata93(struct pt_regs *regs, unsigned long address)
585 {
586 #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
587 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
588 	    || boot_cpu_data.x86 != 0xf)
589 		return 0;
590 
591 	if (address != regs->ip)
592 		return 0;
593 
594 	if ((address >> 32) != 0)
595 		return 0;
596 
597 	address |= 0xffffffffUL << 32;
598 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
599 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
600 		printk_once(errata93_warning);
601 		regs->ip = address;
602 		return 1;
603 	}
604 #endif
605 	return 0;
606 }
607 
608 /*
609  * Work around K8 erratum #100: K8 in compat mode occasionally jumps
610  * to illegal addresses >4GB.
611  *
612  * We catch this in the page fault handler because these addresses
613  * are not reachable. Just detect this case and return.  Any code
614  * segment in the LDT is assumed to be compatibility mode.
615  */
616 static int is_errata100(struct pt_regs *regs, unsigned long address)
617 {
618 #ifdef CONFIG_X86_64
619 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
620 		return 1;
621 #endif
622 	return 0;
623 }
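
/*
 * Editorial note (not part of the original file): the (regs->cs & (1<<2))
 * test above checks the selector's Table Indicator bit; when it is set
 * the code segment lives in the LDT, which this code treats as a
 * compatibility-mode segment per the comment above.
 */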
624 
625 static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
626 {
627 #ifdef CONFIG_X86_F00F_BUG
628 	unsigned long nr;
629 
630 	/*
631 	 * Pentium F0 0F C7 C8 bug workaround:
632 	 */
633 	if (boot_cpu_has_bug(X86_BUG_F00F)) {
634 		nr = (address - idt_descr.address) >> 3;
635 
636 		if (nr == 6) {
637 			do_invalid_op(regs, 0);
638 			return 1;
639 		}
640 	}
641 #endif
642 	return 0;
643 }
644 
645 static const char nx_warning[] = KERN_CRIT
646 "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
647 static const char smep_warning[] = KERN_CRIT
648 "unable to execute userspace code (SMEP?) (uid: %d)\n";
649 
650 static void
651 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
652 		unsigned long address)
653 {
654 	if (!oops_may_print())
655 		return;
656 
657 	if (error_code & PF_INSTR) {
658 		unsigned int level;
659 		pgd_t *pgd;
660 		pte_t *pte;
661 
662 		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
663 		pgd += pgd_index(address);
664 
665 		pte = lookup_address_in_pgd(pgd, address, &level);
666 
667 		if (pte && pte_present(*pte) && !pte_exec(*pte))
668 			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
669 		if (pte && pte_present(*pte) && pte_exec(*pte) &&
670 				(pgd_flags(*pgd) & _PAGE_USER) &&
671 				(__read_cr4() & X86_CR4_SMEP))
672 			printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
673 	}
674 
675 	printk(KERN_ALERT "BUG: unable to handle kernel ");
676 	if (address < PAGE_SIZE)
677 		printk(KERN_CONT "NULL pointer dereference");
678 	else
679 		printk(KERN_CONT "paging request");
680 
681 	printk(KERN_CONT " at %p\n", (void *) address);
682 	printk(KERN_ALERT "IP:");
683 	printk_address(regs->ip);
684 
685 	dump_pagetable(address);
686 }
687 
688 static noinline void
689 pgtable_bad(struct pt_regs *regs, unsigned long error_code,
690 	    unsigned long address)
691 {
692 	struct task_struct *tsk;
693 	unsigned long flags;
694 	int sig;
695 
696 	flags = oops_begin();
697 	tsk = current;
698 	sig = SIGKILL;
699 
700 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
701 	       tsk->comm, address);
702 	dump_pagetable(address);
703 
704 	tsk->thread.cr2		= address;
705 	tsk->thread.trap_nr	= X86_TRAP_PF;
706 	tsk->thread.error_code	= error_code;
707 
708 	if (__die("Bad pagetable", regs, error_code))
709 		sig = 0;
710 
711 	oops_end(flags, regs, sig);
712 }
713 
714 static noinline void
715 no_context(struct pt_regs *regs, unsigned long error_code,
716 	   unsigned long address, int signal, int si_code)
717 {
718 	struct task_struct *tsk = current;
719 	unsigned long flags;
720 	int sig;
721 	/* No context means no VMA to pass down */
722 	struct vm_area_struct *vma = NULL;
723 
724 	/* Are we prepared to handle this kernel fault? */
725 	if (fixup_exception(regs, X86_TRAP_PF)) {
726 		/*
727 		 * Any interrupt that takes a fault gets the fixup. This makes
728 		 * the recursive fault logic below apply only to faults from
729 		 * task context.
730 		 */
731 		if (in_interrupt())
732 			return;
733 
734 		/*
735 		 * Per the above we're !in_interrupt(), aka. task context.
736 		 *
737 		 * In this case we need to make sure we're not recursively
738 		 * faulting through the emulate_vsyscall() logic.
739 		 */
740 		if (current_thread_info()->sig_on_uaccess_error && signal) {
741 			tsk->thread.trap_nr = X86_TRAP_PF;
742 			tsk->thread.error_code = error_code | PF_USER;
743 			tsk->thread.cr2 = address;
744 
745 			/* XXX: hwpoison faults will set the wrong code. */
746 			force_sig_info_fault(signal, si_code, address,
747 					     tsk, vma, 0);
748 		}
749 
750 		/*
751 		 * Barring that, we can do the fixup and be happy.
752 		 */
753 		return;
754 	}
755 
756 	/*
757 	 * 32-bit:
758 	 *
759 	 *   Valid to do another page fault here, because if this fault
760 	 *   had been triggered by is_prefetch fixup_exception would have
761 	 *   handled it.
762 	 *
763 	 * 64-bit:
764 	 *
765 	 *   Hall of shame of CPU/BIOS bugs.
766 	 */
767 	if (is_prefetch(regs, error_code, address))
768 		return;
769 
770 	if (is_errata93(regs, address))
771 		return;
772 
773 	/*
774 	 * Oops. The kernel tried to access some bad page. We'll have to
775 	 * terminate things with extreme prejudice:
776 	 */
777 	flags = oops_begin();
778 
779 	show_fault_oops(regs, error_code, address);
780 
781 	if (task_stack_end_corrupted(tsk))
782 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
783 
784 	tsk->thread.cr2		= address;
785 	tsk->thread.trap_nr	= X86_TRAP_PF;
786 	tsk->thread.error_code	= error_code;
787 
788 	sig = SIGKILL;
789 	if (__die("Oops", regs, error_code))
790 		sig = 0;
791 
792 	/* Executive summary in case the body of the oops scrolled away */
793 	printk(KERN_DEFAULT "CR2: %016lx\n", address);
794 
795 	oops_end(flags, regs, sig);
796 }
797 
798 /*
799  * Print out info about fatal segfaults, if the show_unhandled_signals
800  * sysctl is set:
801  */
802 static inline void
803 show_signal_msg(struct pt_regs *regs, unsigned long error_code,
804 		unsigned long address, struct task_struct *tsk)
805 {
806 	if (!unhandled_signal(tsk, SIGSEGV))
807 		return;
808 
809 	if (!printk_ratelimit())
810 		return;
811 
812 	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
813 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
814 		tsk->comm, task_pid_nr(tsk), address,
815 		(void *)regs->ip, (void *)regs->sp, error_code);
816 
817 	print_vma_addr(KERN_CONT " in ", regs->ip);
818 
819 	printk(KERN_CONT "\n");
820 }
821 
822 static void
823 __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
824 		       unsigned long address, struct vm_area_struct *vma,
825 		       int si_code)
826 {
827 	struct task_struct *tsk = current;
828 
829 	/* User mode accesses just cause a SIGSEGV */
830 	if (error_code & PF_USER) {
831 		/*
832 		 * It's possible to have interrupts off here:
833 		 */
834 		local_irq_enable();
835 
836 		/*
837 		 * Valid to do another page fault here because this one came
838 		 * from user space:
839 		 */
840 		if (is_prefetch(regs, error_code, address))
841 			return;
842 
843 		if (is_errata100(regs, address))
844 			return;
845 
846 #ifdef CONFIG_X86_64
847 		/*
848 		 * Instruction fetch faults in the vsyscall page might need
849 		 * emulation.
850 		 */
851 		if (unlikely((error_code & PF_INSTR) &&
852 			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
853 			if (emulate_vsyscall(regs, address))
854 				return;
855 		}
856 #endif
857 		/* Kernel addresses are always protection faults: */
858 		if (address >= TASK_SIZE)
859 			error_code |= PF_PROT;
860 
861 		if (likely(show_unhandled_signals))
862 			show_signal_msg(regs, error_code, address, tsk);
863 
864 		tsk->thread.cr2		= address;
865 		tsk->thread.error_code	= error_code;
866 		tsk->thread.trap_nr	= X86_TRAP_PF;
867 
868 		force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
869 
870 		return;
871 	}
872 
873 	if (is_f00f_bug(regs, address))
874 		return;
875 
876 	no_context(regs, error_code, address, SIGSEGV, si_code);
877 }
878 
879 static noinline void
880 bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
881 		     unsigned long address, struct vm_area_struct *vma)
882 {
883 	__bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
884 }
885 
886 static void
887 __bad_area(struct pt_regs *regs, unsigned long error_code,
888 	   unsigned long address,  struct vm_area_struct *vma, int si_code)
889 {
890 	struct mm_struct *mm = current->mm;
891 
892 	/*
893 	 * Something tried to access memory that isn't in our memory map..
894 	 * Fix it, but check if it's kernel or user first..
895 	 */
896 	up_read(&mm->mmap_sem);
897 
898 	__bad_area_nosemaphore(regs, error_code, address, vma, si_code);
899 }
900 
901 static noinline void
902 bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
903 {
904 	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
905 }
906 
907 static inline bool bad_area_access_from_pkeys(unsigned long error_code,
908 		struct vm_area_struct *vma)
909 {
910 	/* This code is always called on the current mm */
911 	bool foreign = false;
912 
913 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
914 		return false;
915 	if (error_code & PF_PK)
916 		return true;
917 	/* this checks permission keys on the VMA: */
918 	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
919 				(error_code & PF_INSTR), foreign))
920 		return true;
921 	return false;
922 }
923 
924 static noinline void
925 bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
926 		      unsigned long address, struct vm_area_struct *vma)
927 {
928 	/*
929 	 * This OSPKE check is not strictly necessary at runtime.
930 	 * But, doing it this way allows compiler optimizations
931 	 * if pkeys are compiled out.
932 	 */
933 	if (bad_area_access_from_pkeys(error_code, vma))
934 		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
935 	else
936 		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
937 }
938 
939 static void
940 do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
941 	  struct vm_area_struct *vma, unsigned int fault)
942 {
943 	struct task_struct *tsk = current;
944 	int code = BUS_ADRERR;
945 
946 	/* Kernel mode? Handle exceptions or die: */
947 	if (!(error_code & PF_USER)) {
948 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
949 		return;
950 	}
951 
952 	/* User-space => ok to do another page fault: */
953 	if (is_prefetch(regs, error_code, address))
954 		return;
955 
956 	tsk->thread.cr2		= address;
957 	tsk->thread.error_code	= error_code;
958 	tsk->thread.trap_nr	= X86_TRAP_PF;
959 
960 #ifdef CONFIG_MEMORY_FAILURE
961 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
962 		printk(KERN_ERR
963 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
964 			tsk->comm, tsk->pid, address);
965 		code = BUS_MCEERR_AR;
966 	}
967 #endif
968 	force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
969 }
970 
971 static noinline void
972 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
973 	       unsigned long address, struct vm_area_struct *vma,
974 	       unsigned int fault)
975 {
976 	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
977 		no_context(regs, error_code, address, 0, 0);
978 		return;
979 	}
980 
981 	if (fault & VM_FAULT_OOM) {
982 		/* Kernel mode? Handle exceptions or die: */
983 		if (!(error_code & PF_USER)) {
984 			no_context(regs, error_code, address,
985 				   SIGSEGV, SEGV_MAPERR);
986 			return;
987 		}
988 
989 		/*
990 		 * We ran out of memory, call the OOM killer, and return to
991 		 * userspace (which will retry the fault, or kill us if we got
992 		 * oom-killed):
993 		 */
994 		pagefault_out_of_memory();
995 	} else {
996 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
997 			     VM_FAULT_HWPOISON_LARGE))
998 			do_sigbus(regs, error_code, address, vma, fault);
999 		else if (fault & VM_FAULT_SIGSEGV)
1000 			bad_area_nosemaphore(regs, error_code, address, vma);
1001 		else
1002 			BUG();
1003 	}
1004 }
1005 
1006 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
1007 {
1008 	if ((error_code & PF_WRITE) && !pte_write(*pte))
1009 		return 0;
1010 
1011 	if ((error_code & PF_INSTR) && !pte_exec(*pte))
1012 		return 0;
1013 	/*
1014 	 * Note: We do not do lazy flushing on protection key
1015 	 * changes, so no spurious fault will ever set PF_PK.
1016 	 */
1017 	if ((error_code & PF_PK))
1018 		return 1;
1019 
1020 	return 1;
1021 }
1022 
1023 /*
1024  * Handle a spurious fault caused by a stale TLB entry.
1025  *
1026  * This allows us to lazily refresh the TLB when increasing the
1027  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
1028  * eagerly is very expensive since that implies doing a full
1029  * cross-processor TLB flush, even if no stale TLB entries exist
1030  * on other processors.
1031  *
1032  * Spurious faults may only occur if the TLB contains an entry with
1033  * fewer permissions than the page table entry.  Non-present (P = 0)
1034  * and reserved bit (R = 1) faults are never spurious.
1035  *
1036  * There are no security implications to leaving a stale TLB when
1037  * increasing the permissions on a page.
1038  *
1039  * Returns non-zero if a spurious fault was handled, zero otherwise.
1040  *
1041  * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
1042  * (Optional Invalidation).
1043  */
1044 static noinline int
1045 spurious_fault(unsigned long error_code, unsigned long address)
1046 {
1047 	pgd_t *pgd;
1048 	pud_t *pud;
1049 	pmd_t *pmd;
1050 	pte_t *pte;
1051 	int ret;
1052 
1053 	/*
1054 	 * Only writes to RO or instruction fetches from NX may cause
1055 	 * spurious faults.
1056 	 *
1057 	 * These could be from user or supervisor accesses but the TLB
1058 	 * is only lazily flushed after a kernel mapping protection
1059 	 * change, so user accesses are not expected to cause spurious
1060 	 * faults.
1061 	 */
1062 	if (error_code != (PF_WRITE | PF_PROT)
1063 	    && error_code != (PF_INSTR | PF_PROT))
1064 		return 0;
1065 
1066 	pgd = init_mm.pgd + pgd_index(address);
1067 	if (!pgd_present(*pgd))
1068 		return 0;
1069 
1070 	pud = pud_offset(pgd, address);
1071 	if (!pud_present(*pud))
1072 		return 0;
1073 
1074 	if (pud_large(*pud))
1075 		return spurious_fault_check(error_code, (pte_t *) pud);
1076 
1077 	pmd = pmd_offset(pud, address);
1078 	if (!pmd_present(*pmd))
1079 		return 0;
1080 
1081 	if (pmd_large(*pmd))
1082 		return spurious_fault_check(error_code, (pte_t *) pmd);
1083 
1084 	pte = pte_offset_kernel(pmd, address);
1085 	if (!pte_present(*pte))
1086 		return 0;
1087 
1088 	ret = spurious_fault_check(error_code, pte);
1089 	if (!ret)
1090 		return 0;
1091 
1092 	/*
1093 	 * Make sure we have permissions in PMD.
1094 	 * If not, then there's a bug in the page tables:
1095 	 */
1096 	ret = spurious_fault_check(error_code, (pte_t *) pmd);
1097 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
1098 
1099 	return ret;
1100 }
1101 NOKPROBE_SYMBOL(spurious_fault);
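
/*
 * Editorial note (not part of the original file), illustrating the
 * lazy flush described above: CPU 0 upgrades a kernel page from RO to
 * RW without a cross-CPU TLB flush; CPU 1 still holds the stale RO
 * translation and faults with PF_PROT | PF_WRITE; spurious_fault()
 * finds that the current PTE already allows the write, returns
 * non-zero, and the retried access picks up a fresh TLB entry.
 */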
1102 
1103 int show_unhandled_signals = 1;
1104 
1105 static inline int
1106 access_error(unsigned long error_code, struct vm_area_struct *vma)
1107 {
1108 	/* This is only called for the current mm, so: */
1109 	bool foreign = false;
1110 	/*
1111 	 * Make sure to check the VMA so that we do not perform
1112 	 * faults just to hit a PF_PK as soon as we fill in a
1113 	 * page.
1114 	 */
1115 	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
1116 				(error_code & PF_INSTR), foreign))
1117 		return 1;
1118 
1119 	if (error_code & PF_WRITE) {
1120 		/* write, present and write, not present: */
1121 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1122 			return 1;
1123 		return 0;
1124 	}
1125 
1126 	/* read, present: */
1127 	if (unlikely(error_code & PF_PROT))
1128 		return 1;
1129 
1130 	/* read, not present: */
1131 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
1132 		return 1;
1133 
1134 	return 0;
1135 }
1136 
1137 static int fault_in_kernel_space(unsigned long address)
1138 {
1139 	return address >= TASK_SIZE_MAX;
1140 }
1141 
1142 static inline bool smap_violation(int error_code, struct pt_regs *regs)
1143 {
1144 	if (!IS_ENABLED(CONFIG_X86_SMAP))
1145 		return false;
1146 
1147 	if (!static_cpu_has(X86_FEATURE_SMAP))
1148 		return false;
1149 
1150 	if (error_code & PF_USER)
1151 		return false;
1152 
1153 	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
1154 		return false;
1155 
1156 	return true;
1157 }
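
/*
 * Editorial note (not part of the original file): the EFLAGS.AC check
 * above exists because the kernel deliberately sets AC (via stac())
 * around its own legitimate user-space accesses, so a supervisor-mode
 * fault with AC set is an expected uaccess and not an SMAP violation.
 */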
1158 
1159 /*
1160  * This routine handles page faults.  It determines the address,
1161  * and the problem, and then passes it off to one of the appropriate
1162  * routines.
1163  *
1164  * This function must be marked noinline because both callers
1165  * {,trace_}do_page_fault() are marked notrace. Making this an actual
1166  * function guarantees there's a function trace entry.
1167  */
1168 static noinline void
1169 __do_page_fault(struct pt_regs *regs, unsigned long error_code,
1170 		unsigned long address)
1171 {
1172 	struct vm_area_struct *vma;
1173 	struct task_struct *tsk;
1174 	struct mm_struct *mm;
1175 	int fault, major = 0;
1176 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1177 
1178 	tsk = current;
1179 	mm = tsk->mm;
1180 
1181 	/*
1182 	 * Detect and handle instructions that would cause a page fault for
1183 	 * both a tracked kernel page and a userspace page.
1184 	 */
1185 	if (kmemcheck_active(regs))
1186 		kmemcheck_hide(regs);
1187 	prefetchw(&mm->mmap_sem);
1188 
1189 	if (unlikely(kmmio_fault(regs, address)))
1190 		return;
1191 
1192 	/*
1193 	 * We fault-in kernel-space virtual memory on-demand. The
1194 	 * 'reference' page table is init_mm.pgd.
1195 	 *
1196 	 * NOTE! We MUST NOT take any locks for this case. We may
1197 	 * be in an interrupt or a critical region, and should
1198 	 * only copy the information from the master page table,
1199 	 * nothing more.
1200 	 *
1201 	 * This verifies that the fault happens in kernel space
1202 	 * (error_code & 4) == 0, and that the fault was not a
1203 	 * protection error (error_code & 9) == 0.
1204 	 */
1205 	if (unlikely(fault_in_kernel_space(address))) {
1206 		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
1207 			if (vmalloc_fault(address) >= 0)
1208 				return;
1209 
1210 			if (kmemcheck_fault(regs, address, error_code))
1211 				return;
1212 		}
1213 
1214 		/* Can handle a stale RO->RW TLB: */
1215 		if (spurious_fault(error_code, address))
1216 			return;
1217 
1218 		/* kprobes don't want to hook the spurious faults: */
1219 		if (kprobes_fault(regs))
1220 			return;
1221 		/*
1222 		 * Don't take the mm semaphore here. If we fix up a prefetch
1223 		 * fault we could otherwise deadlock:
1224 		 */
1225 		bad_area_nosemaphore(regs, error_code, address, NULL);
1226 
1227 		return;
1228 	}
1229 
1230 	/* kprobes don't want to hook the spurious faults: */
1231 	if (unlikely(kprobes_fault(regs)))
1232 		return;
1233 
1234 	if (unlikely(error_code & PF_RSVD))
1235 		pgtable_bad(regs, error_code, address);
1236 
1237 	if (unlikely(smap_violation(error_code, regs))) {
1238 		bad_area_nosemaphore(regs, error_code, address, NULL);
1239 		return;
1240 	}
1241 
1242 	/*
1243 	 * If we're in an interrupt, have no user context, or are running
1244 	 * in a region with pagefaults disabled, then we must not take the fault.
1245 	 */
1246 	if (unlikely(faulthandler_disabled() || !mm)) {
1247 		bad_area_nosemaphore(regs, error_code, address, NULL);
1248 		return;
1249 	}
1250 
1251 	/*
1252 	 * It's safe to allow irq's after cr2 has been saved and the
1253 	 * vmalloc fault has been handled.
1254 	 *
1255 	 * User-mode registers count as a user access even for any
1256 	 * potential system fault or CPU buglet:
1257 	 */
1258 	if (user_mode(regs)) {
1259 		local_irq_enable();
1260 		error_code |= PF_USER;
1261 		flags |= FAULT_FLAG_USER;
1262 	} else {
1263 		if (regs->flags & X86_EFLAGS_IF)
1264 			local_irq_enable();
1265 	}
1266 
1267 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1268 
1269 	if (error_code & PF_WRITE)
1270 		flags |= FAULT_FLAG_WRITE;
1271 	if (error_code & PF_INSTR)
1272 		flags |= FAULT_FLAG_INSTRUCTION;
1273 
1274 	/*
1275 	 * When running in the kernel we expect faults to occur only to
1276 	 * addresses in user space.  All other faults represent errors in
1277 	 * the kernel and should generate an OOPS.  Unfortunately, in the
1278 	 * case of an erroneous fault occurring in a code path which already
1279 	 * holds mmap_sem we will deadlock attempting to validate the fault
1280 	 * against the address space.  Luckily the kernel only validly
1281 	 * references user space from well defined areas of code, which are
1282 	 * listed in the exceptions table.
1283 	 *
1284 	 * As the vast majority of faults will be valid we will only perform
1285 	 * the source reference check when there is a possibility of a
1286 	 * deadlock. Attempt to lock the address space, if we cannot we then
1287 	 * validate the source. If this is invalid we can skip the address
1288 	 * space check, thus avoiding the deadlock:
1289 	 */
1290 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1291 		if ((error_code & PF_USER) == 0 &&
1292 		    !search_exception_tables(regs->ip)) {
1293 			bad_area_nosemaphore(regs, error_code, address, NULL);
1294 			return;
1295 		}
1296 retry:
1297 		down_read(&mm->mmap_sem);
1298 	} else {
1299 		/*
1300 		 * The above down_read_trylock() might have succeeded in
1301 		 * which case we'll have missed the might_sleep() from
1302 		 * down_read():
1303 		 */
1304 		might_sleep();
1305 	}
1306 
1307 	vma = find_vma(mm, address);
1308 	if (unlikely(!vma)) {
1309 		bad_area(regs, error_code, address);
1310 		return;
1311 	}
1312 	if (likely(vma->vm_start <= address))
1313 		goto good_area;
1314 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
1315 		bad_area(regs, error_code, address);
1316 		return;
1317 	}
1318 	if (error_code & PF_USER) {
1319 		/*
1320 		 * Accessing the stack below %sp is always a bug.
1321 		 * The large cushion allows instructions like enter
1322 		 * and pusha to work. ("enter $65535, $31" pushes
1323 		 * 32 pointers and then decrements %sp by 65535.)
1324 		 */
1325 		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
1326 			bad_area(regs, error_code, address);
1327 			return;
1328 		}
1329 	}
1330 	if (unlikely(expand_stack(vma, address))) {
1331 		bad_area(regs, error_code, address);
1332 		return;
1333 	}
1334 
1335 	/*
1336 	 * Ok, we have a good vm_area for this memory access, so
1337 	 * we can handle it..
1338 	 */
1339 good_area:
1340 	if (unlikely(access_error(error_code, vma))) {
1341 		bad_area_access_error(regs, error_code, address, vma);
1342 		return;
1343 	}
1344 
1345 	/*
1346 	 * If for any reason at all we couldn't handle the fault,
1347 	 * make sure we exit gracefully rather than endlessly redo
1348 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1349 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1350 	 */
1351 	fault = handle_mm_fault(mm, vma, address, flags);
1352 	major |= fault & VM_FAULT_MAJOR;
1353 
1354 	/*
1355 	 * If we need to retry the mmap_sem has already been released,
1356 	 * and if there is a fatal signal pending there is no guarantee
1357 	 * that we made any progress. Handle this case first.
1358 	 */
1359 	if (unlikely(fault & VM_FAULT_RETRY)) {
1360 		/* Retry at most once */
1361 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
1362 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
1363 			flags |= FAULT_FLAG_TRIED;
1364 			if (!fatal_signal_pending(tsk))
1365 				goto retry;
1366 		}
1367 
1368 		/* User mode? Just return to handle the fatal exception */
1369 		if (flags & FAULT_FLAG_USER)
1370 			return;
1371 
1372 		/* Not returning to user mode? Handle exceptions or die: */
1373 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
1374 		return;
1375 	}
1376 
1377 	up_read(&mm->mmap_sem);
1378 	if (unlikely(fault & VM_FAULT_ERROR)) {
1379 		mm_fault_error(regs, error_code, address, vma, fault);
1380 		return;
1381 	}
1382 
1383 	/*
1384 	 * Major/minor page fault accounting. If any of the events
1385 	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1386 	 */
1387 	if (major) {
1388 		tsk->maj_flt++;
1389 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1390 	} else {
1391 		tsk->min_flt++;
1392 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1393 	}
1394 
1395 	check_v8086_mode(regs, address, tsk);
1396 }
1397 NOKPROBE_SYMBOL(__do_page_fault);
1398 
1399 dotraplinkage void notrace
1400 do_page_fault(struct pt_regs *regs, unsigned long error_code)
1401 {
1402 	unsigned long address = read_cr2(); /* Get the faulting address */
1403 	enum ctx_state prev_state;
1404 
1405 	/*
1406 	 * We must have this function tagged with __kprobes, notrace, and call
1407 	 * read_cr2() before calling anything else, to avoid invoking any kind
1408 	 * of tracing machinery before we've observed the CR2 value.
1409 	 *
1410 	 * exception_{enter,exit}() contain all sorts of tracepoints.
1411 	 */
1412 
1413 	prev_state = exception_enter();
1414 	__do_page_fault(regs, error_code, address);
1415 	exception_exit(prev_state);
1416 }
1417 NOKPROBE_SYMBOL(do_page_fault);
1418 
1419 #ifdef CONFIG_TRACING
1420 static nokprobe_inline void
1421 trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1422 			 unsigned long error_code)
1423 {
1424 	if (user_mode(regs))
1425 		trace_page_fault_user(address, regs, error_code);
1426 	else
1427 		trace_page_fault_kernel(address, regs, error_code);
1428 }
1429 
1430 dotraplinkage void notrace
1431 trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
1432 {
1433 	/*
1434 	 * The exception_enter and tracepoint processing could
1435 	 * trigger another page fault (user space callchain
1436 	 * reading) and destroy the original cr2 value, so read
1437 	 * the faulting address now.
1438 	 */
1439 	unsigned long address = read_cr2();
1440 	enum ctx_state prev_state;
1441 
1442 	prev_state = exception_enter();
1443 	trace_page_fault_entries(address, regs, error_code);
1444 	__do_page_fault(regs, error_code, address);
1445 	exception_exit(prev_state);
1446 }
1447 NOKPROBE_SYMBOL(trace_do_page_fault);
1448 #endif /* CONFIG_TRACING */
1449