1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2c61e211dSHarvey Harrison /* 3c61e211dSHarvey Harrison * Copyright (C) 1995 Linus Torvalds 4c61e211dSHarvey Harrison * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. 5f8eeb2e6SIngo Molnar * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar 6c61e211dSHarvey Harrison */ 7a2bcd473SIngo Molnar #include <linux/sched.h> /* test_thread_flag(), ... */ 868db0cf1SIngo Molnar #include <linux/sched/task_stack.h> /* task_stack_*(), ... */ 9a2bcd473SIngo Molnar #include <linux/kdebug.h> /* oops_begin/end, ... */ 104cdf8dbeSLinus Torvalds #include <linux/extable.h> /* search_exception_tables */ 1157c8a661SMike Rapoport #include <linux/memblock.h> /* max_low_pfn */ 129326638cSMasami Hiramatsu #include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */ 13a2bcd473SIngo Molnar #include <linux/mmiotrace.h> /* kmmio_handler, ... */ 14cdd6c482SIngo Molnar #include <linux/perf_event.h> /* perf_sw_event */ 15f672b49bSAndi Kleen #include <linux/hugetlb.h> /* hstate_index_to_shift */ 16268bb0ceSLinus Torvalds #include <linux/prefetch.h> /* prefetchw */ 1756dd9470SFrederic Weisbecker #include <linux/context_tracking.h> /* exception_enter(), ... */ 1870ffdb93SDavid Hildenbrand #include <linux/uaccess.h> /* faulthandler_disabled() */ 193425d934SSai Praneeth #include <linux/efi.h> /* efi_recover_from_page_fault()*/ 2050a7ca3cSSouptick Joarder #include <linux/mm_types.h> 21c61e211dSHarvey Harrison 22019132ffSDave Hansen #include <asm/cpufeature.h> /* boot_cpu_has, ... */ 23a2bcd473SIngo Molnar #include <asm/traps.h> /* dotraplinkage, ... */ 24a2bcd473SIngo Molnar #include <asm/pgalloc.h> /* pgd_*(), ... */ 25f40c3300SAndy Lutomirski #include <asm/fixmap.h> /* VSYSCALL_ADDR */ 26f40c3300SAndy Lutomirski #include <asm/vsyscall.h> /* emulate_vsyscall */ 27ba3e127eSBrian Gerst #include <asm/vm86.h> /* struct vm86 */ 28019132ffSDave Hansen #include <asm/mmu_context.h> /* vma_pkey() */ 293425d934SSai Praneeth #include <asm/efi.h> /* efi_recover_from_page_fault()*/ 30c61e211dSHarvey Harrison 31d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS 32d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h> 33d34603b0SSeiji Aguchi 34c61e211dSHarvey Harrison /* 35b319eed0SIngo Molnar * Returns 0 if mmiotrace is disabled, or if the fault is not 36b319eed0SIngo Molnar * handled by mmiotrace: 37b814d41fSIngo Molnar */ 389326638cSMasami Hiramatsu static nokprobe_inline int 3962c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr) 4086069782SPekka Paalanen { 410fd0e3daSPekka Paalanen if (unlikely(is_kmmio_active())) 420fd0e3daSPekka Paalanen if (kmmio_handler(regs, addr) == 1) 430fd0e3daSPekka Paalanen return -1; 440fd0e3daSPekka Paalanen return 0; 4586069782SPekka Paalanen } 4686069782SPekka Paalanen 479326638cSMasami Hiramatsu static nokprobe_inline int kprobes_fault(struct pt_regs *regs) 48c61e211dSHarvey Harrison { 49a980c0efSJann Horn if (!kprobes_built_in()) 50a980c0efSJann Horn return 0; 51a980c0efSJann Horn if (user_mode(regs)) 52a980c0efSJann Horn return 0; 53a980c0efSJann Horn /* 54a980c0efSJann Horn * To be potentially processing a kprobe fault and to be allowed to call 55a980c0efSJann Horn * kprobe_running(), we have to be non-preemptible. 56a980c0efSJann Horn */ 57a980c0efSJann Horn if (preemptible()) 58a980c0efSJann Horn return 0; 59a980c0efSJann Horn if (!kprobe_running()) 60a980c0efSJann Horn return 0; 61a980c0efSJann Horn return kprobe_fault_handler(regs, X86_TRAP_PF); 62c61e211dSHarvey Harrison } 63c61e211dSHarvey Harrison 64c61e211dSHarvey Harrison /* 652d4a7167SIngo Molnar * Prefetch quirks: 662d4a7167SIngo Molnar * 672d4a7167SIngo Molnar * 32-bit mode: 682d4a7167SIngo Molnar * 69c61e211dSHarvey Harrison * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. 70c61e211dSHarvey Harrison * Check that here and ignore it. 71c61e211dSHarvey Harrison * 722d4a7167SIngo Molnar * 64-bit mode: 732d4a7167SIngo Molnar * 74c61e211dSHarvey Harrison * Sometimes the CPU reports invalid exceptions on prefetch. 75c61e211dSHarvey Harrison * Check that here and ignore it. 76c61e211dSHarvey Harrison * 772d4a7167SIngo Molnar * Opcode checker based on code by Richard Brunner. 78c61e211dSHarvey Harrison */ 79107a0367SIngo Molnar static inline int 80107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, 81107a0367SIngo Molnar unsigned char opcode, int *prefetch) 82c61e211dSHarvey Harrison { 83107a0367SIngo Molnar unsigned char instr_hi = opcode & 0xf0; 84107a0367SIngo Molnar unsigned char instr_lo = opcode & 0x0f; 85c61e211dSHarvey Harrison 86c61e211dSHarvey Harrison switch (instr_hi) { 87c61e211dSHarvey Harrison case 0x20: 88c61e211dSHarvey Harrison case 0x30: 89c61e211dSHarvey Harrison /* 90c61e211dSHarvey Harrison * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. 91c61e211dSHarvey Harrison * In X86_64 long mode, the CPU will signal invalid 92c61e211dSHarvey Harrison * opcode if some of these prefixes are present so 93c61e211dSHarvey Harrison * X86_64 will never get here anyway 94c61e211dSHarvey Harrison */ 95107a0367SIngo Molnar return ((instr_lo & 7) == 0x6); 96c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 97c61e211dSHarvey Harrison case 0x40: 98c61e211dSHarvey Harrison /* 99c61e211dSHarvey Harrison * In AMD64 long mode 0x40..0x4F are valid REX prefixes 100c61e211dSHarvey Harrison * Need to figure out under what instruction mode the 101c61e211dSHarvey Harrison * instruction was issued. Could check the LDT for lm, 102c61e211dSHarvey Harrison * but for now it's good enough to assume that long 103c61e211dSHarvey Harrison * mode only uses well known segments or kernel. 104c61e211dSHarvey Harrison */ 105318f5a2aSAndy Lutomirski return (!user_mode(regs) || user_64bit_mode(regs)); 106c61e211dSHarvey Harrison #endif 107c61e211dSHarvey Harrison case 0x60: 108c61e211dSHarvey Harrison /* 0x64 thru 0x67 are valid prefixes in all modes. */ 109107a0367SIngo Molnar return (instr_lo & 0xC) == 0x4; 110c61e211dSHarvey Harrison case 0xF0: 111c61e211dSHarvey Harrison /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ 112107a0367SIngo Molnar return !instr_lo || (instr_lo>>1) == 1; 113c61e211dSHarvey Harrison case 0x00: 114c61e211dSHarvey Harrison /* Prefetch instruction is 0x0F0D or 0x0F18 */ 115107a0367SIngo Molnar if (probe_kernel_address(instr, opcode)) 116107a0367SIngo Molnar return 0; 117107a0367SIngo Molnar 118107a0367SIngo Molnar *prefetch = (instr_lo == 0xF) && 119107a0367SIngo Molnar (opcode == 0x0D || opcode == 0x18); 120107a0367SIngo Molnar return 0; 121107a0367SIngo Molnar default: 122107a0367SIngo Molnar return 0; 123107a0367SIngo Molnar } 124107a0367SIngo Molnar } 125107a0367SIngo Molnar 126107a0367SIngo Molnar static int 127107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) 128107a0367SIngo Molnar { 129107a0367SIngo Molnar unsigned char *max_instr; 130107a0367SIngo Molnar unsigned char *instr; 131107a0367SIngo Molnar int prefetch = 0; 132107a0367SIngo Molnar 133107a0367SIngo Molnar /* 134107a0367SIngo Molnar * If it was a exec (instruction fetch) fault on NX page, then 135107a0367SIngo Molnar * do not ignore the fault: 136107a0367SIngo Molnar */ 1371067f030SRicardo Neri if (error_code & X86_PF_INSTR) 138107a0367SIngo Molnar return 0; 139107a0367SIngo Molnar 140107a0367SIngo Molnar instr = (void *)convert_ip_to_linear(current, regs); 141107a0367SIngo Molnar max_instr = instr + 15; 142107a0367SIngo Molnar 143d31bf07fSAndy Lutomirski if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX) 144107a0367SIngo Molnar return 0; 145107a0367SIngo Molnar 146107a0367SIngo Molnar while (instr < max_instr) { 147107a0367SIngo Molnar unsigned char opcode; 148c61e211dSHarvey Harrison 149c61e211dSHarvey Harrison if (probe_kernel_address(instr, opcode)) 150c61e211dSHarvey Harrison break; 151107a0367SIngo Molnar 152107a0367SIngo Molnar instr++; 153107a0367SIngo Molnar 154107a0367SIngo Molnar if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) 155c61e211dSHarvey Harrison break; 156c61e211dSHarvey Harrison } 157c61e211dSHarvey Harrison return prefetch; 158c61e211dSHarvey Harrison } 159c61e211dSHarvey Harrison 160f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock); 161f2f13a85SIngo Molnar LIST_HEAD(pgd_list); 1622d4a7167SIngo Molnar 163f2f13a85SIngo Molnar #ifdef CONFIG_X86_32 164f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) 165f2f13a85SIngo Molnar { 166f2f13a85SIngo Molnar unsigned index = pgd_index(address); 167f2f13a85SIngo Molnar pgd_t *pgd_k; 168e0c4f675SKirill A. Shutemov p4d_t *p4d, *p4d_k; 169f2f13a85SIngo Molnar pud_t *pud, *pud_k; 170f2f13a85SIngo Molnar pmd_t *pmd, *pmd_k; 171f2f13a85SIngo Molnar 172f2f13a85SIngo Molnar pgd += index; 173f2f13a85SIngo Molnar pgd_k = init_mm.pgd + index; 174f2f13a85SIngo Molnar 175f2f13a85SIngo Molnar if (!pgd_present(*pgd_k)) 176f2f13a85SIngo Molnar return NULL; 177f2f13a85SIngo Molnar 178f2f13a85SIngo Molnar /* 179f2f13a85SIngo Molnar * set_pgd(pgd, *pgd_k); here would be useless on PAE 180f2f13a85SIngo Molnar * and redundant with the set_pmd() on non-PAE. As would 181e0c4f675SKirill A. Shutemov * set_p4d/set_pud. 182f2f13a85SIngo Molnar */ 183e0c4f675SKirill A. Shutemov p4d = p4d_offset(pgd, address); 184e0c4f675SKirill A. Shutemov p4d_k = p4d_offset(pgd_k, address); 185e0c4f675SKirill A. Shutemov if (!p4d_present(*p4d_k)) 186e0c4f675SKirill A. Shutemov return NULL; 187e0c4f675SKirill A. Shutemov 188e0c4f675SKirill A. Shutemov pud = pud_offset(p4d, address); 189e0c4f675SKirill A. Shutemov pud_k = pud_offset(p4d_k, address); 190f2f13a85SIngo Molnar if (!pud_present(*pud_k)) 191f2f13a85SIngo Molnar return NULL; 192f2f13a85SIngo Molnar 193f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 194f2f13a85SIngo Molnar pmd_k = pmd_offset(pud_k, address); 195f2f13a85SIngo Molnar if (!pmd_present(*pmd_k)) 196f2f13a85SIngo Molnar return NULL; 197f2f13a85SIngo Molnar 198b8bcfe99SJeremy Fitzhardinge if (!pmd_present(*pmd)) 199f2f13a85SIngo Molnar set_pmd(pmd, *pmd_k); 200b8bcfe99SJeremy Fitzhardinge else 201f2f13a85SIngo Molnar BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); 202f2f13a85SIngo Molnar 203f2f13a85SIngo Molnar return pmd_k; 204f2f13a85SIngo Molnar } 205f2f13a85SIngo Molnar 206f2f13a85SIngo Molnar void vmalloc_sync_all(void) 207f2f13a85SIngo Molnar { 208f2f13a85SIngo Molnar unsigned long address; 209f2f13a85SIngo Molnar 210f2f13a85SIngo Molnar if (SHARED_KERNEL_PMD) 211f2f13a85SIngo Molnar return; 212f2f13a85SIngo Molnar 213f2f13a85SIngo Molnar for (address = VMALLOC_START & PMD_MASK; 214dc4fac84SAndy Lutomirski address >= TASK_SIZE_MAX && address < FIXADDR_TOP; 215f2f13a85SIngo Molnar address += PMD_SIZE) { 216f2f13a85SIngo Molnar struct page *page; 217f2f13a85SIngo Molnar 218a79e53d8SAndrea Arcangeli spin_lock(&pgd_lock); 219f2f13a85SIngo Molnar list_for_each_entry(page, &pgd_list, lru) { 220617d34d9SJeremy Fitzhardinge spinlock_t *pgt_lock; 221f01f7c56SBorislav Petkov pmd_t *ret; 222617d34d9SJeremy Fitzhardinge 223a79e53d8SAndrea Arcangeli /* the pgt_lock only for Xen */ 224617d34d9SJeremy Fitzhardinge pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 225617d34d9SJeremy Fitzhardinge 226617d34d9SJeremy Fitzhardinge spin_lock(pgt_lock); 227617d34d9SJeremy Fitzhardinge ret = vmalloc_sync_one(page_address(page), address); 228617d34d9SJeremy Fitzhardinge spin_unlock(pgt_lock); 229617d34d9SJeremy Fitzhardinge 230617d34d9SJeremy Fitzhardinge if (!ret) 231f2f13a85SIngo Molnar break; 232f2f13a85SIngo Molnar } 233a79e53d8SAndrea Arcangeli spin_unlock(&pgd_lock); 234f2f13a85SIngo Molnar } 235f2f13a85SIngo Molnar } 236f2f13a85SIngo Molnar 237f2f13a85SIngo Molnar /* 238f2f13a85SIngo Molnar * 32-bit: 239f2f13a85SIngo Molnar * 240f2f13a85SIngo Molnar * Handle a fault on the vmalloc or module mapping area 241f2f13a85SIngo Molnar */ 2429326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address) 243f2f13a85SIngo Molnar { 244f2f13a85SIngo Molnar unsigned long pgd_paddr; 245f2f13a85SIngo Molnar pmd_t *pmd_k; 246f2f13a85SIngo Molnar pte_t *pte_k; 247f2f13a85SIngo Molnar 248f2f13a85SIngo Molnar /* Make sure we are in vmalloc area: */ 249f2f13a85SIngo Molnar if (!(address >= VMALLOC_START && address < VMALLOC_END)) 250f2f13a85SIngo Molnar return -1; 251f2f13a85SIngo Molnar 252f2f13a85SIngo Molnar /* 253f2f13a85SIngo Molnar * Synchronize this task's top level page-table 254f2f13a85SIngo Molnar * with the 'reference' page table. 255f2f13a85SIngo Molnar * 256f2f13a85SIngo Molnar * Do _not_ use "current" here. We might be inside 257f2f13a85SIngo Molnar * an interrupt in the middle of a task switch.. 258f2f13a85SIngo Molnar */ 2596c690ee1SAndy Lutomirski pgd_paddr = read_cr3_pa(); 260f2f13a85SIngo Molnar pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); 261f2f13a85SIngo Molnar if (!pmd_k) 262f2f13a85SIngo Molnar return -1; 263f2f13a85SIngo Molnar 26418a95521SToshi Kani if (pmd_large(*pmd_k)) 265f4eafd8bSToshi Kani return 0; 266f4eafd8bSToshi Kani 267f2f13a85SIngo Molnar pte_k = pte_offset_kernel(pmd_k, address); 268f2f13a85SIngo Molnar if (!pte_present(*pte_k)) 269f2f13a85SIngo Molnar return -1; 270f2f13a85SIngo Molnar 271f2f13a85SIngo Molnar return 0; 272f2f13a85SIngo Molnar } 2739326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault); 274f2f13a85SIngo Molnar 275f2f13a85SIngo Molnar /* 276f2f13a85SIngo Molnar * Did it hit the DOS screen memory VA from vm86 mode? 277f2f13a85SIngo Molnar */ 278f2f13a85SIngo Molnar static inline void 279f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 280f2f13a85SIngo Molnar struct task_struct *tsk) 281f2f13a85SIngo Molnar { 2829fda6a06SBrian Gerst #ifdef CONFIG_VM86 283f2f13a85SIngo Molnar unsigned long bit; 284f2f13a85SIngo Molnar 2859fda6a06SBrian Gerst if (!v8086_mode(regs) || !tsk->thread.vm86) 286f2f13a85SIngo Molnar return; 287f2f13a85SIngo Molnar 288f2f13a85SIngo Molnar bit = (address - 0xA0000) >> PAGE_SHIFT; 289f2f13a85SIngo Molnar if (bit < 32) 2909fda6a06SBrian Gerst tsk->thread.vm86->screen_bitmap |= 1 << bit; 2919fda6a06SBrian Gerst #endif 292f2f13a85SIngo Molnar } 293c61e211dSHarvey Harrison 294087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn) 295087975b0SAkinobu Mita { 296087975b0SAkinobu Mita return pfn < max_low_pfn; 297087975b0SAkinobu Mita } 298087975b0SAkinobu Mita 299cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address) 300c61e211dSHarvey Harrison { 3016c690ee1SAndy Lutomirski pgd_t *base = __va(read_cr3_pa()); 302087975b0SAkinobu Mita pgd_t *pgd = &base[pgd_index(address)]; 303e0c4f675SKirill A. Shutemov p4d_t *p4d; 304e0c4f675SKirill A. Shutemov pud_t *pud; 305087975b0SAkinobu Mita pmd_t *pmd; 306087975b0SAkinobu Mita pte_t *pte; 3072d4a7167SIngo Molnar 308c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE 30939e48d9bSJan Beulich pr_info("*pdpt = %016Lx ", pgd_val(*pgd)); 310087975b0SAkinobu Mita if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) 311087975b0SAkinobu Mita goto out; 31239e48d9bSJan Beulich #define pr_pde pr_cont 31339e48d9bSJan Beulich #else 31439e48d9bSJan Beulich #define pr_pde pr_info 315c61e211dSHarvey Harrison #endif 316e0c4f675SKirill A. Shutemov p4d = p4d_offset(pgd, address); 317e0c4f675SKirill A. Shutemov pud = pud_offset(p4d, address); 318e0c4f675SKirill A. Shutemov pmd = pmd_offset(pud, address); 31939e48d9bSJan Beulich pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); 32039e48d9bSJan Beulich #undef pr_pde 321c61e211dSHarvey Harrison 322c61e211dSHarvey Harrison /* 323c61e211dSHarvey Harrison * We must not directly access the pte in the highpte 324c61e211dSHarvey Harrison * case if the page table is located in highmem. 325c61e211dSHarvey Harrison * And let's rather not kmap-atomic the pte, just in case 3262d4a7167SIngo Molnar * it's allocated already: 327c61e211dSHarvey Harrison */ 328087975b0SAkinobu Mita if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) 329087975b0SAkinobu Mita goto out; 3302d4a7167SIngo Molnar 331087975b0SAkinobu Mita pte = pte_offset_kernel(pmd, address); 33239e48d9bSJan Beulich pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); 333087975b0SAkinobu Mita out: 33439e48d9bSJan Beulich pr_cont("\n"); 335f2f13a85SIngo Molnar } 336f2f13a85SIngo Molnar 337f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */ 338f2f13a85SIngo Molnar 339f2f13a85SIngo Molnar void vmalloc_sync_all(void) 340f2f13a85SIngo Molnar { 3415372e155SKirill A. Shutemov sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); 342f2f13a85SIngo Molnar } 343f2f13a85SIngo Molnar 344f2f13a85SIngo Molnar /* 345f2f13a85SIngo Molnar * 64-bit: 346f2f13a85SIngo Molnar * 347f2f13a85SIngo Molnar * Handle a fault on the vmalloc area 348f2f13a85SIngo Molnar */ 3499326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address) 350f2f13a85SIngo Molnar { 351565977a3SToshi Kani pgd_t *pgd, *pgd_k; 352565977a3SToshi Kani p4d_t *p4d, *p4d_k; 353565977a3SToshi Kani pud_t *pud; 354565977a3SToshi Kani pmd_t *pmd; 355565977a3SToshi Kani pte_t *pte; 356f2f13a85SIngo Molnar 357f2f13a85SIngo Molnar /* Make sure we are in vmalloc area: */ 358f2f13a85SIngo Molnar if (!(address >= VMALLOC_START && address < VMALLOC_END)) 359f2f13a85SIngo Molnar return -1; 360f2f13a85SIngo Molnar 361ebc8827fSFrederic Weisbecker WARN_ON_ONCE(in_nmi()); 362ebc8827fSFrederic Weisbecker 363f2f13a85SIngo Molnar /* 364f2f13a85SIngo Molnar * Copy kernel mappings over when needed. This can also 365f2f13a85SIngo Molnar * happen within a race in page table update. In the later 366f2f13a85SIngo Molnar * case just flush: 367f2f13a85SIngo Molnar */ 3686c690ee1SAndy Lutomirski pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address); 369565977a3SToshi Kani pgd_k = pgd_offset_k(address); 370565977a3SToshi Kani if (pgd_none(*pgd_k)) 371f2f13a85SIngo Molnar return -1; 372f2f13a85SIngo Molnar 373ed7588d5SKirill A. Shutemov if (pgtable_l5_enabled()) { 3741160c277SSamu Kallio if (pgd_none(*pgd)) { 375565977a3SToshi Kani set_pgd(pgd, *pgd_k); 3761160c277SSamu Kallio arch_flush_lazy_mmu_mode(); 37736b3a772SAndy Lutomirski } else { 378565977a3SToshi Kani BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k)); 3791160c277SSamu Kallio } 38036b3a772SAndy Lutomirski } 381f2f13a85SIngo Molnar 382b50858ceSKirill A. Shutemov /* With 4-level paging, copying happens on the p4d level. */ 383b50858ceSKirill A. Shutemov p4d = p4d_offset(pgd, address); 384565977a3SToshi Kani p4d_k = p4d_offset(pgd_k, address); 385565977a3SToshi Kani if (p4d_none(*p4d_k)) 386b50858ceSKirill A. Shutemov return -1; 387b50858ceSKirill A. Shutemov 388ed7588d5SKirill A. Shutemov if (p4d_none(*p4d) && !pgtable_l5_enabled()) { 389565977a3SToshi Kani set_p4d(p4d, *p4d_k); 390b50858ceSKirill A. Shutemov arch_flush_lazy_mmu_mode(); 391b50858ceSKirill A. Shutemov } else { 392565977a3SToshi Kani BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k)); 393b50858ceSKirill A. Shutemov } 394b50858ceSKirill A. Shutemov 39536b3a772SAndy Lutomirski BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4); 396f2f13a85SIngo Molnar 397b50858ceSKirill A. Shutemov pud = pud_offset(p4d, address); 398565977a3SToshi Kani if (pud_none(*pud)) 399f2f13a85SIngo Molnar return -1; 400f2f13a85SIngo Molnar 40118a95521SToshi Kani if (pud_large(*pud)) 402f4eafd8bSToshi Kani return 0; 403f4eafd8bSToshi Kani 404f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 405565977a3SToshi Kani if (pmd_none(*pmd)) 406f2f13a85SIngo Molnar return -1; 407f2f13a85SIngo Molnar 40818a95521SToshi Kani if (pmd_large(*pmd)) 409f4eafd8bSToshi Kani return 0; 410f4eafd8bSToshi Kani 411f2f13a85SIngo Molnar pte = pte_offset_kernel(pmd, address); 412565977a3SToshi Kani if (!pte_present(*pte)) 413565977a3SToshi Kani return -1; 414f2f13a85SIngo Molnar 415f2f13a85SIngo Molnar return 0; 416f2f13a85SIngo Molnar } 4179326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault); 418f2f13a85SIngo Molnar 419e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD 420f2f13a85SIngo Molnar static const char errata93_warning[] = 421ad361c98SJoe Perches KERN_ERR 422ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n" 423ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n" 424ad361c98SJoe Perches "******* Please consider a BIOS update.\n" 425ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n"; 426e05139f2SJan Beulich #endif 427f2f13a85SIngo Molnar 428f2f13a85SIngo Molnar /* 429f2f13a85SIngo Molnar * No vm86 mode in 64-bit mode: 430f2f13a85SIngo Molnar */ 431f2f13a85SIngo Molnar static inline void 432f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 433f2f13a85SIngo Molnar struct task_struct *tsk) 434f2f13a85SIngo Molnar { 435f2f13a85SIngo Molnar } 436f2f13a85SIngo Molnar 437f2f13a85SIngo Molnar static int bad_address(void *p) 438f2f13a85SIngo Molnar { 439f2f13a85SIngo Molnar unsigned long dummy; 440f2f13a85SIngo Molnar 441f2f13a85SIngo Molnar return probe_kernel_address((unsigned long *)p, dummy); 442f2f13a85SIngo Molnar } 443f2f13a85SIngo Molnar 444f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address) 445f2f13a85SIngo Molnar { 4466c690ee1SAndy Lutomirski pgd_t *base = __va(read_cr3_pa()); 447087975b0SAkinobu Mita pgd_t *pgd = base + pgd_index(address); 448e0c4f675SKirill A. Shutemov p4d_t *p4d; 449c61e211dSHarvey Harrison pud_t *pud; 450c61e211dSHarvey Harrison pmd_t *pmd; 451c61e211dSHarvey Harrison pte_t *pte; 452c61e211dSHarvey Harrison 4532d4a7167SIngo Molnar if (bad_address(pgd)) 4542d4a7167SIngo Molnar goto bad; 4552d4a7167SIngo Molnar 45639e48d9bSJan Beulich pr_info("PGD %lx ", pgd_val(*pgd)); 4572d4a7167SIngo Molnar 4582d4a7167SIngo Molnar if (!pgd_present(*pgd)) 4592d4a7167SIngo Molnar goto out; 460c61e211dSHarvey Harrison 461e0c4f675SKirill A. Shutemov p4d = p4d_offset(pgd, address); 462e0c4f675SKirill A. Shutemov if (bad_address(p4d)) 463e0c4f675SKirill A. Shutemov goto bad; 464e0c4f675SKirill A. Shutemov 46539e48d9bSJan Beulich pr_cont("P4D %lx ", p4d_val(*p4d)); 466e0c4f675SKirill A. Shutemov if (!p4d_present(*p4d) || p4d_large(*p4d)) 467e0c4f675SKirill A. Shutemov goto out; 468e0c4f675SKirill A. Shutemov 469e0c4f675SKirill A. Shutemov pud = pud_offset(p4d, address); 4702d4a7167SIngo Molnar if (bad_address(pud)) 4712d4a7167SIngo Molnar goto bad; 4722d4a7167SIngo Molnar 47339e48d9bSJan Beulich pr_cont("PUD %lx ", pud_val(*pud)); 474b5360222SAndi Kleen if (!pud_present(*pud) || pud_large(*pud)) 4752d4a7167SIngo Molnar goto out; 476c61e211dSHarvey Harrison 477c61e211dSHarvey Harrison pmd = pmd_offset(pud, address); 4782d4a7167SIngo Molnar if (bad_address(pmd)) 4792d4a7167SIngo Molnar goto bad; 4802d4a7167SIngo Molnar 48139e48d9bSJan Beulich pr_cont("PMD %lx ", pmd_val(*pmd)); 4822d4a7167SIngo Molnar if (!pmd_present(*pmd) || pmd_large(*pmd)) 4832d4a7167SIngo Molnar goto out; 484c61e211dSHarvey Harrison 485c61e211dSHarvey Harrison pte = pte_offset_kernel(pmd, address); 4862d4a7167SIngo Molnar if (bad_address(pte)) 4872d4a7167SIngo Molnar goto bad; 4882d4a7167SIngo Molnar 48939e48d9bSJan Beulich pr_cont("PTE %lx", pte_val(*pte)); 4902d4a7167SIngo Molnar out: 49139e48d9bSJan Beulich pr_cont("\n"); 492c61e211dSHarvey Harrison return; 493c61e211dSHarvey Harrison bad: 49439e48d9bSJan Beulich pr_info("BAD\n"); 495c61e211dSHarvey Harrison } 496c61e211dSHarvey Harrison 497f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */ 498c61e211dSHarvey Harrison 4992d4a7167SIngo Molnar /* 5002d4a7167SIngo Molnar * Workaround for K8 erratum #93 & buggy BIOS. 5012d4a7167SIngo Molnar * 5022d4a7167SIngo Molnar * BIOS SMM functions are required to use a specific workaround 5032d4a7167SIngo Molnar * to avoid corruption of the 64bit RIP register on C stepping K8. 5042d4a7167SIngo Molnar * 5052d4a7167SIngo Molnar * A lot of BIOS that didn't get tested properly miss this. 5062d4a7167SIngo Molnar * 5072d4a7167SIngo Molnar * The OS sees this as a page fault with the upper 32bits of RIP cleared. 5082d4a7167SIngo Molnar * Try to work around it here. 5092d4a7167SIngo Molnar * 5102d4a7167SIngo Molnar * Note we only handle faults in kernel here. 5112d4a7167SIngo Molnar * Does nothing on 32-bit. 512c61e211dSHarvey Harrison */ 513c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address) 514c61e211dSHarvey Harrison { 515e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD) 516e05139f2SJan Beulich if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD 517e05139f2SJan Beulich || boot_cpu_data.x86 != 0xf) 518e05139f2SJan Beulich return 0; 519e05139f2SJan Beulich 520c61e211dSHarvey Harrison if (address != regs->ip) 521c61e211dSHarvey Harrison return 0; 5222d4a7167SIngo Molnar 523c61e211dSHarvey Harrison if ((address >> 32) != 0) 524c61e211dSHarvey Harrison return 0; 5252d4a7167SIngo Molnar 526c61e211dSHarvey Harrison address |= 0xffffffffUL << 32; 527c61e211dSHarvey Harrison if ((address >= (u64)_stext && address <= (u64)_etext) || 528c61e211dSHarvey Harrison (address >= MODULES_VADDR && address <= MODULES_END)) { 529a454ab31SIngo Molnar printk_once(errata93_warning); 530c61e211dSHarvey Harrison regs->ip = address; 531c61e211dSHarvey Harrison return 1; 532c61e211dSHarvey Harrison } 533c61e211dSHarvey Harrison #endif 534c61e211dSHarvey Harrison return 0; 535c61e211dSHarvey Harrison } 536c61e211dSHarvey Harrison 537c61e211dSHarvey Harrison /* 5382d4a7167SIngo Molnar * Work around K8 erratum #100 K8 in compat mode occasionally jumps 5392d4a7167SIngo Molnar * to illegal addresses >4GB. 5402d4a7167SIngo Molnar * 5412d4a7167SIngo Molnar * We catch this in the page fault handler because these addresses 5422d4a7167SIngo Molnar * are not reachable. Just detect this case and return. Any code 543c61e211dSHarvey Harrison * segment in LDT is compatibility mode. 544c61e211dSHarvey Harrison */ 545c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address) 546c61e211dSHarvey Harrison { 547c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 5482d4a7167SIngo Molnar if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) 549c61e211dSHarvey Harrison return 1; 550c61e211dSHarvey Harrison #endif 551c61e211dSHarvey Harrison return 0; 552c61e211dSHarvey Harrison } 553c61e211dSHarvey Harrison 554c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address) 555c61e211dSHarvey Harrison { 556c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG 557c61e211dSHarvey Harrison unsigned long nr; 5582d4a7167SIngo Molnar 559c61e211dSHarvey Harrison /* 5602d4a7167SIngo Molnar * Pentium F0 0F C7 C8 bug workaround: 561c61e211dSHarvey Harrison */ 562e2604b49SBorislav Petkov if (boot_cpu_has_bug(X86_BUG_F00F)) { 563c61e211dSHarvey Harrison nr = (address - idt_descr.address) >> 3; 564c61e211dSHarvey Harrison 565c61e211dSHarvey Harrison if (nr == 6) { 566c61e211dSHarvey Harrison do_invalid_op(regs, 0); 567c61e211dSHarvey Harrison return 1; 568c61e211dSHarvey Harrison } 569c61e211dSHarvey Harrison } 570c61e211dSHarvey Harrison #endif 571c61e211dSHarvey Harrison return 0; 572c61e211dSHarvey Harrison } 573c61e211dSHarvey Harrison 5742d4a7167SIngo Molnar static void 5752d4a7167SIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code, 576c61e211dSHarvey Harrison unsigned long address) 577c61e211dSHarvey Harrison { 578c61e211dSHarvey Harrison if (!oops_may_print()) 579c61e211dSHarvey Harrison return; 580c61e211dSHarvey Harrison 5811067f030SRicardo Neri if (error_code & X86_PF_INSTR) { 58293809be8SHarvey Harrison unsigned int level; 583426e34ccSMatt Fleming pgd_t *pgd; 584426e34ccSMatt Fleming pte_t *pte; 5852d4a7167SIngo Molnar 5866c690ee1SAndy Lutomirski pgd = __va(read_cr3_pa()); 587426e34ccSMatt Fleming pgd += pgd_index(address); 588426e34ccSMatt Fleming 589426e34ccSMatt Fleming pte = lookup_address_in_pgd(pgd, address, &level); 590c61e211dSHarvey Harrison 5918f766149SIngo Molnar if (pte && pte_present(*pte) && !pte_exec(*pte)) 592d79d0d8aSDmitry Vyukov pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", 593d79d0d8aSDmitry Vyukov from_kuid(&init_user_ns, current_uid())); 594eff50c34SJiri Kosina if (pte && pte_present(*pte) && pte_exec(*pte) && 595eff50c34SJiri Kosina (pgd_flags(*pgd) & _PAGE_USER) && 5961e02ce4cSAndy Lutomirski (__read_cr4() & X86_CR4_SMEP)) 597d79d0d8aSDmitry Vyukov pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n", 598d79d0d8aSDmitry Vyukov from_kuid(&init_user_ns, current_uid())); 599c61e211dSHarvey Harrison } 600fd40d6e3SHarvey Harrison 6014188f063SDmitry Vyukov pr_alert("BUG: unable to handle kernel %s at %px\n", 6024188f063SDmitry Vyukov address < PAGE_SIZE ? "NULL pointer dereference" : "paging request", 6034188f063SDmitry Vyukov (void *)address); 6042d4a7167SIngo Molnar 605c61e211dSHarvey Harrison dump_pagetable(address); 606c61e211dSHarvey Harrison } 607c61e211dSHarvey Harrison 6082d4a7167SIngo Molnar static noinline void 6092d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code, 6102d4a7167SIngo Molnar unsigned long address) 611c61e211dSHarvey Harrison { 6122d4a7167SIngo Molnar struct task_struct *tsk; 6132d4a7167SIngo Molnar unsigned long flags; 6142d4a7167SIngo Molnar int sig; 6152d4a7167SIngo Molnar 6162d4a7167SIngo Molnar flags = oops_begin(); 6172d4a7167SIngo Molnar tsk = current; 6182d4a7167SIngo Molnar sig = SIGKILL; 619c61e211dSHarvey Harrison 620c61e211dSHarvey Harrison printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", 62192181f19SNick Piggin tsk->comm, address); 622c61e211dSHarvey Harrison dump_pagetable(address); 6232d4a7167SIngo Molnar 624c61e211dSHarvey Harrison tsk->thread.cr2 = address; 62551e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 626c61e211dSHarvey Harrison tsk->thread.error_code = error_code; 6272d4a7167SIngo Molnar 628c61e211dSHarvey Harrison if (__die("Bad pagetable", regs, error_code)) 629874d93d1SAlexander van Heukelum sig = 0; 6302d4a7167SIngo Molnar 631874d93d1SAlexander van Heukelum oops_end(flags, regs, sig); 632c61e211dSHarvey Harrison } 633c61e211dSHarvey Harrison 634*e49d3cbeSAndy Lutomirski static void set_signal_archinfo(unsigned long address, 635*e49d3cbeSAndy Lutomirski unsigned long error_code) 636*e49d3cbeSAndy Lutomirski { 637*e49d3cbeSAndy Lutomirski struct task_struct *tsk = current; 638*e49d3cbeSAndy Lutomirski 639*e49d3cbeSAndy Lutomirski /* 640*e49d3cbeSAndy Lutomirski * To avoid leaking information about the kernel page 641*e49d3cbeSAndy Lutomirski * table layout, pretend that user-mode accesses to 642*e49d3cbeSAndy Lutomirski * kernel addresses are always protection faults. 643*e49d3cbeSAndy Lutomirski */ 644*e49d3cbeSAndy Lutomirski if (address >= TASK_SIZE_MAX) 645*e49d3cbeSAndy Lutomirski error_code |= X86_PF_PROT; 646*e49d3cbeSAndy Lutomirski 647*e49d3cbeSAndy Lutomirski tsk->thread.trap_nr = X86_TRAP_PF; 648*e49d3cbeSAndy Lutomirski tsk->thread.error_code = error_code | X86_PF_USER; 649*e49d3cbeSAndy Lutomirski tsk->thread.cr2 = address; 650*e49d3cbeSAndy Lutomirski } 651*e49d3cbeSAndy Lutomirski 6522d4a7167SIngo Molnar static noinline void 6532d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code, 6544fc34901SAndy Lutomirski unsigned long address, int signal, int si_code) 65592181f19SNick Piggin { 65692181f19SNick Piggin struct task_struct *tsk = current; 65792181f19SNick Piggin unsigned long flags; 65892181f19SNick Piggin int sig; 65992181f19SNick Piggin 66092181f19SNick Piggin /* Are we prepared to handle this kernel fault? */ 66181fd9c18SJann Horn if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) { 662c026b359SPeter Zijlstra /* 663c026b359SPeter Zijlstra * Any interrupt that takes a fault gets the fixup. This makes 664c026b359SPeter Zijlstra * the below recursive fault logic only apply to a faults from 665c026b359SPeter Zijlstra * task context. 666c026b359SPeter Zijlstra */ 667c026b359SPeter Zijlstra if (in_interrupt()) 668c026b359SPeter Zijlstra return; 669c026b359SPeter Zijlstra 670c026b359SPeter Zijlstra /* 671c026b359SPeter Zijlstra * Per the above we're !in_interrupt(), aka. task context. 672c026b359SPeter Zijlstra * 673c026b359SPeter Zijlstra * In this case we need to make sure we're not recursively 674c026b359SPeter Zijlstra * faulting through the emulate_vsyscall() logic. 675c026b359SPeter Zijlstra */ 6762a53ccbcSIngo Molnar if (current->thread.sig_on_uaccess_err && signal) { 677*e49d3cbeSAndy Lutomirski set_signal_archinfo(address, error_code); 6784fc34901SAndy Lutomirski 6794fc34901SAndy Lutomirski /* XXX: hwpoison faults will set the wrong code. */ 680b4fd52f2SEric W. Biederman force_sig_fault(signal, si_code, (void __user *)address, 681b4fd52f2SEric W. Biederman tsk); 6824fc34901SAndy Lutomirski } 683c026b359SPeter Zijlstra 684c026b359SPeter Zijlstra /* 685c026b359SPeter Zijlstra * Barring that, we can do the fixup and be happy. 686c026b359SPeter Zijlstra */ 68792181f19SNick Piggin return; 6884fc34901SAndy Lutomirski } 68992181f19SNick Piggin 6906271cfdfSAndy Lutomirski #ifdef CONFIG_VMAP_STACK 6916271cfdfSAndy Lutomirski /* 6926271cfdfSAndy Lutomirski * Stack overflow? During boot, we can fault near the initial 6936271cfdfSAndy Lutomirski * stack in the direct map, but that's not an overflow -- check 6946271cfdfSAndy Lutomirski * that we're in vmalloc space to avoid this. 6956271cfdfSAndy Lutomirski */ 6966271cfdfSAndy Lutomirski if (is_vmalloc_addr((void *)address) && 6976271cfdfSAndy Lutomirski (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || 6986271cfdfSAndy Lutomirski address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { 6996271cfdfSAndy Lutomirski unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *); 7006271cfdfSAndy Lutomirski /* 7016271cfdfSAndy Lutomirski * We're likely to be running with very little stack space 7026271cfdfSAndy Lutomirski * left. It's plausible that we'd hit this condition but 7036271cfdfSAndy Lutomirski * double-fault even before we get this far, in which case 7046271cfdfSAndy Lutomirski * we're fine: the double-fault handler will deal with it. 7056271cfdfSAndy Lutomirski * 7066271cfdfSAndy Lutomirski * We don't want to make it all the way into the oops code 7076271cfdfSAndy Lutomirski * and then double-fault, though, because we're likely to 7086271cfdfSAndy Lutomirski * break the console driver and lose most of the stack dump. 7096271cfdfSAndy Lutomirski */ 7106271cfdfSAndy Lutomirski asm volatile ("movq %[stack], %%rsp\n\t" 7116271cfdfSAndy Lutomirski "call handle_stack_overflow\n\t" 7126271cfdfSAndy Lutomirski "1: jmp 1b" 713f5caf621SJosh Poimboeuf : ASM_CALL_CONSTRAINT 7146271cfdfSAndy Lutomirski : "D" ("kernel stack overflow (page fault)"), 7156271cfdfSAndy Lutomirski "S" (regs), "d" (address), 7166271cfdfSAndy Lutomirski [stack] "rm" (stack)); 7176271cfdfSAndy Lutomirski unreachable(); 7186271cfdfSAndy Lutomirski } 7196271cfdfSAndy Lutomirski #endif 7206271cfdfSAndy Lutomirski 72192181f19SNick Piggin /* 7222d4a7167SIngo Molnar * 32-bit: 7232d4a7167SIngo Molnar * 72492181f19SNick Piggin * Valid to do another page fault here, because if this fault 72592181f19SNick Piggin * had been triggered by is_prefetch fixup_exception would have 72692181f19SNick Piggin * handled it. 72792181f19SNick Piggin * 7282d4a7167SIngo Molnar * 64-bit: 7292d4a7167SIngo Molnar * 73092181f19SNick Piggin * Hall of shame of CPU/BIOS bugs. 73192181f19SNick Piggin */ 73292181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 73392181f19SNick Piggin return; 73492181f19SNick Piggin 73592181f19SNick Piggin if (is_errata93(regs, address)) 73692181f19SNick Piggin return; 73792181f19SNick Piggin 73892181f19SNick Piggin /* 7393425d934SSai Praneeth * Buggy firmware could access regions which might page fault, try to 7403425d934SSai Praneeth * recover from such faults. 7413425d934SSai Praneeth */ 7423425d934SSai Praneeth if (IS_ENABLED(CONFIG_EFI)) 7433425d934SSai Praneeth efi_recover_from_page_fault(address); 7443425d934SSai Praneeth 7453425d934SSai Praneeth /* 74692181f19SNick Piggin * Oops. The kernel tried to access some bad page. We'll have to 7472d4a7167SIngo Molnar * terminate things with extreme prejudice: 74892181f19SNick Piggin */ 74992181f19SNick Piggin flags = oops_begin(); 75092181f19SNick Piggin 75192181f19SNick Piggin show_fault_oops(regs, error_code, address); 75292181f19SNick Piggin 753a70857e4SAaron Tomlin if (task_stack_end_corrupted(tsk)) 754b0f4c4b3SPrarit Bhargava printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 75519803078SIngo Molnar 75692181f19SNick Piggin tsk->thread.cr2 = address; 75751e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 75892181f19SNick Piggin tsk->thread.error_code = error_code; 75992181f19SNick Piggin 76092181f19SNick Piggin sig = SIGKILL; 76192181f19SNick Piggin if (__die("Oops", regs, error_code)) 76292181f19SNick Piggin sig = 0; 7632d4a7167SIngo Molnar 76492181f19SNick Piggin /* Executive summary in case the body of the oops scrolled away */ 765b0f4c4b3SPrarit Bhargava printk(KERN_DEFAULT "CR2: %016lx\n", address); 7662d4a7167SIngo Molnar 76792181f19SNick Piggin oops_end(flags, regs, sig); 76892181f19SNick Piggin } 76992181f19SNick Piggin 7702d4a7167SIngo Molnar /* 7712d4a7167SIngo Molnar * Print out info about fatal segfaults, if the show_unhandled_signals 7722d4a7167SIngo Molnar * sysctl is set: 7732d4a7167SIngo Molnar */ 7742d4a7167SIngo Molnar static inline void 7752d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code, 7762d4a7167SIngo Molnar unsigned long address, struct task_struct *tsk) 7772d4a7167SIngo Molnar { 778ba54d856SBorislav Petkov const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG; 779ba54d856SBorislav Petkov 7802d4a7167SIngo Molnar if (!unhandled_signal(tsk, SIGSEGV)) 7812d4a7167SIngo Molnar return; 7822d4a7167SIngo Molnar 7832d4a7167SIngo Molnar if (!printk_ratelimit()) 7842d4a7167SIngo Molnar return; 7852d4a7167SIngo Molnar 78610a7e9d8SKees Cook printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx", 787ba54d856SBorislav Petkov loglvl, tsk->comm, task_pid_nr(tsk), address, 7882d4a7167SIngo Molnar (void *)regs->ip, (void *)regs->sp, error_code); 7892d4a7167SIngo Molnar 7902d4a7167SIngo Molnar print_vma_addr(KERN_CONT " in ", regs->ip); 7912d4a7167SIngo Molnar 7922d4a7167SIngo Molnar printk(KERN_CONT "\n"); 793ba54d856SBorislav Petkov 794342db04aSJann Horn show_opcodes(regs, loglvl); 7952d4a7167SIngo Molnar } 7962d4a7167SIngo Molnar 79702e983b7SDave Hansen /* 79802e983b7SDave Hansen * The (legacy) vsyscall page is the long page in the kernel portion 79902e983b7SDave Hansen * of the address space that has user-accessible permissions. 80002e983b7SDave Hansen */ 80102e983b7SDave Hansen static bool is_vsyscall_vaddr(unsigned long vaddr) 80202e983b7SDave Hansen { 8033ae0ad92SDave Hansen return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR); 80402e983b7SDave Hansen } 80502e983b7SDave Hansen 8062d4a7167SIngo Molnar static void 8072d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 808419ceeb1SEric W. Biederman unsigned long address, u32 pkey, int si_code) 80992181f19SNick Piggin { 81092181f19SNick Piggin struct task_struct *tsk = current; 81192181f19SNick Piggin 81292181f19SNick Piggin /* User mode accesses just cause a SIGSEGV */ 8136ea59b07SAndy Lutomirski if (user_mode(regs) && (error_code & X86_PF_USER)) { 81492181f19SNick Piggin /* 8152d4a7167SIngo Molnar * It's possible to have interrupts off here: 81692181f19SNick Piggin */ 81792181f19SNick Piggin local_irq_enable(); 81892181f19SNick Piggin 81992181f19SNick Piggin /* 82092181f19SNick Piggin * Valid to do another page fault here because this one came 8212d4a7167SIngo Molnar * from user space: 82292181f19SNick Piggin */ 82392181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 82492181f19SNick Piggin return; 82592181f19SNick Piggin 82692181f19SNick Piggin if (is_errata100(regs, address)) 82792181f19SNick Piggin return; 82892181f19SNick Piggin 829dc4fac84SAndy Lutomirski /* 830dc4fac84SAndy Lutomirski * To avoid leaking information about the kernel page table 831dc4fac84SAndy Lutomirski * layout, pretend that user-mode accesses to kernel addresses 832dc4fac84SAndy Lutomirski * are always protection faults. 833dc4fac84SAndy Lutomirski */ 834dc4fac84SAndy Lutomirski if (address >= TASK_SIZE_MAX) 8351067f030SRicardo Neri error_code |= X86_PF_PROT; 8363ae36655SAndy Lutomirski 837e575a86fSKees Cook if (likely(show_unhandled_signals)) 8382d4a7167SIngo Molnar show_signal_msg(regs, error_code, address, tsk); 83992181f19SNick Piggin 840*e49d3cbeSAndy Lutomirski set_signal_archinfo(address, error_code); 8412d4a7167SIngo Molnar 8429db812dbSEric W. Biederman if (si_code == SEGV_PKUERR) 843419ceeb1SEric W. Biederman force_sig_pkuerr((void __user *)address, pkey); 8449db812dbSEric W. Biederman 845b4fd52f2SEric W. Biederman force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk); 8462d4a7167SIngo Molnar 84792181f19SNick Piggin return; 84892181f19SNick Piggin } 84992181f19SNick Piggin 85092181f19SNick Piggin if (is_f00f_bug(regs, address)) 85192181f19SNick Piggin return; 85292181f19SNick Piggin 8534fc34901SAndy Lutomirski no_context(regs, error_code, address, SIGSEGV, si_code); 85492181f19SNick Piggin } 85592181f19SNick Piggin 8562d4a7167SIngo Molnar static noinline void 8572d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 858768fd9c6SEric W. Biederman unsigned long address) 85992181f19SNick Piggin { 860419ceeb1SEric W. Biederman __bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR); 86192181f19SNick Piggin } 86292181f19SNick Piggin 8632d4a7167SIngo Molnar static void 8642d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code, 865419ceeb1SEric W. Biederman unsigned long address, u32 pkey, int si_code) 86692181f19SNick Piggin { 86792181f19SNick Piggin struct mm_struct *mm = current->mm; 86892181f19SNick Piggin /* 86992181f19SNick Piggin * Something tried to access memory that isn't in our memory map.. 87092181f19SNick Piggin * Fix it, but check if it's kernel or user first.. 87192181f19SNick Piggin */ 87292181f19SNick Piggin up_read(&mm->mmap_sem); 87392181f19SNick Piggin 874aba1ecd3SEric W. Biederman __bad_area_nosemaphore(regs, error_code, address, pkey, si_code); 87592181f19SNick Piggin } 87692181f19SNick Piggin 8772d4a7167SIngo Molnar static noinline void 8782d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) 87992181f19SNick Piggin { 880419ceeb1SEric W. Biederman __bad_area(regs, error_code, address, 0, SEGV_MAPERR); 88192181f19SNick Piggin } 88292181f19SNick Piggin 88333a709b2SDave Hansen static inline bool bad_area_access_from_pkeys(unsigned long error_code, 88433a709b2SDave Hansen struct vm_area_struct *vma) 88533a709b2SDave Hansen { 88607f146f5SDave Hansen /* This code is always called on the current mm */ 88707f146f5SDave Hansen bool foreign = false; 88807f146f5SDave Hansen 88933a709b2SDave Hansen if (!boot_cpu_has(X86_FEATURE_OSPKE)) 89033a709b2SDave Hansen return false; 8911067f030SRicardo Neri if (error_code & X86_PF_PK) 89233a709b2SDave Hansen return true; 89307f146f5SDave Hansen /* this checks permission keys on the VMA: */ 8941067f030SRicardo Neri if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), 8951067f030SRicardo Neri (error_code & X86_PF_INSTR), foreign)) 89607f146f5SDave Hansen return true; 89733a709b2SDave Hansen return false; 89892181f19SNick Piggin } 89992181f19SNick Piggin 9002d4a7167SIngo Molnar static noinline void 9012d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code, 9027b2d0dbaSDave Hansen unsigned long address, struct vm_area_struct *vma) 90392181f19SNick Piggin { 904019132ffSDave Hansen /* 905019132ffSDave Hansen * This OSPKE check is not strictly necessary at runtime. 906019132ffSDave Hansen * But, doing it this way allows compiler optimizations 907019132ffSDave Hansen * if pkeys are compiled out. 908019132ffSDave Hansen */ 909aba1ecd3SEric W. Biederman if (bad_area_access_from_pkeys(error_code, vma)) { 9109db812dbSEric W. Biederman /* 9119db812dbSEric W. Biederman * A protection key fault means that the PKRU value did not allow 9129db812dbSEric W. Biederman * access to some PTE. Userspace can figure out what PKRU was 9139db812dbSEric W. Biederman * from the XSAVE state. This function captures the pkey from 9149db812dbSEric W. Biederman * the vma and passes it to userspace so userspace can discover 9159db812dbSEric W. Biederman * which protection key was set on the PTE. 9169db812dbSEric W. Biederman * 9179db812dbSEric W. Biederman * If we get here, we know that the hardware signaled a X86_PF_PK 9189db812dbSEric W. Biederman * fault and that there was a VMA once we got in the fault 9199db812dbSEric W. Biederman * handler. It does *not* guarantee that the VMA we find here 9209db812dbSEric W. Biederman * was the one that we faulted on. 9219db812dbSEric W. Biederman * 9229db812dbSEric W. Biederman * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); 9239db812dbSEric W. Biederman * 2. T1 : set PKRU to deny access to pkey=4, touches page 9249db812dbSEric W. Biederman * 3. T1 : faults... 9259db812dbSEric W. Biederman * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); 9269db812dbSEric W. Biederman * 5. T1 : enters fault handler, takes mmap_sem, etc... 9279db812dbSEric W. Biederman * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really 9289db812dbSEric W. Biederman * faulted on a pte with its pkey=4. 9299db812dbSEric W. Biederman */ 930aba1ecd3SEric W. Biederman u32 pkey = vma_pkey(vma); 9319db812dbSEric W. Biederman 932419ceeb1SEric W. Biederman __bad_area(regs, error_code, address, pkey, SEGV_PKUERR); 933aba1ecd3SEric W. Biederman } else { 934419ceeb1SEric W. Biederman __bad_area(regs, error_code, address, 0, SEGV_ACCERR); 935aba1ecd3SEric W. Biederman } 93692181f19SNick Piggin } 93792181f19SNick Piggin 9382d4a7167SIngo Molnar static void 939a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, 94027274f73SEric W. Biederman unsigned int fault) 94192181f19SNick Piggin { 94292181f19SNick Piggin struct task_struct *tsk = current; 94392181f19SNick Piggin 9442d4a7167SIngo Molnar /* Kernel mode? Handle exceptions or die: */ 9451067f030SRicardo Neri if (!(error_code & X86_PF_USER)) { 9464fc34901SAndy Lutomirski no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); 94796054569SLinus Torvalds return; 94896054569SLinus Torvalds } 9492d4a7167SIngo Molnar 950cd1b68f0SIngo Molnar /* User-space => ok to do another page fault: */ 95192181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 95292181f19SNick Piggin return; 9532d4a7167SIngo Molnar 954*e49d3cbeSAndy Lutomirski set_signal_archinfo(address, error_code); 9552d4a7167SIngo Molnar 956a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE 957f672b49bSAndi Kleen if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { 95840e55394SEric W. Biederman unsigned lsb = 0; 95940e55394SEric W. Biederman 96040e55394SEric W. Biederman pr_err( 961a6e04aa9SAndi Kleen "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", 962a6e04aa9SAndi Kleen tsk->comm, tsk->pid, address); 96340e55394SEric W. Biederman if (fault & VM_FAULT_HWPOISON_LARGE) 96440e55394SEric W. Biederman lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); 96540e55394SEric W. Biederman if (fault & VM_FAULT_HWPOISON) 96640e55394SEric W. Biederman lsb = PAGE_SHIFT; 96740e55394SEric W. Biederman force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, tsk); 96840e55394SEric W. Biederman return; 969a6e04aa9SAndi Kleen } 970a6e04aa9SAndi Kleen #endif 971b4fd52f2SEric W. Biederman force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk); 97292181f19SNick Piggin } 97392181f19SNick Piggin 9743a13c4d7SJohannes Weiner static noinline void 9752d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code, 97625c102d8SEric W. Biederman unsigned long address, vm_fault_t fault) 97792181f19SNick Piggin { 9781067f030SRicardo Neri if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { 9794fc34901SAndy Lutomirski no_context(regs, error_code, address, 0, 0); 9803a13c4d7SJohannes Weiner return; 981b80ef10eSKOSAKI Motohiro } 982b80ef10eSKOSAKI Motohiro 9832d4a7167SIngo Molnar if (fault & VM_FAULT_OOM) { 984f8626854SAndrey Vagin /* Kernel mode? Handle exceptions or die: */ 9851067f030SRicardo Neri if (!(error_code & X86_PF_USER)) { 9864fc34901SAndy Lutomirski no_context(regs, error_code, address, 9874fc34901SAndy Lutomirski SIGSEGV, SEGV_MAPERR); 9883a13c4d7SJohannes Weiner return; 989f8626854SAndrey Vagin } 990f8626854SAndrey Vagin 991c2d23f91SDavid Rientjes /* 992c2d23f91SDavid Rientjes * We ran out of memory, call the OOM killer, and return the 993c2d23f91SDavid Rientjes * userspace (which will retry the fault, or kill us if we got 994c2d23f91SDavid Rientjes * oom-killed): 995c2d23f91SDavid Rientjes */ 996c2d23f91SDavid Rientjes pagefault_out_of_memory(); 9972d4a7167SIngo Molnar } else { 998f672b49bSAndi Kleen if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 999f672b49bSAndi Kleen VM_FAULT_HWPOISON_LARGE)) 100027274f73SEric W. Biederman do_sigbus(regs, error_code, address, fault); 100133692f27SLinus Torvalds else if (fault & VM_FAULT_SIGSEGV) 1002768fd9c6SEric W. Biederman bad_area_nosemaphore(regs, error_code, address); 100392181f19SNick Piggin else 100492181f19SNick Piggin BUG(); 100592181f19SNick Piggin } 10062d4a7167SIngo Molnar } 100792181f19SNick Piggin 10088fed6200SDave Hansen static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte) 1009d8b57bb7SThomas Gleixner { 10101067f030SRicardo Neri if ((error_code & X86_PF_WRITE) && !pte_write(*pte)) 1011d8b57bb7SThomas Gleixner return 0; 10122d4a7167SIngo Molnar 10131067f030SRicardo Neri if ((error_code & X86_PF_INSTR) && !pte_exec(*pte)) 1014d8b57bb7SThomas Gleixner return 0; 1015d8b57bb7SThomas Gleixner 1016d8b57bb7SThomas Gleixner return 1; 1017d8b57bb7SThomas Gleixner } 1018d8b57bb7SThomas Gleixner 1019c61e211dSHarvey Harrison /* 10202d4a7167SIngo Molnar * Handle a spurious fault caused by a stale TLB entry. 10212d4a7167SIngo Molnar * 10222d4a7167SIngo Molnar * This allows us to lazily refresh the TLB when increasing the 10232d4a7167SIngo Molnar * permissions of a kernel page (RO -> RW or NX -> X). Doing it 10242d4a7167SIngo Molnar * eagerly is very expensive since that implies doing a full 10252d4a7167SIngo Molnar * cross-processor TLB flush, even if no stale TLB entries exist 10262d4a7167SIngo Molnar * on other processors. 10272d4a7167SIngo Molnar * 102831668511SDavid Vrabel * Spurious faults may only occur if the TLB contains an entry with 102931668511SDavid Vrabel * fewer permission than the page table entry. Non-present (P = 0) 103031668511SDavid Vrabel * and reserved bit (R = 1) faults are never spurious. 103131668511SDavid Vrabel * 10325b727a3bSJeremy Fitzhardinge * There are no security implications to leaving a stale TLB when 10335b727a3bSJeremy Fitzhardinge * increasing the permissions on a page. 103431668511SDavid Vrabel * 103531668511SDavid Vrabel * Returns non-zero if a spurious fault was handled, zero otherwise. 103631668511SDavid Vrabel * 103731668511SDavid Vrabel * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3 103831668511SDavid Vrabel * (Optional Invalidation). 10395b727a3bSJeremy Fitzhardinge */ 10409326638cSMasami Hiramatsu static noinline int 10418fed6200SDave Hansen spurious_kernel_fault(unsigned long error_code, unsigned long address) 10425b727a3bSJeremy Fitzhardinge { 10435b727a3bSJeremy Fitzhardinge pgd_t *pgd; 1044e0c4f675SKirill A. Shutemov p4d_t *p4d; 10455b727a3bSJeremy Fitzhardinge pud_t *pud; 10465b727a3bSJeremy Fitzhardinge pmd_t *pmd; 10475b727a3bSJeremy Fitzhardinge pte_t *pte; 10483c3e5694SSteven Rostedt int ret; 10495b727a3bSJeremy Fitzhardinge 105031668511SDavid Vrabel /* 105131668511SDavid Vrabel * Only writes to RO or instruction fetches from NX may cause 105231668511SDavid Vrabel * spurious faults. 105331668511SDavid Vrabel * 105431668511SDavid Vrabel * These could be from user or supervisor accesses but the TLB 105531668511SDavid Vrabel * is only lazily flushed after a kernel mapping protection 105631668511SDavid Vrabel * change, so user accesses are not expected to cause spurious 105731668511SDavid Vrabel * faults. 105831668511SDavid Vrabel */ 10591067f030SRicardo Neri if (error_code != (X86_PF_WRITE | X86_PF_PROT) && 10601067f030SRicardo Neri error_code != (X86_PF_INSTR | X86_PF_PROT)) 10615b727a3bSJeremy Fitzhardinge return 0; 10625b727a3bSJeremy Fitzhardinge 10635b727a3bSJeremy Fitzhardinge pgd = init_mm.pgd + pgd_index(address); 10645b727a3bSJeremy Fitzhardinge if (!pgd_present(*pgd)) 10655b727a3bSJeremy Fitzhardinge return 0; 10665b727a3bSJeremy Fitzhardinge 1067e0c4f675SKirill A. Shutemov p4d = p4d_offset(pgd, address); 1068e0c4f675SKirill A. Shutemov if (!p4d_present(*p4d)) 1069e0c4f675SKirill A. Shutemov return 0; 1070e0c4f675SKirill A. Shutemov 1071e0c4f675SKirill A. Shutemov if (p4d_large(*p4d)) 10728fed6200SDave Hansen return spurious_kernel_fault_check(error_code, (pte_t *) p4d); 1073e0c4f675SKirill A. Shutemov 1074e0c4f675SKirill A. Shutemov pud = pud_offset(p4d, address); 10755b727a3bSJeremy Fitzhardinge if (!pud_present(*pud)) 10765b727a3bSJeremy Fitzhardinge return 0; 10775b727a3bSJeremy Fitzhardinge 1078d8b57bb7SThomas Gleixner if (pud_large(*pud)) 10798fed6200SDave Hansen return spurious_kernel_fault_check(error_code, (pte_t *) pud); 1080d8b57bb7SThomas Gleixner 10815b727a3bSJeremy Fitzhardinge pmd = pmd_offset(pud, address); 10825b727a3bSJeremy Fitzhardinge if (!pmd_present(*pmd)) 10835b727a3bSJeremy Fitzhardinge return 0; 10845b727a3bSJeremy Fitzhardinge 1085d8b57bb7SThomas Gleixner if (pmd_large(*pmd)) 10868fed6200SDave Hansen return spurious_kernel_fault_check(error_code, (pte_t *) pmd); 1087d8b57bb7SThomas Gleixner 10885b727a3bSJeremy Fitzhardinge pte = pte_offset_kernel(pmd, address); 1089954f8571SAndrea Arcangeli if (!pte_present(*pte)) 10905b727a3bSJeremy Fitzhardinge return 0; 10915b727a3bSJeremy Fitzhardinge 10928fed6200SDave Hansen ret = spurious_kernel_fault_check(error_code, pte); 10933c3e5694SSteven Rostedt if (!ret) 10943c3e5694SSteven Rostedt return 0; 10953c3e5694SSteven Rostedt 10963c3e5694SSteven Rostedt /* 10972d4a7167SIngo Molnar * Make sure we have permissions in PMD. 10982d4a7167SIngo Molnar * If not, then there's a bug in the page tables: 10993c3e5694SSteven Rostedt */ 11008fed6200SDave Hansen ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd); 11013c3e5694SSteven Rostedt WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); 11022d4a7167SIngo Molnar 11033c3e5694SSteven Rostedt return ret; 11045b727a3bSJeremy Fitzhardinge } 11058fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault); 11065b727a3bSJeremy Fitzhardinge 1107c61e211dSHarvey Harrison int show_unhandled_signals = 1; 1108c61e211dSHarvey Harrison 11092d4a7167SIngo Molnar static inline int 111068da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma) 111192181f19SNick Piggin { 111207f146f5SDave Hansen /* This is only called for the current mm, so: */ 111307f146f5SDave Hansen bool foreign = false; 1114e8c6226dSDave Hansen 1115e8c6226dSDave Hansen /* 1116e8c6226dSDave Hansen * Read or write was blocked by protection keys. This is 1117e8c6226dSDave Hansen * always an unconditional error and can never result in 1118e8c6226dSDave Hansen * a follow-up action to resolve the fault, like a COW. 1119e8c6226dSDave Hansen */ 11201067f030SRicardo Neri if (error_code & X86_PF_PK) 1121e8c6226dSDave Hansen return 1; 1122e8c6226dSDave Hansen 112333a709b2SDave Hansen /* 112407f146f5SDave Hansen * Make sure to check the VMA so that we do not perform 11251067f030SRicardo Neri * faults just to hit a X86_PF_PK as soon as we fill in a 112607f146f5SDave Hansen * page. 112707f146f5SDave Hansen */ 11281067f030SRicardo Neri if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), 11291067f030SRicardo Neri (error_code & X86_PF_INSTR), foreign)) 113007f146f5SDave Hansen return 1; 113133a709b2SDave Hansen 11321067f030SRicardo Neri if (error_code & X86_PF_WRITE) { 11332d4a7167SIngo Molnar /* write, present and write, not present: */ 113492181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_WRITE))) 113592181f19SNick Piggin return 1; 11362d4a7167SIngo Molnar return 0; 11372d4a7167SIngo Molnar } 11382d4a7167SIngo Molnar 11392d4a7167SIngo Molnar /* read, present: */ 11401067f030SRicardo Neri if (unlikely(error_code & X86_PF_PROT)) 114192181f19SNick Piggin return 1; 11422d4a7167SIngo Molnar 11432d4a7167SIngo Molnar /* read, not present: */ 114492181f19SNick Piggin if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) 114592181f19SNick Piggin return 1; 114692181f19SNick Piggin 114792181f19SNick Piggin return 0; 114892181f19SNick Piggin } 114992181f19SNick Piggin 11500973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address) 11510973a06cSHiroshi Shimamoto { 11523ae0ad92SDave Hansen /* 11533ae0ad92SDave Hansen * On 64-bit systems, the vsyscall page is at an address above 11543ae0ad92SDave Hansen * TASK_SIZE_MAX, but is not considered part of the kernel 11553ae0ad92SDave Hansen * address space. 11563ae0ad92SDave Hansen */ 11573ae0ad92SDave Hansen if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address)) 11583ae0ad92SDave Hansen return false; 11593ae0ad92SDave Hansen 1160d9517346SIngo Molnar return address >= TASK_SIZE_MAX; 11610973a06cSHiroshi Shimamoto } 11620973a06cSHiroshi Shimamoto 1163c61e211dSHarvey Harrison /* 11648fed6200SDave Hansen * Called for all faults where 'address' is part of the kernel address 11658fed6200SDave Hansen * space. Might get called for faults that originate from *code* that 11668fed6200SDave Hansen * ran in userspace or the kernel. 1167c61e211dSHarvey Harrison */ 11688fed6200SDave Hansen static void 11698fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code, 11700ac09f9fSJiri Olsa unsigned long address) 1171c61e211dSHarvey Harrison { 11728fed6200SDave Hansen /* 1173367e3f1dSDave Hansen * Protection keys exceptions only happen on user pages. We 1174367e3f1dSDave Hansen * have no user pages in the kernel portion of the address 1175367e3f1dSDave Hansen * space, so do not expect them here. 1176367e3f1dSDave Hansen */ 1177367e3f1dSDave Hansen WARN_ON_ONCE(hw_error_code & X86_PF_PK); 1178367e3f1dSDave Hansen 1179367e3f1dSDave Hansen /* 11808fed6200SDave Hansen * We can fault-in kernel-space virtual memory on-demand. The 11818fed6200SDave Hansen * 'reference' page table is init_mm.pgd. 11828fed6200SDave Hansen * 11838fed6200SDave Hansen * NOTE! We MUST NOT take any locks for this case. We may 11848fed6200SDave Hansen * be in an interrupt or a critical region, and should 11858fed6200SDave Hansen * only copy the information from the master page table, 11868fed6200SDave Hansen * nothing more. 11878fed6200SDave Hansen * 11888fed6200SDave Hansen * Before doing this on-demand faulting, ensure that the 11898fed6200SDave Hansen * fault is not any of the following: 11908fed6200SDave Hansen * 1. A fault on a PTE with a reserved bit set. 11918fed6200SDave Hansen * 2. A fault caused by a user-mode access. (Do not demand- 11928fed6200SDave Hansen * fault kernel memory due to user-mode accesses). 11938fed6200SDave Hansen * 3. A fault caused by a page-level protection violation. 11948fed6200SDave Hansen * (A demand fault would be on a non-present page which 11958fed6200SDave Hansen * would have X86_PF_PROT==0). 11968fed6200SDave Hansen */ 11978fed6200SDave Hansen if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) { 11988fed6200SDave Hansen if (vmalloc_fault(address) >= 0) 11998fed6200SDave Hansen return; 12008fed6200SDave Hansen } 12018fed6200SDave Hansen 12028fed6200SDave Hansen /* Was the fault spurious, caused by lazy TLB invalidation? */ 12038fed6200SDave Hansen if (spurious_kernel_fault(hw_error_code, address)) 12048fed6200SDave Hansen return; 12058fed6200SDave Hansen 12068fed6200SDave Hansen /* kprobes don't want to hook the spurious faults: */ 12078fed6200SDave Hansen if (kprobes_fault(regs)) 12088fed6200SDave Hansen return; 12098fed6200SDave Hansen 12108fed6200SDave Hansen /* 12118fed6200SDave Hansen * Note, despite being a "bad area", there are quite a few 12128fed6200SDave Hansen * acceptable reasons to get here, such as erratum fixups 12138fed6200SDave Hansen * and handling kernel code that can fault, like get_user(). 12148fed6200SDave Hansen * 12158fed6200SDave Hansen * Don't take the mm semaphore here. If we fixup a prefetch 12168fed6200SDave Hansen * fault we could otherwise deadlock: 12178fed6200SDave Hansen */ 1218ba9f6f89SLinus Torvalds bad_area_nosemaphore(regs, hw_error_code, address); 12198fed6200SDave Hansen } 12208fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault); 12218fed6200SDave Hansen 1222aa37c51bSDave Hansen /* Handle faults in the user portion of the address space */ 1223aa37c51bSDave Hansen static inline 1224aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs, 1225aa37c51bSDave Hansen unsigned long hw_error_code, 1226c61e211dSHarvey Harrison unsigned long address) 1227c61e211dSHarvey Harrison { 1228164477c2SDave Hansen unsigned long sw_error_code; 1229c61e211dSHarvey Harrison struct vm_area_struct *vma; 12302d4a7167SIngo Molnar struct task_struct *tsk; 12312d4a7167SIngo Molnar struct mm_struct *mm; 123250a7ca3cSSouptick Joarder vm_fault_t fault, major = 0; 1233759496baSJohannes Weiner unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 1234c61e211dSHarvey Harrison 1235c61e211dSHarvey Harrison tsk = current; 1236c61e211dSHarvey Harrison mm = tsk->mm; 12372d4a7167SIngo Molnar 12382d4a7167SIngo Molnar /* kprobes don't want to hook the spurious faults: */ 1239e00b12e6SPeter Zijlstra if (unlikely(kprobes_fault(regs))) 12409be260a6SMasami Hiramatsu return; 1241e00b12e6SPeter Zijlstra 12425b0c2cacSDave Hansen /* 12435b0c2cacSDave Hansen * Reserved bits are never expected to be set on 12445b0c2cacSDave Hansen * entries in the user portion of the page tables. 12455b0c2cacSDave Hansen */ 1246164477c2SDave Hansen if (unlikely(hw_error_code & X86_PF_RSVD)) 1247164477c2SDave Hansen pgtable_bad(regs, hw_error_code, address); 1248e00b12e6SPeter Zijlstra 12495b0c2cacSDave Hansen /* 1250e50928d7SAndy Lutomirski * If SMAP is on, check for invalid kernel (supervisor) access to user 1251e50928d7SAndy Lutomirski * pages in the user address space. The odd case here is WRUSS, 1252e50928d7SAndy Lutomirski * which, according to the preliminary documentation, does not respect 1253e50928d7SAndy Lutomirski * SMAP and will have the USER bit set so, in all cases, SMAP 1254e50928d7SAndy Lutomirski * enforcement appears to be consistent with the USER bit. 12555b0c2cacSDave Hansen */ 1256a15781b5SAndy Lutomirski if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) && 1257a15781b5SAndy Lutomirski !(hw_error_code & X86_PF_USER) && 1258e50928d7SAndy Lutomirski !(regs->flags & X86_EFLAGS_AC))) 1259a15781b5SAndy Lutomirski { 1260ba9f6f89SLinus Torvalds bad_area_nosemaphore(regs, hw_error_code, address); 1261e00b12e6SPeter Zijlstra return; 1262e00b12e6SPeter Zijlstra } 1263e00b12e6SPeter Zijlstra 1264e00b12e6SPeter Zijlstra /* 1265e00b12e6SPeter Zijlstra * If we're in an interrupt, have no user context or are running 126670ffdb93SDavid Hildenbrand * in a region with pagefaults disabled then we must not take the fault 1267e00b12e6SPeter Zijlstra */ 126870ffdb93SDavid Hildenbrand if (unlikely(faulthandler_disabled() || !mm)) { 1269ba9f6f89SLinus Torvalds bad_area_nosemaphore(regs, hw_error_code, address); 1270e00b12e6SPeter Zijlstra return; 1271e00b12e6SPeter Zijlstra } 1272e00b12e6SPeter Zijlstra 1273c61e211dSHarvey Harrison /* 1274164477c2SDave Hansen * hw_error_code is literally the "page fault error code" passed to 1275164477c2SDave Hansen * the kernel directly from the hardware. But, we will shortly be 1276164477c2SDave Hansen * modifying it in software, so give it a new name. 1277164477c2SDave Hansen */ 1278164477c2SDave Hansen sw_error_code = hw_error_code; 1279164477c2SDave Hansen 1280164477c2SDave Hansen /* 1281891cffbdSLinus Torvalds * It's safe to allow irq's after cr2 has been saved and the 1282891cffbdSLinus Torvalds * vmalloc fault has been handled. 1283891cffbdSLinus Torvalds * 1284891cffbdSLinus Torvalds * User-mode registers count as a user access even for any 12852d4a7167SIngo Molnar * potential system fault or CPU buglet: 1286c61e211dSHarvey Harrison */ 1287f39b6f0eSAndy Lutomirski if (user_mode(regs)) { 1288891cffbdSLinus Torvalds local_irq_enable(); 1289164477c2SDave Hansen /* 1290164477c2SDave Hansen * Up to this point, X86_PF_USER set in hw_error_code 1291164477c2SDave Hansen * indicated a user-mode access. But, after this, 1292164477c2SDave Hansen * X86_PF_USER in sw_error_code will indicate either 1293164477c2SDave Hansen * that, *or* an implicit kernel(supervisor)-mode access 1294164477c2SDave Hansen * which originated from user mode. 1295164477c2SDave Hansen */ 1296164477c2SDave Hansen if (!(hw_error_code & X86_PF_USER)) { 1297164477c2SDave Hansen /* 1298164477c2SDave Hansen * The CPU was in user mode, but the CPU says 1299164477c2SDave Hansen * the fault was not a user-mode access. 1300164477c2SDave Hansen * Must be an implicit kernel-mode access, 1301164477c2SDave Hansen * which we do not expect to happen in the 1302164477c2SDave Hansen * user address space. 1303164477c2SDave Hansen */ 1304164477c2SDave Hansen pr_warn_once("kernel-mode error from user-mode: %lx\n", 1305164477c2SDave Hansen hw_error_code); 1306164477c2SDave Hansen 1307164477c2SDave Hansen sw_error_code |= X86_PF_USER; 1308164477c2SDave Hansen } 1309759496baSJohannes Weiner flags |= FAULT_FLAG_USER; 13102d4a7167SIngo Molnar } else { 13112d4a7167SIngo Molnar if (regs->flags & X86_EFLAGS_IF) 1312c61e211dSHarvey Harrison local_irq_enable(); 13132d4a7167SIngo Molnar } 1314c61e211dSHarvey Harrison 1315a8b0ca17SPeter Zijlstra perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 13167dd1fcc2SPeter Zijlstra 1317164477c2SDave Hansen if (sw_error_code & X86_PF_WRITE) 1318759496baSJohannes Weiner flags |= FAULT_FLAG_WRITE; 1319164477c2SDave Hansen if (sw_error_code & X86_PF_INSTR) 1320d61172b4SDave Hansen flags |= FAULT_FLAG_INSTRUCTION; 1321759496baSJohannes Weiner 13223ae0ad92SDave Hansen #ifdef CONFIG_X86_64 13233a1dfe6eSIngo Molnar /* 13243ae0ad92SDave Hansen * Instruction fetch faults in the vsyscall page might need 13253ae0ad92SDave Hansen * emulation. The vsyscall page is at a high address 13263ae0ad92SDave Hansen * (>PAGE_OFFSET), but is considered to be part of the user 13273ae0ad92SDave Hansen * address space. 1328c61e211dSHarvey Harrison * 13293ae0ad92SDave Hansen * The vsyscall page does not have a "real" VMA, so do this 13303ae0ad92SDave Hansen * emulation before we go searching for VMAs. 13313ae0ad92SDave Hansen */ 13323ae0ad92SDave Hansen if ((sw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) { 13333ae0ad92SDave Hansen if (emulate_vsyscall(regs, address)) 13343ae0ad92SDave Hansen return; 13353ae0ad92SDave Hansen } 13363ae0ad92SDave Hansen #endif 13373ae0ad92SDave Hansen 1338c61e211dSHarvey Harrison /* 133988259744SDave Hansen * Kernel-mode access to the user address space should only occur 134088259744SDave Hansen * on well-defined single instructions listed in the exception 134188259744SDave Hansen * tables. But, an erroneous kernel fault occurring outside one of 134288259744SDave Hansen * those areas which also holds mmap_sem might deadlock attempting 134388259744SDave Hansen * to validate the fault against the address space. 1344c61e211dSHarvey Harrison * 134588259744SDave Hansen * Only do the expensive exception table search when we might be at 134688259744SDave Hansen * risk of a deadlock. This happens if we 134788259744SDave Hansen * 1. Failed to acquire mmap_sem, and 13486344be60SAndy Lutomirski * 2. The access did not originate in userspace. 1349c61e211dSHarvey Harrison */ 135092181f19SNick Piggin if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 13516344be60SAndy Lutomirski if (!user_mode(regs) && !search_exception_tables(regs->ip)) { 135288259744SDave Hansen /* 135388259744SDave Hansen * Fault from code in kernel from 135488259744SDave Hansen * which we do not expect faults. 135588259744SDave Hansen */ 1356ba9f6f89SLinus Torvalds bad_area_nosemaphore(regs, sw_error_code, address); 135792181f19SNick Piggin return; 135892181f19SNick Piggin } 1359d065bd81SMichel Lespinasse retry: 1360c61e211dSHarvey Harrison down_read(&mm->mmap_sem); 136101006074SPeter Zijlstra } else { 136201006074SPeter Zijlstra /* 13632d4a7167SIngo Molnar * The above down_read_trylock() might have succeeded in 13642d4a7167SIngo Molnar * which case we'll have missed the might_sleep() from 13652d4a7167SIngo Molnar * down_read(): 136601006074SPeter Zijlstra */ 136701006074SPeter Zijlstra might_sleep(); 1368c61e211dSHarvey Harrison } 1369c61e211dSHarvey Harrison 1370c61e211dSHarvey Harrison vma = find_vma(mm, address); 137192181f19SNick Piggin if (unlikely(!vma)) { 1372164477c2SDave Hansen bad_area(regs, sw_error_code, address); 137392181f19SNick Piggin return; 137492181f19SNick Piggin } 137592181f19SNick Piggin if (likely(vma->vm_start <= address)) 1376c61e211dSHarvey Harrison goto good_area; 137792181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { 1378164477c2SDave Hansen bad_area(regs, sw_error_code, address); 137992181f19SNick Piggin return; 138092181f19SNick Piggin } 138192181f19SNick Piggin if (unlikely(expand_stack(vma, address))) { 1382164477c2SDave Hansen bad_area(regs, sw_error_code, address); 138392181f19SNick Piggin return; 138492181f19SNick Piggin } 138592181f19SNick Piggin 1386c61e211dSHarvey Harrison /* 1387c61e211dSHarvey Harrison * Ok, we have a good vm_area for this memory access, so 1388c61e211dSHarvey Harrison * we can handle it.. 1389c61e211dSHarvey Harrison */ 1390c61e211dSHarvey Harrison good_area: 1391164477c2SDave Hansen if (unlikely(access_error(sw_error_code, vma))) { 1392164477c2SDave Hansen bad_area_access_error(regs, sw_error_code, address, vma); 139392181f19SNick Piggin return; 1394c61e211dSHarvey Harrison } 1395c61e211dSHarvey Harrison 1396c61e211dSHarvey Harrison /* 1397c61e211dSHarvey Harrison * If for any reason at all we couldn't handle the fault, 1398c61e211dSHarvey Harrison * make sure we exit gracefully rather than endlessly redo 13999a95f3cfSPaul Cassella * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if 14009a95f3cfSPaul Cassella * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. 1401cb0631fdSVlastimil Babka * 1402cb0631fdSVlastimil Babka * Note that handle_userfault() may also release and reacquire mmap_sem 1403cb0631fdSVlastimil Babka * (and not return with VM_FAULT_RETRY), when returning to userland to 1404cb0631fdSVlastimil Babka * repeat the page fault later with a VM_FAULT_NOPAGE retval 1405cb0631fdSVlastimil Babka * (potentially after handling any pending signal during the return to 1406cb0631fdSVlastimil Babka * userland). The return to userland is identified whenever 1407cb0631fdSVlastimil Babka * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags. 1408c61e211dSHarvey Harrison */ 1409dcddffd4SKirill A. Shutemov fault = handle_mm_fault(vma, address, flags); 141026178ec1SLinus Torvalds major |= fault & VM_FAULT_MAJOR; 14112d4a7167SIngo Molnar 14123a13c4d7SJohannes Weiner /* 141326178ec1SLinus Torvalds * If we need to retry the mmap_sem has already been released, 141426178ec1SLinus Torvalds * and if there is a fatal signal pending there is no guarantee 141526178ec1SLinus Torvalds * that we made any progress. Handle this case first. 14163a13c4d7SJohannes Weiner */ 141726178ec1SLinus Torvalds if (unlikely(fault & VM_FAULT_RETRY)) { 141826178ec1SLinus Torvalds /* Retry at most once */ 141926178ec1SLinus Torvalds if (flags & FAULT_FLAG_ALLOW_RETRY) { 142026178ec1SLinus Torvalds flags &= ~FAULT_FLAG_ALLOW_RETRY; 142126178ec1SLinus Torvalds flags |= FAULT_FLAG_TRIED; 142226178ec1SLinus Torvalds if (!fatal_signal_pending(tsk)) 142326178ec1SLinus Torvalds goto retry; 142426178ec1SLinus Torvalds } 142526178ec1SLinus Torvalds 142626178ec1SLinus Torvalds /* User mode? Just return to handle the fatal exception */ 1427cf3c0a15SLinus Torvalds if (flags & FAULT_FLAG_USER) 14283a13c4d7SJohannes Weiner return; 14293a13c4d7SJohannes Weiner 143026178ec1SLinus Torvalds /* Not returning to user mode? Handle exceptions or die: */ 1431164477c2SDave Hansen no_context(regs, sw_error_code, address, SIGBUS, BUS_ADRERR); 143226178ec1SLinus Torvalds return; 143326178ec1SLinus Torvalds } 143426178ec1SLinus Torvalds 14357fb08ecaSLinus Torvalds up_read(&mm->mmap_sem); 143626178ec1SLinus Torvalds if (unlikely(fault & VM_FAULT_ERROR)) { 1437ba9f6f89SLinus Torvalds mm_fault_error(regs, sw_error_code, address, fault); 143837b23e05SKOSAKI Motohiro return; 143937b23e05SKOSAKI Motohiro } 144037b23e05SKOSAKI Motohiro 144137b23e05SKOSAKI Motohiro /* 144226178ec1SLinus Torvalds * Major/minor page fault accounting. If any of the events 144326178ec1SLinus Torvalds * returned VM_FAULT_MAJOR, we account it as a major fault. 1444d065bd81SMichel Lespinasse */ 144526178ec1SLinus Torvalds if (major) { 1446c61e211dSHarvey Harrison tsk->maj_flt++; 144726178ec1SLinus Torvalds perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); 1448ac17dc8eSPeter Zijlstra } else { 1449c61e211dSHarvey Harrison tsk->min_flt++; 145026178ec1SLinus Torvalds perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 1451d065bd81SMichel Lespinasse } 1452c61e211dSHarvey Harrison 14538c938f9fSIngo Molnar check_v8086_mode(regs, address, tsk); 1454c61e211dSHarvey Harrison } 1455aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault); 1456aa37c51bSDave Hansen 1457aa37c51bSDave Hansen /* 1458aa37c51bSDave Hansen * This routine handles page faults. It determines the address, 1459aa37c51bSDave Hansen * and the problem, and then passes it off to one of the appropriate 1460aa37c51bSDave Hansen * routines. 1461aa37c51bSDave Hansen */ 1462aa37c51bSDave Hansen static noinline void 1463aa37c51bSDave Hansen __do_page_fault(struct pt_regs *regs, unsigned long hw_error_code, 1464aa37c51bSDave Hansen unsigned long address) 1465aa37c51bSDave Hansen { 1466aa37c51bSDave Hansen prefetchw(¤t->mm->mmap_sem); 1467aa37c51bSDave Hansen 1468aa37c51bSDave Hansen if (unlikely(kmmio_fault(regs, address))) 1469aa37c51bSDave Hansen return; 1470aa37c51bSDave Hansen 1471aa37c51bSDave Hansen /* Was the fault on kernel-controlled part of the address space? */ 1472aa37c51bSDave Hansen if (unlikely(fault_in_kernel_space(address))) 1473aa37c51bSDave Hansen do_kern_addr_fault(regs, hw_error_code, address); 1474aa37c51bSDave Hansen else 1475aa37c51bSDave Hansen do_user_addr_fault(regs, hw_error_code, address); 1476aa37c51bSDave Hansen } 14779326638cSMasami Hiramatsu NOKPROBE_SYMBOL(__do_page_fault); 14786ba3c97aSFrederic Weisbecker 14799326638cSMasami Hiramatsu static nokprobe_inline void 14809326638cSMasami Hiramatsu trace_page_fault_entries(unsigned long address, struct pt_regs *regs, 1481d34603b0SSeiji Aguchi unsigned long error_code) 1482d34603b0SSeiji Aguchi { 1483d34603b0SSeiji Aguchi if (user_mode(regs)) 1484d4078e23SPeter Zijlstra trace_page_fault_user(address, regs, error_code); 1485d34603b0SSeiji Aguchi else 1486d4078e23SPeter Zijlstra trace_page_fault_kernel(address, regs, error_code); 1487d34603b0SSeiji Aguchi } 1488d34603b0SSeiji Aguchi 14890ac09f9fSJiri Olsa /* 149011a7ffb0SThomas Gleixner * We must have this function blacklisted from kprobes, tagged with notrace 149111a7ffb0SThomas Gleixner * and call read_cr2() before calling anything else. To avoid calling any 149211a7ffb0SThomas Gleixner * kind of tracing machinery before we've observed the CR2 value. 149311a7ffb0SThomas Gleixner * 149411a7ffb0SThomas Gleixner * exception_{enter,exit}() contains all sorts of tracepoints. 14950ac09f9fSJiri Olsa */ 149611a7ffb0SThomas Gleixner dotraplinkage void notrace 149711a7ffb0SThomas Gleixner do_page_fault(struct pt_regs *regs, unsigned long error_code) 149811a7ffb0SThomas Gleixner { 149911a7ffb0SThomas Gleixner unsigned long address = read_cr2(); /* Get the faulting address */ 1500d4078e23SPeter Zijlstra enum ctx_state prev_state; 150125c74b10SSeiji Aguchi 150225c74b10SSeiji Aguchi prev_state = exception_enter(); 150380954747SThomas Gleixner if (trace_pagefault_enabled()) 1504d4078e23SPeter Zijlstra trace_page_fault_entries(address, regs, error_code); 150511a7ffb0SThomas Gleixner 15060ac09f9fSJiri Olsa __do_page_fault(regs, error_code, address); 150725c74b10SSeiji Aguchi exception_exit(prev_state); 150825c74b10SSeiji Aguchi } 150911a7ffb0SThomas Gleixner NOKPROBE_SYMBOL(do_page_fault); 1510