// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/extable.h>		/* search_exception_tables	*/
#include <linux/memblock.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
#include <linux/efi.h>			/* efi_recover_from_page_fault()*/
#include <linux/mm_types.h>

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/
#include <asm/efi.h>			/* efi_recover_from_page_fault()*/
#include <asm/desc.h>			/* store_idt(), ...		*/
#include <asm/cpu_entry_area.h>		/* exception stack		*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & X86_PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}
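
/*
 * For reference: the prefetch instructions guarded against above are the
 * two-byte opcodes 0F 0D (AMD 3DNow! prefetch/prefetchw) and 0F 18 (SSE
 * prefetchnta/t0/t1/t2). E.g. a compiler-emitted "prefetchnta (%rax)"
 * encodes as 0F 18 00; with a wild pointer in %rax it must be ignored
 * here rather than raise SIGSEGV, since prefetches are architecturally
 * defined not to fault.
 */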

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_p4d/set_pud.
	 */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);

	if (pmd_present(*pmd) != pmd_present(*pmd_k))
		set_pmd(pmd, *pmd_k);

	if (!pmd_present(*pmd_k))
		return NULL;
	else
		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE_MAX && address < VMALLOC_END;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
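
/*
 * Design note: on 32-bit, vmalloc mappings are installed only in the
 * 'reference' page table, init_mm.pgd, and copied into each process pgd
 * lazily from the fault handler below. vmalloc_sync_all() is the eager
 * variant: it walks every pgd on pgd_list and pre-populates the vmalloc
 * PMDs, for the rare callers that cannot tolerate taking the lazy fault.
 */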

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3_pa();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	if (pmd_large(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
	unsigned long bit;

	if (!v8086_mode(regs) || !tsk->thread.vm86)
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(address)];
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
	p4d = p4d_offset(pgd, address);
	pud = pud_offset(p4d, address);
	pmd = pmd_offset(pud, address);
	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	pr_cont("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
	pgd_k = pgd_offset_k(address);
	if (pgd_none(*pgd_k))
		return -1;

	if (pgtable_l5_enabled()) {
		if (pgd_none(*pgd)) {
			set_pgd(pgd, *pgd_k);
			arch_flush_lazy_mmu_mode();
		} else {
			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
		}
	}

	/* With 4-level paging, copying happens on the p4d level. */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		return -1;

	if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
		set_p4d(p4d, *p4d_k);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
	}

	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
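
	/*
	 * Past this point the walk assumes the pud/pmd/pte levels are
	 * real (not folded), which is what the BUILD_BUG_ON above
	 * asserts: x86-64 always has at least 4 paging levels, and the
	 * possible 5th level was already synced above.
	 */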
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -1;

	if (pud_large(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -1;

	if (pmd_large(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = base + pgd_index(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	pr_info("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (bad_address(p4d))
		goto bad;

	pr_cont("P4D %lx ", p4d_val(*p4d));
	if (!p4d_present(*p4d) || p4d_large(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (bad_address(pud))
		goto bad;

	pr_cont("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	pr_cont("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	pr_cont("PTE %lx", pte_val(*pte));
out:
	pr_cont("\n");
	return;
bad:
	pr_info("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
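
/*
 * Worked example for the above: an RIP of 0xffffffff81001234 whose upper
 * half gets cleared by the erratum shows up as fault address 0x81001234,
 * equal to the truncated regs->ip. OR-ing in 0xffffffff00000000 recovers
 * the original value, and if that lands in kernel text or module space
 * we fix up regs->ip and resume there.
 */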

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
{
	u32 offset = (index >> 3) * sizeof(struct desc_struct);
	unsigned long addr;
	struct ldttss_desc desc;

	if (index == 0) {
		pr_alert("%s: NULL\n", name);
		return;
	}

	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
		return;
	}

	if (probe_kernel_read(&desc, (void *)(gdt->address + offset),
			      sizeof(struct ldttss_desc))) {
		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
			 name, index);
		return;
	}

	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
#ifdef CONFIG_X86_64
	addr |= ((u64)desc.base3 << 32);
#endif
	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
}
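
/*
 * Illustrative output of the above for a typical 64-bit task (the
 * base/limit values here are made up; real ones depend on the
 * cpu_entry_area layout):
 *
 *	LDTR: NULL
 *	TR: 0x40 -- base=0xfffffe0000003000 limit=0x206f
 */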

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & X86_PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3_pa());
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
	}

	if (address < PAGE_SIZE && !user_mode(regs))
		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
			(void *)address);
	else
		pr_alert("BUG: unable to handle page fault for address: %px\n",
			(void *)address);

	pr_alert("#PF: %s %s in %s mode\n",
		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
		 (error_code & X86_PF_WRITE) ? "write access" :
					       "read access",
			     user_mode(regs) ? "user" : "kernel");
	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
		 !(error_code & X86_PF_PROT) ? "not-present page" :
		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
		 (error_code & X86_PF_PK)    ? "protection keys violation" :
					       "permissions violation");

	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
		struct desc_ptr idt, gdt;
		u16 ldtr, tr;

		/*
		 * This can happen for quite a few reasons.  The more obvious
		 * ones are faults accessing the GDT, or LDT.  Perhaps
		 * surprisingly, if the CPU tries to deliver a benign or
		 * contributory exception from user code and gets a page fault
		 * during delivery, the page fault can be delivered as though
		 * it originated directly from user code.  This could happen
		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
		 * kernel or IST stack.
		 */
		store_idt(&idt);

		/* Usable even on Xen PV -- it's just slow. */
		native_store_gdt(&gdt);

		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
			 idt.address, idt.size, gdt.address, gdt.size);

		store_ldt(ldtr);
		show_ldttss(&gdt, "LDTR", ldtr);

		store_tr(tr);
		show_ldttss(&gdt, "TR", tr);
	}

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static void set_signal_archinfo(unsigned long address,
				unsigned long error_code)
{
	struct task_struct *tsk = current;

	/*
	 * To avoid leaking information about the kernel page
	 * table layout, pretend that user-mode accesses to
	 * kernel addresses are always protection faults.
	 *
	 * NB: This means that failed vsyscalls with vsyscall=none
	 * will have the PROT bit.  This doesn't leak any
	 * information and does not appear to cause any problems.
	 */
	if (address >= TASK_SIZE_MAX)
		error_code |= X86_PF_PROT;

	tsk->thread.trap_nr = X86_TRAP_PF;
	tsk->thread.error_code = error_code | X86_PF_USER;
	tsk->thread.cr2 = address;
}
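
/*
 * Note: the fields set above are not the siginfo itself -- si_addr comes
 * from the force_sig_fault() call sites -- but they are what ptrace and
 * core dumps report, and the X86_PF_PROT fudge keeps user-mode probes of
 * kernel addresses from revealing which kernel pages are mapped.
 */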

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	int sig;

	if (user_mode(regs)) {
		/*
		 * This is an implicit supervisor-mode access from user
		 * mode.  Bypass all the kernel-mode recovery code and just
		 * OOPS.
		 */
		goto oops;
	}

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current->thread.sig_on_uaccess_err && signal) {
			set_signal_archinfo(address, error_code);

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_fault(signal, si_code, (void __user *)address);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}
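
	/*
	 * The fixup_exception() path above is how uaccess helpers such as
	 * get_user()/copy_from_user() survive faulting on a bad user
	 * pointer: their _ASM_EXTABLE annotations record a landing pad in
	 * the exception table that typically makes the helper return
	 * -EFAULT instead of oopsing.
	 */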

#ifdef CONFIG_VMAP_STACK
	/*
	 * Stack overflow?  During boot, we can fault near the initial
	 * stack in the direct map, but that's not an overflow -- check
	 * that we're in vmalloc space to avoid this.
	 */
	if (is_vmalloc_addr((void *)address) &&
	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
		/*
		 * We're likely to be running with very little stack space
		 * left.  It's plausible that we'd hit this condition but
		 * double-fault even before we get this far, in which case
		 * we're fine: the double-fault handler will deal with it.
		 *
		 * We don't want to make it all the way into the oops code
		 * and then double-fault, though, because we're likely to
		 * break the console driver and lose most of the stack dump.
		 */
		asm volatile ("movq %[stack], %%rsp\n\t"
			      "call handle_stack_overflow\n\t"
			      "1: jmp 1b"
			      : ASM_CALL_CONSTRAINT
			      : "D" ("kernel stack overflow (page fault)"),
				"S" (regs), "d" (address),
				[stack] "rm" (stack));
		unreachable();
	}
#endif
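
	/*
	 * Re the asm in the CONFIG_VMAP_STACK block above: the task stack
	 * has (most likely) overflowed, so it cannot be used to call
	 * handle_stack_overflow().  RSP is therefore pointed at the top of
	 * the double-fault IST stack first.  handle_stack_overflow() never
	 * returns; the "1: jmp 1b" loop merely backs up unreachable().
	 */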

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Buggy firmware could access regions which might page fault, try to
	 * recover from such faults.
	 */
	if (IS_ENABLED(CONFIG_EFI))
		efi_recover_from_page_fault(address);

oops:
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(tsk))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;

	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
		loglvl, tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");

	show_opcodes(regs, loglvl);
}

/*
 * The (legacy) vsyscall page is the lone page in the kernel portion
 * of the address space that has user-accessible permissions.
 */
static bool is_vsyscall_vaddr(unsigned long vaddr)
{
	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
}
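
/*
 * VSYSCALL_ADDR is the fixed legacy address 0xffffffffff600000; with
 * vsyscall=emulate, user faults on this page are handed to
 * emulate_vsyscall() rather than being treated as kernel-address faults.
 */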

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, u32 pkey, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs) && (error_code & X86_PF_USER)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		/*
		 * To avoid leaking information about the kernel page table
		 * layout, pretend that user-mode accesses to kernel addresses
		 * are always protection faults.
		 */
		if (address >= TASK_SIZE_MAX)
			error_code |= X86_PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		set_signal_archinfo(address, error_code);

		if (si_code == SEGV_PKUERR)
			force_sig_pkuerr((void __user *)address, pkey);

		force_sig_fault(SIGSEGV, si_code, (void __user *)address);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, u32 pkey, int si_code)
{
	struct mm_struct *mm = current->mm;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	/* This code is always called on the current mm */
	bool foreign = false;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return false;
	if (error_code & X86_PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return true;
	return false;
}
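
/*
 * Illustrative userspace sequence that ends up here with X86_PF_PK set
 * (assuming pkey support is compiled in and the CPU has OSPKE):
 *
 *	pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
 *	pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *	*(volatile char *)addr;		// SIGSEGV, si_code == SEGV_PKUERR
 */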

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma)) {
		/*
		 * A protection key fault means that the PKRU value did not allow
		 * access to some PTE.  Userspace can figure out what PKRU was
		 * from the XSAVE state.  This function captures the pkey from
		 * the vma and passes it to userspace so userspace can discover
		 * which protection key was set on the PTE.
		 *
		 * If we get here, we know that the hardware signaled an
		 * X86_PF_PK fault and that there was a VMA once we got in the
		 * fault handler.  It does *not* guarantee that the VMA we find
		 * here was the one that we faulted on.
		 *
		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
		 * 3. T1   : faults...
		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
		 * 5. T1   : enters fault handler, takes mmap_sem, etc...
		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
		 *	     faulted on a pte with its pkey=4.
		 */
		u32 pkey = vma_pkey(vma);

		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
	} else {
		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
	}
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  vm_fault_t fault)
{
	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	set_signal_archinfo(address, error_code);

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		struct task_struct *tsk = current;
		unsigned lsb = 0;

		pr_err(
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;
		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return;
	}
#endif
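
	/*
	 * The lsb computed above reaches userspace as si_addr_lsb, telling
	 * the signal handler how much of the range around si_addr is
	 * poisoned: PAGE_SHIFT for a base page, the hstate shift for a
	 * huge page.
	 */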
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & X86_PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address);
		else
			BUG();
	}
}

static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}
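
/*
 * Concrete instance of the case handled below: a kernel RO page is made
 * RW without an eager global TLB flush; a CPU still holding the stale RO
 * entry then write-faults with error_code == (X86_PF_PROT | X86_PF_WRITE).
 * The page-table walk finds pte_write() now set, the fault is declared
 * spurious, and retrying the access refreshes the TLB entry.
 */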

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry.  Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
static noinline int
spurious_kernel_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/*
	 * Only writes to RO or instruction fetches from NX may cause
	 * spurious faults.
	 *
	 * These could be from user or supervisor accesses but the TLB
	 * is only lazily flushed after a kernel mapping protection
	 * change, so user accesses are not expected to cause spurious
	 * faults.
	 */
	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
	    error_code != (X86_PF_INSTR | X86_PF_PROT))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;

	if (p4d_large(*p4d))
		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_kernel_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_kernel_fault_check(error_code, pte);
	if (!ret)
		return 0;
11562d4a7167SIngo Molnar 	 * If not, then there's a bug in the page tables:
11573c3e5694SSteven Rostedt 	 */
11588fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
11593c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
11602d4a7167SIngo Molnar 
11613c3e5694SSteven Rostedt 	return ret;
11625b727a3bSJeremy Fitzhardinge }
11638fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault);
11645b727a3bSJeremy Fitzhardinge 
1165c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1166c61e211dSHarvey Harrison 
11672d4a7167SIngo Molnar static inline int
116868da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
116992181f19SNick Piggin {
117007f146f5SDave Hansen 	/* This is only called for the current mm, so: */
117107f146f5SDave Hansen 	bool foreign = false;
1172e8c6226dSDave Hansen 
1173e8c6226dSDave Hansen 	/*
1174e8c6226dSDave Hansen 	 * Read or write was blocked by protection keys.  This is
1175e8c6226dSDave Hansen 	 * always an unconditional error and can never result in
1176e8c6226dSDave Hansen 	 * a follow-up action to resolve the fault, like a COW.
1177e8c6226dSDave Hansen 	 */
11781067f030SRicardo Neri 	if (error_code & X86_PF_PK)
1179e8c6226dSDave Hansen 		return 1;
1180e8c6226dSDave Hansen 
118133a709b2SDave Hansen 	/*
118207f146f5SDave Hansen 	 * Make sure to check the VMA so that we do not perform
11831067f030SRicardo Neri 	 * faults just to hit an X86_PF_PK as soon as we fill in a
118407f146f5SDave Hansen 	 * page.
118507f146f5SDave Hansen 	 */
11861067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
11871067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
118807f146f5SDave Hansen 		return 1;
118933a709b2SDave Hansen 
11901067f030SRicardo Neri 	if (error_code & X86_PF_WRITE) {
11912d4a7167SIngo Molnar 		/* write, present and write, not present: */
119292181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
119392181f19SNick Piggin 			return 1;
11942d4a7167SIngo Molnar 		return 0;
11952d4a7167SIngo Molnar 	}
11962d4a7167SIngo Molnar 
11972d4a7167SIngo Molnar 	/* read, present: */
11981067f030SRicardo Neri 	if (unlikely(error_code & X86_PF_PROT))
119992181f19SNick Piggin 		return 1;
12002d4a7167SIngo Molnar 
12012d4a7167SIngo Molnar 	/* read, not present: */
120292181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
120392181f19SNick Piggin 		return 1;
120492181f19SNick Piggin 
120592181f19SNick Piggin 	return 0;
120692181f19SNick Piggin }
120792181f19SNick Piggin 
12080973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address)
12090973a06cSHiroshi Shimamoto {
12103ae0ad92SDave Hansen 	/*
12113ae0ad92SDave Hansen 	 * On 64-bit systems, the vsyscall page is at an address above
12123ae0ad92SDave Hansen 	 * TASK_SIZE_MAX, but is not considered part of the kernel
12133ae0ad92SDave Hansen 	 * address space.
12143ae0ad92SDave Hansen 	 */
12153ae0ad92SDave Hansen 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
12163ae0ad92SDave Hansen 		return false;
12173ae0ad92SDave Hansen 
1218d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
12190973a06cSHiroshi Shimamoto }
12200973a06cSHiroshi Shimamoto 
1221c61e211dSHarvey Harrison /*
12228fed6200SDave Hansen  * Called for all faults where 'address' is part of the kernel address
12238fed6200SDave Hansen  * space.  Might get called for faults that originate from *code* that
12248fed6200SDave Hansen  * ran in userspace or the kernel.
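 *
 * No VMA lookup happens on this path: kernel mappings come from
 * init_mm, so a fault here is either fixed up in place (vmalloc or
 * spurious faults), recovered through the exception tables, or
 * treated as a kernel bug.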
1225c61e211dSHarvey Harrison  */
12268fed6200SDave Hansen static void
12278fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
12280ac09f9fSJiri Olsa 		   unsigned long address)
1229c61e211dSHarvey Harrison {
12308fed6200SDave Hansen 	/*
1231367e3f1dSDave Hansen 	 * Protection keys exceptions only happen on user pages.  We
1232367e3f1dSDave Hansen 	 * have no user pages in the kernel portion of the address
1233367e3f1dSDave Hansen 	 * space, so do not expect them here.
1234367e3f1dSDave Hansen 	 */
1235367e3f1dSDave Hansen 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1236367e3f1dSDave Hansen 
1237367e3f1dSDave Hansen 	/*
12388fed6200SDave Hansen 	 * We can fault-in kernel-space virtual memory on-demand. The
12398fed6200SDave Hansen 	 * 'reference' page table is init_mm.pgd.
12408fed6200SDave Hansen 	 *
12418fed6200SDave Hansen 	 * NOTE! We MUST NOT take any locks for this case. We may
12428fed6200SDave Hansen 	 * be in an interrupt or a critical region, and should
12438fed6200SDave Hansen 	 * only copy the information from the master page table,
12448fed6200SDave Hansen 	 * nothing more.
12458fed6200SDave Hansen 	 *
12468fed6200SDave Hansen 	 * Before doing this on-demand faulting, ensure that the
12478fed6200SDave Hansen 	 * fault is not any of the following:
12488fed6200SDave Hansen 	 * 1. A fault on a PTE with a reserved bit set.
12498fed6200SDave Hansen 	 * 2. A fault caused by a user-mode access.  (Do not demand-
12508fed6200SDave Hansen 	 *    fault kernel memory due to user-mode accesses).
12518fed6200SDave Hansen 	 * 3. A fault caused by a page-level protection violation.
12528fed6200SDave Hansen 	 *    (A demand fault would be on a non-present page which
12538fed6200SDave Hansen 	 *     would have X86_PF_PROT==0).
12548fed6200SDave Hansen 	 */
12558fed6200SDave Hansen 	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
12568fed6200SDave Hansen 		if (vmalloc_fault(address) >= 0)
12578fed6200SDave Hansen 			return;
12588fed6200SDave Hansen 	}
12598fed6200SDave Hansen 
12608fed6200SDave Hansen 	/* Was the fault spurious, caused by lazy TLB invalidation? */
12618fed6200SDave Hansen 	if (spurious_kernel_fault(hw_error_code, address))
12628fed6200SDave Hansen 		return;
12638fed6200SDave Hansen 
12648fed6200SDave Hansen 	/* kprobes don't want to hook the spurious faults: */
1265b98cca44SAnshuman Khandual 	if (kprobe_page_fault(regs, X86_TRAP_PF))
12668fed6200SDave Hansen 		return;
12678fed6200SDave Hansen 
12688fed6200SDave Hansen 	/*
12698fed6200SDave Hansen 	 * Note, despite being a "bad area", there are quite a few
12708fed6200SDave Hansen 	 * acceptable reasons to get here, such as erratum fixups
12718fed6200SDave Hansen 	 * and handling kernel code that can fault, like get_user().
12728fed6200SDave Hansen 	 *
12738fed6200SDave Hansen 	 * Don't take the mm semaphore here.  If we fixup a prefetch
12748fed6200SDave Hansen 	 * fault we could otherwise deadlock:
12758fed6200SDave Hansen 	 */
1276ba9f6f89SLinus Torvalds 	bad_area_nosemaphore(regs, hw_error_code, address);
12778fed6200SDave Hansen }
12788fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault);
12798fed6200SDave Hansen 
1280aa37c51bSDave Hansen /* Handle faults in the user portion of the address space */
1281aa37c51bSDave Hansen static inline
1282aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs,
1283aa37c51bSDave Hansen 			unsigned long hw_error_code,
1284c61e211dSHarvey Harrison 			unsigned long address)
1285c61e211dSHarvey Harrison {
1286c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
1287c61e211dSHarvey Harrison 	struct task_struct *tsk;
12882d4a7167SIngo Molnar 	struct mm_struct *mm;
128950a7ca3cSSouptick Joarder 	vm_fault_t fault, major = 0;
1290759496baSJohannes Weiner 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1291c61e211dSHarvey Harrison 
1292c61e211dSHarvey Harrison 	tsk = current;
1293c61e211dSHarvey Harrison 	mm = tsk->mm;
12942d4a7167SIngo Molnar 
12952d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
1296b98cca44SAnshuman Khandual 	if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
12979be260a6SMasami Hiramatsu 		return;
1298e00b12e6SPeter Zijlstra 
12995b0c2cacSDave Hansen 	/*
13005b0c2cacSDave Hansen 	 * Reserved bits are never expected to be set on
13015b0c2cacSDave Hansen 	 * entries in the user portion of the page tables.
13025b0c2cacSDave Hansen 	 */
1303164477c2SDave Hansen 	if (unlikely(hw_error_code & X86_PF_RSVD))
1304164477c2SDave Hansen 		pgtable_bad(regs, hw_error_code, address);
1305e00b12e6SPeter Zijlstra 
13065b0c2cacSDave Hansen 	/*
1307e50928d7SAndy Lutomirski 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1308e50928d7SAndy Lutomirski 	 * pages in the user address space.  The odd case here is WRUSS,
1309e50928d7SAndy Lutomirski 	 * which, according to the preliminary documentation, does not respect
1310e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set so, in all cases, SMAP
1311e50928d7SAndy Lutomirski 	 * enforcement appears to be consistent with the USER bit.
13125b0c2cacSDave Hansen 	 */
1313a15781b5SAndy Lutomirski 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1314a15781b5SAndy Lutomirski 		     !(hw_error_code & X86_PF_USER) &&
1315e50928d7SAndy Lutomirski 		     !(regs->flags & X86_EFLAGS_AC)))
1316a15781b5SAndy Lutomirski 	{
1317ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1318e00b12e6SPeter Zijlstra 		return;
1319e00b12e6SPeter Zijlstra 	}
1320e00b12e6SPeter Zijlstra 
1321e00b12e6SPeter Zijlstra 	/*
1322e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context, or are running
132370ffdb93SDavid Hildenbrand 	 * in a region with pagefaults disabled, then we must not take the fault.
1324e00b12e6SPeter Zijlstra 	 */
132570ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1326ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1327e00b12e6SPeter Zijlstra 		return;
1328e00b12e6SPeter Zijlstra 	}
1329e00b12e6SPeter Zijlstra 
1330c61e211dSHarvey Harrison 	/*
1331891cffbdSLinus Torvalds 	 * It's safe to allow IRQs after CR2 has been saved and the
1332891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
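	 * (CR2 holds the faulting address and is per-CPU state that a
	 * nested page fault would overwrite, which is why it must be
	 * saved before interrupts are re-enabled.)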
1333891cffbdSLinus Torvalds 	 *
1334891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
13352d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1336c61e211dSHarvey Harrison 	 */
1337f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1338891cffbdSLinus Torvalds 		local_irq_enable();
1339759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
13402d4a7167SIngo Molnar 	} else {
13412d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1342c61e211dSHarvey Harrison 			local_irq_enable();
13432d4a7167SIngo Molnar 	}
1344c61e211dSHarvey Harrison 
1345a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
13467dd1fcc2SPeter Zijlstra 
13470ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_WRITE)
1348759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
13490ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_INSTR)
1350d61172b4SDave Hansen 		flags |= FAULT_FLAG_INSTRUCTION;
1351759496baSJohannes Weiner 
13523ae0ad92SDave Hansen #ifdef CONFIG_X86_64
13533a1dfe6eSIngo Molnar 	/*
1354918ce325SAndy Lutomirski 	 * Faults in the vsyscall page might need emulation.  The
1355918ce325SAndy Lutomirski 	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1356918ce325SAndy Lutomirski 	 * considered to be part of the user address space.
1357c61e211dSHarvey Harrison 	 *
13583ae0ad92SDave Hansen 	 * The vsyscall page does not have a "real" VMA, so do this
13593ae0ad92SDave Hansen 	 * emulation before we go searching for VMAs.
1360e0a446ceSAndy Lutomirski 	 *
1361e0a446ceSAndy Lutomirski 	 * PKRU never rejects instruction fetches, so we don't need
1362e0a446ceSAndy Lutomirski 	 * to consider the PF_PK bit.
13633ae0ad92SDave Hansen 	 */
1364918ce325SAndy Lutomirski 	if (is_vsyscall_vaddr(address)) {
1365918ce325SAndy Lutomirski 		if (emulate_vsyscall(hw_error_code, regs, address))
13663ae0ad92SDave Hansen 			return;
13673ae0ad92SDave Hansen 	}
13683ae0ad92SDave Hansen #endif
13693ae0ad92SDave Hansen 
1370c61e211dSHarvey Harrison 	/*
137188259744SDave Hansen 	 * Kernel-mode access to the user address space should only occur
137288259744SDave Hansen 	 * on well-defined single instructions listed in the exception
137388259744SDave Hansen 	 * tables.  But, an erroneous kernel fault occurring outside one of
137488259744SDave Hansen 	 * those areas, in a path that already holds mmap_sem, might deadlock
137588259744SDave Hansen 	 * attempting to validate the fault against the address space.
1376c61e211dSHarvey Harrison 	 *
137788259744SDave Hansen 	 * Only do the expensive exception table search when we might be at
137888259744SDave Hansen 	 * risk of a deadlock.  This happens if:
137988259744SDave Hansen 	 * 1. We failed to acquire mmap_sem, and
13806344be60SAndy Lutomirski 	 * 2. The access did not originate in userspace.
1381c61e211dSHarvey Harrison 	 */
138292181f19SNick Piggin 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
13836344be60SAndy Lutomirski 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
138488259744SDave Hansen 			/*
138588259744SDave Hansen 			 * Fault from kernel code that we
138688259744SDave Hansen 			 * do not expect to fault.
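			 * (We already know there is no exception-table
			 * fixup for regs->ip, so blocking on mmap_sem
			 * could only deadlock; report the bad access
			 * right away instead.)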
138788259744SDave Hansen 			 */
13880ed32f1aSAndy Lutomirski 			bad_area_nosemaphore(regs, hw_error_code, address);
138992181f19SNick Piggin 			return;
139092181f19SNick Piggin 		}
1391d065bd81SMichel Lespinasse retry:
1392c61e211dSHarvey Harrison 		down_read(&mm->mmap_sem);
139301006074SPeter Zijlstra 	} else {
139401006074SPeter Zijlstra 		/*
13952d4a7167SIngo Molnar 		 * The above down_read_trylock() might have succeeded in
13962d4a7167SIngo Molnar 		 * which case we'll have missed the might_sleep() from
13972d4a7167SIngo Molnar 		 * down_read():
139801006074SPeter Zijlstra 		 */
139901006074SPeter Zijlstra 		might_sleep();
1400c61e211dSHarvey Harrison 	}
1401c61e211dSHarvey Harrison 
1402c61e211dSHarvey Harrison 	vma = find_vma(mm, address);
140392181f19SNick Piggin 	if (unlikely(!vma)) {
14040ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
140592181f19SNick Piggin 		return;
140692181f19SNick Piggin 	}
140792181f19SNick Piggin 	if (likely(vma->vm_start <= address))
1408c61e211dSHarvey Harrison 		goto good_area;
140992181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
14100ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
141192181f19SNick Piggin 		return;
141292181f19SNick Piggin 	}
141392181f19SNick Piggin 	if (unlikely(expand_stack(vma, address))) {
14140ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
141592181f19SNick Piggin 		return;
141692181f19SNick Piggin 	}
141792181f19SNick Piggin 
1418c61e211dSHarvey Harrison 	/*
1419c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1420c61e211dSHarvey Harrison 	 * we can handle it.
1421c61e211dSHarvey Harrison 	 */
1422c61e211dSHarvey Harrison good_area:
14230ed32f1aSAndy Lutomirski 	if (unlikely(access_error(hw_error_code, vma))) {
14240ed32f1aSAndy Lutomirski 		bad_area_access_error(regs, hw_error_code, address, vma);
142592181f19SNick Piggin 		return;
1426c61e211dSHarvey Harrison 	}
1427c61e211dSHarvey Harrison 
1428c61e211dSHarvey Harrison 	/*
1429c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1430c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
14319a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
14329a95f3cfSPaul Cassella 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1433cb0631fdSVlastimil Babka 	 *
1434cb0631fdSVlastimil Babka 	 * Note that handle_userfault() may also release and reacquire mmap_sem
1435cb0631fdSVlastimil Babka 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1436cb0631fdSVlastimil Babka 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1437cb0631fdSVlastimil Babka 	 * (potentially after handling any pending signal during the return to
1438cb0631fdSVlastimil Babka 	 * userland).  The return to userland is identified whenever
1439cb0631fdSVlastimil Babka 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1440c61e211dSHarvey Harrison 	 */
1441dcddffd4SKirill A. Shutemov 	fault = handle_mm_fault(vma, address, flags);
144226178ec1SLinus Torvalds 	major |= fault & VM_FAULT_MAJOR;
14432d4a7167SIngo Molnar 
14443a13c4d7SJohannes Weiner 	/*
144526178ec1SLinus Torvalds 	 * If we need to retry, the mmap_sem has already been released,
144626178ec1SLinus Torvalds 	 * and if there is a fatal signal pending there is no guarantee
144726178ec1SLinus Torvalds 	 * that we made any progress.  Handle this case first.
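	 * (As the retry logic below shows, only one retry is ever
	 * attempted: FAULT_FLAG_ALLOW_RETRY is cleared and
	 * FAULT_FLAG_TRIED is set before jumping back to "retry".)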
14483a13c4d7SJohannes Weiner 	 */
144926178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_RETRY)) {
145026178ec1SLinus Torvalds 		/* Retry at most once */
145126178ec1SLinus Torvalds 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
145226178ec1SLinus Torvalds 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
145326178ec1SLinus Torvalds 			flags |= FAULT_FLAG_TRIED;
145426178ec1SLinus Torvalds 			if (!fatal_signal_pending(tsk))
145526178ec1SLinus Torvalds 				goto retry;
145626178ec1SLinus Torvalds 		}
145726178ec1SLinus Torvalds 
145826178ec1SLinus Torvalds 		/* User mode? Just return to handle the fatal exception */
1459cf3c0a15SLinus Torvalds 		if (flags & FAULT_FLAG_USER)
14603a13c4d7SJohannes Weiner 			return;
14613a13c4d7SJohannes Weiner 
146226178ec1SLinus Torvalds 		/* Not returning to user mode? Handle exceptions or die: */
14630ed32f1aSAndy Lutomirski 		no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
146426178ec1SLinus Torvalds 		return;
146526178ec1SLinus Torvalds 	}
146626178ec1SLinus Torvalds 
14677fb08ecaSLinus Torvalds 	up_read(&mm->mmap_sem);
146826178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_ERROR)) {
14690ed32f1aSAndy Lutomirski 		mm_fault_error(regs, hw_error_code, address, fault);
147037b23e05SKOSAKI Motohiro 		return;
147137b23e05SKOSAKI Motohiro 	}
147237b23e05SKOSAKI Motohiro 
147337b23e05SKOSAKI Motohiro 	/*
147426178ec1SLinus Torvalds 	 * Major/minor page fault accounting. If any of the events
147526178ec1SLinus Torvalds 	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1476d065bd81SMichel Lespinasse 	 */
147726178ec1SLinus Torvalds 	if (major) {
1478c61e211dSHarvey Harrison 		tsk->maj_flt++;
147926178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1480ac17dc8eSPeter Zijlstra 	} else {
1481c61e211dSHarvey Harrison 		tsk->min_flt++;
148226178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1483d065bd81SMichel Lespinasse 	}
1484c61e211dSHarvey Harrison 
14858c938f9fSIngo Molnar 	check_v8086_mode(regs, address, tsk);
1486c61e211dSHarvey Harrison }
1487aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault);
1488aa37c51bSDave Hansen 
1489a0d14b89SPeter Zijlstra static __always_inline void
1490a0d14b89SPeter Zijlstra trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1491a0d14b89SPeter Zijlstra 			 unsigned long address)
1492d34603b0SSeiji Aguchi {
1493a0d14b89SPeter Zijlstra 	if (!trace_pagefault_enabled())
1494a0d14b89SPeter Zijlstra 		return;
1495a0d14b89SPeter Zijlstra 
1496d34603b0SSeiji Aguchi 	if (user_mode(regs))
1497d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1498d34603b0SSeiji Aguchi 	else
1499d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1500d34603b0SSeiji Aguchi }
1501d34603b0SSeiji Aguchi 
1502a0d14b89SPeter Zijlstra dotraplinkage void
1503*ee6352b2SFrederic Weisbecker do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
1504*ee6352b2SFrederic Weisbecker 		unsigned long address)
150511a7ffb0SThomas Gleixner {
1506*ee6352b2SFrederic Weisbecker 	prefetchw(&current->mm->mmap_sem);
1507*ee6352b2SFrederic Weisbecker 	trace_page_fault_entries(regs, hw_error_code, address);
150825c74b10SSeiji Aguchi 
1509*ee6352b2SFrederic Weisbecker 	if (unlikely(kmmio_fault(regs, address)))
1510*ee6352b2SFrederic Weisbecker 		return;
1511*ee6352b2SFrederic Weisbecker 
1512*ee6352b2SFrederic Weisbecker 	/* Was the fault on kernel-controlled part of the address space? */
1513*ee6352b2SFrederic Weisbecker 	if (unlikely(fault_in_kernel_space(address)))
1514*ee6352b2SFrederic Weisbecker 		do_kern_addr_fault(regs, hw_error_code, address);
1515*ee6352b2SFrederic Weisbecker 	else
1516*ee6352b2SFrederic Weisbecker 		do_user_addr_fault(regs, hw_error_code, address);
151725c74b10SSeiji Aguchi }
151811a7ffb0SThomas Gleixner NOKPROBE_SYMBOL(do_page_fault);
1519