1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2c61e211dSHarvey Harrison /* 3c61e211dSHarvey Harrison * Copyright (C) 1995 Linus Torvalds 4c61e211dSHarvey Harrison * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. 5f8eeb2e6SIngo Molnar * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar 6c61e211dSHarvey Harrison */ 7a2bcd473SIngo Molnar #include <linux/sched.h> /* test_thread_flag(), ... */ 868db0cf1SIngo Molnar #include <linux/sched/task_stack.h> /* task_stack_*(), ... */ 9a2bcd473SIngo Molnar #include <linux/kdebug.h> /* oops_begin/end, ... */ 104cdf8dbeSLinus Torvalds #include <linux/extable.h> /* search_exception_tables */ 1157c8a661SMike Rapoport #include <linux/memblock.h> /* max_low_pfn */ 129326638cSMasami Hiramatsu #include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */ 13a2bcd473SIngo Molnar #include <linux/mmiotrace.h> /* kmmio_handler, ... */ 14cdd6c482SIngo Molnar #include <linux/perf_event.h> /* perf_sw_event */ 15f672b49bSAndi Kleen #include <linux/hugetlb.h> /* hstate_index_to_shift */ 16268bb0ceSLinus Torvalds #include <linux/prefetch.h> /* prefetchw */ 1756dd9470SFrederic Weisbecker #include <linux/context_tracking.h> /* exception_enter(), ... */ 1870ffdb93SDavid Hildenbrand #include <linux/uaccess.h> /* faulthandler_disabled() */ 193425d934SSai Praneeth #include <linux/efi.h> /* efi_recover_from_page_fault()*/ 2050a7ca3cSSouptick Joarder #include <linux/mm_types.h> 21c61e211dSHarvey Harrison 22019132ffSDave Hansen #include <asm/cpufeature.h> /* boot_cpu_has, ... */ 23a2bcd473SIngo Molnar #include <asm/traps.h> /* dotraplinkage, ... */ 24a2bcd473SIngo Molnar #include <asm/pgalloc.h> /* pgd_*(), ... */ 25f40c3300SAndy Lutomirski #include <asm/fixmap.h> /* VSYSCALL_ADDR */ 26f40c3300SAndy Lutomirski #include <asm/vsyscall.h> /* emulate_vsyscall */ 27ba3e127eSBrian Gerst #include <asm/vm86.h> /* struct vm86 */ 28019132ffSDave Hansen #include <asm/mmu_context.h> /* vma_pkey() */ 293425d934SSai Praneeth #include <asm/efi.h> /* efi_recover_from_page_fault()*/ 30a1a371c4SAndy Lutomirski #include <asm/desc.h> /* store_idt(), ... */ 31d876b673SThomas Gleixner #include <asm/cpu_entry_area.h> /* exception stack */ 32186525bdSIngo Molnar #include <asm/pgtable_areas.h> /* VMALLOC_START, ... */ 33ef68017eSAndy Lutomirski #include <asm/kvm_para.h> /* kvm_handle_async_pf */ 34c61e211dSHarvey Harrison 35d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS 36d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h> 37d34603b0SSeiji Aguchi 38c61e211dSHarvey Harrison /* 39b319eed0SIngo Molnar * Returns 0 if mmiotrace is disabled, or if the fault is not 40b319eed0SIngo Molnar * handled by mmiotrace: 41b814d41fSIngo Molnar */ 429326638cSMasami Hiramatsu static nokprobe_inline int 4362c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr) 4486069782SPekka Paalanen { 450fd0e3daSPekka Paalanen if (unlikely(is_kmmio_active())) 460fd0e3daSPekka Paalanen if (kmmio_handler(regs, addr) == 1) 470fd0e3daSPekka Paalanen return -1; 480fd0e3daSPekka Paalanen return 0; 4986069782SPekka Paalanen } 5086069782SPekka Paalanen 51c61e211dSHarvey Harrison /* 522d4a7167SIngo Molnar * Prefetch quirks: 532d4a7167SIngo Molnar * 542d4a7167SIngo Molnar * 32-bit mode: 552d4a7167SIngo Molnar * 56c61e211dSHarvey Harrison * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. 57c61e211dSHarvey Harrison * Check that here and ignore it. 
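 *
 * A minimal sketch of the decode done below (illustrative bytes, not
 * from the original comment): for a faulting "prefetchw (%rax)" the
 * scan reads 0F 0D 08, classifies 0F as instr_hi == 0x00 with
 * instr_lo == 0xF, peeks at the next byte, and reports a prefetch
 * because that byte is 0x0D (0x18 would match the PREFETCHh forms).
 * A prefixed form such as 26 0F 18 00 works too: the 0x26 segment
 * prefix is skipped by the 0x20/0x30 case before the 0F is reached.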
58c61e211dSHarvey Harrison * 592d4a7167SIngo Molnar * 64-bit mode: 602d4a7167SIngo Molnar * 61c61e211dSHarvey Harrison * Sometimes the CPU reports invalid exceptions on prefetch. 62c61e211dSHarvey Harrison * Check that here and ignore it. 63c61e211dSHarvey Harrison * 642d4a7167SIngo Molnar * Opcode checker based on code by Richard Brunner. 65c61e211dSHarvey Harrison */ 66107a0367SIngo Molnar static inline int 67107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, 68107a0367SIngo Molnar unsigned char opcode, int *prefetch) 69c61e211dSHarvey Harrison { 70107a0367SIngo Molnar unsigned char instr_hi = opcode & 0xf0; 71107a0367SIngo Molnar unsigned char instr_lo = opcode & 0x0f; 72c61e211dSHarvey Harrison 73c61e211dSHarvey Harrison switch (instr_hi) { 74c61e211dSHarvey Harrison case 0x20: 75c61e211dSHarvey Harrison case 0x30: 76c61e211dSHarvey Harrison /* 77c61e211dSHarvey Harrison * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. 78c61e211dSHarvey Harrison * In X86_64 long mode, the CPU will signal invalid 79c61e211dSHarvey Harrison * opcode if some of these prefixes are present so 80c61e211dSHarvey Harrison * X86_64 will never get here anyway 81c61e211dSHarvey Harrison */ 82107a0367SIngo Molnar return ((instr_lo & 7) == 0x6); 83c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 84c61e211dSHarvey Harrison case 0x40: 85c61e211dSHarvey Harrison /* 86c61e211dSHarvey Harrison * In AMD64 long mode 0x40..0x4F are valid REX prefixes 87c61e211dSHarvey Harrison * Need to figure out under what instruction mode the 88c61e211dSHarvey Harrison * instruction was issued. Could check the LDT for lm, 89c61e211dSHarvey Harrison * but for now it's good enough to assume that long 90c61e211dSHarvey Harrison * mode only uses well known segments or kernel. 91c61e211dSHarvey Harrison */ 92318f5a2aSAndy Lutomirski return (!user_mode(regs) || user_64bit_mode(regs)); 93c61e211dSHarvey Harrison #endif 94c61e211dSHarvey Harrison case 0x60: 95c61e211dSHarvey Harrison /* 0x64 thru 0x67 are valid prefixes in all modes. */ 96107a0367SIngo Molnar return (instr_lo & 0xC) == 0x4; 97c61e211dSHarvey Harrison case 0xF0: 98c61e211dSHarvey Harrison /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. 
*/ 99107a0367SIngo Molnar return !instr_lo || (instr_lo>>1) == 1; 100c61e211dSHarvey Harrison case 0x00: 101c61e211dSHarvey Harrison /* Prefetch instruction is 0x0F0D or 0x0F18 */ 102107a0367SIngo Molnar if (probe_kernel_address(instr, opcode)) 103107a0367SIngo Molnar return 0; 104107a0367SIngo Molnar 105107a0367SIngo Molnar *prefetch = (instr_lo == 0xF) && 106107a0367SIngo Molnar (opcode == 0x0D || opcode == 0x18); 107107a0367SIngo Molnar return 0; 108107a0367SIngo Molnar default: 109107a0367SIngo Molnar return 0; 110107a0367SIngo Molnar } 111107a0367SIngo Molnar } 112107a0367SIngo Molnar 113107a0367SIngo Molnar static int 114107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) 115107a0367SIngo Molnar { 116107a0367SIngo Molnar unsigned char *max_instr; 117107a0367SIngo Molnar unsigned char *instr; 118107a0367SIngo Molnar int prefetch = 0; 119107a0367SIngo Molnar 120107a0367SIngo Molnar /* 121107a0367SIngo Molnar * If it was an exec (instruction fetch) fault on an NX page, then 122107a0367SIngo Molnar * do not ignore the fault: 123107a0367SIngo Molnar */ 1241067f030SRicardo Neri if (error_code & X86_PF_INSTR) 125107a0367SIngo Molnar return 0; 126107a0367SIngo Molnar 127107a0367SIngo Molnar instr = (void *)convert_ip_to_linear(current, regs); 128107a0367SIngo Molnar max_instr = instr + 15; 129107a0367SIngo Molnar 130d31bf07fSAndy Lutomirski if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX) 131107a0367SIngo Molnar return 0; 132107a0367SIngo Molnar 133107a0367SIngo Molnar while (instr < max_instr) { 134107a0367SIngo Molnar unsigned char opcode; 135c61e211dSHarvey Harrison 136c61e211dSHarvey Harrison if (probe_kernel_address(instr, opcode)) 137c61e211dSHarvey Harrison break; 138107a0367SIngo Molnar 139107a0367SIngo Molnar instr++; 140107a0367SIngo Molnar 141107a0367SIngo Molnar if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) 142c61e211dSHarvey Harrison break; 143c61e211dSHarvey Harrison } 144c61e211dSHarvey Harrison return prefetch; 145c61e211dSHarvey Harrison } 146c61e211dSHarvey Harrison 147f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock); 148f2f13a85SIngo Molnar LIST_HEAD(pgd_list); 1492d4a7167SIngo Molnar 150f2f13a85SIngo Molnar #ifdef CONFIG_X86_32 151f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) 152f2f13a85SIngo Molnar { 153f2f13a85SIngo Molnar unsigned index = pgd_index(address); 154f2f13a85SIngo Molnar pgd_t *pgd_k; 155e0c4f675SKirill A. Shutemov p4d_t *p4d, *p4d_k; 156f2f13a85SIngo Molnar pud_t *pud, *pud_k; 157f2f13a85SIngo Molnar pmd_t *pmd, *pmd_k; 158f2f13a85SIngo Molnar 159f2f13a85SIngo Molnar pgd += index; 160f2f13a85SIngo Molnar pgd_k = init_mm.pgd + index; 161f2f13a85SIngo Molnar 162f2f13a85SIngo Molnar if (!pgd_present(*pgd_k)) 163f2f13a85SIngo Molnar return NULL; 164f2f13a85SIngo Molnar 165f2f13a85SIngo Molnar /* 166f2f13a85SIngo Molnar * set_pgd(pgd, *pgd_k); here would be useless on PAE 167f2f13a85SIngo Molnar * and redundant with the set_pmd() on non-PAE. As would 168e0c4f675SKirill A. Shutemov * set_p4d/set_pud. 169f2f13a85SIngo Molnar */ 170e0c4f675SKirill A. Shutemov p4d = p4d_offset(pgd, address); 171e0c4f675SKirill A. Shutemov p4d_k = p4d_offset(pgd_k, address); 172e0c4f675SKirill A. Shutemov if (!p4d_present(*p4d_k)) 173e0c4f675SKirill A. Shutemov return NULL; 174e0c4f675SKirill A. Shutemov 175e0c4f675SKirill A. Shutemov pud = pud_offset(p4d, address); 176e0c4f675SKirill A. 
Shutemov pud_k = pud_offset(p4d_k, address); 177f2f13a85SIngo Molnar if (!pud_present(*pud_k)) 178f2f13a85SIngo Molnar return NULL; 179f2f13a85SIngo Molnar 180f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 181f2f13a85SIngo Molnar pmd_k = pmd_offset(pud_k, address); 1828e998fc2SJoerg Roedel 1838e998fc2SJoerg Roedel if (pmd_present(*pmd) != pmd_present(*pmd_k)) 1848e998fc2SJoerg Roedel set_pmd(pmd, *pmd_k); 1858e998fc2SJoerg Roedel 186f2f13a85SIngo Molnar if (!pmd_present(*pmd_k)) 187f2f13a85SIngo Molnar return NULL; 188b8bcfe99SJeremy Fitzhardinge else 18951b75b5bSJoerg Roedel BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k)); 190f2f13a85SIngo Molnar 191f2f13a85SIngo Molnar return pmd_k; 192f2f13a85SIngo Molnar } 193f2f13a85SIngo Molnar 19486cf69f1SJoerg Roedel void arch_sync_kernel_mappings(unsigned long start, unsigned long end) 195f2f13a85SIngo Molnar { 19686cf69f1SJoerg Roedel unsigned long addr; 197f2f13a85SIngo Molnar 19886cf69f1SJoerg Roedel for (addr = start & PMD_MASK; 19986cf69f1SJoerg Roedel addr >= TASK_SIZE_MAX && addr < VMALLOC_END; 20086cf69f1SJoerg Roedel addr += PMD_SIZE) { 201f2f13a85SIngo Molnar struct page *page; 202f2f13a85SIngo Molnar 203a79e53d8SAndrea Arcangeli spin_lock(&pgd_lock); 204f2f13a85SIngo Molnar list_for_each_entry(page, &pgd_list, lru) { 205617d34d9SJeremy Fitzhardinge spinlock_t *pgt_lock; 206617d34d9SJeremy Fitzhardinge 207a79e53d8SAndrea Arcangeli /* the pgt_lock only for Xen */ 208617d34d9SJeremy Fitzhardinge pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 209617d34d9SJeremy Fitzhardinge 210617d34d9SJeremy Fitzhardinge spin_lock(pgt_lock); 21186cf69f1SJoerg Roedel vmalloc_sync_one(page_address(page), addr); 212617d34d9SJeremy Fitzhardinge spin_unlock(pgt_lock); 213f2f13a85SIngo Molnar } 214a79e53d8SAndrea Arcangeli spin_unlock(&pgd_lock); 215f2f13a85SIngo Molnar } 216f2f13a85SIngo Molnar } 217f2f13a85SIngo Molnar 218f2f13a85SIngo Molnar /* 219f2f13a85SIngo Molnar * Did it hit the DOS screen memory VA from vm86 mode? 220f2f13a85SIngo Molnar */ 221f2f13a85SIngo Molnar static inline void 222f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 223f2f13a85SIngo Molnar struct task_struct *tsk) 224f2f13a85SIngo Molnar { 2259fda6a06SBrian Gerst #ifdef CONFIG_VM86 226f2f13a85SIngo Molnar unsigned long bit; 227f2f13a85SIngo Molnar 2289fda6a06SBrian Gerst if (!v8086_mode(regs) || !tsk->thread.vm86) 229f2f13a85SIngo Molnar return; 230f2f13a85SIngo Molnar 231f2f13a85SIngo Molnar bit = (address - 0xA0000) >> PAGE_SHIFT; 232f2f13a85SIngo Molnar if (bit < 32) 2339fda6a06SBrian Gerst tsk->thread.vm86->screen_bitmap |= 1 << bit; 2349fda6a06SBrian Gerst #endif 235f2f13a85SIngo Molnar } 236c61e211dSHarvey Harrison 237087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn) 238087975b0SAkinobu Mita { 239087975b0SAkinobu Mita return pfn < max_low_pfn; 240087975b0SAkinobu Mita } 241087975b0SAkinobu Mita 242cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address) 243c61e211dSHarvey Harrison { 2446c690ee1SAndy Lutomirski pgd_t *base = __va(read_cr3_pa()); 245087975b0SAkinobu Mita pgd_t *pgd = &base[pgd_index(address)]; 246e0c4f675SKirill A. Shutemov p4d_t *p4d; 247e0c4f675SKirill A. 
Shutemov pud_t *pud; 248087975b0SAkinobu Mita pmd_t *pmd; 249087975b0SAkinobu Mita pte_t *pte; 2502d4a7167SIngo Molnar 251c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE 25239e48d9bSJan Beulich pr_info("*pdpt = %016Lx ", pgd_val(*pgd)); 253087975b0SAkinobu Mita if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) 254087975b0SAkinobu Mita goto out; 25539e48d9bSJan Beulich #define pr_pde pr_cont 25639e48d9bSJan Beulich #else 25739e48d9bSJan Beulich #define pr_pde pr_info 258c61e211dSHarvey Harrison #endif 259e0c4f675SKirill A. Shutemov p4d = p4d_offset(pgd, address); 260e0c4f675SKirill A. Shutemov pud = pud_offset(p4d, address); 261e0c4f675SKirill A. Shutemov pmd = pmd_offset(pud, address); 26239e48d9bSJan Beulich pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); 26339e48d9bSJan Beulich #undef pr_pde 264c61e211dSHarvey Harrison 265c61e211dSHarvey Harrison /* 266c61e211dSHarvey Harrison * We must not directly access the pte in the highpte 267c61e211dSHarvey Harrison * case if the page table is located in highmem. 268c61e211dSHarvey Harrison * And let's rather not kmap-atomic the pte, just in case 2692d4a7167SIngo Molnar * it's allocated already: 270c61e211dSHarvey Harrison */ 271087975b0SAkinobu Mita if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) 272087975b0SAkinobu Mita goto out; 2732d4a7167SIngo Molnar 274087975b0SAkinobu Mita pte = pte_offset_kernel(pmd, address); 27539e48d9bSJan Beulich pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); 276087975b0SAkinobu Mita out: 27739e48d9bSJan Beulich pr_cont("\n"); 278f2f13a85SIngo Molnar } 279f2f13a85SIngo Molnar 280f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */ 281f2f13a85SIngo Molnar 282e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD 283f2f13a85SIngo Molnar static const char errata93_warning[] = 284ad361c98SJoe Perches KERN_ERR 285ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n" 286ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n" 287ad361c98SJoe Perches "******* Please consider a BIOS update.\n" 288ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n"; 289e05139f2SJan Beulich #endif 290f2f13a85SIngo Molnar 291f2f13a85SIngo Molnar /* 292f2f13a85SIngo Molnar * No vm86 mode in 64-bit mode: 293f2f13a85SIngo Molnar */ 294f2f13a85SIngo Molnar static inline void 295f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 296f2f13a85SIngo Molnar struct task_struct *tsk) 297f2f13a85SIngo Molnar { 298f2f13a85SIngo Molnar } 299f2f13a85SIngo Molnar 300f2f13a85SIngo Molnar static int bad_address(void *p) 301f2f13a85SIngo Molnar { 302f2f13a85SIngo Molnar unsigned long dummy; 303f2f13a85SIngo Molnar 304f2f13a85SIngo Molnar return probe_kernel_address((unsigned long *)p, dummy); 305f2f13a85SIngo Molnar } 306f2f13a85SIngo Molnar 307f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address) 308f2f13a85SIngo Molnar { 3096c690ee1SAndy Lutomirski pgd_t *base = __va(read_cr3_pa()); 310087975b0SAkinobu Mita pgd_t *pgd = base + pgd_index(address); 311e0c4f675SKirill A. 
Shutemov p4d_t *p4d; 312c61e211dSHarvey Harrison pud_t *pud; 313c61e211dSHarvey Harrison pmd_t *pmd; 314c61e211dSHarvey Harrison pte_t *pte; 315c61e211dSHarvey Harrison 3162d4a7167SIngo Molnar if (bad_address(pgd)) 3172d4a7167SIngo Molnar goto bad; 3182d4a7167SIngo Molnar 31939e48d9bSJan Beulich pr_info("PGD %lx ", pgd_val(*pgd)); 3202d4a7167SIngo Molnar 3212d4a7167SIngo Molnar if (!pgd_present(*pgd)) 3222d4a7167SIngo Molnar goto out; 323c61e211dSHarvey Harrison 324e0c4f675SKirill A. Shutemov p4d = p4d_offset(pgd, address); 325e0c4f675SKirill A. Shutemov if (bad_address(p4d)) 326e0c4f675SKirill A. Shutemov goto bad; 327e0c4f675SKirill A. Shutemov 32839e48d9bSJan Beulich pr_cont("P4D %lx ", p4d_val(*p4d)); 329e0c4f675SKirill A. Shutemov if (!p4d_present(*p4d) || p4d_large(*p4d)) 330e0c4f675SKirill A. Shutemov goto out; 331e0c4f675SKirill A. Shutemov 332e0c4f675SKirill A. Shutemov pud = pud_offset(p4d, address); 3332d4a7167SIngo Molnar if (bad_address(pud)) 3342d4a7167SIngo Molnar goto bad; 3352d4a7167SIngo Molnar 33639e48d9bSJan Beulich pr_cont("PUD %lx ", pud_val(*pud)); 337b5360222SAndi Kleen if (!pud_present(*pud) || pud_large(*pud)) 3382d4a7167SIngo Molnar goto out; 339c61e211dSHarvey Harrison 340c61e211dSHarvey Harrison pmd = pmd_offset(pud, address); 3412d4a7167SIngo Molnar if (bad_address(pmd)) 3422d4a7167SIngo Molnar goto bad; 3432d4a7167SIngo Molnar 34439e48d9bSJan Beulich pr_cont("PMD %lx ", pmd_val(*pmd)); 3452d4a7167SIngo Molnar if (!pmd_present(*pmd) || pmd_large(*pmd)) 3462d4a7167SIngo Molnar goto out; 347c61e211dSHarvey Harrison 348c61e211dSHarvey Harrison pte = pte_offset_kernel(pmd, address); 3492d4a7167SIngo Molnar if (bad_address(pte)) 3502d4a7167SIngo Molnar goto bad; 3512d4a7167SIngo Molnar 35239e48d9bSJan Beulich pr_cont("PTE %lx", pte_val(*pte)); 3532d4a7167SIngo Molnar out: 35439e48d9bSJan Beulich pr_cont("\n"); 355c61e211dSHarvey Harrison return; 356c61e211dSHarvey Harrison bad: 35739e48d9bSJan Beulich pr_info("BAD\n"); 358c61e211dSHarvey Harrison } 359c61e211dSHarvey Harrison 360f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */ 361c61e211dSHarvey Harrison 3622d4a7167SIngo Molnar /* 3632d4a7167SIngo Molnar * Workaround for K8 erratum #93 & buggy BIOS. 3642d4a7167SIngo Molnar * 3652d4a7167SIngo Molnar * BIOS SMM functions are required to use a specific workaround 3662d4a7167SIngo Molnar * to avoid corruption of the 64bit RIP register on C stepping K8. 3672d4a7167SIngo Molnar * 3682d4a7167SIngo Molnar * A lot of BIOS that didn't get tested properly miss this. 3692d4a7167SIngo Molnar * 3702d4a7167SIngo Molnar * The OS sees this as a page fault with the upper 32bits of RIP cleared. 3712d4a7167SIngo Molnar * Try to work around it here. 3722d4a7167SIngo Molnar * 3732d4a7167SIngo Molnar * Note we only handle faults in kernel here. 3742d4a7167SIngo Molnar * Does nothing on 32-bit. 
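 *
 * Illustrative sketch with hypothetical addresses (not part of the
 * original comment): if SMM truncated RIP so that we fault with
 * CR2 == RIP == 0x0000000081000100 while kernel text lives at
 * 0xffffffff81000000..., the code below ORs 0xffffffff00000000 back
 * in, sees the result land inside _stext.._etext, repairs regs->ip
 * and reports the fault as handled.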
375c61e211dSHarvey Harrison */ 376c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address) 377c61e211dSHarvey Harrison { 378e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD) 379e05139f2SJan Beulich if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD 380e05139f2SJan Beulich || boot_cpu_data.x86 != 0xf) 381e05139f2SJan Beulich return 0; 382e05139f2SJan Beulich 383c61e211dSHarvey Harrison if (address != regs->ip) 384c61e211dSHarvey Harrison return 0; 3852d4a7167SIngo Molnar 386c61e211dSHarvey Harrison if ((address >> 32) != 0) 387c61e211dSHarvey Harrison return 0; 3882d4a7167SIngo Molnar 389c61e211dSHarvey Harrison address |= 0xffffffffUL << 32; 390c61e211dSHarvey Harrison if ((address >= (u64)_stext && address <= (u64)_etext) || 391c61e211dSHarvey Harrison (address >= MODULES_VADDR && address <= MODULES_END)) { 392a454ab31SIngo Molnar printk_once(errata93_warning); 393c61e211dSHarvey Harrison regs->ip = address; 394c61e211dSHarvey Harrison return 1; 395c61e211dSHarvey Harrison } 396c61e211dSHarvey Harrison #endif 397c61e211dSHarvey Harrison return 0; 398c61e211dSHarvey Harrison } 399c61e211dSHarvey Harrison 400c61e211dSHarvey Harrison /* 4012d4a7167SIngo Molnar * Work around K8 erratum #100: K8 in compat mode occasionally jumps 4022d4a7167SIngo Molnar * to illegal addresses >4GB. 4032d4a7167SIngo Molnar * 4042d4a7167SIngo Molnar * We catch this in the page fault handler because these addresses 4052d4a7167SIngo Molnar * are not reachable. Just detect this case and return. Any code 406c61e211dSHarvey Harrison * segment in LDT is compatibility mode. 407c61e211dSHarvey Harrison */ 408c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address) 409c61e211dSHarvey Harrison { 410c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 4112d4a7167SIngo Molnar if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) 412c61e211dSHarvey Harrison return 1; 413c61e211dSHarvey Harrison #endif 414c61e211dSHarvey Harrison return 0; 415c61e211dSHarvey Harrison } 416c61e211dSHarvey Harrison 417c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address) 418c61e211dSHarvey Harrison { 419c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG 420c61e211dSHarvey Harrison unsigned long nr; 4212d4a7167SIngo Molnar 422c61e211dSHarvey Harrison /* 4232d4a7167SIngo Molnar * Pentium F0 0F C7 C8 bug workaround: 424c61e211dSHarvey Harrison */ 425e2604b49SBorislav Petkov if (boot_cpu_has_bug(X86_BUG_F00F)) { 426c61e211dSHarvey Harrison nr = (address - idt_descr.address) >> 3; 427c61e211dSHarvey Harrison 428c61e211dSHarvey Harrison if (nr == 6) { 429c61e211dSHarvey Harrison do_invalid_op(regs, 0); 430c61e211dSHarvey Harrison return 1; 431c61e211dSHarvey Harrison } 432c61e211dSHarvey Harrison } 433c61e211dSHarvey Harrison #endif 434c61e211dSHarvey Harrison return 0; 435c61e211dSHarvey Harrison } 436c61e211dSHarvey Harrison 437a1a371c4SAndy Lutomirski static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index) 438a1a371c4SAndy Lutomirski { 439a1a371c4SAndy Lutomirski u32 offset = (index >> 3) * sizeof(struct desc_struct); 440a1a371c4SAndy Lutomirski unsigned long addr; 441a1a371c4SAndy Lutomirski struct ldttss_desc desc; 442a1a371c4SAndy Lutomirski 443a1a371c4SAndy Lutomirski if (index == 0) { 444a1a371c4SAndy Lutomirski pr_alert("%s: NULL\n", name); 445a1a371c4SAndy Lutomirski return; 446a1a371c4SAndy Lutomirski } 447a1a371c4SAndy Lutomirski 448a1a371c4SAndy Lutomirski if (offset 
+ sizeof(struct ldttss_desc) >= gdt->size) { 449a1a371c4SAndy Lutomirski pr_alert("%s: 0x%hx -- out of bounds\n", name, index); 450a1a371c4SAndy Lutomirski return; 451a1a371c4SAndy Lutomirski } 452a1a371c4SAndy Lutomirski 453a1a371c4SAndy Lutomirski if (probe_kernel_read(&desc, (void *)(gdt->address + offset), 454a1a371c4SAndy Lutomirski sizeof(struct ldttss_desc))) { 455a1a371c4SAndy Lutomirski pr_alert("%s: 0x%hx -- GDT entry is not readable\n", 456a1a371c4SAndy Lutomirski name, index); 457a1a371c4SAndy Lutomirski return; 458a1a371c4SAndy Lutomirski } 459a1a371c4SAndy Lutomirski 4605ccd3528SColin Ian King addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24); 461a1a371c4SAndy Lutomirski #ifdef CONFIG_X86_64 462a1a371c4SAndy Lutomirski addr |= ((u64)desc.base3 << 32); 463a1a371c4SAndy Lutomirski #endif 464a1a371c4SAndy Lutomirski pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n", 465a1a371c4SAndy Lutomirski name, index, addr, (desc.limit0 | (desc.limit1 << 16))); 466a1a371c4SAndy Lutomirski } 467a1a371c4SAndy Lutomirski 4682d4a7167SIngo Molnar static void 469a2aa52abSIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address) 470c61e211dSHarvey Harrison { 471c61e211dSHarvey Harrison if (!oops_may_print()) 472c61e211dSHarvey Harrison return; 473c61e211dSHarvey Harrison 4741067f030SRicardo Neri if (error_code & X86_PF_INSTR) { 47593809be8SHarvey Harrison unsigned int level; 476426e34ccSMatt Fleming pgd_t *pgd; 477426e34ccSMatt Fleming pte_t *pte; 4782d4a7167SIngo Molnar 4796c690ee1SAndy Lutomirski pgd = __va(read_cr3_pa()); 480426e34ccSMatt Fleming pgd += pgd_index(address); 481426e34ccSMatt Fleming 482426e34ccSMatt Fleming pte = lookup_address_in_pgd(pgd, address, &level); 483c61e211dSHarvey Harrison 4848f766149SIngo Molnar if (pte && pte_present(*pte) && !pte_exec(*pte)) 485d79d0d8aSDmitry Vyukov pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", 486d79d0d8aSDmitry Vyukov from_kuid(&init_user_ns, current_uid())); 487eff50c34SJiri Kosina if (pte && pte_present(*pte) && pte_exec(*pte) && 488eff50c34SJiri Kosina (pgd_flags(*pgd) & _PAGE_USER) && 4891e02ce4cSAndy Lutomirski (__read_cr4() & X86_CR4_SMEP)) 490d79d0d8aSDmitry Vyukov pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n", 491d79d0d8aSDmitry Vyukov from_kuid(&init_user_ns, current_uid())); 492c61e211dSHarvey Harrison } 493fd40d6e3SHarvey Harrison 494f28b11a2SSean Christopherson if (address < PAGE_SIZE && !user_mode(regs)) 495ea2f8d60SBorislav Petkov pr_alert("BUG: kernel NULL pointer dereference, address: %px\n", 496f28b11a2SSean Christopherson (void *)address); 497f28b11a2SSean Christopherson else 498ea2f8d60SBorislav Petkov pr_alert("BUG: unable to handle page fault for address: %px\n", 4994188f063SDmitry Vyukov (void *)address); 5002d4a7167SIngo Molnar 501ea2f8d60SBorislav Petkov pr_alert("#PF: %s %s in %s mode\n", 50218ea35c5SSean Christopherson (error_code & X86_PF_USER) ? "user" : "supervisor", 50318ea35c5SSean Christopherson (error_code & X86_PF_INSTR) ? "instruction fetch" : 50418ea35c5SSean Christopherson (error_code & X86_PF_WRITE) ? "write access" : 50518ea35c5SSean Christopherson "read access", 50618ea35c5SSean Christopherson user_mode(regs) ? "user" : "kernel"); 50718ea35c5SSean Christopherson pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code, 50818ea35c5SSean Christopherson !(error_code & X86_PF_PROT) ? "not-present page" : 50918ea35c5SSean Christopherson (error_code & X86_PF_RSVD) ? 
"reserved bit violation" : 51018ea35c5SSean Christopherson (error_code & X86_PF_PK) ? "protection keys violation" : 51118ea35c5SSean Christopherson "permissions violation"); 512a2aa52abSIngo Molnar 513a1a371c4SAndy Lutomirski if (!(error_code & X86_PF_USER) && user_mode(regs)) { 514a1a371c4SAndy Lutomirski struct desc_ptr idt, gdt; 515a1a371c4SAndy Lutomirski u16 ldtr, tr; 516a1a371c4SAndy Lutomirski 517a1a371c4SAndy Lutomirski /* 518a1a371c4SAndy Lutomirski * This can happen for quite a few reasons. The more obvious 519a1a371c4SAndy Lutomirski * ones are faults accessing the GDT, or LDT. Perhaps 520a1a371c4SAndy Lutomirski * surprisingly, if the CPU tries to deliver a benign or 521a1a371c4SAndy Lutomirski * contributory exception from user code and gets a page fault 522a1a371c4SAndy Lutomirski * during delivery, the page fault can be delivered as though 523a1a371c4SAndy Lutomirski * it originated directly from user code. This could happen 524a1a371c4SAndy Lutomirski * due to wrong permissions on the IDT, GDT, LDT, TSS, or 525a1a371c4SAndy Lutomirski * kernel or IST stack. 526a1a371c4SAndy Lutomirski */ 527a1a371c4SAndy Lutomirski store_idt(&idt); 528a1a371c4SAndy Lutomirski 529a1a371c4SAndy Lutomirski /* Usable even on Xen PV -- it's just slow. */ 530a1a371c4SAndy Lutomirski native_store_gdt(&gdt); 531a1a371c4SAndy Lutomirski 532a1a371c4SAndy Lutomirski pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n", 533a1a371c4SAndy Lutomirski idt.address, idt.size, gdt.address, gdt.size); 534a1a371c4SAndy Lutomirski 535a1a371c4SAndy Lutomirski store_ldt(ldtr); 536a1a371c4SAndy Lutomirski show_ldttss(&gdt, "LDTR", ldtr); 537a1a371c4SAndy Lutomirski 538a1a371c4SAndy Lutomirski store_tr(tr); 539a1a371c4SAndy Lutomirski show_ldttss(&gdt, "TR", tr); 540a1a371c4SAndy Lutomirski } 541a1a371c4SAndy Lutomirski 542c61e211dSHarvey Harrison dump_pagetable(address); 543c61e211dSHarvey Harrison } 544c61e211dSHarvey Harrison 5452d4a7167SIngo Molnar static noinline void 5462d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code, 5472d4a7167SIngo Molnar unsigned long address) 548c61e211dSHarvey Harrison { 5492d4a7167SIngo Molnar struct task_struct *tsk; 5502d4a7167SIngo Molnar unsigned long flags; 5512d4a7167SIngo Molnar int sig; 5522d4a7167SIngo Molnar 5532d4a7167SIngo Molnar flags = oops_begin(); 5542d4a7167SIngo Molnar tsk = current; 5552d4a7167SIngo Molnar sig = SIGKILL; 556c61e211dSHarvey Harrison 557c61e211dSHarvey Harrison printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", 55892181f19SNick Piggin tsk->comm, address); 559c61e211dSHarvey Harrison dump_pagetable(address); 5602d4a7167SIngo Molnar 561c61e211dSHarvey Harrison if (__die("Bad pagetable", regs, error_code)) 562874d93d1SAlexander van Heukelum sig = 0; 5632d4a7167SIngo Molnar 564874d93d1SAlexander van Heukelum oops_end(flags, regs, sig); 565c61e211dSHarvey Harrison } 566c61e211dSHarvey Harrison 567e49d3cbeSAndy Lutomirski static void set_signal_archinfo(unsigned long address, 568e49d3cbeSAndy Lutomirski unsigned long error_code) 569e49d3cbeSAndy Lutomirski { 570e49d3cbeSAndy Lutomirski struct task_struct *tsk = current; 571e49d3cbeSAndy Lutomirski 572e49d3cbeSAndy Lutomirski /* 573e49d3cbeSAndy Lutomirski * To avoid leaking information about the kernel page 574e49d3cbeSAndy Lutomirski * table layout, pretend that user-mode accesses to 575e49d3cbeSAndy Lutomirski * kernel addresses are always protection faults. 
576e0a446ceSAndy Lutomirski * 577e0a446ceSAndy Lutomirski * NB: This means that failed vsyscalls with vsyscall=none 578e0a446ceSAndy Lutomirski * will have the PROT bit. This doesn't leak any 579e0a446ceSAndy Lutomirski * information and does not appear to cause any problems. 580e49d3cbeSAndy Lutomirski */ 581e49d3cbeSAndy Lutomirski if (address >= TASK_SIZE_MAX) 582e49d3cbeSAndy Lutomirski error_code |= X86_PF_PROT; 583e49d3cbeSAndy Lutomirski 584e49d3cbeSAndy Lutomirski tsk->thread.trap_nr = X86_TRAP_PF; 585e49d3cbeSAndy Lutomirski tsk->thread.error_code = error_code | X86_PF_USER; 586e49d3cbeSAndy Lutomirski tsk->thread.cr2 = address; 587e49d3cbeSAndy Lutomirski } 588e49d3cbeSAndy Lutomirski 5892d4a7167SIngo Molnar static noinline void 5902d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code, 5914fc34901SAndy Lutomirski unsigned long address, int signal, int si_code) 59292181f19SNick Piggin { 59392181f19SNick Piggin struct task_struct *tsk = current; 59492181f19SNick Piggin unsigned long flags; 59592181f19SNick Piggin int sig; 59692181f19SNick Piggin 597ebb53e25SAndy Lutomirski if (user_mode(regs)) { 598ebb53e25SAndy Lutomirski /* 599ebb53e25SAndy Lutomirski * This is an implicit supervisor-mode access from user 600ebb53e25SAndy Lutomirski * mode. Bypass all the kernel-mode recovery code and just 601ebb53e25SAndy Lutomirski * OOPS. 602ebb53e25SAndy Lutomirski */ 603ebb53e25SAndy Lutomirski goto oops; 604ebb53e25SAndy Lutomirski } 605ebb53e25SAndy Lutomirski 60692181f19SNick Piggin /* Are we prepared to handle this kernel fault? */ 60781fd9c18SJann Horn if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) { 608c026b359SPeter Zijlstra /* 609c026b359SPeter Zijlstra * Any interrupt that takes a fault gets the fixup. This makes 610c026b359SPeter Zijlstra * the below recursive fault logic only apply to faults from 611c026b359SPeter Zijlstra * task context. 612c026b359SPeter Zijlstra */ 613c026b359SPeter Zijlstra if (in_interrupt()) 614c026b359SPeter Zijlstra return; 615c026b359SPeter Zijlstra 616c026b359SPeter Zijlstra /* 617c026b359SPeter Zijlstra * Per the above we're !in_interrupt(), aka. task context. 618c026b359SPeter Zijlstra * 619c026b359SPeter Zijlstra * In this case we need to make sure we're not recursively 620c026b359SPeter Zijlstra * faulting through the emulate_vsyscall() logic. 621c026b359SPeter Zijlstra */ 6222a53ccbcSIngo Molnar if (current->thread.sig_on_uaccess_err && signal) { 623e49d3cbeSAndy Lutomirski set_signal_archinfo(address, error_code); 6244fc34901SAndy Lutomirski 6254fc34901SAndy Lutomirski /* XXX: hwpoison faults will set the wrong code. */ 6262e1661d2SEric W. Biederman force_sig_fault(signal, si_code, (void __user *)address); 6274fc34901SAndy Lutomirski } 628c026b359SPeter Zijlstra 629c026b359SPeter Zijlstra /* 630c026b359SPeter Zijlstra * Barring that, we can do the fixup and be happy. 631c026b359SPeter Zijlstra */ 63292181f19SNick Piggin return; 6334fc34901SAndy Lutomirski } 63492181f19SNick Piggin 6356271cfdfSAndy Lutomirski #ifdef CONFIG_VMAP_STACK 6366271cfdfSAndy Lutomirski /* 6376271cfdfSAndy Lutomirski * Stack overflow? During boot, we can fault near the initial 6386271cfdfSAndy Lutomirski * stack in the direct map, but that's not an overflow -- check 6396271cfdfSAndy Lutomirski * that we're in vmalloc space to avoid this. 
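 *
 * Worked example of the range check below (hypothetical addresses):
 * with a 16 KiB stack based at 0xffffc90000004000, a fault at
 * 0xffffc90000003ff8 yields stack - 1 - address == 7 < PAGE_SIZE,
 * i.e. an overflow into the guard page just below the stack; the
 * second test symmetrically catches a fault within one page above
 * stack + THREAD_SIZE.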
6406271cfdfSAndy Lutomirski */ 6416271cfdfSAndy Lutomirski if (is_vmalloc_addr((void *)address) && 6426271cfdfSAndy Lutomirski (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || 6436271cfdfSAndy Lutomirski address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { 644d876b673SThomas Gleixner unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *); 6456271cfdfSAndy Lutomirski /* 6466271cfdfSAndy Lutomirski * We're likely to be running with very little stack space 6476271cfdfSAndy Lutomirski * left. It's plausible that we'd hit this condition but 6486271cfdfSAndy Lutomirski * double-fault even before we get this far, in which case 6496271cfdfSAndy Lutomirski * we're fine: the double-fault handler will deal with it. 6506271cfdfSAndy Lutomirski * 6516271cfdfSAndy Lutomirski * We don't want to make it all the way into the oops code 6526271cfdfSAndy Lutomirski * and then double-fault, though, because we're likely to 6536271cfdfSAndy Lutomirski * break the console driver and lose most of the stack dump. 6546271cfdfSAndy Lutomirski */ 6556271cfdfSAndy Lutomirski asm volatile ("movq %[stack], %%rsp\n\t" 6566271cfdfSAndy Lutomirski "call handle_stack_overflow\n\t" 6576271cfdfSAndy Lutomirski "1: jmp 1b" 658f5caf621SJosh Poimboeuf : ASM_CALL_CONSTRAINT 6596271cfdfSAndy Lutomirski : "D" ("kernel stack overflow (page fault)"), 6606271cfdfSAndy Lutomirski "S" (regs), "d" (address), 6616271cfdfSAndy Lutomirski [stack] "rm" (stack)); 6626271cfdfSAndy Lutomirski unreachable(); 6636271cfdfSAndy Lutomirski } 6646271cfdfSAndy Lutomirski #endif 6656271cfdfSAndy Lutomirski 66692181f19SNick Piggin /* 6672d4a7167SIngo Molnar * 32-bit: 6682d4a7167SIngo Molnar * 66992181f19SNick Piggin * Valid to do another page fault here, because if this fault 67092181f19SNick Piggin * had been triggered by is_prefetch fixup_exception would have 67192181f19SNick Piggin * handled it. 67292181f19SNick Piggin * 6732d4a7167SIngo Molnar * 64-bit: 6742d4a7167SIngo Molnar * 67592181f19SNick Piggin * Hall of shame of CPU/BIOS bugs. 67692181f19SNick Piggin */ 67792181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 67892181f19SNick Piggin return; 67992181f19SNick Piggin 68092181f19SNick Piggin if (is_errata93(regs, address)) 68192181f19SNick Piggin return; 68292181f19SNick Piggin 68392181f19SNick Piggin /* 6843425d934SSai Praneeth * Buggy firmware could access regions which might page fault, try to 6853425d934SSai Praneeth * recover from such faults. 6863425d934SSai Praneeth */ 6873425d934SSai Praneeth if (IS_ENABLED(CONFIG_EFI)) 6883425d934SSai Praneeth efi_recover_from_page_fault(address); 6893425d934SSai Praneeth 690ebb53e25SAndy Lutomirski oops: 6913425d934SSai Praneeth /* 69292181f19SNick Piggin * Oops. The kernel tried to access some bad page. 
We'll have to 6932d4a7167SIngo Molnar * terminate things with extreme prejudice: 69492181f19SNick Piggin */ 69592181f19SNick Piggin flags = oops_begin(); 69692181f19SNick Piggin 69792181f19SNick Piggin show_fault_oops(regs, error_code, address); 69892181f19SNick Piggin 699a70857e4SAaron Tomlin if (task_stack_end_corrupted(tsk)) 700b0f4c4b3SPrarit Bhargava printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 70119803078SIngo Molnar 70292181f19SNick Piggin sig = SIGKILL; 70392181f19SNick Piggin if (__die("Oops", regs, error_code)) 70492181f19SNick Piggin sig = 0; 7052d4a7167SIngo Molnar 70692181f19SNick Piggin /* Executive summary in case the body of the oops scrolled away */ 707b0f4c4b3SPrarit Bhargava printk(KERN_DEFAULT "CR2: %016lx\n", address); 7082d4a7167SIngo Molnar 70992181f19SNick Piggin oops_end(flags, regs, sig); 71092181f19SNick Piggin } 71192181f19SNick Piggin 7122d4a7167SIngo Molnar /* 7132d4a7167SIngo Molnar * Print out info about fatal segfaults, if the show_unhandled_signals 7142d4a7167SIngo Molnar * sysctl is set: 7152d4a7167SIngo Molnar */ 7162d4a7167SIngo Molnar static inline void 7172d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code, 7182d4a7167SIngo Molnar unsigned long address, struct task_struct *tsk) 7192d4a7167SIngo Molnar { 720ba54d856SBorislav Petkov const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG; 721ba54d856SBorislav Petkov 7222d4a7167SIngo Molnar if (!unhandled_signal(tsk, SIGSEGV)) 7232d4a7167SIngo Molnar return; 7242d4a7167SIngo Molnar 7252d4a7167SIngo Molnar if (!printk_ratelimit()) 7262d4a7167SIngo Molnar return; 7272d4a7167SIngo Molnar 72810a7e9d8SKees Cook printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx", 729ba54d856SBorislav Petkov loglvl, tsk->comm, task_pid_nr(tsk), address, 7302d4a7167SIngo Molnar (void *)regs->ip, (void *)regs->sp, error_code); 7312d4a7167SIngo Molnar 7322d4a7167SIngo Molnar print_vma_addr(KERN_CONT " in ", regs->ip); 7332d4a7167SIngo Molnar 7342d4a7167SIngo Molnar printk(KERN_CONT "\n"); 735ba54d856SBorislav Petkov 736342db04aSJann Horn show_opcodes(regs, loglvl); 7372d4a7167SIngo Molnar } 7382d4a7167SIngo Molnar 73902e983b7SDave Hansen /* 74002e983b7SDave Hansen * The (legacy) vsyscall page is the lone page in the kernel portion 74102e983b7SDave Hansen * of the address space that has user-accessible permissions. 74202e983b7SDave Hansen */ 74302e983b7SDave Hansen static bool is_vsyscall_vaddr(unsigned long vaddr) 74402e983b7SDave Hansen { 7453ae0ad92SDave Hansen return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR); 74602e983b7SDave Hansen } 74702e983b7SDave Hansen 7482d4a7167SIngo Molnar static void 7492d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 750419ceeb1SEric W. 
Biederman unsigned long address, u32 pkey, int si_code) 75192181f19SNick Piggin { 75292181f19SNick Piggin struct task_struct *tsk = current; 75392181f19SNick Piggin 75492181f19SNick Piggin /* User mode accesses just cause a SIGSEGV */ 7556ea59b07SAndy Lutomirski if (user_mode(regs) && (error_code & X86_PF_USER)) { 75692181f19SNick Piggin /* 7572d4a7167SIngo Molnar * It's possible to have interrupts off here: 75892181f19SNick Piggin */ 75992181f19SNick Piggin local_irq_enable(); 76092181f19SNick Piggin 76192181f19SNick Piggin /* 76292181f19SNick Piggin * Valid to do another page fault here because this one came 7632d4a7167SIngo Molnar * from user space: 76492181f19SNick Piggin */ 76592181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 76692181f19SNick Piggin return; 76792181f19SNick Piggin 76892181f19SNick Piggin if (is_errata100(regs, address)) 76992181f19SNick Piggin return; 77092181f19SNick Piggin 771dc4fac84SAndy Lutomirski /* 772dc4fac84SAndy Lutomirski * To avoid leaking information about the kernel page table 773dc4fac84SAndy Lutomirski * layout, pretend that user-mode accesses to kernel addresses 774dc4fac84SAndy Lutomirski * are always protection faults. 775dc4fac84SAndy Lutomirski */ 776dc4fac84SAndy Lutomirski if (address >= TASK_SIZE_MAX) 7771067f030SRicardo Neri error_code |= X86_PF_PROT; 7783ae36655SAndy Lutomirski 779e575a86fSKees Cook if (likely(show_unhandled_signals)) 7802d4a7167SIngo Molnar show_signal_msg(regs, error_code, address, tsk); 78192181f19SNick Piggin 782e49d3cbeSAndy Lutomirski set_signal_archinfo(address, error_code); 7832d4a7167SIngo Molnar 7849db812dbSEric W. Biederman if (si_code == SEGV_PKUERR) 785419ceeb1SEric W. Biederman force_sig_pkuerr((void __user *)address, pkey); 7869db812dbSEric W. Biederman 7872e1661d2SEric W. Biederman force_sig_fault(SIGSEGV, si_code, (void __user *)address); 7882d4a7167SIngo Molnar 789*ca4c6a98SThomas Gleixner local_irq_disable(); 790*ca4c6a98SThomas Gleixner 79192181f19SNick Piggin return; 79292181f19SNick Piggin } 79392181f19SNick Piggin 79492181f19SNick Piggin if (is_f00f_bug(regs, address)) 79592181f19SNick Piggin return; 79692181f19SNick Piggin 7974fc34901SAndy Lutomirski no_context(regs, error_code, address, SIGSEGV, si_code); 79892181f19SNick Piggin } 79992181f19SNick Piggin 8002d4a7167SIngo Molnar static noinline void 8012d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 802768fd9c6SEric W. Biederman unsigned long address) 80392181f19SNick Piggin { 804419ceeb1SEric W. Biederman __bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR); 80592181f19SNick Piggin } 80692181f19SNick Piggin 8072d4a7167SIngo Molnar static void 8082d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code, 809419ceeb1SEric W. Biederman unsigned long address, u32 pkey, int si_code) 81092181f19SNick Piggin { 81192181f19SNick Piggin struct mm_struct *mm = current->mm; 81292181f19SNick Piggin /* 81392181f19SNick Piggin * Something tried to access memory that isn't in our memory map.. 81492181f19SNick Piggin * Fix it, but check if it's kernel or user first.. 81592181f19SNick Piggin */ 816d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 81792181f19SNick Piggin 818aba1ecd3SEric W. 
Biederman __bad_area_nosemaphore(regs, error_code, address, pkey, si_code); 81992181f19SNick Piggin } 82092181f19SNick Piggin 8212d4a7167SIngo Molnar static noinline void 8222d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) 82392181f19SNick Piggin { 824419ceeb1SEric W. Biederman __bad_area(regs, error_code, address, 0, SEGV_MAPERR); 82592181f19SNick Piggin } 82692181f19SNick Piggin 82733a709b2SDave Hansen static inline bool bad_area_access_from_pkeys(unsigned long error_code, 82833a709b2SDave Hansen struct vm_area_struct *vma) 82933a709b2SDave Hansen { 83007f146f5SDave Hansen /* This code is always called on the current mm */ 83107f146f5SDave Hansen bool foreign = false; 83207f146f5SDave Hansen 83333a709b2SDave Hansen if (!boot_cpu_has(X86_FEATURE_OSPKE)) 83433a709b2SDave Hansen return false; 8351067f030SRicardo Neri if (error_code & X86_PF_PK) 83633a709b2SDave Hansen return true; 83707f146f5SDave Hansen /* this checks permission keys on the VMA: */ 8381067f030SRicardo Neri if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), 8391067f030SRicardo Neri (error_code & X86_PF_INSTR), foreign)) 84007f146f5SDave Hansen return true; 84133a709b2SDave Hansen return false; 84292181f19SNick Piggin } 84392181f19SNick Piggin 8442d4a7167SIngo Molnar static noinline void 8452d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code, 8467b2d0dbaSDave Hansen unsigned long address, struct vm_area_struct *vma) 84792181f19SNick Piggin { 848019132ffSDave Hansen /* 849019132ffSDave Hansen * This OSPKE check is not strictly necessary at runtime. 850019132ffSDave Hansen * But, doing it this way allows compiler optimizations 851019132ffSDave Hansen * if pkeys are compiled out. 852019132ffSDave Hansen */ 853aba1ecd3SEric W. Biederman if (bad_area_access_from_pkeys(error_code, vma)) { 8549db812dbSEric W. Biederman /* 8559db812dbSEric W. Biederman * A protection key fault means that the PKRU value did not allow 8569db812dbSEric W. Biederman * access to some PTE. Userspace can figure out what PKRU was 8579db812dbSEric W. Biederman * from the XSAVE state. This function captures the pkey from 8589db812dbSEric W. Biederman * the vma and passes it to userspace so userspace can discover 8599db812dbSEric W. Biederman * which protection key was set on the PTE. 8609db812dbSEric W. Biederman * 8619db812dbSEric W. Biederman * If we get here, we know that the hardware signaled a X86_PF_PK 8629db812dbSEric W. Biederman * fault and that there was a VMA once we got in the fault 8639db812dbSEric W. Biederman * handler. It does *not* guarantee that the VMA we find here 8649db812dbSEric W. Biederman * was the one that we faulted on. 8659db812dbSEric W. Biederman * 8669db812dbSEric W. Biederman * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); 8679db812dbSEric W. Biederman * 2. T1 : set PKRU to deny access to pkey=4, touches page 8689db812dbSEric W. Biederman * 3. T1 : faults... 8699db812dbSEric W. Biederman * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); 870c1e8d7c6SMichel Lespinasse * 5. T1 : enters fault handler, takes mmap_lock, etc... 8719db812dbSEric W. Biederman * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really 8729db812dbSEric W. Biederman * faulted on a pte with its pkey=4. 8739db812dbSEric W. Biederman */ 874aba1ecd3SEric W. Biederman u32 pkey = vma_pkey(vma); 8759db812dbSEric W. Biederman 876419ceeb1SEric W. Biederman __bad_area(regs, error_code, address, pkey, SEGV_PKUERR); 877aba1ecd3SEric W. 
Biederman } else { 878419ceeb1SEric W. Biederman __bad_area(regs, error_code, address, 0, SEGV_ACCERR); 879aba1ecd3SEric W. Biederman } 88092181f19SNick Piggin } 88192181f19SNick Piggin 8822d4a7167SIngo Molnar static void 883a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, 8843d353901SSouptick Joarder vm_fault_t fault) 88592181f19SNick Piggin { 8862d4a7167SIngo Molnar /* Kernel mode? Handle exceptions or die: */ 8871067f030SRicardo Neri if (!(error_code & X86_PF_USER)) { 8884fc34901SAndy Lutomirski no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); 88996054569SLinus Torvalds return; 89096054569SLinus Torvalds } 8912d4a7167SIngo Molnar 892cd1b68f0SIngo Molnar /* User-space => ok to do another page fault: */ 89392181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 89492181f19SNick Piggin return; 8952d4a7167SIngo Molnar 896e49d3cbeSAndy Lutomirski set_signal_archinfo(address, error_code); 8972d4a7167SIngo Molnar 898a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE 899f672b49bSAndi Kleen if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { 900318759b4SEric W. Biederman struct task_struct *tsk = current; 90140e55394SEric W. Biederman unsigned lsb = 0; 90240e55394SEric W. Biederman 90340e55394SEric W. Biederman pr_err( 904a6e04aa9SAndi Kleen "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", 905a6e04aa9SAndi Kleen tsk->comm, tsk->pid, address); 90640e55394SEric W. Biederman if (fault & VM_FAULT_HWPOISON_LARGE) 90740e55394SEric W. Biederman lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); 90840e55394SEric W. Biederman if (fault & VM_FAULT_HWPOISON) 90940e55394SEric W. Biederman lsb = PAGE_SHIFT; 910f8eac901SEric W. Biederman force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb); 91140e55394SEric W. Biederman return; 912a6e04aa9SAndi Kleen } 913a6e04aa9SAndi Kleen #endif 9142e1661d2SEric W. Biederman force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); 91592181f19SNick Piggin } 91692181f19SNick Piggin 9173a13c4d7SJohannes Weiner static noinline void 9182d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code, 91925c102d8SEric W. Biederman unsigned long address, vm_fault_t fault) 92092181f19SNick Piggin { 9211067f030SRicardo Neri if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { 9224fc34901SAndy Lutomirski no_context(regs, error_code, address, 0, 0); 9233a13c4d7SJohannes Weiner return; 924b80ef10eSKOSAKI Motohiro } 925b80ef10eSKOSAKI Motohiro 9262d4a7167SIngo Molnar if (fault & VM_FAULT_OOM) { 927f8626854SAndrey Vagin /* Kernel mode? Handle exceptions or die: */ 9281067f030SRicardo Neri if (!(error_code & X86_PF_USER)) { 9294fc34901SAndy Lutomirski no_context(regs, error_code, address, 9304fc34901SAndy Lutomirski SIGSEGV, SEGV_MAPERR); 9313a13c4d7SJohannes Weiner return; 932f8626854SAndrey Vagin } 933f8626854SAndrey Vagin 934c2d23f91SDavid Rientjes /* 935c2d23f91SDavid Rientjes * We ran out of memory, call the OOM killer, and return to 936c2d23f91SDavid Rientjes * userspace (which will retry the fault, or kill us if we got 937c2d23f91SDavid Rientjes * oom-killed): 938c2d23f91SDavid Rientjes */ 939c2d23f91SDavid Rientjes pagefault_out_of_memory(); 9402d4a7167SIngo Molnar } else { 941f672b49bSAndi Kleen if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 942f672b49bSAndi Kleen VM_FAULT_HWPOISON_LARGE)) 94327274f73SEric W. 
Biederman do_sigbus(regs, error_code, address, fault); 94433692f27SLinus Torvalds else if (fault & VM_FAULT_SIGSEGV) 945768fd9c6SEric W. Biederman bad_area_nosemaphore(regs, error_code, address); 94692181f19SNick Piggin else 94792181f19SNick Piggin BUG(); 94892181f19SNick Piggin } 9492d4a7167SIngo Molnar } 95092181f19SNick Piggin 9518fed6200SDave Hansen static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte) 952d8b57bb7SThomas Gleixner { 9531067f030SRicardo Neri if ((error_code & X86_PF_WRITE) && !pte_write(*pte)) 954d8b57bb7SThomas Gleixner return 0; 9552d4a7167SIngo Molnar 9561067f030SRicardo Neri if ((error_code & X86_PF_INSTR) && !pte_exec(*pte)) 957d8b57bb7SThomas Gleixner return 0; 958d8b57bb7SThomas Gleixner 959d8b57bb7SThomas Gleixner return 1; 960d8b57bb7SThomas Gleixner } 961d8b57bb7SThomas Gleixner 962c61e211dSHarvey Harrison /* 9632d4a7167SIngo Molnar * Handle a spurious fault caused by a stale TLB entry. 9642d4a7167SIngo Molnar * 9652d4a7167SIngo Molnar * This allows us to lazily refresh the TLB when increasing the 9662d4a7167SIngo Molnar * permissions of a kernel page (RO -> RW or NX -> X). Doing it 9672d4a7167SIngo Molnar * eagerly is very expensive since that implies doing a full 9682d4a7167SIngo Molnar * cross-processor TLB flush, even if no stale TLB entries exist 9692d4a7167SIngo Molnar * on other processors. 9702d4a7167SIngo Molnar * 97131668511SDavid Vrabel * Spurious faults may only occur if the TLB contains an entry with 97231668511SDavid Vrabel * fewer permissions than the page table entry. Non-present (P = 0) 97331668511SDavid Vrabel * and reserved bit (R = 1) faults are never spurious. 97431668511SDavid Vrabel * 9755b727a3bSJeremy Fitzhardinge * There are no security implications to leaving a stale TLB when 9765b727a3bSJeremy Fitzhardinge * increasing the permissions on a page. 97731668511SDavid Vrabel * 97831668511SDavid Vrabel * Returns non-zero if a spurious fault was handled, zero otherwise. 97931668511SDavid Vrabel * 98031668511SDavid Vrabel * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3 98131668511SDavid Vrabel * (Optional Invalidation). 9825b727a3bSJeremy Fitzhardinge */ 9839326638cSMasami Hiramatsu static noinline int 9848fed6200SDave Hansen spurious_kernel_fault(unsigned long error_code, unsigned long address) 9855b727a3bSJeremy Fitzhardinge { 9865b727a3bSJeremy Fitzhardinge pgd_t *pgd; 987e0c4f675SKirill A. Shutemov p4d_t *p4d; 9885b727a3bSJeremy Fitzhardinge pud_t *pud; 9895b727a3bSJeremy Fitzhardinge pmd_t *pmd; 9905b727a3bSJeremy Fitzhardinge pte_t *pte; 9913c3e5694SSteven Rostedt int ret; 9925b727a3bSJeremy Fitzhardinge 99331668511SDavid Vrabel /* 99431668511SDavid Vrabel * Only writes to RO or instruction fetches from NX may cause 99531668511SDavid Vrabel * spurious faults. 99631668511SDavid Vrabel * 99731668511SDavid Vrabel * These could be from user or supervisor accesses but the TLB 99831668511SDavid Vrabel * is only lazily flushed after a kernel mapping protection 99931668511SDavid Vrabel * change, so user accesses are not expected to cause spurious 100031668511SDavid Vrabel * faults. 
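 *
 * Worked example (illustrative): the kernel flips a read-only page
 * to read-write, e.g. via set_memory_rw(), and per the lazy scheme
 * above does not flush other CPUs. A CPU still holding the stale RO
 * translation then takes a fault with error_code ==
 * (X86_PF_WRITE | X86_PF_PROT); the walk below finds pte_write()
 * already set, returns non-zero, and the access is simply retried
 * against the refreshed entry.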
100131668511SDavid Vrabel */ 10021067f030SRicardo Neri if (error_code != (X86_PF_WRITE | X86_PF_PROT) && 10031067f030SRicardo Neri error_code != (X86_PF_INSTR | X86_PF_PROT)) 10045b727a3bSJeremy Fitzhardinge return 0; 10055b727a3bSJeremy Fitzhardinge 10065b727a3bSJeremy Fitzhardinge pgd = init_mm.pgd + pgd_index(address); 10075b727a3bSJeremy Fitzhardinge if (!pgd_present(*pgd)) 10085b727a3bSJeremy Fitzhardinge return 0; 10095b727a3bSJeremy Fitzhardinge 1010e0c4f675SKirill A. Shutemov p4d = p4d_offset(pgd, address); 1011e0c4f675SKirill A. Shutemov if (!p4d_present(*p4d)) 1012e0c4f675SKirill A. Shutemov return 0; 1013e0c4f675SKirill A. Shutemov 1014e0c4f675SKirill A. Shutemov if (p4d_large(*p4d)) 10158fed6200SDave Hansen return spurious_kernel_fault_check(error_code, (pte_t *) p4d); 1016e0c4f675SKirill A. Shutemov 1017e0c4f675SKirill A. Shutemov pud = pud_offset(p4d, address); 10185b727a3bSJeremy Fitzhardinge if (!pud_present(*pud)) 10195b727a3bSJeremy Fitzhardinge return 0; 10205b727a3bSJeremy Fitzhardinge 1021d8b57bb7SThomas Gleixner if (pud_large(*pud)) 10228fed6200SDave Hansen return spurious_kernel_fault_check(error_code, (pte_t *) pud); 1023d8b57bb7SThomas Gleixner 10245b727a3bSJeremy Fitzhardinge pmd = pmd_offset(pud, address); 10255b727a3bSJeremy Fitzhardinge if (!pmd_present(*pmd)) 10265b727a3bSJeremy Fitzhardinge return 0; 10275b727a3bSJeremy Fitzhardinge 1028d8b57bb7SThomas Gleixner if (pmd_large(*pmd)) 10298fed6200SDave Hansen return spurious_kernel_fault_check(error_code, (pte_t *) pmd); 1030d8b57bb7SThomas Gleixner 10315b727a3bSJeremy Fitzhardinge pte = pte_offset_kernel(pmd, address); 1032954f8571SAndrea Arcangeli if (!pte_present(*pte)) 10335b727a3bSJeremy Fitzhardinge return 0; 10345b727a3bSJeremy Fitzhardinge 10358fed6200SDave Hansen ret = spurious_kernel_fault_check(error_code, pte); 10363c3e5694SSteven Rostedt if (!ret) 10373c3e5694SSteven Rostedt return 0; 10383c3e5694SSteven Rostedt 10393c3e5694SSteven Rostedt /* 10402d4a7167SIngo Molnar * Make sure we have permissions in PMD. 10412d4a7167SIngo Molnar * If not, then there's a bug in the page tables: 10423c3e5694SSteven Rostedt */ 10438fed6200SDave Hansen ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd); 10443c3e5694SSteven Rostedt WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); 10452d4a7167SIngo Molnar 10463c3e5694SSteven Rostedt return ret; 10475b727a3bSJeremy Fitzhardinge } 10488fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault); 10495b727a3bSJeremy Fitzhardinge 1050c61e211dSHarvey Harrison int show_unhandled_signals = 1; 1051c61e211dSHarvey Harrison 10522d4a7167SIngo Molnar static inline int 105368da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma) 105492181f19SNick Piggin { 105507f146f5SDave Hansen /* This is only called for the current mm, so: */ 105607f146f5SDave Hansen bool foreign = false; 1057e8c6226dSDave Hansen 1058e8c6226dSDave Hansen /* 1059e8c6226dSDave Hansen * Read or write was blocked by protection keys. This is 1060e8c6226dSDave Hansen * always an unconditional error and can never result in 1061e8c6226dSDave Hansen * a follow-up action to resolve the fault, like a COW. 1062e8c6226dSDave Hansen */ 10631067f030SRicardo Neri if (error_code & X86_PF_PK) 1064e8c6226dSDave Hansen return 1; 1065e8c6226dSDave Hansen 106633a709b2SDave Hansen /* 106707f146f5SDave Hansen * Make sure to check the VMA so that we do not perform 10681067f030SRicardo Neri * faults just to hit a X86_PF_PK as soon as we fill in a 106907f146f5SDave Hansen * page. 
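 *
 * E.g. (an illustrative sequence, not from the original comment):
 * the first write to a freshly mprotect_key()d page faults as
 * not-present rather than with X86_PF_PK; rejecting it against the
 * VMA's pkey here avoids filling the page only to fault once more
 * with X86_PF_PK set on the retry.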
107007f146f5SDave Hansen */ 10711067f030SRicardo Neri if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), 10721067f030SRicardo Neri (error_code & X86_PF_INSTR), foreign)) 107307f146f5SDave Hansen return 1; 107433a709b2SDave Hansen 10751067f030SRicardo Neri if (error_code & X86_PF_WRITE) { 10762d4a7167SIngo Molnar /* write, present and write, not present: */ 107792181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_WRITE))) 107892181f19SNick Piggin return 1; 10792d4a7167SIngo Molnar return 0; 10802d4a7167SIngo Molnar } 10812d4a7167SIngo Molnar 10822d4a7167SIngo Molnar /* read, present: */ 10831067f030SRicardo Neri if (unlikely(error_code & X86_PF_PROT)) 108492181f19SNick Piggin return 1; 10852d4a7167SIngo Molnar 10862d4a7167SIngo Molnar /* read, not present: */ 10873122e80eSAnshuman Khandual if (unlikely(!vma_is_accessible(vma))) 108892181f19SNick Piggin return 1; 108992181f19SNick Piggin 109092181f19SNick Piggin return 0; 109192181f19SNick Piggin } 109292181f19SNick Piggin 10930973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address) 10940973a06cSHiroshi Shimamoto { 10953ae0ad92SDave Hansen /* 10963ae0ad92SDave Hansen * On 64-bit systems, the vsyscall page is at an address above 10973ae0ad92SDave Hansen * TASK_SIZE_MAX, but is not considered part of the kernel 10983ae0ad92SDave Hansen * address space. 10993ae0ad92SDave Hansen */ 11003ae0ad92SDave Hansen if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address)) 11013ae0ad92SDave Hansen return false; 11023ae0ad92SDave Hansen 1103d9517346SIngo Molnar return address >= TASK_SIZE_MAX; 11040973a06cSHiroshi Shimamoto } 11050973a06cSHiroshi Shimamoto 1106c61e211dSHarvey Harrison /* 11078fed6200SDave Hansen * Called for all faults where 'address' is part of the kernel address 11088fed6200SDave Hansen * space. Might get called for faults that originate from *code* that 11098fed6200SDave Hansen * ran in userspace or the kernel. 1110c61e211dSHarvey Harrison */ 11118fed6200SDave Hansen static void 11128fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code, 11130ac09f9fSJiri Olsa unsigned long address) 1114c61e211dSHarvey Harrison { 11158fed6200SDave Hansen /* 1116367e3f1dSDave Hansen * Protection keys exceptions only happen on user pages. We 1117367e3f1dSDave Hansen * have no user pages in the kernel portion of the address 1118367e3f1dSDave Hansen * space, so do not expect them here. 1119367e3f1dSDave Hansen */ 1120367e3f1dSDave Hansen WARN_ON_ONCE(hw_error_code & X86_PF_PK); 1121367e3f1dSDave Hansen 11228fed6200SDave Hansen /* Was the fault spurious, caused by lazy TLB invalidation? */ 11238fed6200SDave Hansen if (spurious_kernel_fault(hw_error_code, address)) 11248fed6200SDave Hansen return; 11258fed6200SDave Hansen 11268fed6200SDave Hansen /* kprobes don't want to hook the spurious faults: */ 1127b98cca44SAnshuman Khandual if (kprobe_page_fault(regs, X86_TRAP_PF)) 11288fed6200SDave Hansen return; 11298fed6200SDave Hansen 11308fed6200SDave Hansen /* 11318fed6200SDave Hansen * Note, despite being a "bad area", there are quite a few 11328fed6200SDave Hansen * acceptable reasons to get here, such as erratum fixups 11338fed6200SDave Hansen * and handling kernel code that can fault, like get_user(). 11348fed6200SDave Hansen * 11358fed6200SDave Hansen * Don't take the mm semaphore here. 
/*
 * Called for all faults where 'address' is part of the kernel address
 * space.  Might get called for faults that originate from *code* that
 * ran in userspace or the kernel.
 */
static void
do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
		   unsigned long address)
{
	/*
	 * Protection keys exceptions only happen on user pages.  We
	 * have no user pages in the kernel portion of the address
	 * space, so do not expect them here.
	 */
	WARN_ON_ONCE(hw_error_code & X86_PF_PK);

	/* Was the fault spurious, caused by lazy TLB invalidation? */
	if (spurious_kernel_fault(hw_error_code, address))
		return;

	/* kprobes don't want to hook the spurious faults: */
	if (kprobe_page_fault(regs, X86_TRAP_PF))
		return;

	/*
	 * Note, despite being a "bad area", there are quite a few
	 * acceptable reasons to get here, such as erratum fixups
	 * and handling kernel code that can fault, like get_user().
	 *
	 * Don't take the mm semaphore here.  If we fixup a prefetch
	 * fault we could otherwise deadlock:
	 */
	bad_area_nosemaphore(regs, hw_error_code, address);
}
NOKPROBE_SYMBOL(do_kern_addr_fault);
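/*
 * Illustrative sketch of the exception-table fixup that makes faults
 * like the get_user() case above survivable.  This hand-rolled version
 * is an assumption for illustration only -- real code must use the
 * uaccess helpers:
 *
 *	long val, err = 0;
 *
 *	asm volatile("1:	mov (%[addr]), %[val]\n"
 *		     "2:\n"
 *		     ".section .fixup, \"ax\"\n"
 *		     "3:	mov %[efault], %[err]\n"
 *		     "	jmp 2b\n"
 *		     ".previous\n"
 *		     _ASM_EXTABLE(1b, 3b)
 *		     : [val] "=r" (val), [err] "+r" (err)
 *		     : [addr] "r" (addr), [efault] "i" (-EFAULT));
 *
 * A fault at 1: is found via search_exception_tables() and execution
 * resumes at 3:, turning the fault into -EFAULT instead of an oops.
 */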
/* Handle faults in the user portion of the address space */
static inline
void do_user_addr_fault(struct pt_regs *regs,
			unsigned long hw_error_code,
			unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	vm_fault_t fault, major = 0;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;
	mm = tsk->mm;

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
		return;

	/*
	 * Reserved bits are never expected to be set on
	 * entries in the user portion of the page tables.
	 */
	if (unlikely(hw_error_code & X86_PF_RSVD))
		pgtable_bad(regs, hw_error_code, address);

	/*
	 * If SMAP is on, check for invalid kernel (supervisor) access to user
	 * pages in the user address space.  The odd case here is WRUSS,
	 * which, according to the preliminary documentation, does not respect
	 * SMAP and will have the USER bit set.  So, in all cases, SMAP
	 * enforcement appears to be consistent with the USER bit.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
		     !(hw_error_code & X86_PF_USER) &&
		     !(regs->flags & X86_EFLAGS_AC)))
	{
		bad_area_nosemaphore(regs, hw_error_code, address);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled, then we must not take
	 * the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, hw_error_code, address);
		return;
	}

	/*
	 * It's safe to allow IRQs after CR2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode(regs)) {
		local_irq_enable();
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (hw_error_code & X86_PF_WRITE)
		flags |= FAULT_FLAG_WRITE;
	if (hw_error_code & X86_PF_INSTR)
		flags |= FAULT_FLAG_INSTRUCTION;

#ifdef CONFIG_X86_64
	/*
	 * Faults in the vsyscall page might need emulation.  The
	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
	 * considered to be part of the user address space.
	 *
	 * The vsyscall page does not have a "real" VMA, so do this
	 * emulation before we go searching for VMAs.
	 *
	 * PKRU never rejects instruction fetches, so we don't need
	 * to consider the PF_PK bit.
	 */
	if (is_vsyscall_vaddr(address)) {
		if (emulate_vsyscall(hw_error_code, regs, address))
			return;
	}
#endif
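/*
 * Illustrative sketch of what the emulation above services: a legacy
 * binary calling the fixed vsyscall entry directly (address from the
 * historical ABI, an assumption of this example).  The page is not
 * mapped with normal execute permissions, so the call faults and
 * emulate_vsyscall() performs the system call and advances RIP past
 * the call site:
 *
 *	struct timeval tv;
 *	long (*vgtod)(struct timeval *, struct timezone *) =
 *		(void *)0xffffffffff600000UL;	// legacy gettimeofday entry
 *
 *	vgtod(&tv, NULL);	// #PF here, emulated by the kernel
 */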
	/*
	 * Kernel-mode access to the user address space should only occur
	 * on well-defined single instructions listed in the exception
	 * tables.  But, an erroneous kernel fault occurring outside one of
	 * those areas, while also holding mmap_lock, might deadlock
	 * attempting to validate the fault against the address space.
	 *
	 * Only do the expensive exception table search when we might be at
	 * risk of a deadlock.  This happens if:
	 * 1. We failed to acquire mmap_lock, and
	 * 2. The access did not originate in userspace.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
			/*
			 * Fault from code in kernel from
			 * which we do not expect faults.
			 */
			bad_area_nosemaphore(regs, hw_error_code, address);
			return;
		}
retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above mmap_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * mmap_read_lock():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, hw_error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, hw_error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, hw_error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	if (unlikely(access_error(hw_error_code, vma))) {
		bad_area_access_error(regs, hw_error_code, address, vma);
		return;
	}
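	/*
	 * Illustrative user-space example (an assumption, not kernel
	 * code) of the VM_GROWSDOWN path above: each recursion step
	 * touches a page just below vm_start of the stack VMA, so
	 * find_vma() returns the stack VMA with vm_start > address and
	 * expand_stack() grows it, until RLIMIT_STACK is exceeded:
	 *
	 *	void recurse(int depth)
	 *	{
	 *		volatile char pad[4096];	// ~one new stack page
	 *
	 *		pad[0] = (char)depth;
	 *		if (depth > 0)
	 *			recurse(depth - 1);
	 *	}
	 */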
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
	 *
	 * Note that handle_userfault() may also release and reacquire
	 * mmap_lock (and not return with VM_FAULT_RETRY), when returning
	 * to userland to repeat the page fault later with a VM_FAULT_NOPAGE
	 * retval (potentially after handling any pending signal during the
	 * return to userland).  The return to userland is identified
	 * whenever FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in
	 * flags.
	 */
	fault = handle_mm_fault(vma, address, flags);
	major |= fault & VM_FAULT_MAJOR;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, hw_error_code, address, SIGBUS,
				   BUS_ADRERR);
		return;
	}

	/*
	 * If we need to retry, the mmap_lock has already been released,
	 * and if there is a fatal signal pending there is no guarantee
	 * that we made any progress.  Handle this case first.
	 */
	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(mm);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, hw_error_code, address, fault);
		return;
	}

	/*
	 * Major/minor page fault accounting.  If any of the events
	 * returned VM_FAULT_MAJOR, we account it as a major fault.
	 */
	if (major) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}

	check_v8086_mode(regs, address, tsk);
}
NOKPROBE_SYMBOL(do_user_addr_fault);

static __always_inline void
trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
			 unsigned long address)
{
	if (!trace_pagefault_enabled())
		return;

	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}
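/*
 * The maj_flt/min_flt counters bumped above are what user space sees
 * via getrusage().  Minimal sketch (standard POSIX interfaces assumed):
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct rusage ru;
 *
 *		getrusage(RUSAGE_SELF, &ru);
 *		printf("major=%ld minor=%ld\n", ru.ru_majflt, ru.ru_minflt);
 *		return 0;
 *	}
 */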
dotraplinkage void
do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
	      unsigned long address)
{
	prefetchw(&current->mm->mmap_lock);
	/*
	 * KVM has two types of events that are, logically, interrupts, but
	 * are unfortunately delivered using the #PF vector.  These events are
	 * "you just accessed valid memory, but the host doesn't have it right
	 * now, so I'll put you to sleep if you continue" and "that memory
	 * you tried to access earlier is available now."
	 *
	 * We are relying on the interrupted context being sane (valid RSP,
	 * relevant locks not held, etc.), which is fine as long as the
	 * interrupted context had IF=1.  We are also relying on the KVM
	 * async pf type field and CR2 being read consistently instead of
	 * getting values from real and async page faults mixed up.
	 *
	 * Fingers crossed.
	 */
	if (kvm_handle_async_pf(regs, (u32)address))
		return;

	trace_page_fault_entries(regs, hw_error_code, address);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/* Was the fault on kernel-controlled part of the address space? */
	if (unlikely(fault_in_kernel_space(address))) {
		do_kern_addr_fault(regs, hw_error_code, address);
	} else {
		do_user_addr_fault(regs, hw_error_code, address);
		/*
		 * User address page fault handling might have reenabled
		 * interrupts.  Fixing up all potential exit points of
		 * do_user_addr_fault() and its leaf functions is just not
		 * doable w/o creating an unholy mess or turning the code
		 * upside down.
		 */
		local_irq_disable();
	}
}
NOKPROBE_SYMBOL(do_page_fault);
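/*
 * Summary of the dispatch above, for orientation (illustrative):
 *
 *	#PF
 *	 |- kvm_handle_async_pf()        async-PF event? handled, done
 *	 |- trace_page_fault_entries()   tracepoint
 *	 |- kmmio_fault()                mmiotrace hook? handled, done
 *	 `- fault_in_kernel_space(address)?
 *	     |- yes: do_kern_addr_fault()
 *	     `- no:  do_user_addr_fault(), then local_irq_disable()
 */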