1c61e211dSHarvey Harrison /* 2c61e211dSHarvey Harrison * Copyright (C) 1995 Linus Torvalds 3c61e211dSHarvey Harrison * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. 4f8eeb2e6SIngo Molnar * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar 5c61e211dSHarvey Harrison */ 6a2bcd473SIngo Molnar #include <linux/sched.h> /* test_thread_flag(), ... */ 7a2bcd473SIngo Molnar #include <linux/kdebug.h> /* oops_begin/end, ... */ 8a2bcd473SIngo Molnar #include <linux/module.h> /* search_exception_table */ 9a2bcd473SIngo Molnar #include <linux/bootmem.h> /* max_low_pfn */ 109326638cSMasami Hiramatsu #include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */ 11a2bcd473SIngo Molnar #include <linux/mmiotrace.h> /* kmmio_handler, ... */ 12cdd6c482SIngo Molnar #include <linux/perf_event.h> /* perf_sw_event */ 13f672b49bSAndi Kleen #include <linux/hugetlb.h> /* hstate_index_to_shift */ 14268bb0ceSLinus Torvalds #include <linux/prefetch.h> /* prefetchw */ 1556dd9470SFrederic Weisbecker #include <linux/context_tracking.h> /* exception_enter(), ... */ 1670ffdb93SDavid Hildenbrand #include <linux/uaccess.h> /* faulthandler_disabled() */ 17c61e211dSHarvey Harrison 18a2bcd473SIngo Molnar #include <asm/traps.h> /* dotraplinkage, ... */ 19a2bcd473SIngo Molnar #include <asm/pgalloc.h> /* pgd_*(), ... */ 20f8561296SVegard Nossum #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ 21f40c3300SAndy Lutomirski #include <asm/fixmap.h> /* VSYSCALL_ADDR */ 22f40c3300SAndy Lutomirski #include <asm/vsyscall.h> /* emulate_vsyscall */ 23ba3e127eSBrian Gerst #include <asm/vm86.h> /* struct vm86 */ 24c61e211dSHarvey Harrison 25d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS 26d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h> 27d34603b0SSeiji Aguchi 28c61e211dSHarvey Harrison /* 292d4a7167SIngo Molnar * Page fault error code bits: 302d4a7167SIngo Molnar * 312d4a7167SIngo Molnar * bit 0 == 0: no page found 1: protection fault 322d4a7167SIngo Molnar * bit 1 == 0: read access 1: write access 332d4a7167SIngo Molnar * bit 2 == 0: kernel-mode access 1: user-mode access 342d4a7167SIngo Molnar * bit 3 == 1: use of reserved bit detected 352d4a7167SIngo Molnar * bit 4 == 1: fault was an instruction fetch 36c61e211dSHarvey Harrison */ 372d4a7167SIngo Molnar enum x86_pf_error_code { 382d4a7167SIngo Molnar 392d4a7167SIngo Molnar PF_PROT = 1 << 0, 402d4a7167SIngo Molnar PF_WRITE = 1 << 1, 412d4a7167SIngo Molnar PF_USER = 1 << 2, 422d4a7167SIngo Molnar PF_RSVD = 1 << 3, 432d4a7167SIngo Molnar PF_INSTR = 1 << 4, 442d4a7167SIngo Molnar }; 45c61e211dSHarvey Harrison 46b814d41fSIngo Molnar /* 47b319eed0SIngo Molnar * Returns 0 if mmiotrace is disabled, or if the fault is not 48b319eed0SIngo Molnar * handled by mmiotrace: 49b814d41fSIngo Molnar */ 509326638cSMasami Hiramatsu static nokprobe_inline int 5162c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr) 5286069782SPekka Paalanen { 530fd0e3daSPekka Paalanen if (unlikely(is_kmmio_active())) 540fd0e3daSPekka Paalanen if (kmmio_handler(regs, addr) == 1) 550fd0e3daSPekka Paalanen return -1; 560fd0e3daSPekka Paalanen return 0; 5786069782SPekka Paalanen } 5886069782SPekka Paalanen 599326638cSMasami Hiramatsu static nokprobe_inline int kprobes_fault(struct pt_regs *regs) 60c61e211dSHarvey Harrison { 61c61e211dSHarvey Harrison int ret = 0; 62c61e211dSHarvey Harrison 63c61e211dSHarvey Harrison /* kprobe_running() needs smp_processor_id() */ 64f39b6f0eSAndy Lutomirski if (kprobes_built_in() && !user_mode(regs)) { 65c61e211dSHarvey Harrison preempt_disable(); 66c61e211dSHarvey Harrison if (kprobe_running() && kprobe_fault_handler(regs, 14)) 67c61e211dSHarvey Harrison ret = 1; 68c61e211dSHarvey Harrison preempt_enable(); 69c61e211dSHarvey Harrison } 70c61e211dSHarvey Harrison 71c61e211dSHarvey Harrison return ret; 72c61e211dSHarvey Harrison } 73c61e211dSHarvey Harrison 74c61e211dSHarvey Harrison /* 752d4a7167SIngo Molnar * Prefetch quirks: 762d4a7167SIngo Molnar * 772d4a7167SIngo Molnar * 32-bit mode: 782d4a7167SIngo Molnar * 79c61e211dSHarvey Harrison * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. 80c61e211dSHarvey Harrison * Check that here and ignore it. 81c61e211dSHarvey Harrison * 822d4a7167SIngo Molnar * 64-bit mode: 832d4a7167SIngo Molnar * 84c61e211dSHarvey Harrison * Sometimes the CPU reports invalid exceptions on prefetch. 85c61e211dSHarvey Harrison * Check that here and ignore it. 86c61e211dSHarvey Harrison * 872d4a7167SIngo Molnar * Opcode checker based on code by Richard Brunner. 88c61e211dSHarvey Harrison */ 89107a0367SIngo Molnar static inline int 90107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, 91107a0367SIngo Molnar unsigned char opcode, int *prefetch) 92c61e211dSHarvey Harrison { 93107a0367SIngo Molnar unsigned char instr_hi = opcode & 0xf0; 94107a0367SIngo Molnar unsigned char instr_lo = opcode & 0x0f; 95c61e211dSHarvey Harrison 96c61e211dSHarvey Harrison switch (instr_hi) { 97c61e211dSHarvey Harrison case 0x20: 98c61e211dSHarvey Harrison case 0x30: 99c61e211dSHarvey Harrison /* 100c61e211dSHarvey Harrison * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. 101c61e211dSHarvey Harrison * In X86_64 long mode, the CPU will signal invalid 102c61e211dSHarvey Harrison * opcode if some of these prefixes are present so 103c61e211dSHarvey Harrison * X86_64 will never get here anyway 104c61e211dSHarvey Harrison */ 105107a0367SIngo Molnar return ((instr_lo & 7) == 0x6); 106c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 107c61e211dSHarvey Harrison case 0x40: 108c61e211dSHarvey Harrison /* 109c61e211dSHarvey Harrison * In AMD64 long mode 0x40..0x4F are valid REX prefixes 110c61e211dSHarvey Harrison * Need to figure out under what instruction mode the 111c61e211dSHarvey Harrison * instruction was issued. Could check the LDT for lm, 112c61e211dSHarvey Harrison * but for now it's good enough to assume that long 113c61e211dSHarvey Harrison * mode only uses well known segments or kernel. 114c61e211dSHarvey Harrison */ 115318f5a2aSAndy Lutomirski return (!user_mode(regs) || user_64bit_mode(regs)); 116c61e211dSHarvey Harrison #endif 117c61e211dSHarvey Harrison case 0x60: 118c61e211dSHarvey Harrison /* 0x64 thru 0x67 are valid prefixes in all modes. */ 119107a0367SIngo Molnar return (instr_lo & 0xC) == 0x4; 120c61e211dSHarvey Harrison case 0xF0: 121c61e211dSHarvey Harrison /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ 122107a0367SIngo Molnar return !instr_lo || (instr_lo>>1) == 1; 123c61e211dSHarvey Harrison case 0x00: 124c61e211dSHarvey Harrison /* Prefetch instruction is 0x0F0D or 0x0F18 */ 125107a0367SIngo Molnar if (probe_kernel_address(instr, opcode)) 126107a0367SIngo Molnar return 0; 127107a0367SIngo Molnar 128107a0367SIngo Molnar *prefetch = (instr_lo == 0xF) && 129107a0367SIngo Molnar (opcode == 0x0D || opcode == 0x18); 130107a0367SIngo Molnar return 0; 131107a0367SIngo Molnar default: 132107a0367SIngo Molnar return 0; 133107a0367SIngo Molnar } 134107a0367SIngo Molnar } 135107a0367SIngo Molnar 136107a0367SIngo Molnar static int 137107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) 138107a0367SIngo Molnar { 139107a0367SIngo Molnar unsigned char *max_instr; 140107a0367SIngo Molnar unsigned char *instr; 141107a0367SIngo Molnar int prefetch = 0; 142107a0367SIngo Molnar 143107a0367SIngo Molnar /* 144107a0367SIngo Molnar * If it was a exec (instruction fetch) fault on NX page, then 145107a0367SIngo Molnar * do not ignore the fault: 146107a0367SIngo Molnar */ 147107a0367SIngo Molnar if (error_code & PF_INSTR) 148107a0367SIngo Molnar return 0; 149107a0367SIngo Molnar 150107a0367SIngo Molnar instr = (void *)convert_ip_to_linear(current, regs); 151107a0367SIngo Molnar max_instr = instr + 15; 152107a0367SIngo Molnar 153d31bf07fSAndy Lutomirski if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX) 154107a0367SIngo Molnar return 0; 155107a0367SIngo Molnar 156107a0367SIngo Molnar while (instr < max_instr) { 157107a0367SIngo Molnar unsigned char opcode; 158c61e211dSHarvey Harrison 159c61e211dSHarvey Harrison if (probe_kernel_address(instr, opcode)) 160c61e211dSHarvey Harrison break; 161107a0367SIngo Molnar 162107a0367SIngo Molnar instr++; 163107a0367SIngo Molnar 164107a0367SIngo Molnar if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) 165c61e211dSHarvey Harrison break; 166c61e211dSHarvey Harrison } 167c61e211dSHarvey Harrison return prefetch; 168c61e211dSHarvey Harrison } 169c61e211dSHarvey Harrison 1702d4a7167SIngo Molnar static void 1712d4a7167SIngo Molnar force_sig_info_fault(int si_signo, int si_code, unsigned long address, 172f672b49bSAndi Kleen struct task_struct *tsk, int fault) 173c61e211dSHarvey Harrison { 174f672b49bSAndi Kleen unsigned lsb = 0; 175c61e211dSHarvey Harrison siginfo_t info; 176c61e211dSHarvey Harrison 177c61e211dSHarvey Harrison info.si_signo = si_signo; 178c61e211dSHarvey Harrison info.si_errno = 0; 179c61e211dSHarvey Harrison info.si_code = si_code; 180c61e211dSHarvey Harrison info.si_addr = (void __user *)address; 181f672b49bSAndi Kleen if (fault & VM_FAULT_HWPOISON_LARGE) 182f672b49bSAndi Kleen lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); 183f672b49bSAndi Kleen if (fault & VM_FAULT_HWPOISON) 184f672b49bSAndi Kleen lsb = PAGE_SHIFT; 185f672b49bSAndi Kleen info.si_addr_lsb = lsb; 1862d4a7167SIngo Molnar 187c61e211dSHarvey Harrison force_sig_info(si_signo, &info, tsk); 188c61e211dSHarvey Harrison } 189c61e211dSHarvey Harrison 190f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock); 191f2f13a85SIngo Molnar LIST_HEAD(pgd_list); 1922d4a7167SIngo Molnar 193f2f13a85SIngo Molnar #ifdef CONFIG_X86_32 194f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) 195f2f13a85SIngo Molnar { 196f2f13a85SIngo Molnar unsigned index = pgd_index(address); 197f2f13a85SIngo Molnar pgd_t *pgd_k; 198f2f13a85SIngo Molnar pud_t *pud, *pud_k; 199f2f13a85SIngo Molnar pmd_t *pmd, *pmd_k; 200f2f13a85SIngo Molnar 201f2f13a85SIngo Molnar pgd += index; 202f2f13a85SIngo Molnar pgd_k = init_mm.pgd + index; 203f2f13a85SIngo Molnar 204f2f13a85SIngo Molnar if (!pgd_present(*pgd_k)) 205f2f13a85SIngo Molnar return NULL; 206f2f13a85SIngo Molnar 207f2f13a85SIngo Molnar /* 208f2f13a85SIngo Molnar * set_pgd(pgd, *pgd_k); here would be useless on PAE 209f2f13a85SIngo Molnar * and redundant with the set_pmd() on non-PAE. As would 210f2f13a85SIngo Molnar * set_pud. 211f2f13a85SIngo Molnar */ 212f2f13a85SIngo Molnar pud = pud_offset(pgd, address); 213f2f13a85SIngo Molnar pud_k = pud_offset(pgd_k, address); 214f2f13a85SIngo Molnar if (!pud_present(*pud_k)) 215f2f13a85SIngo Molnar return NULL; 216f2f13a85SIngo Molnar 217f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 218f2f13a85SIngo Molnar pmd_k = pmd_offset(pud_k, address); 219f2f13a85SIngo Molnar if (!pmd_present(*pmd_k)) 220f2f13a85SIngo Molnar return NULL; 221f2f13a85SIngo Molnar 222b8bcfe99SJeremy Fitzhardinge if (!pmd_present(*pmd)) 223f2f13a85SIngo Molnar set_pmd(pmd, *pmd_k); 224b8bcfe99SJeremy Fitzhardinge else 225f2f13a85SIngo Molnar BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); 226f2f13a85SIngo Molnar 227f2f13a85SIngo Molnar return pmd_k; 228f2f13a85SIngo Molnar } 229f2f13a85SIngo Molnar 230f2f13a85SIngo Molnar void vmalloc_sync_all(void) 231f2f13a85SIngo Molnar { 232f2f13a85SIngo Molnar unsigned long address; 233f2f13a85SIngo Molnar 234f2f13a85SIngo Molnar if (SHARED_KERNEL_PMD) 235f2f13a85SIngo Molnar return; 236f2f13a85SIngo Molnar 237f2f13a85SIngo Molnar for (address = VMALLOC_START & PMD_MASK; 238f2f13a85SIngo Molnar address >= TASK_SIZE && address < FIXADDR_TOP; 239f2f13a85SIngo Molnar address += PMD_SIZE) { 240f2f13a85SIngo Molnar struct page *page; 241f2f13a85SIngo Molnar 242a79e53d8SAndrea Arcangeli spin_lock(&pgd_lock); 243f2f13a85SIngo Molnar list_for_each_entry(page, &pgd_list, lru) { 244617d34d9SJeremy Fitzhardinge spinlock_t *pgt_lock; 245f01f7c56SBorislav Petkov pmd_t *ret; 246617d34d9SJeremy Fitzhardinge 247a79e53d8SAndrea Arcangeli /* the pgt_lock only for Xen */ 248617d34d9SJeremy Fitzhardinge pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 249617d34d9SJeremy Fitzhardinge 250617d34d9SJeremy Fitzhardinge spin_lock(pgt_lock); 251617d34d9SJeremy Fitzhardinge ret = vmalloc_sync_one(page_address(page), address); 252617d34d9SJeremy Fitzhardinge spin_unlock(pgt_lock); 253617d34d9SJeremy Fitzhardinge 254617d34d9SJeremy Fitzhardinge if (!ret) 255f2f13a85SIngo Molnar break; 256f2f13a85SIngo Molnar } 257a79e53d8SAndrea Arcangeli spin_unlock(&pgd_lock); 258f2f13a85SIngo Molnar } 259f2f13a85SIngo Molnar } 260f2f13a85SIngo Molnar 261f2f13a85SIngo Molnar /* 262f2f13a85SIngo Molnar * 32-bit: 263f2f13a85SIngo Molnar * 264f2f13a85SIngo Molnar * Handle a fault on the vmalloc or module mapping area 265f2f13a85SIngo Molnar */ 2669326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address) 267f2f13a85SIngo Molnar { 268f2f13a85SIngo Molnar unsigned long pgd_paddr; 269f2f13a85SIngo Molnar pmd_t *pmd_k; 270f2f13a85SIngo Molnar pte_t *pte_k; 271f2f13a85SIngo Molnar 272f2f13a85SIngo Molnar /* Make sure we are in vmalloc area: */ 273f2f13a85SIngo Molnar if (!(address >= VMALLOC_START && address < VMALLOC_END)) 274f2f13a85SIngo Molnar return -1; 275f2f13a85SIngo Molnar 276ebc8827fSFrederic Weisbecker WARN_ON_ONCE(in_nmi()); 277ebc8827fSFrederic Weisbecker 278f2f13a85SIngo Molnar /* 279f2f13a85SIngo Molnar * Synchronize this task's top level page-table 280f2f13a85SIngo Molnar * with the 'reference' page table. 281f2f13a85SIngo Molnar * 282f2f13a85SIngo Molnar * Do _not_ use "current" here. We might be inside 283f2f13a85SIngo Molnar * an interrupt in the middle of a task switch.. 284f2f13a85SIngo Molnar */ 285f2f13a85SIngo Molnar pgd_paddr = read_cr3(); 286f2f13a85SIngo Molnar pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); 287f2f13a85SIngo Molnar if (!pmd_k) 288f2f13a85SIngo Molnar return -1; 289f2f13a85SIngo Molnar 290*f4eafd8bSToshi Kani if (pmd_huge(*pmd_k)) 291*f4eafd8bSToshi Kani return 0; 292*f4eafd8bSToshi Kani 293f2f13a85SIngo Molnar pte_k = pte_offset_kernel(pmd_k, address); 294f2f13a85SIngo Molnar if (!pte_present(*pte_k)) 295f2f13a85SIngo Molnar return -1; 296f2f13a85SIngo Molnar 297f2f13a85SIngo Molnar return 0; 298f2f13a85SIngo Molnar } 2999326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault); 300f2f13a85SIngo Molnar 301f2f13a85SIngo Molnar /* 302f2f13a85SIngo Molnar * Did it hit the DOS screen memory VA from vm86 mode? 303f2f13a85SIngo Molnar */ 304f2f13a85SIngo Molnar static inline void 305f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 306f2f13a85SIngo Molnar struct task_struct *tsk) 307f2f13a85SIngo Molnar { 3089fda6a06SBrian Gerst #ifdef CONFIG_VM86 309f2f13a85SIngo Molnar unsigned long bit; 310f2f13a85SIngo Molnar 3119fda6a06SBrian Gerst if (!v8086_mode(regs) || !tsk->thread.vm86) 312f2f13a85SIngo Molnar return; 313f2f13a85SIngo Molnar 314f2f13a85SIngo Molnar bit = (address - 0xA0000) >> PAGE_SHIFT; 315f2f13a85SIngo Molnar if (bit < 32) 3169fda6a06SBrian Gerst tsk->thread.vm86->screen_bitmap |= 1 << bit; 3179fda6a06SBrian Gerst #endif 318f2f13a85SIngo Molnar } 319c61e211dSHarvey Harrison 320087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn) 321087975b0SAkinobu Mita { 322087975b0SAkinobu Mita return pfn < max_low_pfn; 323087975b0SAkinobu Mita } 324087975b0SAkinobu Mita 325cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address) 326c61e211dSHarvey Harrison { 327087975b0SAkinobu Mita pgd_t *base = __va(read_cr3()); 328087975b0SAkinobu Mita pgd_t *pgd = &base[pgd_index(address)]; 329087975b0SAkinobu Mita pmd_t *pmd; 330087975b0SAkinobu Mita pte_t *pte; 3312d4a7167SIngo Molnar 332c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE 333087975b0SAkinobu Mita printk("*pdpt = %016Lx ", pgd_val(*pgd)); 334087975b0SAkinobu Mita if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) 335087975b0SAkinobu Mita goto out; 336c61e211dSHarvey Harrison #endif 337087975b0SAkinobu Mita pmd = pmd_offset(pud_offset(pgd, address), address); 338087975b0SAkinobu Mita printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); 339c61e211dSHarvey Harrison 340c61e211dSHarvey Harrison /* 341c61e211dSHarvey Harrison * We must not directly access the pte in the highpte 342c61e211dSHarvey Harrison * case if the page table is located in highmem. 343c61e211dSHarvey Harrison * And let's rather not kmap-atomic the pte, just in case 3442d4a7167SIngo Molnar * it's allocated already: 345c61e211dSHarvey Harrison */ 346087975b0SAkinobu Mita if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) 347087975b0SAkinobu Mita goto out; 3482d4a7167SIngo Molnar 349087975b0SAkinobu Mita pte = pte_offset_kernel(pmd, address); 350087975b0SAkinobu Mita printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); 351087975b0SAkinobu Mita out: 352c61e211dSHarvey Harrison printk("\n"); 353f2f13a85SIngo Molnar } 354f2f13a85SIngo Molnar 355f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */ 356f2f13a85SIngo Molnar 357f2f13a85SIngo Molnar void vmalloc_sync_all(void) 358f2f13a85SIngo Molnar { 3599661d5bcSYasuaki Ishimatsu sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0); 360f2f13a85SIngo Molnar } 361f2f13a85SIngo Molnar 362f2f13a85SIngo Molnar /* 363f2f13a85SIngo Molnar * 64-bit: 364f2f13a85SIngo Molnar * 365f2f13a85SIngo Molnar * Handle a fault on the vmalloc area 366f2f13a85SIngo Molnar */ 3679326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address) 368f2f13a85SIngo Molnar { 369f2f13a85SIngo Molnar pgd_t *pgd, *pgd_ref; 370f2f13a85SIngo Molnar pud_t *pud, *pud_ref; 371f2f13a85SIngo Molnar pmd_t *pmd, *pmd_ref; 372f2f13a85SIngo Molnar pte_t *pte, *pte_ref; 373f2f13a85SIngo Molnar 374f2f13a85SIngo Molnar /* Make sure we are in vmalloc area: */ 375f2f13a85SIngo Molnar if (!(address >= VMALLOC_START && address < VMALLOC_END)) 376f2f13a85SIngo Molnar return -1; 377f2f13a85SIngo Molnar 378ebc8827fSFrederic Weisbecker WARN_ON_ONCE(in_nmi()); 379ebc8827fSFrederic Weisbecker 380f2f13a85SIngo Molnar /* 381f2f13a85SIngo Molnar * Copy kernel mappings over when needed. This can also 382f2f13a85SIngo Molnar * happen within a race in page table update. In the later 383f2f13a85SIngo Molnar * case just flush: 384f2f13a85SIngo Molnar */ 385f2f13a85SIngo Molnar pgd = pgd_offset(current->active_mm, address); 386f2f13a85SIngo Molnar pgd_ref = pgd_offset_k(address); 387f2f13a85SIngo Molnar if (pgd_none(*pgd_ref)) 388f2f13a85SIngo Molnar return -1; 389f2f13a85SIngo Molnar 3901160c277SSamu Kallio if (pgd_none(*pgd)) { 391f2f13a85SIngo Molnar set_pgd(pgd, *pgd_ref); 3921160c277SSamu Kallio arch_flush_lazy_mmu_mode(); 3931160c277SSamu Kallio } else { 394f2f13a85SIngo Molnar BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); 3951160c277SSamu Kallio } 396f2f13a85SIngo Molnar 397f2f13a85SIngo Molnar /* 398f2f13a85SIngo Molnar * Below here mismatches are bugs because these lower tables 399f2f13a85SIngo Molnar * are shared: 400f2f13a85SIngo Molnar */ 401f2f13a85SIngo Molnar 402f2f13a85SIngo Molnar pud = pud_offset(pgd, address); 403f2f13a85SIngo Molnar pud_ref = pud_offset(pgd_ref, address); 404f2f13a85SIngo Molnar if (pud_none(*pud_ref)) 405f2f13a85SIngo Molnar return -1; 406f2f13a85SIngo Molnar 407*f4eafd8bSToshi Kani if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) 408f2f13a85SIngo Molnar BUG(); 409f2f13a85SIngo Molnar 410*f4eafd8bSToshi Kani if (pud_huge(*pud)) 411*f4eafd8bSToshi Kani return 0; 412*f4eafd8bSToshi Kani 413f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 414f2f13a85SIngo Molnar pmd_ref = pmd_offset(pud_ref, address); 415f2f13a85SIngo Molnar if (pmd_none(*pmd_ref)) 416f2f13a85SIngo Molnar return -1; 417f2f13a85SIngo Molnar 418*f4eafd8bSToshi Kani if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) 419f2f13a85SIngo Molnar BUG(); 420f2f13a85SIngo Molnar 421*f4eafd8bSToshi Kani if (pmd_huge(*pmd)) 422*f4eafd8bSToshi Kani return 0; 423*f4eafd8bSToshi Kani 424f2f13a85SIngo Molnar pte_ref = pte_offset_kernel(pmd_ref, address); 425f2f13a85SIngo Molnar if (!pte_present(*pte_ref)) 426f2f13a85SIngo Molnar return -1; 427f2f13a85SIngo Molnar 428f2f13a85SIngo Molnar pte = pte_offset_kernel(pmd, address); 429f2f13a85SIngo Molnar 430f2f13a85SIngo Molnar /* 431f2f13a85SIngo Molnar * Don't use pte_page here, because the mappings can point 432f2f13a85SIngo Molnar * outside mem_map, and the NUMA hash lookup cannot handle 433f2f13a85SIngo Molnar * that: 434f2f13a85SIngo Molnar */ 435f2f13a85SIngo Molnar if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) 436f2f13a85SIngo Molnar BUG(); 437f2f13a85SIngo Molnar 438f2f13a85SIngo Molnar return 0; 439f2f13a85SIngo Molnar } 4409326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault); 441f2f13a85SIngo Molnar 442e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD 443f2f13a85SIngo Molnar static const char errata93_warning[] = 444ad361c98SJoe Perches KERN_ERR 445ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n" 446ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n" 447ad361c98SJoe Perches "******* Please consider a BIOS update.\n" 448ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n"; 449e05139f2SJan Beulich #endif 450f2f13a85SIngo Molnar 451f2f13a85SIngo Molnar /* 452f2f13a85SIngo Molnar * No vm86 mode in 64-bit mode: 453f2f13a85SIngo Molnar */ 454f2f13a85SIngo Molnar static inline void 455f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 456f2f13a85SIngo Molnar struct task_struct *tsk) 457f2f13a85SIngo Molnar { 458f2f13a85SIngo Molnar } 459f2f13a85SIngo Molnar 460f2f13a85SIngo Molnar static int bad_address(void *p) 461f2f13a85SIngo Molnar { 462f2f13a85SIngo Molnar unsigned long dummy; 463f2f13a85SIngo Molnar 464f2f13a85SIngo Molnar return probe_kernel_address((unsigned long *)p, dummy); 465f2f13a85SIngo Molnar } 466f2f13a85SIngo Molnar 467f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address) 468f2f13a85SIngo Molnar { 469087975b0SAkinobu Mita pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); 470087975b0SAkinobu Mita pgd_t *pgd = base + pgd_index(address); 471c61e211dSHarvey Harrison pud_t *pud; 472c61e211dSHarvey Harrison pmd_t *pmd; 473c61e211dSHarvey Harrison pte_t *pte; 474c61e211dSHarvey Harrison 4752d4a7167SIngo Molnar if (bad_address(pgd)) 4762d4a7167SIngo Molnar goto bad; 4772d4a7167SIngo Molnar 478c61e211dSHarvey Harrison printk("PGD %lx ", pgd_val(*pgd)); 4792d4a7167SIngo Molnar 4802d4a7167SIngo Molnar if (!pgd_present(*pgd)) 4812d4a7167SIngo Molnar goto out; 482c61e211dSHarvey Harrison 483c61e211dSHarvey Harrison pud = pud_offset(pgd, address); 4842d4a7167SIngo Molnar if (bad_address(pud)) 4852d4a7167SIngo Molnar goto bad; 4862d4a7167SIngo Molnar 487c61e211dSHarvey Harrison printk("PUD %lx ", pud_val(*pud)); 488b5360222SAndi Kleen if (!pud_present(*pud) || pud_large(*pud)) 4892d4a7167SIngo Molnar goto out; 490c61e211dSHarvey Harrison 491c61e211dSHarvey Harrison pmd = pmd_offset(pud, address); 4922d4a7167SIngo Molnar if (bad_address(pmd)) 4932d4a7167SIngo Molnar goto bad; 4942d4a7167SIngo Molnar 495c61e211dSHarvey Harrison printk("PMD %lx ", pmd_val(*pmd)); 4962d4a7167SIngo Molnar if (!pmd_present(*pmd) || pmd_large(*pmd)) 4972d4a7167SIngo Molnar goto out; 498c61e211dSHarvey Harrison 499c61e211dSHarvey Harrison pte = pte_offset_kernel(pmd, address); 5002d4a7167SIngo Molnar if (bad_address(pte)) 5012d4a7167SIngo Molnar goto bad; 5022d4a7167SIngo Molnar 503c61e211dSHarvey Harrison printk("PTE %lx", pte_val(*pte)); 5042d4a7167SIngo Molnar out: 505c61e211dSHarvey Harrison printk("\n"); 506c61e211dSHarvey Harrison return; 507c61e211dSHarvey Harrison bad: 508c61e211dSHarvey Harrison printk("BAD\n"); 509c61e211dSHarvey Harrison } 510c61e211dSHarvey Harrison 511f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */ 512c61e211dSHarvey Harrison 5132d4a7167SIngo Molnar /* 5142d4a7167SIngo Molnar * Workaround for K8 erratum #93 & buggy BIOS. 5152d4a7167SIngo Molnar * 5162d4a7167SIngo Molnar * BIOS SMM functions are required to use a specific workaround 5172d4a7167SIngo Molnar * to avoid corruption of the 64bit RIP register on C stepping K8. 5182d4a7167SIngo Molnar * 5192d4a7167SIngo Molnar * A lot of BIOS that didn't get tested properly miss this. 5202d4a7167SIngo Molnar * 5212d4a7167SIngo Molnar * The OS sees this as a page fault with the upper 32bits of RIP cleared. 5222d4a7167SIngo Molnar * Try to work around it here. 5232d4a7167SIngo Molnar * 5242d4a7167SIngo Molnar * Note we only handle faults in kernel here. 5252d4a7167SIngo Molnar * Does nothing on 32-bit. 526c61e211dSHarvey Harrison */ 527c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address) 528c61e211dSHarvey Harrison { 529e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD) 530e05139f2SJan Beulich if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD 531e05139f2SJan Beulich || boot_cpu_data.x86 != 0xf) 532e05139f2SJan Beulich return 0; 533e05139f2SJan Beulich 534c61e211dSHarvey Harrison if (address != regs->ip) 535c61e211dSHarvey Harrison return 0; 5362d4a7167SIngo Molnar 537c61e211dSHarvey Harrison if ((address >> 32) != 0) 538c61e211dSHarvey Harrison return 0; 5392d4a7167SIngo Molnar 540c61e211dSHarvey Harrison address |= 0xffffffffUL << 32; 541c61e211dSHarvey Harrison if ((address >= (u64)_stext && address <= (u64)_etext) || 542c61e211dSHarvey Harrison (address >= MODULES_VADDR && address <= MODULES_END)) { 543a454ab31SIngo Molnar printk_once(errata93_warning); 544c61e211dSHarvey Harrison regs->ip = address; 545c61e211dSHarvey Harrison return 1; 546c61e211dSHarvey Harrison } 547c61e211dSHarvey Harrison #endif 548c61e211dSHarvey Harrison return 0; 549c61e211dSHarvey Harrison } 550c61e211dSHarvey Harrison 551c61e211dSHarvey Harrison /* 5522d4a7167SIngo Molnar * Work around K8 erratum #100 K8 in compat mode occasionally jumps 5532d4a7167SIngo Molnar * to illegal addresses >4GB. 5542d4a7167SIngo Molnar * 5552d4a7167SIngo Molnar * We catch this in the page fault handler because these addresses 5562d4a7167SIngo Molnar * are not reachable. Just detect this case and return. Any code 557c61e211dSHarvey Harrison * segment in LDT is compatibility mode. 558c61e211dSHarvey Harrison */ 559c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address) 560c61e211dSHarvey Harrison { 561c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 5622d4a7167SIngo Molnar if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) 563c61e211dSHarvey Harrison return 1; 564c61e211dSHarvey Harrison #endif 565c61e211dSHarvey Harrison return 0; 566c61e211dSHarvey Harrison } 567c61e211dSHarvey Harrison 568c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address) 569c61e211dSHarvey Harrison { 570c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG 571c61e211dSHarvey Harrison unsigned long nr; 5722d4a7167SIngo Molnar 573c61e211dSHarvey Harrison /* 5742d4a7167SIngo Molnar * Pentium F0 0F C7 C8 bug workaround: 575c61e211dSHarvey Harrison */ 576e2604b49SBorislav Petkov if (boot_cpu_has_bug(X86_BUG_F00F)) { 577c61e211dSHarvey Harrison nr = (address - idt_descr.address) >> 3; 578c61e211dSHarvey Harrison 579c61e211dSHarvey Harrison if (nr == 6) { 580c61e211dSHarvey Harrison do_invalid_op(regs, 0); 581c61e211dSHarvey Harrison return 1; 582c61e211dSHarvey Harrison } 583c61e211dSHarvey Harrison } 584c61e211dSHarvey Harrison #endif 585c61e211dSHarvey Harrison return 0; 586c61e211dSHarvey Harrison } 587c61e211dSHarvey Harrison 5888f766149SIngo Molnar static const char nx_warning[] = KERN_CRIT 5898f766149SIngo Molnar "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; 590eff50c34SJiri Kosina static const char smep_warning[] = KERN_CRIT 591eff50c34SJiri Kosina "unable to execute userspace code (SMEP?) (uid: %d)\n"; 5928f766149SIngo Molnar 5932d4a7167SIngo Molnar static void 5942d4a7167SIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code, 595c61e211dSHarvey Harrison unsigned long address) 596c61e211dSHarvey Harrison { 597c61e211dSHarvey Harrison if (!oops_may_print()) 598c61e211dSHarvey Harrison return; 599c61e211dSHarvey Harrison 600c61e211dSHarvey Harrison if (error_code & PF_INSTR) { 60193809be8SHarvey Harrison unsigned int level; 602426e34ccSMatt Fleming pgd_t *pgd; 603426e34ccSMatt Fleming pte_t *pte; 6042d4a7167SIngo Molnar 605426e34ccSMatt Fleming pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK); 606426e34ccSMatt Fleming pgd += pgd_index(address); 607426e34ccSMatt Fleming 608426e34ccSMatt Fleming pte = lookup_address_in_pgd(pgd, address, &level); 609c61e211dSHarvey Harrison 6108f766149SIngo Molnar if (pte && pte_present(*pte) && !pte_exec(*pte)) 611078de5f7SEric W. Biederman printk(nx_warning, from_kuid(&init_user_ns, current_uid())); 612eff50c34SJiri Kosina if (pte && pte_present(*pte) && pte_exec(*pte) && 613eff50c34SJiri Kosina (pgd_flags(*pgd) & _PAGE_USER) && 6141e02ce4cSAndy Lutomirski (__read_cr4() & X86_CR4_SMEP)) 615eff50c34SJiri Kosina printk(smep_warning, from_kuid(&init_user_ns, current_uid())); 616c61e211dSHarvey Harrison } 617fd40d6e3SHarvey Harrison 618c61e211dSHarvey Harrison printk(KERN_ALERT "BUG: unable to handle kernel "); 619c61e211dSHarvey Harrison if (address < PAGE_SIZE) 620c61e211dSHarvey Harrison printk(KERN_CONT "NULL pointer dereference"); 621c61e211dSHarvey Harrison else 622c61e211dSHarvey Harrison printk(KERN_CONT "paging request"); 6232d4a7167SIngo Molnar 624f294a8ceSVegard Nossum printk(KERN_CONT " at %p\n", (void *) address); 625c61e211dSHarvey Harrison printk(KERN_ALERT "IP:"); 6265f01c988SJiri Slaby printk_address(regs->ip); 6272d4a7167SIngo Molnar 628c61e211dSHarvey Harrison dump_pagetable(address); 629c61e211dSHarvey Harrison } 630c61e211dSHarvey Harrison 6312d4a7167SIngo Molnar static noinline void 6322d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code, 6332d4a7167SIngo Molnar unsigned long address) 634c61e211dSHarvey Harrison { 6352d4a7167SIngo Molnar struct task_struct *tsk; 6362d4a7167SIngo Molnar unsigned long flags; 6372d4a7167SIngo Molnar int sig; 6382d4a7167SIngo Molnar 6392d4a7167SIngo Molnar flags = oops_begin(); 6402d4a7167SIngo Molnar tsk = current; 6412d4a7167SIngo Molnar sig = SIGKILL; 642c61e211dSHarvey Harrison 643c61e211dSHarvey Harrison printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", 64492181f19SNick Piggin tsk->comm, address); 645c61e211dSHarvey Harrison dump_pagetable(address); 6462d4a7167SIngo Molnar 647c61e211dSHarvey Harrison tsk->thread.cr2 = address; 64851e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 649c61e211dSHarvey Harrison tsk->thread.error_code = error_code; 6502d4a7167SIngo Molnar 651c61e211dSHarvey Harrison if (__die("Bad pagetable", regs, error_code)) 652874d93d1SAlexander van Heukelum sig = 0; 6532d4a7167SIngo Molnar 654874d93d1SAlexander van Heukelum oops_end(flags, regs, sig); 655c61e211dSHarvey Harrison } 656c61e211dSHarvey Harrison 6572d4a7167SIngo Molnar static noinline void 6582d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code, 6594fc34901SAndy Lutomirski unsigned long address, int signal, int si_code) 66092181f19SNick Piggin { 66192181f19SNick Piggin struct task_struct *tsk = current; 66292181f19SNick Piggin unsigned long flags; 66392181f19SNick Piggin int sig; 66492181f19SNick Piggin 66592181f19SNick Piggin /* Are we prepared to handle this kernel fault? */ 6664fc34901SAndy Lutomirski if (fixup_exception(regs)) { 667c026b359SPeter Zijlstra /* 668c026b359SPeter Zijlstra * Any interrupt that takes a fault gets the fixup. This makes 669c026b359SPeter Zijlstra * the below recursive fault logic only apply to a faults from 670c026b359SPeter Zijlstra * task context. 671c026b359SPeter Zijlstra */ 672c026b359SPeter Zijlstra if (in_interrupt()) 673c026b359SPeter Zijlstra return; 674c026b359SPeter Zijlstra 675c026b359SPeter Zijlstra /* 676c026b359SPeter Zijlstra * Per the above we're !in_interrupt(), aka. task context. 677c026b359SPeter Zijlstra * 678c026b359SPeter Zijlstra * In this case we need to make sure we're not recursively 679c026b359SPeter Zijlstra * faulting through the emulate_vsyscall() logic. 680c026b359SPeter Zijlstra */ 6814fc34901SAndy Lutomirski if (current_thread_info()->sig_on_uaccess_error && signal) { 68251e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 6834fc34901SAndy Lutomirski tsk->thread.error_code = error_code | PF_USER; 6844fc34901SAndy Lutomirski tsk->thread.cr2 = address; 6854fc34901SAndy Lutomirski 6864fc34901SAndy Lutomirski /* XXX: hwpoison faults will set the wrong code. */ 6874fc34901SAndy Lutomirski force_sig_info_fault(signal, si_code, address, tsk, 0); 6884fc34901SAndy Lutomirski } 689c026b359SPeter Zijlstra 690c026b359SPeter Zijlstra /* 691c026b359SPeter Zijlstra * Barring that, we can do the fixup and be happy. 692c026b359SPeter Zijlstra */ 69392181f19SNick Piggin return; 6944fc34901SAndy Lutomirski } 69592181f19SNick Piggin 69692181f19SNick Piggin /* 6972d4a7167SIngo Molnar * 32-bit: 6982d4a7167SIngo Molnar * 69992181f19SNick Piggin * Valid to do another page fault here, because if this fault 70092181f19SNick Piggin * had been triggered by is_prefetch fixup_exception would have 70192181f19SNick Piggin * handled it. 70292181f19SNick Piggin * 7032d4a7167SIngo Molnar * 64-bit: 7042d4a7167SIngo Molnar * 70592181f19SNick Piggin * Hall of shame of CPU/BIOS bugs. 70692181f19SNick Piggin */ 70792181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 70892181f19SNick Piggin return; 70992181f19SNick Piggin 71092181f19SNick Piggin if (is_errata93(regs, address)) 71192181f19SNick Piggin return; 71292181f19SNick Piggin 71392181f19SNick Piggin /* 71492181f19SNick Piggin * Oops. The kernel tried to access some bad page. We'll have to 7152d4a7167SIngo Molnar * terminate things with extreme prejudice: 71692181f19SNick Piggin */ 71792181f19SNick Piggin flags = oops_begin(); 71892181f19SNick Piggin 71992181f19SNick Piggin show_fault_oops(regs, error_code, address); 72092181f19SNick Piggin 721a70857e4SAaron Tomlin if (task_stack_end_corrupted(tsk)) 722b0f4c4b3SPrarit Bhargava printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 72319803078SIngo Molnar 72492181f19SNick Piggin tsk->thread.cr2 = address; 72551e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 72692181f19SNick Piggin tsk->thread.error_code = error_code; 72792181f19SNick Piggin 72892181f19SNick Piggin sig = SIGKILL; 72992181f19SNick Piggin if (__die("Oops", regs, error_code)) 73092181f19SNick Piggin sig = 0; 7312d4a7167SIngo Molnar 73292181f19SNick Piggin /* Executive summary in case the body of the oops scrolled away */ 733b0f4c4b3SPrarit Bhargava printk(KERN_DEFAULT "CR2: %016lx\n", address); 7342d4a7167SIngo Molnar 73592181f19SNick Piggin oops_end(flags, regs, sig); 73692181f19SNick Piggin } 73792181f19SNick Piggin 7382d4a7167SIngo Molnar /* 7392d4a7167SIngo Molnar * Print out info about fatal segfaults, if the show_unhandled_signals 7402d4a7167SIngo Molnar * sysctl is set: 7412d4a7167SIngo Molnar */ 7422d4a7167SIngo Molnar static inline void 7432d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code, 7442d4a7167SIngo Molnar unsigned long address, struct task_struct *tsk) 7452d4a7167SIngo Molnar { 7462d4a7167SIngo Molnar if (!unhandled_signal(tsk, SIGSEGV)) 7472d4a7167SIngo Molnar return; 7482d4a7167SIngo Molnar 7492d4a7167SIngo Molnar if (!printk_ratelimit()) 7502d4a7167SIngo Molnar return; 7512d4a7167SIngo Molnar 752a1a08d1cSRoland Dreier printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx", 7532d4a7167SIngo Molnar task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, 7542d4a7167SIngo Molnar tsk->comm, task_pid_nr(tsk), address, 7552d4a7167SIngo Molnar (void *)regs->ip, (void *)regs->sp, error_code); 7562d4a7167SIngo Molnar 7572d4a7167SIngo Molnar print_vma_addr(KERN_CONT " in ", regs->ip); 7582d4a7167SIngo Molnar 7592d4a7167SIngo Molnar printk(KERN_CONT "\n"); 7602d4a7167SIngo Molnar } 7612d4a7167SIngo Molnar 7622d4a7167SIngo Molnar static void 7632d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 7642d4a7167SIngo Molnar unsigned long address, int si_code) 76592181f19SNick Piggin { 76692181f19SNick Piggin struct task_struct *tsk = current; 76792181f19SNick Piggin 76892181f19SNick Piggin /* User mode accesses just cause a SIGSEGV */ 76992181f19SNick Piggin if (error_code & PF_USER) { 77092181f19SNick Piggin /* 7712d4a7167SIngo Molnar * It's possible to have interrupts off here: 77292181f19SNick Piggin */ 77392181f19SNick Piggin local_irq_enable(); 77492181f19SNick Piggin 77592181f19SNick Piggin /* 77692181f19SNick Piggin * Valid to do another page fault here because this one came 7772d4a7167SIngo Molnar * from user space: 77892181f19SNick Piggin */ 77992181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 78092181f19SNick Piggin return; 78192181f19SNick Piggin 78292181f19SNick Piggin if (is_errata100(regs, address)) 78392181f19SNick Piggin return; 78492181f19SNick Piggin 7853ae36655SAndy Lutomirski #ifdef CONFIG_X86_64 7863ae36655SAndy Lutomirski /* 7873ae36655SAndy Lutomirski * Instruction fetch faults in the vsyscall page might need 7883ae36655SAndy Lutomirski * emulation. 7893ae36655SAndy Lutomirski */ 7903ae36655SAndy Lutomirski if (unlikely((error_code & PF_INSTR) && 791f40c3300SAndy Lutomirski ((address & ~0xfff) == VSYSCALL_ADDR))) { 7923ae36655SAndy Lutomirski if (emulate_vsyscall(regs, address)) 7933ae36655SAndy Lutomirski return; 7943ae36655SAndy Lutomirski } 7953ae36655SAndy Lutomirski #endif 796e575a86fSKees Cook /* Kernel addresses are always protection faults: */ 797e575a86fSKees Cook if (address >= TASK_SIZE) 798e575a86fSKees Cook error_code |= PF_PROT; 7993ae36655SAndy Lutomirski 800e575a86fSKees Cook if (likely(show_unhandled_signals)) 8012d4a7167SIngo Molnar show_signal_msg(regs, error_code, address, tsk); 80292181f19SNick Piggin 80392181f19SNick Piggin tsk->thread.cr2 = address; 804e575a86fSKees Cook tsk->thread.error_code = error_code; 80551e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 8062d4a7167SIngo Molnar 807f672b49bSAndi Kleen force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); 8082d4a7167SIngo Molnar 80992181f19SNick Piggin return; 81092181f19SNick Piggin } 81192181f19SNick Piggin 81292181f19SNick Piggin if (is_f00f_bug(regs, address)) 81392181f19SNick Piggin return; 81492181f19SNick Piggin 8154fc34901SAndy Lutomirski no_context(regs, error_code, address, SIGSEGV, si_code); 81692181f19SNick Piggin } 81792181f19SNick Piggin 8182d4a7167SIngo Molnar static noinline void 8192d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 8202d4a7167SIngo Molnar unsigned long address) 82192181f19SNick Piggin { 82292181f19SNick Piggin __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); 82392181f19SNick Piggin } 82492181f19SNick Piggin 8252d4a7167SIngo Molnar static void 8262d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code, 8272d4a7167SIngo Molnar unsigned long address, int si_code) 82892181f19SNick Piggin { 82992181f19SNick Piggin struct mm_struct *mm = current->mm; 83092181f19SNick Piggin 83192181f19SNick Piggin /* 83292181f19SNick Piggin * Something tried to access memory that isn't in our memory map.. 83392181f19SNick Piggin * Fix it, but check if it's kernel or user first.. 83492181f19SNick Piggin */ 83592181f19SNick Piggin up_read(&mm->mmap_sem); 83692181f19SNick Piggin 83792181f19SNick Piggin __bad_area_nosemaphore(regs, error_code, address, si_code); 83892181f19SNick Piggin } 83992181f19SNick Piggin 8402d4a7167SIngo Molnar static noinline void 8412d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) 84292181f19SNick Piggin { 84392181f19SNick Piggin __bad_area(regs, error_code, address, SEGV_MAPERR); 84492181f19SNick Piggin } 84592181f19SNick Piggin 8462d4a7167SIngo Molnar static noinline void 8472d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code, 8482d4a7167SIngo Molnar unsigned long address) 84992181f19SNick Piggin { 85092181f19SNick Piggin __bad_area(regs, error_code, address, SEGV_ACCERR); 85192181f19SNick Piggin } 85292181f19SNick Piggin 8532d4a7167SIngo Molnar static void 854a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, 855a6e04aa9SAndi Kleen unsigned int fault) 85692181f19SNick Piggin { 85792181f19SNick Piggin struct task_struct *tsk = current; 858a6e04aa9SAndi Kleen int code = BUS_ADRERR; 85992181f19SNick Piggin 8602d4a7167SIngo Molnar /* Kernel mode? Handle exceptions or die: */ 86196054569SLinus Torvalds if (!(error_code & PF_USER)) { 8624fc34901SAndy Lutomirski no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); 86396054569SLinus Torvalds return; 86496054569SLinus Torvalds } 8652d4a7167SIngo Molnar 866cd1b68f0SIngo Molnar /* User-space => ok to do another page fault: */ 86792181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 86892181f19SNick Piggin return; 8692d4a7167SIngo Molnar 87092181f19SNick Piggin tsk->thread.cr2 = address; 87192181f19SNick Piggin tsk->thread.error_code = error_code; 87251e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 8732d4a7167SIngo Molnar 874a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE 875f672b49bSAndi Kleen if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { 876a6e04aa9SAndi Kleen printk(KERN_ERR 877a6e04aa9SAndi Kleen "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", 878a6e04aa9SAndi Kleen tsk->comm, tsk->pid, address); 879a6e04aa9SAndi Kleen code = BUS_MCEERR_AR; 880a6e04aa9SAndi Kleen } 881a6e04aa9SAndi Kleen #endif 882f672b49bSAndi Kleen force_sig_info_fault(SIGBUS, code, address, tsk, fault); 88392181f19SNick Piggin } 88492181f19SNick Piggin 8853a13c4d7SJohannes Weiner static noinline void 8862d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code, 8872d4a7167SIngo Molnar unsigned long address, unsigned int fault) 88892181f19SNick Piggin { 8893a13c4d7SJohannes Weiner if (fatal_signal_pending(current) && !(error_code & PF_USER)) { 8904fc34901SAndy Lutomirski no_context(regs, error_code, address, 0, 0); 8913a13c4d7SJohannes Weiner return; 892b80ef10eSKOSAKI Motohiro } 893b80ef10eSKOSAKI Motohiro 8942d4a7167SIngo Molnar if (fault & VM_FAULT_OOM) { 895f8626854SAndrey Vagin /* Kernel mode? Handle exceptions or die: */ 896f8626854SAndrey Vagin if (!(error_code & PF_USER)) { 8974fc34901SAndy Lutomirski no_context(regs, error_code, address, 8984fc34901SAndy Lutomirski SIGSEGV, SEGV_MAPERR); 8993a13c4d7SJohannes Weiner return; 900f8626854SAndrey Vagin } 901f8626854SAndrey Vagin 902c2d23f91SDavid Rientjes /* 903c2d23f91SDavid Rientjes * We ran out of memory, call the OOM killer, and return the 904c2d23f91SDavid Rientjes * userspace (which will retry the fault, or kill us if we got 905c2d23f91SDavid Rientjes * oom-killed): 906c2d23f91SDavid Rientjes */ 907c2d23f91SDavid Rientjes pagefault_out_of_memory(); 9082d4a7167SIngo Molnar } else { 909f672b49bSAndi Kleen if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 910f672b49bSAndi Kleen VM_FAULT_HWPOISON_LARGE)) 911a6e04aa9SAndi Kleen do_sigbus(regs, error_code, address, fault); 91233692f27SLinus Torvalds else if (fault & VM_FAULT_SIGSEGV) 91333692f27SLinus Torvalds bad_area_nosemaphore(regs, error_code, address); 91492181f19SNick Piggin else 91592181f19SNick Piggin BUG(); 91692181f19SNick Piggin } 9172d4a7167SIngo Molnar } 91892181f19SNick Piggin 919d8b57bb7SThomas Gleixner static int spurious_fault_check(unsigned long error_code, pte_t *pte) 920d8b57bb7SThomas Gleixner { 921d8b57bb7SThomas Gleixner if ((error_code & PF_WRITE) && !pte_write(*pte)) 922d8b57bb7SThomas Gleixner return 0; 9232d4a7167SIngo Molnar 924d8b57bb7SThomas Gleixner if ((error_code & PF_INSTR) && !pte_exec(*pte)) 925d8b57bb7SThomas Gleixner return 0; 926d8b57bb7SThomas Gleixner 927d8b57bb7SThomas Gleixner return 1; 928d8b57bb7SThomas Gleixner } 929d8b57bb7SThomas Gleixner 930c61e211dSHarvey Harrison /* 9312d4a7167SIngo Molnar * Handle a spurious fault caused by a stale TLB entry. 9322d4a7167SIngo Molnar * 9332d4a7167SIngo Molnar * This allows us to lazily refresh the TLB when increasing the 9342d4a7167SIngo Molnar * permissions of a kernel page (RO -> RW or NX -> X). Doing it 9352d4a7167SIngo Molnar * eagerly is very expensive since that implies doing a full 9362d4a7167SIngo Molnar * cross-processor TLB flush, even if no stale TLB entries exist 9372d4a7167SIngo Molnar * on other processors. 9382d4a7167SIngo Molnar * 93931668511SDavid Vrabel * Spurious faults may only occur if the TLB contains an entry with 94031668511SDavid Vrabel * fewer permission than the page table entry. Non-present (P = 0) 94131668511SDavid Vrabel * and reserved bit (R = 1) faults are never spurious. 94231668511SDavid Vrabel * 9435b727a3bSJeremy Fitzhardinge * There are no security implications to leaving a stale TLB when 9445b727a3bSJeremy Fitzhardinge * increasing the permissions on a page. 94531668511SDavid Vrabel * 94631668511SDavid Vrabel * Returns non-zero if a spurious fault was handled, zero otherwise. 94731668511SDavid Vrabel * 94831668511SDavid Vrabel * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3 94931668511SDavid Vrabel * (Optional Invalidation). 9505b727a3bSJeremy Fitzhardinge */ 9519326638cSMasami Hiramatsu static noinline int 9522d4a7167SIngo Molnar spurious_fault(unsigned long error_code, unsigned long address) 9535b727a3bSJeremy Fitzhardinge { 9545b727a3bSJeremy Fitzhardinge pgd_t *pgd; 9555b727a3bSJeremy Fitzhardinge pud_t *pud; 9565b727a3bSJeremy Fitzhardinge pmd_t *pmd; 9575b727a3bSJeremy Fitzhardinge pte_t *pte; 9583c3e5694SSteven Rostedt int ret; 9595b727a3bSJeremy Fitzhardinge 96031668511SDavid Vrabel /* 96131668511SDavid Vrabel * Only writes to RO or instruction fetches from NX may cause 96231668511SDavid Vrabel * spurious faults. 96331668511SDavid Vrabel * 96431668511SDavid Vrabel * These could be from user or supervisor accesses but the TLB 96531668511SDavid Vrabel * is only lazily flushed after a kernel mapping protection 96631668511SDavid Vrabel * change, so user accesses are not expected to cause spurious 96731668511SDavid Vrabel * faults. 96831668511SDavid Vrabel */ 96931668511SDavid Vrabel if (error_code != (PF_WRITE | PF_PROT) 97031668511SDavid Vrabel && error_code != (PF_INSTR | PF_PROT)) 9715b727a3bSJeremy Fitzhardinge return 0; 9725b727a3bSJeremy Fitzhardinge 9735b727a3bSJeremy Fitzhardinge pgd = init_mm.pgd + pgd_index(address); 9745b727a3bSJeremy Fitzhardinge if (!pgd_present(*pgd)) 9755b727a3bSJeremy Fitzhardinge return 0; 9765b727a3bSJeremy Fitzhardinge 9775b727a3bSJeremy Fitzhardinge pud = pud_offset(pgd, address); 9785b727a3bSJeremy Fitzhardinge if (!pud_present(*pud)) 9795b727a3bSJeremy Fitzhardinge return 0; 9805b727a3bSJeremy Fitzhardinge 981d8b57bb7SThomas Gleixner if (pud_large(*pud)) 982d8b57bb7SThomas Gleixner return spurious_fault_check(error_code, (pte_t *) pud); 983d8b57bb7SThomas Gleixner 9845b727a3bSJeremy Fitzhardinge pmd = pmd_offset(pud, address); 9855b727a3bSJeremy Fitzhardinge if (!pmd_present(*pmd)) 9865b727a3bSJeremy Fitzhardinge return 0; 9875b727a3bSJeremy Fitzhardinge 988d8b57bb7SThomas Gleixner if (pmd_large(*pmd)) 989d8b57bb7SThomas Gleixner return spurious_fault_check(error_code, (pte_t *) pmd); 990d8b57bb7SThomas Gleixner 9915b727a3bSJeremy Fitzhardinge pte = pte_offset_kernel(pmd, address); 992954f8571SAndrea Arcangeli if (!pte_present(*pte)) 9935b727a3bSJeremy Fitzhardinge return 0; 9945b727a3bSJeremy Fitzhardinge 9953c3e5694SSteven Rostedt ret = spurious_fault_check(error_code, pte); 9963c3e5694SSteven Rostedt if (!ret) 9973c3e5694SSteven Rostedt return 0; 9983c3e5694SSteven Rostedt 9993c3e5694SSteven Rostedt /* 10002d4a7167SIngo Molnar * Make sure we have permissions in PMD. 10012d4a7167SIngo Molnar * If not, then there's a bug in the page tables: 10023c3e5694SSteven Rostedt */ 10033c3e5694SSteven Rostedt ret = spurious_fault_check(error_code, (pte_t *) pmd); 10043c3e5694SSteven Rostedt WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); 10052d4a7167SIngo Molnar 10063c3e5694SSteven Rostedt return ret; 10075b727a3bSJeremy Fitzhardinge } 10089326638cSMasami Hiramatsu NOKPROBE_SYMBOL(spurious_fault); 10095b727a3bSJeremy Fitzhardinge 1010c61e211dSHarvey Harrison int show_unhandled_signals = 1; 1011c61e211dSHarvey Harrison 10122d4a7167SIngo Molnar static inline int 101368da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma) 101492181f19SNick Piggin { 101568da336aSMichel Lespinasse if (error_code & PF_WRITE) { 10162d4a7167SIngo Molnar /* write, present and write, not present: */ 101792181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_WRITE))) 101892181f19SNick Piggin return 1; 10192d4a7167SIngo Molnar return 0; 10202d4a7167SIngo Molnar } 10212d4a7167SIngo Molnar 10222d4a7167SIngo Molnar /* read, present: */ 10232d4a7167SIngo Molnar if (unlikely(error_code & PF_PROT)) 102492181f19SNick Piggin return 1; 10252d4a7167SIngo Molnar 10262d4a7167SIngo Molnar /* read, not present: */ 102792181f19SNick Piggin if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) 102892181f19SNick Piggin return 1; 102992181f19SNick Piggin 103092181f19SNick Piggin return 0; 103192181f19SNick Piggin } 103292181f19SNick Piggin 10330973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address) 10340973a06cSHiroshi Shimamoto { 1035d9517346SIngo Molnar return address >= TASK_SIZE_MAX; 10360973a06cSHiroshi Shimamoto } 10370973a06cSHiroshi Shimamoto 103840d3cd66SH. Peter Anvin static inline bool smap_violation(int error_code, struct pt_regs *regs) 103940d3cd66SH. Peter Anvin { 10404640c7eeSH. Peter Anvin if (!IS_ENABLED(CONFIG_X86_SMAP)) 10414640c7eeSH. Peter Anvin return false; 10424640c7eeSH. Peter Anvin 10434640c7eeSH. Peter Anvin if (!static_cpu_has(X86_FEATURE_SMAP)) 10444640c7eeSH. Peter Anvin return false; 10454640c7eeSH. Peter Anvin 104640d3cd66SH. Peter Anvin if (error_code & PF_USER) 104740d3cd66SH. Peter Anvin return false; 104840d3cd66SH. Peter Anvin 1049f39b6f0eSAndy Lutomirski if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) 105040d3cd66SH. Peter Anvin return false; 105140d3cd66SH. Peter Anvin 105240d3cd66SH. Peter Anvin return true; 105340d3cd66SH. Peter Anvin } 105440d3cd66SH. Peter Anvin 1055c61e211dSHarvey Harrison /* 1056c61e211dSHarvey Harrison * This routine handles page faults. It determines the address, 1057c61e211dSHarvey Harrison * and the problem, and then passes it off to one of the appropriate 1058c61e211dSHarvey Harrison * routines. 1059d4078e23SPeter Zijlstra * 1060d4078e23SPeter Zijlstra * This function must have noinline because both callers 1061d4078e23SPeter Zijlstra * {,trace_}do_page_fault() have notrace on. Having this an actual function 1062d4078e23SPeter Zijlstra * guarantees there's a function trace entry. 1063c61e211dSHarvey Harrison */ 10649326638cSMasami Hiramatsu static noinline void 10650ac09f9fSJiri Olsa __do_page_fault(struct pt_regs *regs, unsigned long error_code, 10660ac09f9fSJiri Olsa unsigned long address) 1067c61e211dSHarvey Harrison { 1068c61e211dSHarvey Harrison struct vm_area_struct *vma; 10692d4a7167SIngo Molnar struct task_struct *tsk; 10702d4a7167SIngo Molnar struct mm_struct *mm; 107126178ec1SLinus Torvalds int fault, major = 0; 1072759496baSJohannes Weiner unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 1073c61e211dSHarvey Harrison 1074c61e211dSHarvey Harrison tsk = current; 1075c61e211dSHarvey Harrison mm = tsk->mm; 10762d4a7167SIngo Molnar 1077f8561296SVegard Nossum /* 1078f8561296SVegard Nossum * Detect and handle instructions that would cause a page fault for 1079f8561296SVegard Nossum * both a tracked kernel page and a userspace page. 1080f8561296SVegard Nossum */ 1081f8561296SVegard Nossum if (kmemcheck_active(regs)) 1082f8561296SVegard Nossum kmemcheck_hide(regs); 10835dfaf90fSIngo Molnar prefetchw(&mm->mmap_sem); 1084f8561296SVegard Nossum 10850fd0e3daSPekka Paalanen if (unlikely(kmmio_fault(regs, address))) 108686069782SPekka Paalanen return; 1087c61e211dSHarvey Harrison 1088c61e211dSHarvey Harrison /* 1089c61e211dSHarvey Harrison * We fault-in kernel-space virtual memory on-demand. The 1090c61e211dSHarvey Harrison * 'reference' page table is init_mm.pgd. 1091c61e211dSHarvey Harrison * 1092c61e211dSHarvey Harrison * NOTE! We MUST NOT take any locks for this case. We may 1093c61e211dSHarvey Harrison * be in an interrupt or a critical region, and should 1094c61e211dSHarvey Harrison * only copy the information from the master page table, 1095c61e211dSHarvey Harrison * nothing more. 1096c61e211dSHarvey Harrison * 1097c61e211dSHarvey Harrison * This verifies that the fault happens in kernel space 1098c61e211dSHarvey Harrison * (error_code & 4) == 0, and that the fault was not a 1099c61e211dSHarvey Harrison * protection error (error_code & 9) == 0. 1100c61e211dSHarvey Harrison */ 11010973a06cSHiroshi Shimamoto if (unlikely(fault_in_kernel_space(address))) { 1102f8561296SVegard Nossum if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { 1103f8561296SVegard Nossum if (vmalloc_fault(address) >= 0) 1104c61e211dSHarvey Harrison return; 11055b727a3bSJeremy Fitzhardinge 1106f8561296SVegard Nossum if (kmemcheck_fault(regs, address, error_code)) 1107f8561296SVegard Nossum return; 1108f8561296SVegard Nossum } 1109f8561296SVegard Nossum 11102d4a7167SIngo Molnar /* Can handle a stale RO->RW TLB: */ 111192181f19SNick Piggin if (spurious_fault(error_code, address)) 11125b727a3bSJeremy Fitzhardinge return; 11135b727a3bSJeremy Fitzhardinge 11142d4a7167SIngo Molnar /* kprobes don't want to hook the spurious faults: */ 1115e00b12e6SPeter Zijlstra if (kprobes_fault(regs)) 11169be260a6SMasami Hiramatsu return; 1117c61e211dSHarvey Harrison /* 1118c61e211dSHarvey Harrison * Don't take the mm semaphore here. If we fixup a prefetch 11192d4a7167SIngo Molnar * fault we could otherwise deadlock: 1120c61e211dSHarvey Harrison */ 112192181f19SNick Piggin bad_area_nosemaphore(regs, error_code, address); 11222d4a7167SIngo Molnar 112392181f19SNick Piggin return; 1124c61e211dSHarvey Harrison } 1125c61e211dSHarvey Harrison 11262d4a7167SIngo Molnar /* kprobes don't want to hook the spurious faults: */ 1127e00b12e6SPeter Zijlstra if (unlikely(kprobes_fault(regs))) 11289be260a6SMasami Hiramatsu return; 1129e00b12e6SPeter Zijlstra 1130e00b12e6SPeter Zijlstra if (unlikely(error_code & PF_RSVD)) 1131e00b12e6SPeter Zijlstra pgtable_bad(regs, error_code, address); 1132e00b12e6SPeter Zijlstra 1133e00b12e6SPeter Zijlstra if (unlikely(smap_violation(error_code, regs))) { 1134e00b12e6SPeter Zijlstra bad_area_nosemaphore(regs, error_code, address); 1135e00b12e6SPeter Zijlstra return; 1136e00b12e6SPeter Zijlstra } 1137e00b12e6SPeter Zijlstra 1138e00b12e6SPeter Zijlstra /* 1139e00b12e6SPeter Zijlstra * If we're in an interrupt, have no user context or are running 114070ffdb93SDavid Hildenbrand * in a region with pagefaults disabled then we must not take the fault 1141e00b12e6SPeter Zijlstra */ 114270ffdb93SDavid Hildenbrand if (unlikely(faulthandler_disabled() || !mm)) { 1143e00b12e6SPeter Zijlstra bad_area_nosemaphore(regs, error_code, address); 1144e00b12e6SPeter Zijlstra return; 1145e00b12e6SPeter Zijlstra } 1146e00b12e6SPeter Zijlstra 1147c61e211dSHarvey Harrison /* 1148891cffbdSLinus Torvalds * It's safe to allow irq's after cr2 has been saved and the 1149891cffbdSLinus Torvalds * vmalloc fault has been handled. 1150891cffbdSLinus Torvalds * 1151891cffbdSLinus Torvalds * User-mode registers count as a user access even for any 11522d4a7167SIngo Molnar * potential system fault or CPU buglet: 1153c61e211dSHarvey Harrison */ 1154f39b6f0eSAndy Lutomirski if (user_mode(regs)) { 1155891cffbdSLinus Torvalds local_irq_enable(); 1156891cffbdSLinus Torvalds error_code |= PF_USER; 1157759496baSJohannes Weiner flags |= FAULT_FLAG_USER; 11582d4a7167SIngo Molnar } else { 11592d4a7167SIngo Molnar if (regs->flags & X86_EFLAGS_IF) 1160c61e211dSHarvey Harrison local_irq_enable(); 11612d4a7167SIngo Molnar } 1162c61e211dSHarvey Harrison 1163a8b0ca17SPeter Zijlstra perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 11647dd1fcc2SPeter Zijlstra 1165759496baSJohannes Weiner if (error_code & PF_WRITE) 1166759496baSJohannes Weiner flags |= FAULT_FLAG_WRITE; 1167759496baSJohannes Weiner 11683a1dfe6eSIngo Molnar /* 11693a1dfe6eSIngo Molnar * When running in the kernel we expect faults to occur only to 11702d4a7167SIngo Molnar * addresses in user space. All other faults represent errors in 11712d4a7167SIngo Molnar * the kernel and should generate an OOPS. Unfortunately, in the 11722d4a7167SIngo Molnar * case of an erroneous fault occurring in a code path which already 11732d4a7167SIngo Molnar * holds mmap_sem we will deadlock attempting to validate the fault 11742d4a7167SIngo Molnar * against the address space. Luckily the kernel only validly 11752d4a7167SIngo Molnar * references user space from well defined areas of code, which are 11762d4a7167SIngo Molnar * listed in the exceptions table. 1177c61e211dSHarvey Harrison * 1178c61e211dSHarvey Harrison * As the vast majority of faults will be valid we will only perform 11792d4a7167SIngo Molnar * the source reference check when there is a possibility of a 11802d4a7167SIngo Molnar * deadlock. Attempt to lock the address space, if we cannot we then 11812d4a7167SIngo Molnar * validate the source. If this is invalid we can skip the address 11822d4a7167SIngo Molnar * space check, thus avoiding the deadlock: 1183c61e211dSHarvey Harrison */ 118492181f19SNick Piggin if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 1185c61e211dSHarvey Harrison if ((error_code & PF_USER) == 0 && 118692181f19SNick Piggin !search_exception_tables(regs->ip)) { 118792181f19SNick Piggin bad_area_nosemaphore(regs, error_code, address); 118892181f19SNick Piggin return; 118992181f19SNick Piggin } 1190d065bd81SMichel Lespinasse retry: 1191c61e211dSHarvey Harrison down_read(&mm->mmap_sem); 119201006074SPeter Zijlstra } else { 119301006074SPeter Zijlstra /* 11942d4a7167SIngo Molnar * The above down_read_trylock() might have succeeded in 11952d4a7167SIngo Molnar * which case we'll have missed the might_sleep() from 11962d4a7167SIngo Molnar * down_read(): 119701006074SPeter Zijlstra */ 119801006074SPeter Zijlstra might_sleep(); 1199c61e211dSHarvey Harrison } 1200c61e211dSHarvey Harrison 1201c61e211dSHarvey Harrison vma = find_vma(mm, address); 120292181f19SNick Piggin if (unlikely(!vma)) { 120392181f19SNick Piggin bad_area(regs, error_code, address); 120492181f19SNick Piggin return; 120592181f19SNick Piggin } 120692181f19SNick Piggin if (likely(vma->vm_start <= address)) 1207c61e211dSHarvey Harrison goto good_area; 120892181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { 120992181f19SNick Piggin bad_area(regs, error_code, address); 121092181f19SNick Piggin return; 121192181f19SNick Piggin } 1212c61e211dSHarvey Harrison if (error_code & PF_USER) { 1213c61e211dSHarvey Harrison /* 1214c61e211dSHarvey Harrison * Accessing the stack below %sp is always a bug. 1215c61e211dSHarvey Harrison * The large cushion allows instructions like enter 1216c61e211dSHarvey Harrison * and pusha to work. ("enter $65535, $31" pushes 1217c61e211dSHarvey Harrison * 32 pointers and then decrements %sp by 65535.) 1218c61e211dSHarvey Harrison */ 121992181f19SNick Piggin if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { 122092181f19SNick Piggin bad_area(regs, error_code, address); 122192181f19SNick Piggin return; 1222c61e211dSHarvey Harrison } 122392181f19SNick Piggin } 122492181f19SNick Piggin if (unlikely(expand_stack(vma, address))) { 122592181f19SNick Piggin bad_area(regs, error_code, address); 122692181f19SNick Piggin return; 122792181f19SNick Piggin } 122892181f19SNick Piggin 1229c61e211dSHarvey Harrison /* 1230c61e211dSHarvey Harrison * Ok, we have a good vm_area for this memory access, so 1231c61e211dSHarvey Harrison * we can handle it.. 1232c61e211dSHarvey Harrison */ 1233c61e211dSHarvey Harrison good_area: 123468da336aSMichel Lespinasse if (unlikely(access_error(error_code, vma))) { 123592181f19SNick Piggin bad_area_access_error(regs, error_code, address); 123692181f19SNick Piggin return; 1237c61e211dSHarvey Harrison } 1238c61e211dSHarvey Harrison 1239c61e211dSHarvey Harrison /* 1240c61e211dSHarvey Harrison * If for any reason at all we couldn't handle the fault, 1241c61e211dSHarvey Harrison * make sure we exit gracefully rather than endlessly redo 12429a95f3cfSPaul Cassella * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if 12439a95f3cfSPaul Cassella * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. 1244c61e211dSHarvey Harrison */ 1245d065bd81SMichel Lespinasse fault = handle_mm_fault(mm, vma, address, flags); 124626178ec1SLinus Torvalds major |= fault & VM_FAULT_MAJOR; 12472d4a7167SIngo Molnar 12483a13c4d7SJohannes Weiner /* 124926178ec1SLinus Torvalds * If we need to retry the mmap_sem has already been released, 125026178ec1SLinus Torvalds * and if there is a fatal signal pending there is no guarantee 125126178ec1SLinus Torvalds * that we made any progress. Handle this case first. 12523a13c4d7SJohannes Weiner */ 125326178ec1SLinus Torvalds if (unlikely(fault & VM_FAULT_RETRY)) { 125426178ec1SLinus Torvalds /* Retry at most once */ 125526178ec1SLinus Torvalds if (flags & FAULT_FLAG_ALLOW_RETRY) { 125626178ec1SLinus Torvalds flags &= ~FAULT_FLAG_ALLOW_RETRY; 125726178ec1SLinus Torvalds flags |= FAULT_FLAG_TRIED; 125826178ec1SLinus Torvalds if (!fatal_signal_pending(tsk)) 125926178ec1SLinus Torvalds goto retry; 126026178ec1SLinus Torvalds } 126126178ec1SLinus Torvalds 126226178ec1SLinus Torvalds /* User mode? Just return to handle the fatal exception */ 1263cf3c0a15SLinus Torvalds if (flags & FAULT_FLAG_USER) 12643a13c4d7SJohannes Weiner return; 12653a13c4d7SJohannes Weiner 126626178ec1SLinus Torvalds /* Not returning to user mode? Handle exceptions or die: */ 126726178ec1SLinus Torvalds no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); 126826178ec1SLinus Torvalds return; 126926178ec1SLinus Torvalds } 127026178ec1SLinus Torvalds 12717fb08ecaSLinus Torvalds up_read(&mm->mmap_sem); 127226178ec1SLinus Torvalds if (unlikely(fault & VM_FAULT_ERROR)) { 12733a13c4d7SJohannes Weiner mm_fault_error(regs, error_code, address, fault); 127437b23e05SKOSAKI Motohiro return; 127537b23e05SKOSAKI Motohiro } 127637b23e05SKOSAKI Motohiro 127737b23e05SKOSAKI Motohiro /* 127826178ec1SLinus Torvalds * Major/minor page fault accounting. If any of the events 127926178ec1SLinus Torvalds * returned VM_FAULT_MAJOR, we account it as a major fault. 1280d065bd81SMichel Lespinasse */ 128126178ec1SLinus Torvalds if (major) { 1282c61e211dSHarvey Harrison tsk->maj_flt++; 128326178ec1SLinus Torvalds perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); 1284ac17dc8eSPeter Zijlstra } else { 1285c61e211dSHarvey Harrison tsk->min_flt++; 128626178ec1SLinus Torvalds perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 1287d065bd81SMichel Lespinasse } 1288c61e211dSHarvey Harrison 12898c938f9fSIngo Molnar check_v8086_mode(regs, address, tsk); 1290c61e211dSHarvey Harrison } 12919326638cSMasami Hiramatsu NOKPROBE_SYMBOL(__do_page_fault); 12926ba3c97aSFrederic Weisbecker 12939326638cSMasami Hiramatsu dotraplinkage void notrace 12946ba3c97aSFrederic Weisbecker do_page_fault(struct pt_regs *regs, unsigned long error_code) 12956ba3c97aSFrederic Weisbecker { 1296d4078e23SPeter Zijlstra unsigned long address = read_cr2(); /* Get the faulting address */ 12976c1e0256SFrederic Weisbecker enum ctx_state prev_state; 1298d4078e23SPeter Zijlstra 1299d4078e23SPeter Zijlstra /* 1300d4078e23SPeter Zijlstra * We must have this function tagged with __kprobes, notrace and call 1301d4078e23SPeter Zijlstra * read_cr2() before calling anything else. To avoid calling any kind 1302d4078e23SPeter Zijlstra * of tracing machinery before we've observed the CR2 value. 1303d4078e23SPeter Zijlstra * 1304d4078e23SPeter Zijlstra * exception_{enter,exit}() contain all sorts of tracepoints. 1305d4078e23SPeter Zijlstra */ 13066c1e0256SFrederic Weisbecker 13076c1e0256SFrederic Weisbecker prev_state = exception_enter(); 13080ac09f9fSJiri Olsa __do_page_fault(regs, error_code, address); 13096c1e0256SFrederic Weisbecker exception_exit(prev_state); 13106ba3c97aSFrederic Weisbecker } 13119326638cSMasami Hiramatsu NOKPROBE_SYMBOL(do_page_fault); 131225c74b10SSeiji Aguchi 1313d4078e23SPeter Zijlstra #ifdef CONFIG_TRACING 13149326638cSMasami Hiramatsu static nokprobe_inline void 13159326638cSMasami Hiramatsu trace_page_fault_entries(unsigned long address, struct pt_regs *regs, 1316d34603b0SSeiji Aguchi unsigned long error_code) 1317d34603b0SSeiji Aguchi { 1318d34603b0SSeiji Aguchi if (user_mode(regs)) 1319d4078e23SPeter Zijlstra trace_page_fault_user(address, regs, error_code); 1320d34603b0SSeiji Aguchi else 1321d4078e23SPeter Zijlstra trace_page_fault_kernel(address, regs, error_code); 1322d34603b0SSeiji Aguchi } 1323d34603b0SSeiji Aguchi 13249326638cSMasami Hiramatsu dotraplinkage void notrace 132525c74b10SSeiji Aguchi trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) 132625c74b10SSeiji Aguchi { 13270ac09f9fSJiri Olsa /* 13280ac09f9fSJiri Olsa * The exception_enter and tracepoint processing could 13290ac09f9fSJiri Olsa * trigger another page faults (user space callchain 13300ac09f9fSJiri Olsa * reading) and destroy the original cr2 value, so read 13310ac09f9fSJiri Olsa * the faulting address now. 13320ac09f9fSJiri Olsa */ 13330ac09f9fSJiri Olsa unsigned long address = read_cr2(); 1334d4078e23SPeter Zijlstra enum ctx_state prev_state; 133525c74b10SSeiji Aguchi 133625c74b10SSeiji Aguchi prev_state = exception_enter(); 1337d4078e23SPeter Zijlstra trace_page_fault_entries(address, regs, error_code); 13380ac09f9fSJiri Olsa __do_page_fault(regs, error_code, address); 133925c74b10SSeiji Aguchi exception_exit(prev_state); 134025c74b10SSeiji Aguchi } 13419326638cSMasami Hiramatsu NOKPROBE_SYMBOL(trace_do_page_fault); 1342d4078e23SPeter Zijlstra #endif /* CONFIG_TRACING */ 1343