// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>                /* test_thread_flag(), ...      */
#include <linux/sched/task_stack.h>     /* task_stack_*(), ...          */
#include <linux/kdebug.h>               /* oops_begin/end, ...          */
#include <linux/extable.h>              /* search_exception_tables      */
#include <linux/bootmem.h>              /* max_low_pfn                  */
#include <linux/kprobes.h>              /* NOKPROBE_SYMBOL, ...         */
#include <linux/mmiotrace.h>            /* kmmio_handler, ...           */
#include <linux/perf_event.h>           /* perf_sw_event                */
#include <linux/hugetlb.h>              /* hstate_index_to_shift        */
#include <linux/prefetch.h>             /* prefetchw                    */
#include <linux/context_tracking.h>     /* exception_enter(), ...       */
#include <linux/uaccess.h>              /* faulthandler_disabled()      */

#include <asm/cpufeature.h>             /* boot_cpu_has, ...            */
#include <asm/traps.h>                  /* dotraplinkage, ...           */
#include <asm/pgalloc.h>                /* pgd_*(), ...                 */
#include <asm/fixmap.h>                 /* VSYSCALL_ADDR                */
#include <asm/vsyscall.h>               /* emulate_vsyscall             */
#include <asm/vm86.h>                   /* struct vm86                  */
#include <asm/mmu_context.h>            /* vma_pkey()                   */

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
        if (unlikely(is_kmmio_active()))
                if (kmmio_handler(regs, addr) == 1)
                        return -1;
        return 0;
}

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
                      unsigned char opcode, int *prefetch)
{
        unsigned char instr_hi = opcode & 0xf0;
        unsigned char instr_lo = opcode & 0x0f;

        switch (instr_hi) {
        case 0x20:
        case 0x30:
                /*
                 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
                 * In X86_64 long mode, the CPU will signal invalid
                 * opcode if some of these prefixes are present, so
                 * X86_64 will never get here anyway.
                 */
                return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
        case 0x40:
                /*
                 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
                 * Need to figure out under what instruction mode the
                 * instruction was issued. Could check the LDT for lm,
                 * but for now it's good enough to assume that long
                 * mode only uses well known segments or kernel.
                 */
                return (!user_mode(regs) || user_64bit_mode(regs));
#endif
        case 0x60:
                /* 0x64 thru 0x67 are valid prefixes in all modes. */
                return (instr_lo & 0xC) == 0x4;
        case 0xF0:
                /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
                return !instr_lo || (instr_lo >> 1) == 1;
        case 0x00:
                /* Prefetch instruction is 0x0F0D or 0x0F18 */
                if (probe_kernel_address(instr, opcode))
                        return 0;

                *prefetch = (instr_lo == 0xF) &&
                        (opcode == 0x0D || opcode == 0x18);
                return 0;
        default:
                return 0;
        }
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
        unsigned char *max_instr;
        unsigned char *instr;
        int prefetch = 0;

        /*
         * If it was an exec (instruction fetch) fault on an NX page, then
         * do not ignore the fault:
         */
        if (error_code & X86_PF_INSTR)
                return 0;

        instr = (void *)convert_ip_to_linear(current, regs);
        max_instr = instr + 15;

        if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
                return 0;

        while (instr < max_instr) {
                unsigned char opcode;

                if (probe_kernel_address(instr, opcode))
                        break;

                instr++;

                if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
                        break;
        }
        return prefetch;
}

/*
 * A protection key fault means that the PKRU value did not allow
 * access to some PTE.  Userspace can figure out what PKRU was
 * from the XSAVE state, and this function fills out a field in
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
 * If we get here, we know that the hardware signaled a X86_PF_PK
 * fault and that there was a VMA once we got in the fault
 * handler.  It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
 *
 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 * 3. T1   : faults...
 * 4. T2   : mprotect_key(foo, PAGE_SIZE, pkey=5);
 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 *           faulted on a pte with its pkey=4.
 */
static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
                               u32 *pkey)
{
        /* This is effectively an #ifdef */
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return;

        /* Fault not from Protection Keys: nothing to do */
        if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
                return;
        /*
         * force_sig_info_fault() is called from a number of
         * contexts, some of which have a VMA and some of which
         * do not.  The X86_PF_PK handling happens after we have a
         * valid VMA, so we should never reach this without a
         * valid VMA.
         */
        if (!pkey) {
                WARN_ONCE(1, "PKU fault with no VMA passed in");
                info->si_pkey = 0;
                return;
        }
        /*
         * si_pkey should be thought of as a strong hint, but not
         * absolutely guaranteed to be 100% accurate because of
         * the race explained above.
         */
        info->si_pkey = *pkey;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                     struct task_struct *tsk, u32 *pkey, int fault)
{
        unsigned lsb = 0;
        siginfo_t info;

        info.si_signo = si_signo;
        info.si_errno = 0;
        info.si_code = si_code;
        info.si_addr = (void __user *)address;
        if (fault & VM_FAULT_HWPOISON_LARGE)
                lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
        if (fault & VM_FAULT_HWPOISON)
                lsb = PAGE_SHIFT;
        info.si_addr_lsb = lsb;

        fill_sig_info_pkey(si_signo, si_code, &info, pkey);

        force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        p4d_t *p4d, *p4d_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        /*
         * set_pgd(pgd, *pgd_k); here would be useless on PAE
         * and redundant with the set_pmd() on non-PAE. As would
         * set_p4d/set_pud.
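         *
         * (Editorial note: on 32-bit the p4d and pud levels are always
         * folded into the pgd, so copying the pmd entry below is the
         * only synchronization that has any real effect.)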
         */
        p4d = p4d_offset(pgd, address);
        p4d_k = p4d_offset(pgd_k, address);
        if (!p4d_present(*p4d_k))
                return NULL;

        pud = pud_offset(p4d, address);
        pud_k = pud_offset(p4d_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

        return pmd_k;
}

void vmalloc_sync_all(void)
{
        unsigned long address;

        if (SHARED_KERNEL_PMD)
                return;

        for (address = VMALLOC_START & PMD_MASK;
             address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
             address += PMD_SIZE) {
                struct page *page;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        spinlock_t *pgt_lock;
                        pmd_t *ret;

                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

                        spin_lock(pgt_lock);
                        ret = vmalloc_sync_one(page_address(page), address);
                        spin_unlock(pgt_lock);

                        if (!ret)
                                break;
                }
                spin_unlock(&pgd_lock);
        }
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        unsigned long pgd_paddr;
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc area: */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        WARN_ON_ONCE(in_nmi());

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
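         *
         * (Editorial note: that is why the pgd is read straight from
         * CR3 below, rather than via current->mm -- during a switch,
         * CR3 can already point at the incoming task's page tables.)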
         */
        pgd_paddr = read_cr3_pa();
        pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
        if (!pmd_k)
                return -1;

        if (pmd_large(*pmd_k))
                return 0;

        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;

        return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
        unsigned long bit;

        if (!v8086_mode(regs) || !tsk->thread.vm86)
                return;

        bit = (address - 0xA0000) >> PAGE_SHIFT;
        if (bit < 32)
                tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
        return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
        pgd_t *base = __va(read_cr3_pa());
        pgd_t *pgd = &base[pgd_index(address)];
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

#ifdef CONFIG_X86_PAE
        pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
        if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
                goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
        p4d = p4d_offset(pgd, address);
        pud = pud_offset(p4d, address);
        pmd = pmd_offset(pud, address);
        pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde

        /*
         * We must not directly access the pte in the highpte
         * case if the page table is located in highmem.
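         * (Editorial note: with CONFIG_HIGHPTE, pte pages can sit in
         * highmem and are only ever mapped transiently via
         * kmap_atomic().)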
         * And let's rather not kmap-atomic the pte, just in case
         * it's allocated already:
         */
        if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
                goto out;

        pte = pte_offset_kernel(pmd, address);
        pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
        pr_cont("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        pgd_t *pgd, *pgd_ref;
        p4d_t *p4d, *p4d_ref;
        pud_t *pud, *pud_ref;
        pmd_t *pmd, *pmd_ref;
        pte_t *pte, *pte_ref;

        /* Make sure we are in vmalloc area: */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        WARN_ON_ONCE(in_nmi());

        /*
         * Copy kernel mappings over when needed. This can also
         * happen within a race in page table update. In the latter
         * case just flush:
         */
        pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
        pgd_ref = pgd_offset_k(address);
        if (pgd_none(*pgd_ref))
                return -1;

        if (CONFIG_PGTABLE_LEVELS > 4) {
                if (pgd_none(*pgd)) {
                        set_pgd(pgd, *pgd_ref);
                        arch_flush_lazy_mmu_mode();
                } else {
                        BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
                }
        }

        /* With 4-level paging, copying happens on the p4d level. */
        p4d = p4d_offset(pgd, address);
        p4d_ref = p4d_offset(pgd_ref, address);
        if (p4d_none(*p4d_ref))
                return -1;

        if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
                set_p4d(p4d, *p4d_ref);
                arch_flush_lazy_mmu_mode();
        } else {
                BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_ref));
        }
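
        /*
         * (Editorial note, summarizing the above: with 5-level paging
         * the pgd entry is copied from init_mm; with 4-level paging the
         * hardware pgd acts as the p4d, so the copy happens one level
         * down instead.)
         */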

        /*
         * Below here mismatches are bugs because these lower tables
         * are shared:
         */
        BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);

        pud = pud_offset(p4d, address);
        pud_ref = pud_offset(p4d_ref, address);
        if (pud_none(*pud_ref))
                return -1;

        if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
                BUG();

        if (pud_large(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        pmd_ref = pmd_offset(pud_ref, address);
        if (pmd_none(*pmd_ref))
                return -1;

        if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
                BUG();

        if (pmd_large(*pmd))
                return 0;

        pte_ref = pte_offset_kernel(pmd_ref, address);
        if (!pte_present(*pte_ref))
                return -1;

        pte = pte_offset_kernel(pmd, address);

        /*
         * Don't use pte_page here, because the mappings can point
         * outside mem_map, and the NUMA hash lookup cannot handle
         * that:
         */
        if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
                BUG();

        return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
        unsigned long dummy;

        return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
        pgd_t *base = __va(read_cr3_pa());
        pgd_t *pgd = base + pgd_index(address);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (bad_address(pgd))
                goto bad;

        pr_info("PGD %lx ", pgd_val(*pgd));

        if (!pgd_present(*pgd))
                goto out;

        p4d = p4d_offset(pgd, address);
        if (bad_address(p4d))
                goto bad;

        pr_cont("P4D %lx ", p4d_val(*p4d));
        if (!p4d_present(*p4d) || p4d_large(*p4d))
                goto out;

        pud = pud_offset(p4d, address);
        if (bad_address(pud))
                goto bad;

        pr_cont("PUD %lx ", pud_val(*pud));
        if (!pud_present(*pud) || pud_large(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
        if (bad_address(pmd))
                goto bad;

        pr_cont("PMD %lx ", pmd_val(*pmd));
        if (!pmd_present(*pmd) || pmd_large(*pmd))
                goto out;

        pte = pte_offset_kernel(pmd, address);
        if (bad_address(pte))
                goto bad;

        pr_cont("PTE %lx", pte_val(*pte));
out:
        pr_cont("\n");
        return;
bad:
        pr_info("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
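 *
 * (Editorial note: the check below recognizes the corruption by the
 * faulting address matching RIP with its upper 32 bits cleared, and
 * repairs RIP by filling those bits back in when the result lands in
 * kernel text or the module area.)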
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
            || boot_cpu_data.x86 != 0xf)
                return 0;

        if (address != regs->ip)
                return 0;

        if ((address >> 32) != 0)
                return 0;

        address |= 0xffffffffUL << 32;
        if ((address >= (u64)_stext && address <= (u64)_etext) ||
            (address >= MODULES_VADDR && address <= MODULES_END)) {
                printk_once(errata93_warning);
                regs->ip = address;
                return 1;
        }
#endif
        return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
        if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
                return 1;
#endif
        return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
        unsigned long nr;

        /*
         * Pentium F0 0F C7 C8 bug workaround:
         */
        if (boot_cpu_has_bug(X86_BUG_F00F)) {
                nr = (address - idt_descr.address) >> 3;

                if (nr == 6) {
                        do_invalid_op(regs, 0);
                        return 1;
                }
        }
#endif
        return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static const char smep_warning[] = KERN_CRIT
"unable to execute userspace code (SMEP?) (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
{
        if (!oops_may_print())
                return;

        if (error_code & X86_PF_INSTR) {
                unsigned int level;
                pgd_t *pgd;
                pte_t *pte;

                pgd = __va(read_cr3_pa());
                pgd += pgd_index(address);

                pte = lookup_address_in_pgd(pgd, address, &level);

                if (pte && pte_present(*pte) && !pte_exec(*pte))
                        printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                    (pgd_flags(*pgd) & _PAGE_USER) &&
                    (__read_cr4() & X86_CR4_SMEP))
                        printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
        }

        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
                printk(KERN_CONT "NULL pointer dereference");
        else
                printk(KERN_CONT "paging request");

        printk(KERN_CONT " at %px\n", (void *) address);
        printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);

        dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
            unsigned long address)
{
        struct task_struct *tsk;
        unsigned long flags;
        int sig;

        flags = oops_begin();
        tsk = current;
        sig = SIGKILL;

        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
               tsk->comm, address);
        dump_pagetable(address);

        tsk->thread.cr2 = address;
        tsk->thread.trap_nr = X86_TRAP_PF;
        tsk->thread.error_code = error_code;

        if (__die("Bad pagetable", regs, error_code))
                sig = 0;

        oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int signal, int si_code)
{
        struct task_struct *tsk = current;
        unsigned long flags;
        int sig;

        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs, X86_TRAP_PF)) {
                /*
                 * Any interrupt that takes a fault gets the fixup. This makes
                 * the below recursive fault logic only apply to faults from
                 * task context.
                 */
                if (in_interrupt())
                        return;

                /*
                 * Per the above we're !in_interrupt(), aka. task context.
                 *
                 * In this case we need to make sure we're not recursively
                 * faulting through the emulate_vsyscall() logic.
                 */
                if (current->thread.sig_on_uaccess_err && signal) {
                        tsk->thread.trap_nr = X86_TRAP_PF;
                        tsk->thread.error_code = error_code | X86_PF_USER;
                        tsk->thread.cr2 = address;

                        /* XXX: hwpoison faults will set the wrong code. */
                        force_sig_info_fault(signal, si_code, address,
                                             tsk, NULL, 0);
                }

                /*
                 * Barring that, we can do the fixup and be happy.
                 */
                return;
        }

#ifdef CONFIG_VMAP_STACK
        /*
         * Stack overflow? During boot, we can fault near the initial
         * stack in the direct map, but that's not an overflow -- check
         * that we're in vmalloc space to avoid this.
         */
        if (is_vmalloc_addr((void *)address) &&
            (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
             address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
                unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
                /*
                 * We're likely to be running with very little stack space
                 * left.  It's plausible that we'd hit this condition but
                 * double-fault even before we get this far, in which case
                 * we're fine: the double-fault handler will deal with it.
                 *
                 * We don't want to make it all the way into the oops code
                 * and then double-fault, though, because we're likely to
                 * break the console driver and lose most of the stack dump.
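                 *
                 * (Editorial note: hence the asm below -- switch to the
                 * double-fault IST stack, which is known to be good,
                 * before calling handle_stack_overflow(), and never
                 * return.)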
                 */
                asm volatile ("movq %[stack], %%rsp\n\t"
                              "call handle_stack_overflow\n\t"
                              "1: jmp 1b"
                              : ASM_CALL_CONSTRAINT
                              : "D" ("kernel stack overflow (page fault)"),
                                "S" (regs), "d" (address),
                                [stack] "rm" (stack));
                unreachable();
        }
#endif

        /*
         * 32-bit:
         *
         *   Valid to do another page fault here, because if this fault
         *   had been triggered by is_prefetch, fixup_exception would have
         *   handled it.
         *
         * 64-bit:
         *
         *   Hall of shame of CPU/BIOS bugs.
         */
        if (is_prefetch(regs, error_code, address))
                return;

        if (is_errata93(regs, address))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice:
         */
        flags = oops_begin();

        show_fault_oops(regs, error_code, address);

        if (task_stack_end_corrupted(tsk))
                printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

        tsk->thread.cr2 = address;
        tsk->thread.trap_nr = X86_TRAP_PF;
        tsk->thread.error_code = error_code;

        sig = SIGKILL;
        if (__die("Oops", regs, error_code))
                sig = 0;

        /* Executive summary in case the body of the oops scrolled away */
        printk(KERN_DEFAULT "CR2: %016lx\n", address);

        oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, SIGSEGV))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
               task_pid_nr(tsk) > 1 ?
 KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->ip, (void *)regs->sp, error_code);

        print_vma_addr(KERN_CONT " in ", regs->ip);

        printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                       unsigned long address, u32 *pkey, int si_code)
{
        struct task_struct *tsk = current;

        /* User mode accesses just cause a SIGSEGV */
        if (error_code & X86_PF_USER) {
                /*
                 * It's possible to have interrupts off here:
                 */
                local_irq_enable();

                /*
                 * Valid to do another page fault here because this one came
                 * from user space:
                 */
                if (is_prefetch(regs, error_code, address))
                        return;

                if (is_errata100(regs, address))
                        return;

#ifdef CONFIG_X86_64
                /*
                 * Instruction fetch faults in the vsyscall page might need
                 * emulation.
                 */
                if (unlikely((error_code & X86_PF_INSTR) &&
                             ((address & ~0xfff) == VSYSCALL_ADDR))) {
                        if (emulate_vsyscall(regs, address))
                                return;
                }
#endif

                /*
                 * To avoid leaking information about the kernel page table
                 * layout, pretend that user-mode accesses to kernel addresses
                 * are always protection faults.
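                 *
                 * (Editorial note: the error code ends up user-visible in
                 * the signal frame; reporting a not-present fault instead
                 * would reveal whether the kernel address happened to be
                 * mapped.)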
                 */
                if (address >= TASK_SIZE_MAX)
                        error_code |= X86_PF_PROT;

                if (likely(show_unhandled_signals))
                        show_signal_msg(regs, error_code, address, tsk);

                tsk->thread.cr2 = address;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = X86_TRAP_PF;

                force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);

                return;
        }

        if (is_f00f_bug(regs, address))
                return;

        no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                     unsigned long address, u32 *pkey)
{
        __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, struct vm_area_struct *vma, int si_code)
{
        struct mm_struct *mm = current->mm;
        u32 pkey;

        if (vma)
                pkey = vma_pkey(vma);

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
        up_read(&mm->mmap_sem);

        __bad_area_nosemaphore(regs, error_code, address,
                               (vma) ? &pkey : NULL, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        __bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
                                              struct vm_area_struct *vma)
{
        /* This code is always called on the current mm */
        bool foreign = false;

        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return false;
        if (error_code & X86_PF_PK)
                return true;
        /* this checks permission keys on the VMA: */
        if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
                                       (error_code & X86_PF_INSTR), foreign))
                return true;
        return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
                      unsigned long address, struct vm_area_struct *vma)
{
        /*
         * This OSPKE check is not strictly necessary at runtime.
         * But, doing it this way allows compiler optimizations
         * if pkeys are compiled out.
         */
        if (bad_area_access_from_pkeys(error_code, vma))
                __bad_area(regs, error_code, address, vma, SEGV_PKUERR);
        else
                __bad_area(regs, error_code, address, vma, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
          u32 *pkey, unsigned int fault)
{
        struct task_struct *tsk = current;
        int code = BUS_ADRERR;

        /* Kernel mode? Handle exceptions or die: */
        if (!(error_code & X86_PF_USER)) {
                no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
                return;
        }

        /* User-space => ok to do another page fault: */
        if (is_prefetch(regs, error_code, address))
                return;

        tsk->thread.cr2 = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
        if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
                printk(KERN_ERR
                       "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
                       tsk->comm, tsk->pid, address);
                code = BUS_MCEERR_AR;
        }
#endif
        force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, u32 *pkey, unsigned int fault)
{
        if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
                no_context(regs, error_code, address, 0, 0);
                return;
        }

        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
                if (!(error_code & X86_PF_USER)) {
                        no_context(regs, error_code, address,
                                   SIGSEGV, SEGV_MAPERR);
                        return;
                }

                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed):
                 */
                pagefault_out_of_memory();
        } else {
                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
                             VM_FAULT_HWPOISON_LARGE))
                        do_sigbus(regs, error_code, address, pkey, fault);
                else if (fault & VM_FAULT_SIGSEGV)
                        bad_area_nosemaphore(regs, error_code, address, pkey);
                else
                        BUG();
        }
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
        if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
                return 0;

        if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
                return 0;
        /*
         * Note: We do not do lazy flushing on protection key
         * changes, so no spurious fault will ever set X86_PF_PK.
         */
        if ((error_code & X86_PF_PK))
                return 1;

        return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry.  Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
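 *
 * (Editorial note: the handler does not need to flush anything itself;
 * when a spurious fault is detected the faulting access is simply
 * restarted and re-walks the now-correct page tables.)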
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

        /*
         * Only writes to RO or instruction fetches from NX may cause
         * spurious faults.
         *
         * These could be from user or supervisor accesses but the TLB
         * is only lazily flushed after a kernel mapping protection
         * change, so user accesses are not expected to cause spurious
         * faults.
         */
        if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
            error_code != (X86_PF_INSTR | X86_PF_PROT))
                return 0;

        pgd = init_mm.pgd + pgd_index(address);
        if (!pgd_present(*pgd))
                return 0;

        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                return 0;

        if (p4d_large(*p4d))
                return spurious_fault_check(error_code, (pte_t *) p4d);

        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                return 0;

        if (pud_large(*pud))
                return spurious_fault_check(error_code, (pte_t *) pud);

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return spurious_fault_check(error_code, (pte_t *) pmd);

        pte = pte_offset_kernel(pmd, address);
        if (!pte_present(*pte))
                return 0;

        ret = spurious_fault_check(error_code, pte);
        if (!ret)
                return 0;

        /*
         * Make sure we have permissions in PMD.
1167c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1168c61e211dSHarvey Harrison
11692d4a7167SIngo Molnar static inline int
117068da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
117192181f19SNick Piggin {
117207f146f5SDave Hansen	/* This is only called for the current mm, so: */
117307f146f5SDave Hansen	bool foreign = false;
1174e8c6226dSDave Hansen
1175e8c6226dSDave Hansen	/*
1176e8c6226dSDave Hansen	 * Read or write was blocked by protection keys. This is
1177e8c6226dSDave Hansen	 * always an unconditional error and can never result in
1178e8c6226dSDave Hansen	 * a follow-up action to resolve the fault, like a COW.
1179e8c6226dSDave Hansen	 */
11801067f030SRicardo Neri	if (error_code & X86_PF_PK)
1181e8c6226dSDave Hansen		return 1;
1182e8c6226dSDave Hansen
118333a709b2SDave Hansen	/*
118407f146f5SDave Hansen	 * Make sure to check the VMA so that we do not perform
11851067f030SRicardo Neri	 * faults just to hit an X86_PF_PK as soon as we fill in a
118607f146f5SDave Hansen	 * page.
118707f146f5SDave Hansen	 */
11881067f030SRicardo Neri	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
11891067f030SRicardo Neri				       (error_code & X86_PF_INSTR), foreign))
119007f146f5SDave Hansen		return 1;
119133a709b2SDave Hansen
11921067f030SRicardo Neri	if (error_code & X86_PF_WRITE) {
11932d4a7167SIngo Molnar		/* write, present and write, not present: */
119492181f19SNick Piggin		if (unlikely(!(vma->vm_flags & VM_WRITE)))
119592181f19SNick Piggin			return 1;
11962d4a7167SIngo Molnar		return 0;
11972d4a7167SIngo Molnar	}
11982d4a7167SIngo Molnar
11992d4a7167SIngo Molnar	/* read, present: */
12001067f030SRicardo Neri	if (unlikely(error_code & X86_PF_PROT))
120192181f19SNick Piggin		return 1;
12022d4a7167SIngo Molnar
12032d4a7167SIngo Molnar	/* read, not present: */
120492181f19SNick Piggin	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
120592181f19SNick Piggin		return 1;
120692181f19SNick Piggin
120792181f19SNick Piggin	return 0;
120892181f19SNick Piggin }
120992181f19SNick Piggin
12100973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address)
12110973a06cSHiroshi Shimamoto {
1212d9517346SIngo Molnar	return address >= TASK_SIZE_MAX;
12130973a06cSHiroshi Shimamoto }
12140973a06cSHiroshi Shimamoto
121540d3cd66SH. Peter Anvin static inline bool smap_violation(int error_code, struct pt_regs *regs)
121640d3cd66SH. Peter Anvin {
12174640c7eeSH. Peter Anvin	if (!IS_ENABLED(CONFIG_X86_SMAP))
12184640c7eeSH. Peter Anvin		return false;
12194640c7eeSH. Peter Anvin
12204640c7eeSH. Peter Anvin	if (!static_cpu_has(X86_FEATURE_SMAP))
12214640c7eeSH. Peter Anvin		return false;
12224640c7eeSH. Peter Anvin
12231067f030SRicardo Neri	if (error_code & X86_PF_USER)
122440d3cd66SH. Peter Anvin		return false;
122540d3cd66SH. Peter Anvin
1226f39b6f0eSAndy Lutomirski	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
122740d3cd66SH. Peter Anvin		return false;
122840d3cd66SH. Peter Anvin
122940d3cd66SH. Peter Anvin	return true;
123040d3cd66SH. Peter Anvin }
123140d3cd66SH. Peter Anvin
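/*
 * Editor's illustration, not kernel code: the decision matrix that
 * access_error() above implements, reduced to a standalone function.
 * The MOCK_* names and values stand in for the kernel's X86_PF_* and
 * VM_* constants, which live in the x86 and mm headers.
 */
#if 0	/* illustrative sketch only, never compiled */
#define MOCK_PF_PROT	(1UL << 0)
#define MOCK_PF_WRITE	(1UL << 1)

#define MOCK_VM_READ	0x1UL
#define MOCK_VM_WRITE	0x2UL
#define MOCK_VM_EXEC	0x4UL

/* Returns 1 when the access is incompatible with the VMA, 0 otherwise. */
static int mock_access_error(unsigned long error_code, unsigned long vm_flags)
{
	/* write, present and write, not present: need VM_WRITE */
	if (error_code & MOCK_PF_WRITE)
		return !(vm_flags & MOCK_VM_WRITE);

	/* read, present: the protection bits themselves forbade it */
	if (error_code & MOCK_PF_PROT)
		return 1;

	/* read, not present: any of read/exec/write makes it resolvable */
	return !(vm_flags & (MOCK_VM_READ | MOCK_VM_EXEC | MOCK_VM_WRITE));
}
#endif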
1232c61e211dSHarvey Harrison /*
1233c61e211dSHarvey Harrison  * This routine handles page faults. It determines the address,
1234c61e211dSHarvey Harrison  * and the problem, and then passes it off to one of the appropriate
1235c61e211dSHarvey Harrison  * routines.
1236c61e211dSHarvey Harrison  */
12379326638cSMasami Hiramatsu static noinline void
12380ac09f9fSJiri Olsa __do_page_fault(struct pt_regs *regs, unsigned long error_code,
12390ac09f9fSJiri Olsa		unsigned long address)
1240c61e211dSHarvey Harrison {
1241c61e211dSHarvey Harrison	struct vm_area_struct *vma;
12422d4a7167SIngo Molnar	struct task_struct *tsk;
12432d4a7167SIngo Molnar	struct mm_struct *mm;
124426178ec1SLinus Torvalds	int fault, major = 0;
1245759496baSJohannes Weiner	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1246a3c4fb7cSLaurent Dufour	u32 pkey;
1247c61e211dSHarvey Harrison
1248c61e211dSHarvey Harrison	tsk = current;
1249c61e211dSHarvey Harrison	mm = tsk->mm;
12502d4a7167SIngo Molnar
12515dfaf90fSIngo Molnar	prefetchw(&mm->mmap_sem);
1252f8561296SVegard Nossum
12530fd0e3daSPekka Paalanen	if (unlikely(kmmio_fault(regs, address)))
125486069782SPekka Paalanen		return;
1255c61e211dSHarvey Harrison
1256c61e211dSHarvey Harrison	/*
1257c61e211dSHarvey Harrison	 * We fault-in kernel-space virtual memory on-demand. The
1258c61e211dSHarvey Harrison	 * 'reference' page table is init_mm.pgd.
1259c61e211dSHarvey Harrison	 *
1260c61e211dSHarvey Harrison	 * NOTE! We MUST NOT take any locks for this case. We may
1261c61e211dSHarvey Harrison	 * be in an interrupt or a critical region, and should
1262c61e211dSHarvey Harrison	 * only copy the information from the master page table,
1263c61e211dSHarvey Harrison	 * nothing more.
1264c61e211dSHarvey Harrison	 *
1265c61e211dSHarvey Harrison	 * This verifies that the fault happens in kernel space, i.e.
1266c61e211dSHarvey Harrison	 * (error_code & X86_PF_USER) == 0, and that the fault was not a
1267c61e211dSHarvey Harrison	 * protection or reserved-bit error, i.e. (error_code & (X86_PF_PROT | X86_PF_RSVD)) == 0.
1268c61e211dSHarvey Harrison	 */
12690973a06cSHiroshi Shimamoto	if (unlikely(fault_in_kernel_space(address))) {
12701067f030SRicardo Neri		if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
1271f8561296SVegard Nossum			if (vmalloc_fault(address) >= 0)
1272c61e211dSHarvey Harrison				return;
1273f8561296SVegard Nossum		}
1274f8561296SVegard Nossum
12752d4a7167SIngo Molnar		/* Can handle a stale RO->RW TLB: */
127692181f19SNick Piggin		if (spurious_fault(error_code, address))
12775b727a3bSJeremy Fitzhardinge			return;
12785b727a3bSJeremy Fitzhardinge
12792d4a7167SIngo Molnar		/* kprobes don't want to hook the spurious faults: */
1280e00b12e6SPeter Zijlstra		if (kprobes_fault(regs))
12819be260a6SMasami Hiramatsu			return;
1282c61e211dSHarvey Harrison		/*
1283c61e211dSHarvey Harrison		 * Don't take the mm semaphore here. If we fix up a prefetch
12842d4a7167SIngo Molnar		 * fault we could otherwise deadlock:
1285c61e211dSHarvey Harrison		 */
12867b2d0dbaSDave Hansen		bad_area_nosemaphore(regs, error_code, address, NULL);
12872d4a7167SIngo Molnar
128892181f19SNick Piggin		return;
1289c61e211dSHarvey Harrison	}
1290c61e211dSHarvey Harrison
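	/*
	 * Editor's worked example: with the hardware bit values
	 * X86_PF_PROT = 1, X86_PF_USER = 4 and X86_PF_RSVD = 8, a
	 * kernel-mode write to a not-present vmalloc address arrives
	 * with error_code 0x2 (write bit only) and takes the
	 * vmalloc_fault() path above, while e.g. 0x5 (PROT | USER) or
	 * 0x9 (PROT | RSVD) falls through to the spurious-fault and
	 * oops handling instead.
	 */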
12912d4a7167SIngo Molnar	/* kprobes don't want to hook the spurious faults: */
1292e00b12e6SPeter Zijlstra	if (unlikely(kprobes_fault(regs)))
12939be260a6SMasami Hiramatsu		return;
1294e00b12e6SPeter Zijlstra
12951067f030SRicardo Neri	if (unlikely(error_code & X86_PF_RSVD))
1296e00b12e6SPeter Zijlstra		pgtable_bad(regs, error_code, address);
1297e00b12e6SPeter Zijlstra
1298e00b12e6SPeter Zijlstra	if (unlikely(smap_violation(error_code, regs))) {
12997b2d0dbaSDave Hansen		bad_area_nosemaphore(regs, error_code, address, NULL);
1300e00b12e6SPeter Zijlstra		return;
1301e00b12e6SPeter Zijlstra	}
1302e00b12e6SPeter Zijlstra
1303e00b12e6SPeter Zijlstra	/*
1304e00b12e6SPeter Zijlstra	 * If we're in an interrupt, have no user context, or are running
130570ffdb93SDavid Hildenbrand	 * in a region with pagefaults disabled, then we must not take the fault.
1306e00b12e6SPeter Zijlstra	 */
130770ffdb93SDavid Hildenbrand	if (unlikely(faulthandler_disabled() || !mm)) {
13087b2d0dbaSDave Hansen		bad_area_nosemaphore(regs, error_code, address, NULL);
1309e00b12e6SPeter Zijlstra		return;
1310e00b12e6SPeter Zijlstra	}
1311e00b12e6SPeter Zijlstra
1312c61e211dSHarvey Harrison	/*
1313891cffbdSLinus Torvalds	 * It's safe to allow IRQs after CR2 has been saved and the
1314891cffbdSLinus Torvalds	 * vmalloc fault has been handled.
1315891cffbdSLinus Torvalds	 *
1316891cffbdSLinus Torvalds	 * User-mode registers count as a user access even for any
13172d4a7167SIngo Molnar	 * potential system fault or CPU buglet:
1318c61e211dSHarvey Harrison	 */
1319f39b6f0eSAndy Lutomirski	if (user_mode(regs)) {
1320891cffbdSLinus Torvalds		local_irq_enable();
13211067f030SRicardo Neri		error_code |= X86_PF_USER;
1322759496baSJohannes Weiner		flags |= FAULT_FLAG_USER;
13232d4a7167SIngo Molnar	} else {
13242d4a7167SIngo Molnar		if (regs->flags & X86_EFLAGS_IF)
1325c61e211dSHarvey Harrison			local_irq_enable();
13262d4a7167SIngo Molnar	}
1327c61e211dSHarvey Harrison
1328a8b0ca17SPeter Zijlstra	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
13297dd1fcc2SPeter Zijlstra
13301067f030SRicardo Neri	if (error_code & X86_PF_WRITE)
1331759496baSJohannes Weiner		flags |= FAULT_FLAG_WRITE;
13321067f030SRicardo Neri	if (error_code & X86_PF_INSTR)
1333d61172b4SDave Hansen		flags |= FAULT_FLAG_INSTRUCTION;
1334759496baSJohannes Weiner
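	/*
	 * Editor's worked example: a user-mode write to a present page
	 * arrives with error_code = X86_PF_PROT | X86_PF_WRITE |
	 * X86_PF_USER (0x7). The code above has then already enabled
	 * interrupts and turned the x86 bits into generic mm flags:
	 * FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE from the
	 * initializer, plus FAULT_FLAG_USER and FAULT_FLAG_WRITE, so
	 * handle_mm_fault() never sees raw x86 error bits.
	 */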
13353a1dfe6eSIngo Molnar	/*
13363a1dfe6eSIngo Molnar	 * When running in the kernel we expect faults to occur only to
13372d4a7167SIngo Molnar	 * addresses in user space. All other faults represent errors in
13382d4a7167SIngo Molnar	 * the kernel and should generate an OOPS. Unfortunately, in the
13392d4a7167SIngo Molnar	 * case of an erroneous fault occurring in a code path which already
13402d4a7167SIngo Molnar	 * holds mmap_sem we will deadlock attempting to validate the fault
13412d4a7167SIngo Molnar	 * against the address space. Luckily the kernel only validly
13422d4a7167SIngo Molnar	 * references user space from well defined areas of code, which are
13432d4a7167SIngo Molnar	 * listed in the exceptions table.
1344c61e211dSHarvey Harrison	 *
1345c61e211dSHarvey Harrison	 * As the vast majority of faults will be valid we will only perform
13462d4a7167SIngo Molnar	 * the source reference check when there is a possibility of a
13472d4a7167SIngo Molnar	 * deadlock. Attempt to lock the address space; if we cannot, we then
13482d4a7167SIngo Molnar	 * validate the source. If this is invalid we can skip the address
13492d4a7167SIngo Molnar	 * space check, thus avoiding the deadlock:
1350c61e211dSHarvey Harrison	 */
135192181f19SNick Piggin	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
13521067f030SRicardo Neri		if (!(error_code & X86_PF_USER) &&
135392181f19SNick Piggin		    !search_exception_tables(regs->ip)) {
13547b2d0dbaSDave Hansen			bad_area_nosemaphore(regs, error_code, address, NULL);
135592181f19SNick Piggin			return;
135692181f19SNick Piggin		}
1357d065bd81SMichel Lespinasse retry:
1358c61e211dSHarvey Harrison		down_read(&mm->mmap_sem);
135901006074SPeter Zijlstra	} else {
136001006074SPeter Zijlstra		/*
13612d4a7167SIngo Molnar		 * The above down_read_trylock() might have succeeded in
13622d4a7167SIngo Molnar		 * which case we'll have missed the might_sleep() from
13632d4a7167SIngo Molnar		 * down_read():
136401006074SPeter Zijlstra		 */
136501006074SPeter Zijlstra		might_sleep();
1366c61e211dSHarvey Harrison	}
1367c61e211dSHarvey Harrison
1368c61e211dSHarvey Harrison	vma = find_vma(mm, address);
136992181f19SNick Piggin	if (unlikely(!vma)) {
137092181f19SNick Piggin		bad_area(regs, error_code, address);
137192181f19SNick Piggin		return;
137292181f19SNick Piggin	}
137392181f19SNick Piggin	if (likely(vma->vm_start <= address))
1374c61e211dSHarvey Harrison		goto good_area;
137592181f19SNick Piggin	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
137692181f19SNick Piggin		bad_area(regs, error_code, address);
137792181f19SNick Piggin		return;
137892181f19SNick Piggin	}
13791067f030SRicardo Neri	if (error_code & X86_PF_USER) {
1380c61e211dSHarvey Harrison		/*
1381c61e211dSHarvey Harrison		 * Accessing the stack below %sp is always a bug.
1382c61e211dSHarvey Harrison		 * The large cushion allows instructions like enter
1383c61e211dSHarvey Harrison		 * and pusha to work. ("enter $65535, $31" pushes
1384c61e211dSHarvey Harrison		 * 32 pointers and then decrements %sp by 65535.)
1385c61e211dSHarvey Harrison		 */
138692181f19SNick Piggin		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
138792181f19SNick Piggin			bad_area(regs, error_code, address);
138892181f19SNick Piggin			return;
1389c61e211dSHarvey Harrison		}
139092181f19SNick Piggin	}
139192181f19SNick Piggin	if (unlikely(expand_stack(vma, address))) {
139292181f19SNick Piggin		bad_area(regs, error_code, address);
139392181f19SNick Piggin		return;
139492181f19SNick Piggin	}
139592181f19SNick Piggin
1396c61e211dSHarvey Harrison	/*
1397c61e211dSHarvey Harrison	 * Ok, we have a good vm_area for this memory access, so
1398c61e211dSHarvey Harrison	 * we can handle it.
1399c61e211dSHarvey Harrison	 */
1400c61e211dSHarvey Harrison good_area:
140168da336aSMichel Lespinasse	if (unlikely(access_error(error_code, vma))) {
14027b2d0dbaSDave Hansen		bad_area_access_error(regs, error_code, address, vma);
140392181f19SNick Piggin		return;
1404c61e211dSHarvey Harrison	}
1405c61e211dSHarvey Harrison
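	/*
	 * Editor's worked example for the stack cushion above: on
	 * 64-bit, 65536 + 32 * sizeof(unsigned long) = 65536 + 256 =
	 * 65792 bytes. A user fault more than 65792 bytes below the
	 * saved %sp is rejected as a bug, while "enter $65535, $31"
	 * (32 pushes plus a 65535-byte %sp decrement) still lands
	 * within the cushion and is allowed to grow the stack.
	 */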
1406c61e211dSHarvey Harrison	/*
1407c61e211dSHarvey Harrison	 * If for any reason at all we couldn't handle the fault,
1408c61e211dSHarvey Harrison	 * make sure we exit gracefully rather than endlessly redo
14099a95f3cfSPaul Cassella	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
14109a95f3cfSPaul Cassella	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1411cb0631fdSVlastimil Babka	 *
1412cb0631fdSVlastimil Babka	 * Note that handle_userfault() may also release and reacquire mmap_sem
1413cb0631fdSVlastimil Babka	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1414cb0631fdSVlastimil Babka	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1415cb0631fdSVlastimil Babka	 * (potentially after handling any pending signal during the return to
1416cb0631fdSVlastimil Babka	 * userland). The return to userland is identified whenever
1417cb0631fdSVlastimil Babka	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1418cb0631fdSVlastimil Babka	 * Thus we have to be careful about not touching vma after handling the
1419cb0631fdSVlastimil Babka	 * fault, so we read the pkey beforehand.
1420c61e211dSHarvey Harrison	 */
1421cb0631fdSVlastimil Babka	pkey = vma_pkey(vma);
1422dcddffd4SKirill A. Shutemov	fault = handle_mm_fault(vma, address, flags);
142326178ec1SLinus Torvalds	major |= fault & VM_FAULT_MAJOR;
14242d4a7167SIngo Molnar
14253a13c4d7SJohannes Weiner	/*
142626178ec1SLinus Torvalds	 * If we need to retry, the mmap_sem has already been released,
142726178ec1SLinus Torvalds	 * and if there is a fatal signal pending there is no guarantee
142826178ec1SLinus Torvalds	 * that we made any progress. Handle this case first.
14293a13c4d7SJohannes Weiner	 */
143026178ec1SLinus Torvalds	if (unlikely(fault & VM_FAULT_RETRY)) {
143126178ec1SLinus Torvalds		/* Retry at most once */
143226178ec1SLinus Torvalds		if (flags & FAULT_FLAG_ALLOW_RETRY) {
143326178ec1SLinus Torvalds			flags &= ~FAULT_FLAG_ALLOW_RETRY;
143426178ec1SLinus Torvalds			flags |= FAULT_FLAG_TRIED;
143526178ec1SLinus Torvalds			if (!fatal_signal_pending(tsk))
143626178ec1SLinus Torvalds				goto retry;
143726178ec1SLinus Torvalds		}
143826178ec1SLinus Torvalds
143926178ec1SLinus Torvalds		/* User mode? Just return to handle the fatal exception */
1440cf3c0a15SLinus Torvalds		if (flags & FAULT_FLAG_USER)
14413a13c4d7SJohannes Weiner			return;
14423a13c4d7SJohannes Weiner
144326178ec1SLinus Torvalds		/* Not returning to user mode? Handle exceptions or die: */
144426178ec1SLinus Torvalds		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
144526178ec1SLinus Torvalds		return;
144626178ec1SLinus Torvalds	}
144726178ec1SLinus Torvalds
14487fb08ecaSLinus Torvalds	up_read(&mm->mmap_sem);
144926178ec1SLinus Torvalds	if (unlikely(fault & VM_FAULT_ERROR)) {
1450a3c4fb7cSLaurent Dufour		mm_fault_error(regs, error_code, address, &pkey, fault);
145137b23e05SKOSAKI Motohiro		return;
145237b23e05SKOSAKI Motohiro	}
145337b23e05SKOSAKI Motohiro
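	/*
	 * Editor's worked example of the retry logic above: the first
	 * VM_FAULT_RETRY clears FAULT_FLAG_ALLOW_RETRY and sets
	 * FAULT_FLAG_TRIED before jumping back to "retry:", so a second
	 * VM_FAULT_RETRY cannot loop again; at most one retry ever
	 * happens, and handle_mm_fault() has already dropped mmap_sem
	 * whenever VM_FAULT_RETRY is returned.
	 */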
145437b23e05SKOSAKI Motohiro	/*
145526178ec1SLinus Torvalds	 * Major/minor page fault accounting. If any of the events
145626178ec1SLinus Torvalds	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1457d065bd81SMichel Lespinasse	 */
145826178ec1SLinus Torvalds	if (major) {
1459c61e211dSHarvey Harrison		tsk->maj_flt++;
146026178ec1SLinus Torvalds		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1461ac17dc8eSPeter Zijlstra	} else {
1462c61e211dSHarvey Harrison		tsk->min_flt++;
146326178ec1SLinus Torvalds		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1464d065bd81SMichel Lespinasse	}
1465c61e211dSHarvey Harrison
14668c938f9fSIngo Molnar	check_v8086_mode(regs, address, tsk);
1467c61e211dSHarvey Harrison }
14689326638cSMasami Hiramatsu NOKPROBE_SYMBOL(__do_page_fault);
14696ba3c97aSFrederic Weisbecker
14709326638cSMasami Hiramatsu static nokprobe_inline void
14719326638cSMasami Hiramatsu trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1472d34603b0SSeiji Aguchi			 unsigned long error_code)
1473d34603b0SSeiji Aguchi {
1474d34603b0SSeiji Aguchi	if (user_mode(regs))
1475d4078e23SPeter Zijlstra		trace_page_fault_user(address, regs, error_code);
1476d34603b0SSeiji Aguchi	else
1477d4078e23SPeter Zijlstra		trace_page_fault_kernel(address, regs, error_code);
1478d34603b0SSeiji Aguchi }
1479d34603b0SSeiji Aguchi
14800ac09f9fSJiri Olsa /*
148111a7ffb0SThomas Gleixner  * We must have this function blacklisted from kprobes, tagged with notrace,
148211a7ffb0SThomas Gleixner  * and call read_cr2() before calling anything else, to avoid calling any
148311a7ffb0SThomas Gleixner  * kind of tracing machinery before we've observed the CR2 value.
148411a7ffb0SThomas Gleixner  *
148511a7ffb0SThomas Gleixner  * exception_{enter,exit}() contain all sorts of tracepoints.
14860ac09f9fSJiri Olsa  */
148711a7ffb0SThomas Gleixner dotraplinkage void notrace
148811a7ffb0SThomas Gleixner do_page_fault(struct pt_regs *regs, unsigned long error_code)
148911a7ffb0SThomas Gleixner {
149011a7ffb0SThomas Gleixner	unsigned long address = read_cr2(); /* Get the faulting address */
1491d4078e23SPeter Zijlstra	enum ctx_state prev_state;
149225c74b10SSeiji Aguchi
149325c74b10SSeiji Aguchi	prev_state = exception_enter();
149480954747SThomas Gleixner	if (trace_pagefault_enabled())
1495d4078e23SPeter Zijlstra		trace_page_fault_entries(address, regs, error_code);
149611a7ffb0SThomas Gleixner
14970ac09f9fSJiri Olsa	__do_page_fault(regs, error_code, address);
149825c74b10SSeiji Aguchi	exception_exit(prev_state);
149925c74b10SSeiji Aguchi }
150011a7ffb0SThomas Gleixner NOKPROBE_SYMBOL(do_page_fault);
1501