1c61e211dSHarvey Harrison /* 2c61e211dSHarvey Harrison * Copyright (C) 1995 Linus Torvalds 3c61e211dSHarvey Harrison * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. 4f8eeb2e6SIngo Molnar * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar 5c61e211dSHarvey Harrison */ 6a2bcd473SIngo Molnar #include <linux/sched.h> /* test_thread_flag(), ... */ 7a2bcd473SIngo Molnar #include <linux/kdebug.h> /* oops_begin/end, ... */ 8a2bcd473SIngo Molnar #include <linux/module.h> /* search_exception_table */ 9a2bcd473SIngo Molnar #include <linux/bootmem.h> /* max_low_pfn */ 109326638cSMasami Hiramatsu #include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */ 11a2bcd473SIngo Molnar #include <linux/mmiotrace.h> /* kmmio_handler, ... */ 12cdd6c482SIngo Molnar #include <linux/perf_event.h> /* perf_sw_event */ 13f672b49bSAndi Kleen #include <linux/hugetlb.h> /* hstate_index_to_shift */ 14268bb0ceSLinus Torvalds #include <linux/prefetch.h> /* prefetchw */ 1556dd9470SFrederic Weisbecker #include <linux/context_tracking.h> /* exception_enter(), ... */ 1670ffdb93SDavid Hildenbrand #include <linux/uaccess.h> /* faulthandler_disabled() */ 17c61e211dSHarvey Harrison 18019132ffSDave Hansen #include <asm/cpufeature.h> /* boot_cpu_has, ... */ 19a2bcd473SIngo Molnar #include <asm/traps.h> /* dotraplinkage, ... */ 20a2bcd473SIngo Molnar #include <asm/pgalloc.h> /* pgd_*(), ... */ 21f8561296SVegard Nossum #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ 22f40c3300SAndy Lutomirski #include <asm/fixmap.h> /* VSYSCALL_ADDR */ 23f40c3300SAndy Lutomirski #include <asm/vsyscall.h> /* emulate_vsyscall */ 24ba3e127eSBrian Gerst #include <asm/vm86.h> /* struct vm86 */ 25019132ffSDave Hansen #include <asm/mmu_context.h> /* vma_pkey() */ 26c61e211dSHarvey Harrison 27d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS 28d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h> 29d34603b0SSeiji Aguchi 30c61e211dSHarvey Harrison /* 312d4a7167SIngo Molnar * Page fault error code bits: 322d4a7167SIngo Molnar * 332d4a7167SIngo Molnar * bit 0 == 0: no page found 1: protection fault 342d4a7167SIngo Molnar * bit 1 == 0: read access 1: write access 352d4a7167SIngo Molnar * bit 2 == 0: kernel-mode access 1: user-mode access 362d4a7167SIngo Molnar * bit 3 == 1: use of reserved bit detected 372d4a7167SIngo Molnar * bit 4 == 1: fault was an instruction fetch 38b3ecd515SDave Hansen * bit 5 == 1: protection keys block access 39c61e211dSHarvey Harrison */ 402d4a7167SIngo Molnar enum x86_pf_error_code { 412d4a7167SIngo Molnar 422d4a7167SIngo Molnar PF_PROT = 1 << 0, 432d4a7167SIngo Molnar PF_WRITE = 1 << 1, 442d4a7167SIngo Molnar PF_USER = 1 << 2, 452d4a7167SIngo Molnar PF_RSVD = 1 << 3, 462d4a7167SIngo Molnar PF_INSTR = 1 << 4, 47b3ecd515SDave Hansen PF_PK = 1 << 5, 482d4a7167SIngo Molnar }; 49c61e211dSHarvey Harrison 50b814d41fSIngo Molnar /* 51b319eed0SIngo Molnar * Returns 0 if mmiotrace is disabled, or if the fault is not 52b319eed0SIngo Molnar * handled by mmiotrace: 53b814d41fSIngo Molnar */ 549326638cSMasami Hiramatsu static nokprobe_inline int 5562c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr) 5686069782SPekka Paalanen { 570fd0e3daSPekka Paalanen if (unlikely(is_kmmio_active())) 580fd0e3daSPekka Paalanen if (kmmio_handler(regs, addr) == 1) 590fd0e3daSPekka Paalanen return -1; 600fd0e3daSPekka Paalanen return 0; 6186069782SPekka Paalanen } 6286069782SPekka Paalanen 639326638cSMasami Hiramatsu static nokprobe_inline int kprobes_fault(struct pt_regs *regs) 64c61e211dSHarvey Harrison { 65c61e211dSHarvey Harrison int ret = 0; 66c61e211dSHarvey Harrison 67c61e211dSHarvey Harrison /* kprobe_running() needs smp_processor_id() */ 68f39b6f0eSAndy Lutomirski if (kprobes_built_in() && !user_mode(regs)) { 69c61e211dSHarvey Harrison preempt_disable(); 70c61e211dSHarvey Harrison if (kprobe_running() && kprobe_fault_handler(regs, 14)) 71c61e211dSHarvey Harrison ret = 1; 72c61e211dSHarvey Harrison preempt_enable(); 73c61e211dSHarvey Harrison } 74c61e211dSHarvey Harrison 75c61e211dSHarvey Harrison return ret; 76c61e211dSHarvey Harrison } 77c61e211dSHarvey Harrison 78c61e211dSHarvey Harrison /* 792d4a7167SIngo Molnar * Prefetch quirks: 802d4a7167SIngo Molnar * 812d4a7167SIngo Molnar * 32-bit mode: 822d4a7167SIngo Molnar * 83c61e211dSHarvey Harrison * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. 84c61e211dSHarvey Harrison * Check that here and ignore it. 85c61e211dSHarvey Harrison * 862d4a7167SIngo Molnar * 64-bit mode: 872d4a7167SIngo Molnar * 88c61e211dSHarvey Harrison * Sometimes the CPU reports invalid exceptions on prefetch. 89c61e211dSHarvey Harrison * Check that here and ignore it. 90c61e211dSHarvey Harrison * 912d4a7167SIngo Molnar * Opcode checker based on code by Richard Brunner. 92c61e211dSHarvey Harrison */ 93107a0367SIngo Molnar static inline int 94107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, 95107a0367SIngo Molnar unsigned char opcode, int *prefetch) 96c61e211dSHarvey Harrison { 97107a0367SIngo Molnar unsigned char instr_hi = opcode & 0xf0; 98107a0367SIngo Molnar unsigned char instr_lo = opcode & 0x0f; 99c61e211dSHarvey Harrison 100c61e211dSHarvey Harrison switch (instr_hi) { 101c61e211dSHarvey Harrison case 0x20: 102c61e211dSHarvey Harrison case 0x30: 103c61e211dSHarvey Harrison /* 104c61e211dSHarvey Harrison * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. 105c61e211dSHarvey Harrison * In X86_64 long mode, the CPU will signal invalid 106c61e211dSHarvey Harrison * opcode if some of these prefixes are present so 107c61e211dSHarvey Harrison * X86_64 will never get here anyway 108c61e211dSHarvey Harrison */ 109107a0367SIngo Molnar return ((instr_lo & 7) == 0x6); 110c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 111c61e211dSHarvey Harrison case 0x40: 112c61e211dSHarvey Harrison /* 113c61e211dSHarvey Harrison * In AMD64 long mode 0x40..0x4F are valid REX prefixes 114c61e211dSHarvey Harrison * Need to figure out under what instruction mode the 115c61e211dSHarvey Harrison * instruction was issued. Could check the LDT for lm, 116c61e211dSHarvey Harrison * but for now it's good enough to assume that long 117c61e211dSHarvey Harrison * mode only uses well known segments or kernel. 118c61e211dSHarvey Harrison */ 119318f5a2aSAndy Lutomirski return (!user_mode(regs) || user_64bit_mode(regs)); 120c61e211dSHarvey Harrison #endif 121c61e211dSHarvey Harrison case 0x60: 122c61e211dSHarvey Harrison /* 0x64 thru 0x67 are valid prefixes in all modes. */ 123107a0367SIngo Molnar return (instr_lo & 0xC) == 0x4; 124c61e211dSHarvey Harrison case 0xF0: 125c61e211dSHarvey Harrison /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ 126107a0367SIngo Molnar return !instr_lo || (instr_lo>>1) == 1; 127c61e211dSHarvey Harrison case 0x00: 128c61e211dSHarvey Harrison /* Prefetch instruction is 0x0F0D or 0x0F18 */ 129107a0367SIngo Molnar if (probe_kernel_address(instr, opcode)) 130107a0367SIngo Molnar return 0; 131107a0367SIngo Molnar 132107a0367SIngo Molnar *prefetch = (instr_lo == 0xF) && 133107a0367SIngo Molnar (opcode == 0x0D || opcode == 0x18); 134107a0367SIngo Molnar return 0; 135107a0367SIngo Molnar default: 136107a0367SIngo Molnar return 0; 137107a0367SIngo Molnar } 138107a0367SIngo Molnar } 139107a0367SIngo Molnar 140107a0367SIngo Molnar static int 141107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) 142107a0367SIngo Molnar { 143107a0367SIngo Molnar unsigned char *max_instr; 144107a0367SIngo Molnar unsigned char *instr; 145107a0367SIngo Molnar int prefetch = 0; 146107a0367SIngo Molnar 147107a0367SIngo Molnar /* 148107a0367SIngo Molnar * If it was a exec (instruction fetch) fault on NX page, then 149107a0367SIngo Molnar * do not ignore the fault: 150107a0367SIngo Molnar */ 151107a0367SIngo Molnar if (error_code & PF_INSTR) 152107a0367SIngo Molnar return 0; 153107a0367SIngo Molnar 154107a0367SIngo Molnar instr = (void *)convert_ip_to_linear(current, regs); 155107a0367SIngo Molnar max_instr = instr + 15; 156107a0367SIngo Molnar 157d31bf07fSAndy Lutomirski if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX) 158107a0367SIngo Molnar return 0; 159107a0367SIngo Molnar 160107a0367SIngo Molnar while (instr < max_instr) { 161107a0367SIngo Molnar unsigned char opcode; 162c61e211dSHarvey Harrison 163c61e211dSHarvey Harrison if (probe_kernel_address(instr, opcode)) 164c61e211dSHarvey Harrison break; 165107a0367SIngo Molnar 166107a0367SIngo Molnar instr++; 167107a0367SIngo Molnar 168107a0367SIngo Molnar if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) 169c61e211dSHarvey Harrison break; 170c61e211dSHarvey Harrison } 171c61e211dSHarvey Harrison return prefetch; 172c61e211dSHarvey Harrison } 173c61e211dSHarvey Harrison 174019132ffSDave Hansen /* 175019132ffSDave Hansen * A protection key fault means that the PKRU value did not allow 176019132ffSDave Hansen * access to some PTE. Userspace can figure out what PKRU was 177019132ffSDave Hansen * from the XSAVE state, and this function fills out a field in 178019132ffSDave Hansen * siginfo so userspace can discover which protection key was set 179019132ffSDave Hansen * on the PTE. 180019132ffSDave Hansen * 181019132ffSDave Hansen * If we get here, we know that the hardware signaled a PF_PK 182019132ffSDave Hansen * fault and that there was a VMA once we got in the fault 183019132ffSDave Hansen * handler. It does *not* guarantee that the VMA we find here 184019132ffSDave Hansen * was the one that we faulted on. 185019132ffSDave Hansen * 186019132ffSDave Hansen * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); 187019132ffSDave Hansen * 2. T1 : set PKRU to deny access to pkey=4, touches page 188019132ffSDave Hansen * 3. T1 : faults... 189019132ffSDave Hansen * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); 190019132ffSDave Hansen * 5. T1 : enters fault handler, takes mmap_sem, etc... 191019132ffSDave Hansen * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really 192019132ffSDave Hansen * faulted on a pte with its pkey=4. 193019132ffSDave Hansen */ 194019132ffSDave Hansen static void fill_sig_info_pkey(int si_code, siginfo_t *info, 195019132ffSDave Hansen struct vm_area_struct *vma) 196019132ffSDave Hansen { 197019132ffSDave Hansen /* This is effectively an #ifdef */ 198019132ffSDave Hansen if (!boot_cpu_has(X86_FEATURE_OSPKE)) 199019132ffSDave Hansen return; 200019132ffSDave Hansen 201019132ffSDave Hansen /* Fault not from Protection Keys: nothing to do */ 202019132ffSDave Hansen if (si_code != SEGV_PKUERR) 203019132ffSDave Hansen return; 204019132ffSDave Hansen /* 205019132ffSDave Hansen * force_sig_info_fault() is called from a number of 206019132ffSDave Hansen * contexts, some of which have a VMA and some of which 207019132ffSDave Hansen * do not. The PF_PK handing happens after we have a 208019132ffSDave Hansen * valid VMA, so we should never reach this without a 209019132ffSDave Hansen * valid VMA. 210019132ffSDave Hansen */ 211019132ffSDave Hansen if (!vma) { 212019132ffSDave Hansen WARN_ONCE(1, "PKU fault with no VMA passed in"); 213019132ffSDave Hansen info->si_pkey = 0; 214019132ffSDave Hansen return; 215019132ffSDave Hansen } 216019132ffSDave Hansen /* 217019132ffSDave Hansen * si_pkey should be thought of as a strong hint, but not 218019132ffSDave Hansen * absolutely guranteed to be 100% accurate because of 219019132ffSDave Hansen * the race explained above. 220019132ffSDave Hansen */ 221019132ffSDave Hansen info->si_pkey = vma_pkey(vma); 222019132ffSDave Hansen } 223019132ffSDave Hansen 2242d4a7167SIngo Molnar static void 2252d4a7167SIngo Molnar force_sig_info_fault(int si_signo, int si_code, unsigned long address, 2267b2d0dbaSDave Hansen struct task_struct *tsk, struct vm_area_struct *vma, 2277b2d0dbaSDave Hansen int fault) 228c61e211dSHarvey Harrison { 229f672b49bSAndi Kleen unsigned lsb = 0; 230c61e211dSHarvey Harrison siginfo_t info; 231c61e211dSHarvey Harrison 232c61e211dSHarvey Harrison info.si_signo = si_signo; 233c61e211dSHarvey Harrison info.si_errno = 0; 234c61e211dSHarvey Harrison info.si_code = si_code; 235c61e211dSHarvey Harrison info.si_addr = (void __user *)address; 236f672b49bSAndi Kleen if (fault & VM_FAULT_HWPOISON_LARGE) 237f672b49bSAndi Kleen lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); 238f672b49bSAndi Kleen if (fault & VM_FAULT_HWPOISON) 239f672b49bSAndi Kleen lsb = PAGE_SHIFT; 240f672b49bSAndi Kleen info.si_addr_lsb = lsb; 2412d4a7167SIngo Molnar 242019132ffSDave Hansen fill_sig_info_pkey(si_code, &info, vma); 243019132ffSDave Hansen 244c61e211dSHarvey Harrison force_sig_info(si_signo, &info, tsk); 245c61e211dSHarvey Harrison } 246c61e211dSHarvey Harrison 247f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock); 248f2f13a85SIngo Molnar LIST_HEAD(pgd_list); 2492d4a7167SIngo Molnar 250f2f13a85SIngo Molnar #ifdef CONFIG_X86_32 251f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) 252f2f13a85SIngo Molnar { 253f2f13a85SIngo Molnar unsigned index = pgd_index(address); 254f2f13a85SIngo Molnar pgd_t *pgd_k; 255f2f13a85SIngo Molnar pud_t *pud, *pud_k; 256f2f13a85SIngo Molnar pmd_t *pmd, *pmd_k; 257f2f13a85SIngo Molnar 258f2f13a85SIngo Molnar pgd += index; 259f2f13a85SIngo Molnar pgd_k = init_mm.pgd + index; 260f2f13a85SIngo Molnar 261f2f13a85SIngo Molnar if (!pgd_present(*pgd_k)) 262f2f13a85SIngo Molnar return NULL; 263f2f13a85SIngo Molnar 264f2f13a85SIngo Molnar /* 265f2f13a85SIngo Molnar * set_pgd(pgd, *pgd_k); here would be useless on PAE 266f2f13a85SIngo Molnar * and redundant with the set_pmd() on non-PAE. As would 267f2f13a85SIngo Molnar * set_pud. 268f2f13a85SIngo Molnar */ 269f2f13a85SIngo Molnar pud = pud_offset(pgd, address); 270f2f13a85SIngo Molnar pud_k = pud_offset(pgd_k, address); 271f2f13a85SIngo Molnar if (!pud_present(*pud_k)) 272f2f13a85SIngo Molnar return NULL; 273f2f13a85SIngo Molnar 274f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 275f2f13a85SIngo Molnar pmd_k = pmd_offset(pud_k, address); 276f2f13a85SIngo Molnar if (!pmd_present(*pmd_k)) 277f2f13a85SIngo Molnar return NULL; 278f2f13a85SIngo Molnar 279b8bcfe99SJeremy Fitzhardinge if (!pmd_present(*pmd)) 280f2f13a85SIngo Molnar set_pmd(pmd, *pmd_k); 281b8bcfe99SJeremy Fitzhardinge else 282f2f13a85SIngo Molnar BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); 283f2f13a85SIngo Molnar 284f2f13a85SIngo Molnar return pmd_k; 285f2f13a85SIngo Molnar } 286f2f13a85SIngo Molnar 287f2f13a85SIngo Molnar void vmalloc_sync_all(void) 288f2f13a85SIngo Molnar { 289f2f13a85SIngo Molnar unsigned long address; 290f2f13a85SIngo Molnar 291f2f13a85SIngo Molnar if (SHARED_KERNEL_PMD) 292f2f13a85SIngo Molnar return; 293f2f13a85SIngo Molnar 294f2f13a85SIngo Molnar for (address = VMALLOC_START & PMD_MASK; 295f2f13a85SIngo Molnar address >= TASK_SIZE && address < FIXADDR_TOP; 296f2f13a85SIngo Molnar address += PMD_SIZE) { 297f2f13a85SIngo Molnar struct page *page; 298f2f13a85SIngo Molnar 299a79e53d8SAndrea Arcangeli spin_lock(&pgd_lock); 300f2f13a85SIngo Molnar list_for_each_entry(page, &pgd_list, lru) { 301617d34d9SJeremy Fitzhardinge spinlock_t *pgt_lock; 302f01f7c56SBorislav Petkov pmd_t *ret; 303617d34d9SJeremy Fitzhardinge 304a79e53d8SAndrea Arcangeli /* the pgt_lock only for Xen */ 305617d34d9SJeremy Fitzhardinge pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 306617d34d9SJeremy Fitzhardinge 307617d34d9SJeremy Fitzhardinge spin_lock(pgt_lock); 308617d34d9SJeremy Fitzhardinge ret = vmalloc_sync_one(page_address(page), address); 309617d34d9SJeremy Fitzhardinge spin_unlock(pgt_lock); 310617d34d9SJeremy Fitzhardinge 311617d34d9SJeremy Fitzhardinge if (!ret) 312f2f13a85SIngo Molnar break; 313f2f13a85SIngo Molnar } 314a79e53d8SAndrea Arcangeli spin_unlock(&pgd_lock); 315f2f13a85SIngo Molnar } 316f2f13a85SIngo Molnar } 317f2f13a85SIngo Molnar 318f2f13a85SIngo Molnar /* 319f2f13a85SIngo Molnar * 32-bit: 320f2f13a85SIngo Molnar * 321f2f13a85SIngo Molnar * Handle a fault on the vmalloc or module mapping area 322f2f13a85SIngo Molnar */ 3239326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address) 324f2f13a85SIngo Molnar { 325f2f13a85SIngo Molnar unsigned long pgd_paddr; 326f2f13a85SIngo Molnar pmd_t *pmd_k; 327f2f13a85SIngo Molnar pte_t *pte_k; 328f2f13a85SIngo Molnar 329f2f13a85SIngo Molnar /* Make sure we are in vmalloc area: */ 330f2f13a85SIngo Molnar if (!(address >= VMALLOC_START && address < VMALLOC_END)) 331f2f13a85SIngo Molnar return -1; 332f2f13a85SIngo Molnar 333ebc8827fSFrederic Weisbecker WARN_ON_ONCE(in_nmi()); 334ebc8827fSFrederic Weisbecker 335f2f13a85SIngo Molnar /* 336f2f13a85SIngo Molnar * Synchronize this task's top level page-table 337f2f13a85SIngo Molnar * with the 'reference' page table. 338f2f13a85SIngo Molnar * 339f2f13a85SIngo Molnar * Do _not_ use "current" here. We might be inside 340f2f13a85SIngo Molnar * an interrupt in the middle of a task switch.. 341f2f13a85SIngo Molnar */ 342f2f13a85SIngo Molnar pgd_paddr = read_cr3(); 343f2f13a85SIngo Molnar pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); 344f2f13a85SIngo Molnar if (!pmd_k) 345f2f13a85SIngo Molnar return -1; 346f2f13a85SIngo Molnar 347f2f13a85SIngo Molnar pte_k = pte_offset_kernel(pmd_k, address); 348f2f13a85SIngo Molnar if (!pte_present(*pte_k)) 349f2f13a85SIngo Molnar return -1; 350f2f13a85SIngo Molnar 351f2f13a85SIngo Molnar return 0; 352f2f13a85SIngo Molnar } 3539326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault); 354f2f13a85SIngo Molnar 355f2f13a85SIngo Molnar /* 356f2f13a85SIngo Molnar * Did it hit the DOS screen memory VA from vm86 mode? 357f2f13a85SIngo Molnar */ 358f2f13a85SIngo Molnar static inline void 359f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 360f2f13a85SIngo Molnar struct task_struct *tsk) 361f2f13a85SIngo Molnar { 3629fda6a06SBrian Gerst #ifdef CONFIG_VM86 363f2f13a85SIngo Molnar unsigned long bit; 364f2f13a85SIngo Molnar 3659fda6a06SBrian Gerst if (!v8086_mode(regs) || !tsk->thread.vm86) 366f2f13a85SIngo Molnar return; 367f2f13a85SIngo Molnar 368f2f13a85SIngo Molnar bit = (address - 0xA0000) >> PAGE_SHIFT; 369f2f13a85SIngo Molnar if (bit < 32) 3709fda6a06SBrian Gerst tsk->thread.vm86->screen_bitmap |= 1 << bit; 3719fda6a06SBrian Gerst #endif 372f2f13a85SIngo Molnar } 373c61e211dSHarvey Harrison 374087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn) 375087975b0SAkinobu Mita { 376087975b0SAkinobu Mita return pfn < max_low_pfn; 377087975b0SAkinobu Mita } 378087975b0SAkinobu Mita 379cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address) 380c61e211dSHarvey Harrison { 381087975b0SAkinobu Mita pgd_t *base = __va(read_cr3()); 382087975b0SAkinobu Mita pgd_t *pgd = &base[pgd_index(address)]; 383087975b0SAkinobu Mita pmd_t *pmd; 384087975b0SAkinobu Mita pte_t *pte; 3852d4a7167SIngo Molnar 386c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE 387087975b0SAkinobu Mita printk("*pdpt = %016Lx ", pgd_val(*pgd)); 388087975b0SAkinobu Mita if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) 389087975b0SAkinobu Mita goto out; 390c61e211dSHarvey Harrison #endif 391087975b0SAkinobu Mita pmd = pmd_offset(pud_offset(pgd, address), address); 392087975b0SAkinobu Mita printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); 393c61e211dSHarvey Harrison 394c61e211dSHarvey Harrison /* 395c61e211dSHarvey Harrison * We must not directly access the pte in the highpte 396c61e211dSHarvey Harrison * case if the page table is located in highmem. 397c61e211dSHarvey Harrison * And let's rather not kmap-atomic the pte, just in case 3982d4a7167SIngo Molnar * it's allocated already: 399c61e211dSHarvey Harrison */ 400087975b0SAkinobu Mita if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) 401087975b0SAkinobu Mita goto out; 4022d4a7167SIngo Molnar 403087975b0SAkinobu Mita pte = pte_offset_kernel(pmd, address); 404087975b0SAkinobu Mita printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); 405087975b0SAkinobu Mita out: 406c61e211dSHarvey Harrison printk("\n"); 407f2f13a85SIngo Molnar } 408f2f13a85SIngo Molnar 409f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */ 410f2f13a85SIngo Molnar 411f2f13a85SIngo Molnar void vmalloc_sync_all(void) 412f2f13a85SIngo Molnar { 4139661d5bcSYasuaki Ishimatsu sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0); 414f2f13a85SIngo Molnar } 415f2f13a85SIngo Molnar 416f2f13a85SIngo Molnar /* 417f2f13a85SIngo Molnar * 64-bit: 418f2f13a85SIngo Molnar * 419f2f13a85SIngo Molnar * Handle a fault on the vmalloc area 420f2f13a85SIngo Molnar * 421f2f13a85SIngo Molnar * This assumes no large pages in there. 422f2f13a85SIngo Molnar */ 4239326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address) 424f2f13a85SIngo Molnar { 425f2f13a85SIngo Molnar pgd_t *pgd, *pgd_ref; 426f2f13a85SIngo Molnar pud_t *pud, *pud_ref; 427f2f13a85SIngo Molnar pmd_t *pmd, *pmd_ref; 428f2f13a85SIngo Molnar pte_t *pte, *pte_ref; 429f2f13a85SIngo Molnar 430f2f13a85SIngo Molnar /* Make sure we are in vmalloc area: */ 431f2f13a85SIngo Molnar if (!(address >= VMALLOC_START && address < VMALLOC_END)) 432f2f13a85SIngo Molnar return -1; 433f2f13a85SIngo Molnar 434ebc8827fSFrederic Weisbecker WARN_ON_ONCE(in_nmi()); 435ebc8827fSFrederic Weisbecker 436f2f13a85SIngo Molnar /* 437f2f13a85SIngo Molnar * Copy kernel mappings over when needed. This can also 438f2f13a85SIngo Molnar * happen within a race in page table update. In the later 439f2f13a85SIngo Molnar * case just flush: 440f2f13a85SIngo Molnar */ 441f2f13a85SIngo Molnar pgd = pgd_offset(current->active_mm, address); 442f2f13a85SIngo Molnar pgd_ref = pgd_offset_k(address); 443f2f13a85SIngo Molnar if (pgd_none(*pgd_ref)) 444f2f13a85SIngo Molnar return -1; 445f2f13a85SIngo Molnar 4461160c277SSamu Kallio if (pgd_none(*pgd)) { 447f2f13a85SIngo Molnar set_pgd(pgd, *pgd_ref); 4481160c277SSamu Kallio arch_flush_lazy_mmu_mode(); 4491160c277SSamu Kallio } else { 450f2f13a85SIngo Molnar BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); 4511160c277SSamu Kallio } 452f2f13a85SIngo Molnar 453f2f13a85SIngo Molnar /* 454f2f13a85SIngo Molnar * Below here mismatches are bugs because these lower tables 455f2f13a85SIngo Molnar * are shared: 456f2f13a85SIngo Molnar */ 457f2f13a85SIngo Molnar 458f2f13a85SIngo Molnar pud = pud_offset(pgd, address); 459f2f13a85SIngo Molnar pud_ref = pud_offset(pgd_ref, address); 460f2f13a85SIngo Molnar if (pud_none(*pud_ref)) 461f2f13a85SIngo Molnar return -1; 462f2f13a85SIngo Molnar 463f2f13a85SIngo Molnar if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) 464f2f13a85SIngo Molnar BUG(); 465f2f13a85SIngo Molnar 466f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 467f2f13a85SIngo Molnar pmd_ref = pmd_offset(pud_ref, address); 468f2f13a85SIngo Molnar if (pmd_none(*pmd_ref)) 469f2f13a85SIngo Molnar return -1; 470f2f13a85SIngo Molnar 471f2f13a85SIngo Molnar if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) 472f2f13a85SIngo Molnar BUG(); 473f2f13a85SIngo Molnar 474f2f13a85SIngo Molnar pte_ref = pte_offset_kernel(pmd_ref, address); 475f2f13a85SIngo Molnar if (!pte_present(*pte_ref)) 476f2f13a85SIngo Molnar return -1; 477f2f13a85SIngo Molnar 478f2f13a85SIngo Molnar pte = pte_offset_kernel(pmd, address); 479f2f13a85SIngo Molnar 480f2f13a85SIngo Molnar /* 481f2f13a85SIngo Molnar * Don't use pte_page here, because the mappings can point 482f2f13a85SIngo Molnar * outside mem_map, and the NUMA hash lookup cannot handle 483f2f13a85SIngo Molnar * that: 484f2f13a85SIngo Molnar */ 485f2f13a85SIngo Molnar if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) 486f2f13a85SIngo Molnar BUG(); 487f2f13a85SIngo Molnar 488f2f13a85SIngo Molnar return 0; 489f2f13a85SIngo Molnar } 4909326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault); 491f2f13a85SIngo Molnar 492e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD 493f2f13a85SIngo Molnar static const char errata93_warning[] = 494ad361c98SJoe Perches KERN_ERR 495ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n" 496ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n" 497ad361c98SJoe Perches "******* Please consider a BIOS update.\n" 498ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n"; 499e05139f2SJan Beulich #endif 500f2f13a85SIngo Molnar 501f2f13a85SIngo Molnar /* 502f2f13a85SIngo Molnar * No vm86 mode in 64-bit mode: 503f2f13a85SIngo Molnar */ 504f2f13a85SIngo Molnar static inline void 505f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 506f2f13a85SIngo Molnar struct task_struct *tsk) 507f2f13a85SIngo Molnar { 508f2f13a85SIngo Molnar } 509f2f13a85SIngo Molnar 510f2f13a85SIngo Molnar static int bad_address(void *p) 511f2f13a85SIngo Molnar { 512f2f13a85SIngo Molnar unsigned long dummy; 513f2f13a85SIngo Molnar 514f2f13a85SIngo Molnar return probe_kernel_address((unsigned long *)p, dummy); 515f2f13a85SIngo Molnar } 516f2f13a85SIngo Molnar 517f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address) 518f2f13a85SIngo Molnar { 519087975b0SAkinobu Mita pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); 520087975b0SAkinobu Mita pgd_t *pgd = base + pgd_index(address); 521c61e211dSHarvey Harrison pud_t *pud; 522c61e211dSHarvey Harrison pmd_t *pmd; 523c61e211dSHarvey Harrison pte_t *pte; 524c61e211dSHarvey Harrison 5252d4a7167SIngo Molnar if (bad_address(pgd)) 5262d4a7167SIngo Molnar goto bad; 5272d4a7167SIngo Molnar 528c61e211dSHarvey Harrison printk("PGD %lx ", pgd_val(*pgd)); 5292d4a7167SIngo Molnar 5302d4a7167SIngo Molnar if (!pgd_present(*pgd)) 5312d4a7167SIngo Molnar goto out; 532c61e211dSHarvey Harrison 533c61e211dSHarvey Harrison pud = pud_offset(pgd, address); 5342d4a7167SIngo Molnar if (bad_address(pud)) 5352d4a7167SIngo Molnar goto bad; 5362d4a7167SIngo Molnar 537c61e211dSHarvey Harrison printk("PUD %lx ", pud_val(*pud)); 538b5360222SAndi Kleen if (!pud_present(*pud) || pud_large(*pud)) 5392d4a7167SIngo Molnar goto out; 540c61e211dSHarvey Harrison 541c61e211dSHarvey Harrison pmd = pmd_offset(pud, address); 5422d4a7167SIngo Molnar if (bad_address(pmd)) 5432d4a7167SIngo Molnar goto bad; 5442d4a7167SIngo Molnar 545c61e211dSHarvey Harrison printk("PMD %lx ", pmd_val(*pmd)); 5462d4a7167SIngo Molnar if (!pmd_present(*pmd) || pmd_large(*pmd)) 5472d4a7167SIngo Molnar goto out; 548c61e211dSHarvey Harrison 549c61e211dSHarvey Harrison pte = pte_offset_kernel(pmd, address); 5502d4a7167SIngo Molnar if (bad_address(pte)) 5512d4a7167SIngo Molnar goto bad; 5522d4a7167SIngo Molnar 553c61e211dSHarvey Harrison printk("PTE %lx", pte_val(*pte)); 5542d4a7167SIngo Molnar out: 555c61e211dSHarvey Harrison printk("\n"); 556c61e211dSHarvey Harrison return; 557c61e211dSHarvey Harrison bad: 558c61e211dSHarvey Harrison printk("BAD\n"); 559c61e211dSHarvey Harrison } 560c61e211dSHarvey Harrison 561f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */ 562c61e211dSHarvey Harrison 5632d4a7167SIngo Molnar /* 5642d4a7167SIngo Molnar * Workaround for K8 erratum #93 & buggy BIOS. 5652d4a7167SIngo Molnar * 5662d4a7167SIngo Molnar * BIOS SMM functions are required to use a specific workaround 5672d4a7167SIngo Molnar * to avoid corruption of the 64bit RIP register on C stepping K8. 5682d4a7167SIngo Molnar * 5692d4a7167SIngo Molnar * A lot of BIOS that didn't get tested properly miss this. 5702d4a7167SIngo Molnar * 5712d4a7167SIngo Molnar * The OS sees this as a page fault with the upper 32bits of RIP cleared. 5722d4a7167SIngo Molnar * Try to work around it here. 5732d4a7167SIngo Molnar * 5742d4a7167SIngo Molnar * Note we only handle faults in kernel here. 5752d4a7167SIngo Molnar * Does nothing on 32-bit. 576c61e211dSHarvey Harrison */ 577c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address) 578c61e211dSHarvey Harrison { 579e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD) 580e05139f2SJan Beulich if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD 581e05139f2SJan Beulich || boot_cpu_data.x86 != 0xf) 582e05139f2SJan Beulich return 0; 583e05139f2SJan Beulich 584c61e211dSHarvey Harrison if (address != regs->ip) 585c61e211dSHarvey Harrison return 0; 5862d4a7167SIngo Molnar 587c61e211dSHarvey Harrison if ((address >> 32) != 0) 588c61e211dSHarvey Harrison return 0; 5892d4a7167SIngo Molnar 590c61e211dSHarvey Harrison address |= 0xffffffffUL << 32; 591c61e211dSHarvey Harrison if ((address >= (u64)_stext && address <= (u64)_etext) || 592c61e211dSHarvey Harrison (address >= MODULES_VADDR && address <= MODULES_END)) { 593a454ab31SIngo Molnar printk_once(errata93_warning); 594c61e211dSHarvey Harrison regs->ip = address; 595c61e211dSHarvey Harrison return 1; 596c61e211dSHarvey Harrison } 597c61e211dSHarvey Harrison #endif 598c61e211dSHarvey Harrison return 0; 599c61e211dSHarvey Harrison } 600c61e211dSHarvey Harrison 601c61e211dSHarvey Harrison /* 6022d4a7167SIngo Molnar * Work around K8 erratum #100 K8 in compat mode occasionally jumps 6032d4a7167SIngo Molnar * to illegal addresses >4GB. 6042d4a7167SIngo Molnar * 6052d4a7167SIngo Molnar * We catch this in the page fault handler because these addresses 6062d4a7167SIngo Molnar * are not reachable. Just detect this case and return. Any code 607c61e211dSHarvey Harrison * segment in LDT is compatibility mode. 608c61e211dSHarvey Harrison */ 609c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address) 610c61e211dSHarvey Harrison { 611c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 6122d4a7167SIngo Molnar if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) 613c61e211dSHarvey Harrison return 1; 614c61e211dSHarvey Harrison #endif 615c61e211dSHarvey Harrison return 0; 616c61e211dSHarvey Harrison } 617c61e211dSHarvey Harrison 618c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address) 619c61e211dSHarvey Harrison { 620c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG 621c61e211dSHarvey Harrison unsigned long nr; 6222d4a7167SIngo Molnar 623c61e211dSHarvey Harrison /* 6242d4a7167SIngo Molnar * Pentium F0 0F C7 C8 bug workaround: 625c61e211dSHarvey Harrison */ 626e2604b49SBorislav Petkov if (boot_cpu_has_bug(X86_BUG_F00F)) { 627c61e211dSHarvey Harrison nr = (address - idt_descr.address) >> 3; 628c61e211dSHarvey Harrison 629c61e211dSHarvey Harrison if (nr == 6) { 630c61e211dSHarvey Harrison do_invalid_op(regs, 0); 631c61e211dSHarvey Harrison return 1; 632c61e211dSHarvey Harrison } 633c61e211dSHarvey Harrison } 634c61e211dSHarvey Harrison #endif 635c61e211dSHarvey Harrison return 0; 636c61e211dSHarvey Harrison } 637c61e211dSHarvey Harrison 6388f766149SIngo Molnar static const char nx_warning[] = KERN_CRIT 6398f766149SIngo Molnar "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; 640eff50c34SJiri Kosina static const char smep_warning[] = KERN_CRIT 641eff50c34SJiri Kosina "unable to execute userspace code (SMEP?) (uid: %d)\n"; 6428f766149SIngo Molnar 6432d4a7167SIngo Molnar static void 6442d4a7167SIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code, 645c61e211dSHarvey Harrison unsigned long address) 646c61e211dSHarvey Harrison { 647c61e211dSHarvey Harrison if (!oops_may_print()) 648c61e211dSHarvey Harrison return; 649c61e211dSHarvey Harrison 650c61e211dSHarvey Harrison if (error_code & PF_INSTR) { 65193809be8SHarvey Harrison unsigned int level; 652426e34ccSMatt Fleming pgd_t *pgd; 653426e34ccSMatt Fleming pte_t *pte; 6542d4a7167SIngo Molnar 655426e34ccSMatt Fleming pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK); 656426e34ccSMatt Fleming pgd += pgd_index(address); 657426e34ccSMatt Fleming 658426e34ccSMatt Fleming pte = lookup_address_in_pgd(pgd, address, &level); 659c61e211dSHarvey Harrison 6608f766149SIngo Molnar if (pte && pte_present(*pte) && !pte_exec(*pte)) 661078de5f7SEric W. Biederman printk(nx_warning, from_kuid(&init_user_ns, current_uid())); 662eff50c34SJiri Kosina if (pte && pte_present(*pte) && pte_exec(*pte) && 663eff50c34SJiri Kosina (pgd_flags(*pgd) & _PAGE_USER) && 6641e02ce4cSAndy Lutomirski (__read_cr4() & X86_CR4_SMEP)) 665eff50c34SJiri Kosina printk(smep_warning, from_kuid(&init_user_ns, current_uid())); 666c61e211dSHarvey Harrison } 667fd40d6e3SHarvey Harrison 668c61e211dSHarvey Harrison printk(KERN_ALERT "BUG: unable to handle kernel "); 669c61e211dSHarvey Harrison if (address < PAGE_SIZE) 670c61e211dSHarvey Harrison printk(KERN_CONT "NULL pointer dereference"); 671c61e211dSHarvey Harrison else 672c61e211dSHarvey Harrison printk(KERN_CONT "paging request"); 6732d4a7167SIngo Molnar 674f294a8ceSVegard Nossum printk(KERN_CONT " at %p\n", (void *) address); 675c61e211dSHarvey Harrison printk(KERN_ALERT "IP:"); 6765f01c988SJiri Slaby printk_address(regs->ip); 6772d4a7167SIngo Molnar 678c61e211dSHarvey Harrison dump_pagetable(address); 679c61e211dSHarvey Harrison } 680c61e211dSHarvey Harrison 6812d4a7167SIngo Molnar static noinline void 6822d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code, 6832d4a7167SIngo Molnar unsigned long address) 684c61e211dSHarvey Harrison { 6852d4a7167SIngo Molnar struct task_struct *tsk; 6862d4a7167SIngo Molnar unsigned long flags; 6872d4a7167SIngo Molnar int sig; 6882d4a7167SIngo Molnar 6892d4a7167SIngo Molnar flags = oops_begin(); 6902d4a7167SIngo Molnar tsk = current; 6912d4a7167SIngo Molnar sig = SIGKILL; 692c61e211dSHarvey Harrison 693c61e211dSHarvey Harrison printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", 69492181f19SNick Piggin tsk->comm, address); 695c61e211dSHarvey Harrison dump_pagetable(address); 6962d4a7167SIngo Molnar 697c61e211dSHarvey Harrison tsk->thread.cr2 = address; 69851e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 699c61e211dSHarvey Harrison tsk->thread.error_code = error_code; 7002d4a7167SIngo Molnar 701c61e211dSHarvey Harrison if (__die("Bad pagetable", regs, error_code)) 702874d93d1SAlexander van Heukelum sig = 0; 7032d4a7167SIngo Molnar 704874d93d1SAlexander van Heukelum oops_end(flags, regs, sig); 705c61e211dSHarvey Harrison } 706c61e211dSHarvey Harrison 7072d4a7167SIngo Molnar static noinline void 7082d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code, 7094fc34901SAndy Lutomirski unsigned long address, int signal, int si_code) 71092181f19SNick Piggin { 71192181f19SNick Piggin struct task_struct *tsk = current; 71292181f19SNick Piggin unsigned long flags; 71392181f19SNick Piggin int sig; 7147b2d0dbaSDave Hansen /* No context means no VMA to pass down */ 7157b2d0dbaSDave Hansen struct vm_area_struct *vma = NULL; 71692181f19SNick Piggin 71792181f19SNick Piggin /* Are we prepared to handle this kernel fault? */ 7184fc34901SAndy Lutomirski if (fixup_exception(regs)) { 719c026b359SPeter Zijlstra /* 720c026b359SPeter Zijlstra * Any interrupt that takes a fault gets the fixup. This makes 721c026b359SPeter Zijlstra * the below recursive fault logic only apply to a faults from 722c026b359SPeter Zijlstra * task context. 723c026b359SPeter Zijlstra */ 724c026b359SPeter Zijlstra if (in_interrupt()) 725c026b359SPeter Zijlstra return; 726c026b359SPeter Zijlstra 727c026b359SPeter Zijlstra /* 728c026b359SPeter Zijlstra * Per the above we're !in_interrupt(), aka. task context. 729c026b359SPeter Zijlstra * 730c026b359SPeter Zijlstra * In this case we need to make sure we're not recursively 731c026b359SPeter Zijlstra * faulting through the emulate_vsyscall() logic. 732c026b359SPeter Zijlstra */ 7334fc34901SAndy Lutomirski if (current_thread_info()->sig_on_uaccess_error && signal) { 73451e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 7354fc34901SAndy Lutomirski tsk->thread.error_code = error_code | PF_USER; 7364fc34901SAndy Lutomirski tsk->thread.cr2 = address; 7374fc34901SAndy Lutomirski 7384fc34901SAndy Lutomirski /* XXX: hwpoison faults will set the wrong code. */ 7397b2d0dbaSDave Hansen force_sig_info_fault(signal, si_code, address, 7407b2d0dbaSDave Hansen tsk, vma, 0); 7414fc34901SAndy Lutomirski } 742c026b359SPeter Zijlstra 743c026b359SPeter Zijlstra /* 744c026b359SPeter Zijlstra * Barring that, we can do the fixup and be happy. 745c026b359SPeter Zijlstra */ 74692181f19SNick Piggin return; 7474fc34901SAndy Lutomirski } 74892181f19SNick Piggin 74992181f19SNick Piggin /* 7502d4a7167SIngo Molnar * 32-bit: 7512d4a7167SIngo Molnar * 75292181f19SNick Piggin * Valid to do another page fault here, because if this fault 75392181f19SNick Piggin * had been triggered by is_prefetch fixup_exception would have 75492181f19SNick Piggin * handled it. 75592181f19SNick Piggin * 7562d4a7167SIngo Molnar * 64-bit: 7572d4a7167SIngo Molnar * 75892181f19SNick Piggin * Hall of shame of CPU/BIOS bugs. 75992181f19SNick Piggin */ 76092181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 76192181f19SNick Piggin return; 76292181f19SNick Piggin 76392181f19SNick Piggin if (is_errata93(regs, address)) 76492181f19SNick Piggin return; 76592181f19SNick Piggin 76692181f19SNick Piggin /* 76792181f19SNick Piggin * Oops. The kernel tried to access some bad page. We'll have to 7682d4a7167SIngo Molnar * terminate things with extreme prejudice: 76992181f19SNick Piggin */ 77092181f19SNick Piggin flags = oops_begin(); 77192181f19SNick Piggin 77292181f19SNick Piggin show_fault_oops(regs, error_code, address); 77392181f19SNick Piggin 774a70857e4SAaron Tomlin if (task_stack_end_corrupted(tsk)) 775b0f4c4b3SPrarit Bhargava printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 77619803078SIngo Molnar 77792181f19SNick Piggin tsk->thread.cr2 = address; 77851e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 77992181f19SNick Piggin tsk->thread.error_code = error_code; 78092181f19SNick Piggin 78192181f19SNick Piggin sig = SIGKILL; 78292181f19SNick Piggin if (__die("Oops", regs, error_code)) 78392181f19SNick Piggin sig = 0; 7842d4a7167SIngo Molnar 78592181f19SNick Piggin /* Executive summary in case the body of the oops scrolled away */ 786b0f4c4b3SPrarit Bhargava printk(KERN_DEFAULT "CR2: %016lx\n", address); 7872d4a7167SIngo Molnar 78892181f19SNick Piggin oops_end(flags, regs, sig); 78992181f19SNick Piggin } 79092181f19SNick Piggin 7912d4a7167SIngo Molnar /* 7922d4a7167SIngo Molnar * Print out info about fatal segfaults, if the show_unhandled_signals 7932d4a7167SIngo Molnar * sysctl is set: 7942d4a7167SIngo Molnar */ 7952d4a7167SIngo Molnar static inline void 7962d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code, 7972d4a7167SIngo Molnar unsigned long address, struct task_struct *tsk) 7982d4a7167SIngo Molnar { 7992d4a7167SIngo Molnar if (!unhandled_signal(tsk, SIGSEGV)) 8002d4a7167SIngo Molnar return; 8012d4a7167SIngo Molnar 8022d4a7167SIngo Molnar if (!printk_ratelimit()) 8032d4a7167SIngo Molnar return; 8042d4a7167SIngo Molnar 805a1a08d1cSRoland Dreier printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx", 8062d4a7167SIngo Molnar task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, 8072d4a7167SIngo Molnar tsk->comm, task_pid_nr(tsk), address, 8082d4a7167SIngo Molnar (void *)regs->ip, (void *)regs->sp, error_code); 8092d4a7167SIngo Molnar 8102d4a7167SIngo Molnar print_vma_addr(KERN_CONT " in ", regs->ip); 8112d4a7167SIngo Molnar 8122d4a7167SIngo Molnar printk(KERN_CONT "\n"); 8132d4a7167SIngo Molnar } 8142d4a7167SIngo Molnar 8152d4a7167SIngo Molnar static void 8162d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 8177b2d0dbaSDave Hansen unsigned long address, struct vm_area_struct *vma, 8187b2d0dbaSDave Hansen int si_code) 81992181f19SNick Piggin { 82092181f19SNick Piggin struct task_struct *tsk = current; 82192181f19SNick Piggin 82292181f19SNick Piggin /* User mode accesses just cause a SIGSEGV */ 82392181f19SNick Piggin if (error_code & PF_USER) { 82492181f19SNick Piggin /* 8252d4a7167SIngo Molnar * It's possible to have interrupts off here: 82692181f19SNick Piggin */ 82792181f19SNick Piggin local_irq_enable(); 82892181f19SNick Piggin 82992181f19SNick Piggin /* 83092181f19SNick Piggin * Valid to do another page fault here because this one came 8312d4a7167SIngo Molnar * from user space: 83292181f19SNick Piggin */ 83392181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 83492181f19SNick Piggin return; 83592181f19SNick Piggin 83692181f19SNick Piggin if (is_errata100(regs, address)) 83792181f19SNick Piggin return; 83892181f19SNick Piggin 8393ae36655SAndy Lutomirski #ifdef CONFIG_X86_64 8403ae36655SAndy Lutomirski /* 8413ae36655SAndy Lutomirski * Instruction fetch faults in the vsyscall page might need 8423ae36655SAndy Lutomirski * emulation. 8433ae36655SAndy Lutomirski */ 8443ae36655SAndy Lutomirski if (unlikely((error_code & PF_INSTR) && 845f40c3300SAndy Lutomirski ((address & ~0xfff) == VSYSCALL_ADDR))) { 8463ae36655SAndy Lutomirski if (emulate_vsyscall(regs, address)) 8473ae36655SAndy Lutomirski return; 8483ae36655SAndy Lutomirski } 8493ae36655SAndy Lutomirski #endif 850e575a86fSKees Cook /* Kernel addresses are always protection faults: */ 851e575a86fSKees Cook if (address >= TASK_SIZE) 852e575a86fSKees Cook error_code |= PF_PROT; 8533ae36655SAndy Lutomirski 854e575a86fSKees Cook if (likely(show_unhandled_signals)) 8552d4a7167SIngo Molnar show_signal_msg(regs, error_code, address, tsk); 85692181f19SNick Piggin 85792181f19SNick Piggin tsk->thread.cr2 = address; 858e575a86fSKees Cook tsk->thread.error_code = error_code; 85951e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 8602d4a7167SIngo Molnar 8617b2d0dbaSDave Hansen force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0); 8622d4a7167SIngo Molnar 86392181f19SNick Piggin return; 86492181f19SNick Piggin } 86592181f19SNick Piggin 86692181f19SNick Piggin if (is_f00f_bug(regs, address)) 86792181f19SNick Piggin return; 86892181f19SNick Piggin 8694fc34901SAndy Lutomirski no_context(regs, error_code, address, SIGSEGV, si_code); 87092181f19SNick Piggin } 87192181f19SNick Piggin 8722d4a7167SIngo Molnar static noinline void 8732d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 8747b2d0dbaSDave Hansen unsigned long address, struct vm_area_struct *vma) 87592181f19SNick Piggin { 8767b2d0dbaSDave Hansen __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR); 87792181f19SNick Piggin } 87892181f19SNick Piggin 8792d4a7167SIngo Molnar static void 8802d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code, 8817b2d0dbaSDave Hansen unsigned long address, struct vm_area_struct *vma, int si_code) 88292181f19SNick Piggin { 88392181f19SNick Piggin struct mm_struct *mm = current->mm; 88492181f19SNick Piggin 88592181f19SNick Piggin /* 88692181f19SNick Piggin * Something tried to access memory that isn't in our memory map.. 88792181f19SNick Piggin * Fix it, but check if it's kernel or user first.. 88892181f19SNick Piggin */ 88992181f19SNick Piggin up_read(&mm->mmap_sem); 89092181f19SNick Piggin 8917b2d0dbaSDave Hansen __bad_area_nosemaphore(regs, error_code, address, vma, si_code); 89292181f19SNick Piggin } 89392181f19SNick Piggin 8942d4a7167SIngo Molnar static noinline void 8952d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) 89692181f19SNick Piggin { 8977b2d0dbaSDave Hansen __bad_area(regs, error_code, address, NULL, SEGV_MAPERR); 89892181f19SNick Piggin } 89992181f19SNick Piggin 900*33a709b2SDave Hansen static inline bool bad_area_access_from_pkeys(unsigned long error_code, 901*33a709b2SDave Hansen struct vm_area_struct *vma) 902*33a709b2SDave Hansen { 903*33a709b2SDave Hansen if (!boot_cpu_has(X86_FEATURE_OSPKE)) 904*33a709b2SDave Hansen return false; 905*33a709b2SDave Hansen if (error_code & PF_PK) 906*33a709b2SDave Hansen return true; 907*33a709b2SDave Hansen return false; 908*33a709b2SDave Hansen } 909*33a709b2SDave Hansen 9102d4a7167SIngo Molnar static noinline void 9112d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code, 9127b2d0dbaSDave Hansen unsigned long address, struct vm_area_struct *vma) 91392181f19SNick Piggin { 914019132ffSDave Hansen /* 915019132ffSDave Hansen * This OSPKE check is not strictly necessary at runtime. 916019132ffSDave Hansen * But, doing it this way allows compiler optimizations 917019132ffSDave Hansen * if pkeys are compiled out. 918019132ffSDave Hansen */ 919*33a709b2SDave Hansen if (bad_area_access_from_pkeys(error_code, vma)) 920019132ffSDave Hansen __bad_area(regs, error_code, address, vma, SEGV_PKUERR); 921019132ffSDave Hansen else 9227b2d0dbaSDave Hansen __bad_area(regs, error_code, address, vma, SEGV_ACCERR); 92392181f19SNick Piggin } 92492181f19SNick Piggin 9252d4a7167SIngo Molnar static void 926a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, 9277b2d0dbaSDave Hansen struct vm_area_struct *vma, unsigned int fault) 92892181f19SNick Piggin { 92992181f19SNick Piggin struct task_struct *tsk = current; 930a6e04aa9SAndi Kleen int code = BUS_ADRERR; 93192181f19SNick Piggin 9322d4a7167SIngo Molnar /* Kernel mode? Handle exceptions or die: */ 93396054569SLinus Torvalds if (!(error_code & PF_USER)) { 9344fc34901SAndy Lutomirski no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); 93596054569SLinus Torvalds return; 93696054569SLinus Torvalds } 9372d4a7167SIngo Molnar 938cd1b68f0SIngo Molnar /* User-space => ok to do another page fault: */ 93992181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 94092181f19SNick Piggin return; 9412d4a7167SIngo Molnar 94292181f19SNick Piggin tsk->thread.cr2 = address; 94392181f19SNick Piggin tsk->thread.error_code = error_code; 94451e7dc70SSrikar Dronamraju tsk->thread.trap_nr = X86_TRAP_PF; 9452d4a7167SIngo Molnar 946a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE 947f672b49bSAndi Kleen if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { 948a6e04aa9SAndi Kleen printk(KERN_ERR 949a6e04aa9SAndi Kleen "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", 950a6e04aa9SAndi Kleen tsk->comm, tsk->pid, address); 951a6e04aa9SAndi Kleen code = BUS_MCEERR_AR; 952a6e04aa9SAndi Kleen } 953a6e04aa9SAndi Kleen #endif 9547b2d0dbaSDave Hansen force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault); 95592181f19SNick Piggin } 95692181f19SNick Piggin 9573a13c4d7SJohannes Weiner static noinline void 9582d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code, 9597b2d0dbaSDave Hansen unsigned long address, struct vm_area_struct *vma, 9607b2d0dbaSDave Hansen unsigned int fault) 96192181f19SNick Piggin { 9623a13c4d7SJohannes Weiner if (fatal_signal_pending(current) && !(error_code & PF_USER)) { 9634fc34901SAndy Lutomirski no_context(regs, error_code, address, 0, 0); 9643a13c4d7SJohannes Weiner return; 965b80ef10eSKOSAKI Motohiro } 966b80ef10eSKOSAKI Motohiro 9672d4a7167SIngo Molnar if (fault & VM_FAULT_OOM) { 968f8626854SAndrey Vagin /* Kernel mode? Handle exceptions or die: */ 969f8626854SAndrey Vagin if (!(error_code & PF_USER)) { 9704fc34901SAndy Lutomirski no_context(regs, error_code, address, 9714fc34901SAndy Lutomirski SIGSEGV, SEGV_MAPERR); 9723a13c4d7SJohannes Weiner return; 973f8626854SAndrey Vagin } 974f8626854SAndrey Vagin 975c2d23f91SDavid Rientjes /* 976c2d23f91SDavid Rientjes * We ran out of memory, call the OOM killer, and return the 977c2d23f91SDavid Rientjes * userspace (which will retry the fault, or kill us if we got 978c2d23f91SDavid Rientjes * oom-killed): 979c2d23f91SDavid Rientjes */ 980c2d23f91SDavid Rientjes pagefault_out_of_memory(); 9812d4a7167SIngo Molnar } else { 982f672b49bSAndi Kleen if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 983f672b49bSAndi Kleen VM_FAULT_HWPOISON_LARGE)) 9847b2d0dbaSDave Hansen do_sigbus(regs, error_code, address, vma, fault); 98533692f27SLinus Torvalds else if (fault & VM_FAULT_SIGSEGV) 9867b2d0dbaSDave Hansen bad_area_nosemaphore(regs, error_code, address, vma); 98792181f19SNick Piggin else 98892181f19SNick Piggin BUG(); 98992181f19SNick Piggin } 9902d4a7167SIngo Molnar } 99192181f19SNick Piggin 992d8b57bb7SThomas Gleixner static int spurious_fault_check(unsigned long error_code, pte_t *pte) 993d8b57bb7SThomas Gleixner { 994d8b57bb7SThomas Gleixner if ((error_code & PF_WRITE) && !pte_write(*pte)) 995d8b57bb7SThomas Gleixner return 0; 9962d4a7167SIngo Molnar 997d8b57bb7SThomas Gleixner if ((error_code & PF_INSTR) && !pte_exec(*pte)) 998d8b57bb7SThomas Gleixner return 0; 999b3ecd515SDave Hansen /* 1000b3ecd515SDave Hansen * Note: We do not do lazy flushing on protection key 1001b3ecd515SDave Hansen * changes, so no spurious fault will ever set PF_PK. 1002b3ecd515SDave Hansen */ 1003b3ecd515SDave Hansen if ((error_code & PF_PK)) 1004b3ecd515SDave Hansen return 1; 1005d8b57bb7SThomas Gleixner 1006d8b57bb7SThomas Gleixner return 1; 1007d8b57bb7SThomas Gleixner } 1008d8b57bb7SThomas Gleixner 1009c61e211dSHarvey Harrison /* 10102d4a7167SIngo Molnar * Handle a spurious fault caused by a stale TLB entry. 10112d4a7167SIngo Molnar * 10122d4a7167SIngo Molnar * This allows us to lazily refresh the TLB when increasing the 10132d4a7167SIngo Molnar * permissions of a kernel page (RO -> RW or NX -> X). Doing it 10142d4a7167SIngo Molnar * eagerly is very expensive since that implies doing a full 10152d4a7167SIngo Molnar * cross-processor TLB flush, even if no stale TLB entries exist 10162d4a7167SIngo Molnar * on other processors. 10172d4a7167SIngo Molnar * 101831668511SDavid Vrabel * Spurious faults may only occur if the TLB contains an entry with 101931668511SDavid Vrabel * fewer permission than the page table entry. Non-present (P = 0) 102031668511SDavid Vrabel * and reserved bit (R = 1) faults are never spurious. 102131668511SDavid Vrabel * 10225b727a3bSJeremy Fitzhardinge * There are no security implications to leaving a stale TLB when 10235b727a3bSJeremy Fitzhardinge * increasing the permissions on a page. 102431668511SDavid Vrabel * 102531668511SDavid Vrabel * Returns non-zero if a spurious fault was handled, zero otherwise. 102631668511SDavid Vrabel * 102731668511SDavid Vrabel * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3 102831668511SDavid Vrabel * (Optional Invalidation). 10295b727a3bSJeremy Fitzhardinge */ 10309326638cSMasami Hiramatsu static noinline int 10312d4a7167SIngo Molnar spurious_fault(unsigned long error_code, unsigned long address) 10325b727a3bSJeremy Fitzhardinge { 10335b727a3bSJeremy Fitzhardinge pgd_t *pgd; 10345b727a3bSJeremy Fitzhardinge pud_t *pud; 10355b727a3bSJeremy Fitzhardinge pmd_t *pmd; 10365b727a3bSJeremy Fitzhardinge pte_t *pte; 10373c3e5694SSteven Rostedt int ret; 10385b727a3bSJeremy Fitzhardinge 103931668511SDavid Vrabel /* 104031668511SDavid Vrabel * Only writes to RO or instruction fetches from NX may cause 104131668511SDavid Vrabel * spurious faults. 104231668511SDavid Vrabel * 104331668511SDavid Vrabel * These could be from user or supervisor accesses but the TLB 104431668511SDavid Vrabel * is only lazily flushed after a kernel mapping protection 104531668511SDavid Vrabel * change, so user accesses are not expected to cause spurious 104631668511SDavid Vrabel * faults. 104731668511SDavid Vrabel */ 104831668511SDavid Vrabel if (error_code != (PF_WRITE | PF_PROT) 104931668511SDavid Vrabel && error_code != (PF_INSTR | PF_PROT)) 10505b727a3bSJeremy Fitzhardinge return 0; 10515b727a3bSJeremy Fitzhardinge 10525b727a3bSJeremy Fitzhardinge pgd = init_mm.pgd + pgd_index(address); 10535b727a3bSJeremy Fitzhardinge if (!pgd_present(*pgd)) 10545b727a3bSJeremy Fitzhardinge return 0; 10555b727a3bSJeremy Fitzhardinge 10565b727a3bSJeremy Fitzhardinge pud = pud_offset(pgd, address); 10575b727a3bSJeremy Fitzhardinge if (!pud_present(*pud)) 10585b727a3bSJeremy Fitzhardinge return 0; 10595b727a3bSJeremy Fitzhardinge 1060d8b57bb7SThomas Gleixner if (pud_large(*pud)) 1061d8b57bb7SThomas Gleixner return spurious_fault_check(error_code, (pte_t *) pud); 1062d8b57bb7SThomas Gleixner 10635b727a3bSJeremy Fitzhardinge pmd = pmd_offset(pud, address); 10645b727a3bSJeremy Fitzhardinge if (!pmd_present(*pmd)) 10655b727a3bSJeremy Fitzhardinge return 0; 10665b727a3bSJeremy Fitzhardinge 1067d8b57bb7SThomas Gleixner if (pmd_large(*pmd)) 1068d8b57bb7SThomas Gleixner return spurious_fault_check(error_code, (pte_t *) pmd); 1069d8b57bb7SThomas Gleixner 10705b727a3bSJeremy Fitzhardinge pte = pte_offset_kernel(pmd, address); 1071954f8571SAndrea Arcangeli if (!pte_present(*pte)) 10725b727a3bSJeremy Fitzhardinge return 0; 10735b727a3bSJeremy Fitzhardinge 10743c3e5694SSteven Rostedt ret = spurious_fault_check(error_code, pte); 10753c3e5694SSteven Rostedt if (!ret) 10763c3e5694SSteven Rostedt return 0; 10773c3e5694SSteven Rostedt 10783c3e5694SSteven Rostedt /* 10792d4a7167SIngo Molnar * Make sure we have permissions in PMD. 10802d4a7167SIngo Molnar * If not, then there's a bug in the page tables: 10813c3e5694SSteven Rostedt */ 10823c3e5694SSteven Rostedt ret = spurious_fault_check(error_code, (pte_t *) pmd); 10833c3e5694SSteven Rostedt WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); 10842d4a7167SIngo Molnar 10853c3e5694SSteven Rostedt return ret; 10865b727a3bSJeremy Fitzhardinge } 10879326638cSMasami Hiramatsu NOKPROBE_SYMBOL(spurious_fault); 10885b727a3bSJeremy Fitzhardinge 1089c61e211dSHarvey Harrison int show_unhandled_signals = 1; 1090c61e211dSHarvey Harrison 10912d4a7167SIngo Molnar static inline int 109268da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma) 109392181f19SNick Piggin { 1094*33a709b2SDave Hansen /* 1095*33a709b2SDave Hansen * Access or read was blocked by protection keys. We do 1096*33a709b2SDave Hansen * this check before any others because we do not want 1097*33a709b2SDave Hansen * to, for instance, confuse a protection-key-denied 1098*33a709b2SDave Hansen * write with one for which we should do a COW. 1099*33a709b2SDave Hansen */ 1100*33a709b2SDave Hansen if (error_code & PF_PK) 1101*33a709b2SDave Hansen return 1; 1102*33a709b2SDave Hansen 110368da336aSMichel Lespinasse if (error_code & PF_WRITE) { 11042d4a7167SIngo Molnar /* write, present and write, not present: */ 110592181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_WRITE))) 110692181f19SNick Piggin return 1; 11072d4a7167SIngo Molnar return 0; 11082d4a7167SIngo Molnar } 11092d4a7167SIngo Molnar 11102d4a7167SIngo Molnar /* read, present: */ 11112d4a7167SIngo Molnar if (unlikely(error_code & PF_PROT)) 111292181f19SNick Piggin return 1; 11132d4a7167SIngo Molnar 11142d4a7167SIngo Molnar /* read, not present: */ 111592181f19SNick Piggin if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) 111692181f19SNick Piggin return 1; 111792181f19SNick Piggin 111892181f19SNick Piggin return 0; 111992181f19SNick Piggin } 112092181f19SNick Piggin 11210973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address) 11220973a06cSHiroshi Shimamoto { 1123d9517346SIngo Molnar return address >= TASK_SIZE_MAX; 11240973a06cSHiroshi Shimamoto } 11250973a06cSHiroshi Shimamoto 112640d3cd66SH. Peter Anvin static inline bool smap_violation(int error_code, struct pt_regs *regs) 112740d3cd66SH. Peter Anvin { 11284640c7eeSH. Peter Anvin if (!IS_ENABLED(CONFIG_X86_SMAP)) 11294640c7eeSH. Peter Anvin return false; 11304640c7eeSH. Peter Anvin 11314640c7eeSH. Peter Anvin if (!static_cpu_has(X86_FEATURE_SMAP)) 11324640c7eeSH. Peter Anvin return false; 11334640c7eeSH. Peter Anvin 113440d3cd66SH. Peter Anvin if (error_code & PF_USER) 113540d3cd66SH. Peter Anvin return false; 113640d3cd66SH. Peter Anvin 1137f39b6f0eSAndy Lutomirski if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) 113840d3cd66SH. Peter Anvin return false; 113940d3cd66SH. Peter Anvin 114040d3cd66SH. Peter Anvin return true; 114140d3cd66SH. Peter Anvin } 114240d3cd66SH. Peter Anvin 1143c61e211dSHarvey Harrison /* 1144c61e211dSHarvey Harrison * This routine handles page faults. It determines the address, 1145c61e211dSHarvey Harrison * and the problem, and then passes it off to one of the appropriate 1146c61e211dSHarvey Harrison * routines. 1147d4078e23SPeter Zijlstra * 1148d4078e23SPeter Zijlstra * This function must have noinline because both callers 1149d4078e23SPeter Zijlstra * {,trace_}do_page_fault() have notrace on. Having this an actual function 1150d4078e23SPeter Zijlstra * guarantees there's a function trace entry. 1151c61e211dSHarvey Harrison */ 11529326638cSMasami Hiramatsu static noinline void 11530ac09f9fSJiri Olsa __do_page_fault(struct pt_regs *regs, unsigned long error_code, 11540ac09f9fSJiri Olsa unsigned long address) 1155c61e211dSHarvey Harrison { 1156c61e211dSHarvey Harrison struct vm_area_struct *vma; 11572d4a7167SIngo Molnar struct task_struct *tsk; 11582d4a7167SIngo Molnar struct mm_struct *mm; 115926178ec1SLinus Torvalds int fault, major = 0; 1160759496baSJohannes Weiner unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 1161c61e211dSHarvey Harrison 1162c61e211dSHarvey Harrison tsk = current; 1163c61e211dSHarvey Harrison mm = tsk->mm; 11642d4a7167SIngo Molnar 1165f8561296SVegard Nossum /* 1166f8561296SVegard Nossum * Detect and handle instructions that would cause a page fault for 1167f8561296SVegard Nossum * both a tracked kernel page and a userspace page. 1168f8561296SVegard Nossum */ 1169f8561296SVegard Nossum if (kmemcheck_active(regs)) 1170f8561296SVegard Nossum kmemcheck_hide(regs); 11715dfaf90fSIngo Molnar prefetchw(&mm->mmap_sem); 1172f8561296SVegard Nossum 11730fd0e3daSPekka Paalanen if (unlikely(kmmio_fault(regs, address))) 117486069782SPekka Paalanen return; 1175c61e211dSHarvey Harrison 1176c61e211dSHarvey Harrison /* 1177c61e211dSHarvey Harrison * We fault-in kernel-space virtual memory on-demand. The 1178c61e211dSHarvey Harrison * 'reference' page table is init_mm.pgd. 1179c61e211dSHarvey Harrison * 1180c61e211dSHarvey Harrison * NOTE! We MUST NOT take any locks for this case. We may 1181c61e211dSHarvey Harrison * be in an interrupt or a critical region, and should 1182c61e211dSHarvey Harrison * only copy the information from the master page table, 1183c61e211dSHarvey Harrison * nothing more. 1184c61e211dSHarvey Harrison * 1185c61e211dSHarvey Harrison * This verifies that the fault happens in kernel space 1186c61e211dSHarvey Harrison * (error_code & 4) == 0, and that the fault was not a 1187c61e211dSHarvey Harrison * protection error (error_code & 9) == 0. 1188c61e211dSHarvey Harrison */ 11890973a06cSHiroshi Shimamoto if (unlikely(fault_in_kernel_space(address))) { 1190f8561296SVegard Nossum if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { 1191f8561296SVegard Nossum if (vmalloc_fault(address) >= 0) 1192c61e211dSHarvey Harrison return; 11935b727a3bSJeremy Fitzhardinge 1194f8561296SVegard Nossum if (kmemcheck_fault(regs, address, error_code)) 1195f8561296SVegard Nossum return; 1196f8561296SVegard Nossum } 1197f8561296SVegard Nossum 11982d4a7167SIngo Molnar /* Can handle a stale RO->RW TLB: */ 119992181f19SNick Piggin if (spurious_fault(error_code, address)) 12005b727a3bSJeremy Fitzhardinge return; 12015b727a3bSJeremy Fitzhardinge 12022d4a7167SIngo Molnar /* kprobes don't want to hook the spurious faults: */ 1203e00b12e6SPeter Zijlstra if (kprobes_fault(regs)) 12049be260a6SMasami Hiramatsu return; 1205c61e211dSHarvey Harrison /* 1206c61e211dSHarvey Harrison * Don't take the mm semaphore here. If we fixup a prefetch 12072d4a7167SIngo Molnar * fault we could otherwise deadlock: 1208c61e211dSHarvey Harrison */ 12097b2d0dbaSDave Hansen bad_area_nosemaphore(regs, error_code, address, NULL); 12102d4a7167SIngo Molnar 121192181f19SNick Piggin return; 1212c61e211dSHarvey Harrison } 1213c61e211dSHarvey Harrison 12142d4a7167SIngo Molnar /* kprobes don't want to hook the spurious faults: */ 1215e00b12e6SPeter Zijlstra if (unlikely(kprobes_fault(regs))) 12169be260a6SMasami Hiramatsu return; 1217e00b12e6SPeter Zijlstra 1218e00b12e6SPeter Zijlstra if (unlikely(error_code & PF_RSVD)) 1219e00b12e6SPeter Zijlstra pgtable_bad(regs, error_code, address); 1220e00b12e6SPeter Zijlstra 1221e00b12e6SPeter Zijlstra if (unlikely(smap_violation(error_code, regs))) { 12227b2d0dbaSDave Hansen bad_area_nosemaphore(regs, error_code, address, NULL); 1223e00b12e6SPeter Zijlstra return; 1224e00b12e6SPeter Zijlstra } 1225e00b12e6SPeter Zijlstra 1226e00b12e6SPeter Zijlstra /* 1227e00b12e6SPeter Zijlstra * If we're in an interrupt, have no user context or are running 122870ffdb93SDavid Hildenbrand * in a region with pagefaults disabled then we must not take the fault 1229e00b12e6SPeter Zijlstra */ 123070ffdb93SDavid Hildenbrand if (unlikely(faulthandler_disabled() || !mm)) { 12317b2d0dbaSDave Hansen bad_area_nosemaphore(regs, error_code, address, NULL); 1232e00b12e6SPeter Zijlstra return; 1233e00b12e6SPeter Zijlstra } 1234e00b12e6SPeter Zijlstra 1235c61e211dSHarvey Harrison /* 1236891cffbdSLinus Torvalds * It's safe to allow irq's after cr2 has been saved and the 1237891cffbdSLinus Torvalds * vmalloc fault has been handled. 1238891cffbdSLinus Torvalds * 1239891cffbdSLinus Torvalds * User-mode registers count as a user access even for any 12402d4a7167SIngo Molnar * potential system fault or CPU buglet: 1241c61e211dSHarvey Harrison */ 1242f39b6f0eSAndy Lutomirski if (user_mode(regs)) { 1243891cffbdSLinus Torvalds local_irq_enable(); 1244891cffbdSLinus Torvalds error_code |= PF_USER; 1245759496baSJohannes Weiner flags |= FAULT_FLAG_USER; 12462d4a7167SIngo Molnar } else { 12472d4a7167SIngo Molnar if (regs->flags & X86_EFLAGS_IF) 1248c61e211dSHarvey Harrison local_irq_enable(); 12492d4a7167SIngo Molnar } 1250c61e211dSHarvey Harrison 1251a8b0ca17SPeter Zijlstra perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 12527dd1fcc2SPeter Zijlstra 1253759496baSJohannes Weiner if (error_code & PF_WRITE) 1254759496baSJohannes Weiner flags |= FAULT_FLAG_WRITE; 1255759496baSJohannes Weiner 12563a1dfe6eSIngo Molnar /* 12573a1dfe6eSIngo Molnar * When running in the kernel we expect faults to occur only to 12582d4a7167SIngo Molnar * addresses in user space. All other faults represent errors in 12592d4a7167SIngo Molnar * the kernel and should generate an OOPS. Unfortunately, in the 12602d4a7167SIngo Molnar * case of an erroneous fault occurring in a code path which already 12612d4a7167SIngo Molnar * holds mmap_sem we will deadlock attempting to validate the fault 12622d4a7167SIngo Molnar * against the address space. Luckily the kernel only validly 12632d4a7167SIngo Molnar * references user space from well defined areas of code, which are 12642d4a7167SIngo Molnar * listed in the exceptions table. 1265c61e211dSHarvey Harrison * 1266c61e211dSHarvey Harrison * As the vast majority of faults will be valid we will only perform 12672d4a7167SIngo Molnar * the source reference check when there is a possibility of a 12682d4a7167SIngo Molnar * deadlock. Attempt to lock the address space, if we cannot we then 12692d4a7167SIngo Molnar * validate the source. If this is invalid we can skip the address 12702d4a7167SIngo Molnar * space check, thus avoiding the deadlock: 1271c61e211dSHarvey Harrison */ 127292181f19SNick Piggin if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 1273c61e211dSHarvey Harrison if ((error_code & PF_USER) == 0 && 127492181f19SNick Piggin !search_exception_tables(regs->ip)) { 12757b2d0dbaSDave Hansen bad_area_nosemaphore(regs, error_code, address, NULL); 127692181f19SNick Piggin return; 127792181f19SNick Piggin } 1278d065bd81SMichel Lespinasse retry: 1279c61e211dSHarvey Harrison down_read(&mm->mmap_sem); 128001006074SPeter Zijlstra } else { 128101006074SPeter Zijlstra /* 12822d4a7167SIngo Molnar * The above down_read_trylock() might have succeeded in 12832d4a7167SIngo Molnar * which case we'll have missed the might_sleep() from 12842d4a7167SIngo Molnar * down_read(): 128501006074SPeter Zijlstra */ 128601006074SPeter Zijlstra might_sleep(); 1287c61e211dSHarvey Harrison } 1288c61e211dSHarvey Harrison 1289c61e211dSHarvey Harrison vma = find_vma(mm, address); 129092181f19SNick Piggin if (unlikely(!vma)) { 129192181f19SNick Piggin bad_area(regs, error_code, address); 129292181f19SNick Piggin return; 129392181f19SNick Piggin } 129492181f19SNick Piggin if (likely(vma->vm_start <= address)) 1295c61e211dSHarvey Harrison goto good_area; 129692181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { 129792181f19SNick Piggin bad_area(regs, error_code, address); 129892181f19SNick Piggin return; 129992181f19SNick Piggin } 1300c61e211dSHarvey Harrison if (error_code & PF_USER) { 1301c61e211dSHarvey Harrison /* 1302c61e211dSHarvey Harrison * Accessing the stack below %sp is always a bug. 1303c61e211dSHarvey Harrison * The large cushion allows instructions like enter 1304c61e211dSHarvey Harrison * and pusha to work. ("enter $65535, $31" pushes 1305c61e211dSHarvey Harrison * 32 pointers and then decrements %sp by 65535.) 1306c61e211dSHarvey Harrison */ 130792181f19SNick Piggin if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { 130892181f19SNick Piggin bad_area(regs, error_code, address); 130992181f19SNick Piggin return; 1310c61e211dSHarvey Harrison } 131192181f19SNick Piggin } 131292181f19SNick Piggin if (unlikely(expand_stack(vma, address))) { 131392181f19SNick Piggin bad_area(regs, error_code, address); 131492181f19SNick Piggin return; 131592181f19SNick Piggin } 131692181f19SNick Piggin 1317c61e211dSHarvey Harrison /* 1318c61e211dSHarvey Harrison * Ok, we have a good vm_area for this memory access, so 1319c61e211dSHarvey Harrison * we can handle it.. 1320c61e211dSHarvey Harrison */ 1321c61e211dSHarvey Harrison good_area: 132268da336aSMichel Lespinasse if (unlikely(access_error(error_code, vma))) { 13237b2d0dbaSDave Hansen bad_area_access_error(regs, error_code, address, vma); 132492181f19SNick Piggin return; 1325c61e211dSHarvey Harrison } 1326c61e211dSHarvey Harrison 1327c61e211dSHarvey Harrison /* 1328c61e211dSHarvey Harrison * If for any reason at all we couldn't handle the fault, 1329c61e211dSHarvey Harrison * make sure we exit gracefully rather than endlessly redo 13309a95f3cfSPaul Cassella * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if 13319a95f3cfSPaul Cassella * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. 1332c61e211dSHarvey Harrison */ 1333d065bd81SMichel Lespinasse fault = handle_mm_fault(mm, vma, address, flags); 133426178ec1SLinus Torvalds major |= fault & VM_FAULT_MAJOR; 13352d4a7167SIngo Molnar 13363a13c4d7SJohannes Weiner /* 133726178ec1SLinus Torvalds * If we need to retry the mmap_sem has already been released, 133826178ec1SLinus Torvalds * and if there is a fatal signal pending there is no guarantee 133926178ec1SLinus Torvalds * that we made any progress. Handle this case first. 13403a13c4d7SJohannes Weiner */ 134126178ec1SLinus Torvalds if (unlikely(fault & VM_FAULT_RETRY)) { 134226178ec1SLinus Torvalds /* Retry at most once */ 134326178ec1SLinus Torvalds if (flags & FAULT_FLAG_ALLOW_RETRY) { 134426178ec1SLinus Torvalds flags &= ~FAULT_FLAG_ALLOW_RETRY; 134526178ec1SLinus Torvalds flags |= FAULT_FLAG_TRIED; 134626178ec1SLinus Torvalds if (!fatal_signal_pending(tsk)) 134726178ec1SLinus Torvalds goto retry; 134826178ec1SLinus Torvalds } 134926178ec1SLinus Torvalds 135026178ec1SLinus Torvalds /* User mode? Just return to handle the fatal exception */ 1351cf3c0a15SLinus Torvalds if (flags & FAULT_FLAG_USER) 13523a13c4d7SJohannes Weiner return; 13533a13c4d7SJohannes Weiner 135426178ec1SLinus Torvalds /* Not returning to user mode? Handle exceptions or die: */ 135526178ec1SLinus Torvalds no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); 135626178ec1SLinus Torvalds return; 135726178ec1SLinus Torvalds } 135826178ec1SLinus Torvalds 13597fb08ecaSLinus Torvalds up_read(&mm->mmap_sem); 136026178ec1SLinus Torvalds if (unlikely(fault & VM_FAULT_ERROR)) { 13617b2d0dbaSDave Hansen mm_fault_error(regs, error_code, address, vma, fault); 136237b23e05SKOSAKI Motohiro return; 136337b23e05SKOSAKI Motohiro } 136437b23e05SKOSAKI Motohiro 136537b23e05SKOSAKI Motohiro /* 136626178ec1SLinus Torvalds * Major/minor page fault accounting. If any of the events 136726178ec1SLinus Torvalds * returned VM_FAULT_MAJOR, we account it as a major fault. 1368d065bd81SMichel Lespinasse */ 136926178ec1SLinus Torvalds if (major) { 1370c61e211dSHarvey Harrison tsk->maj_flt++; 137126178ec1SLinus Torvalds perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); 1372ac17dc8eSPeter Zijlstra } else { 1373c61e211dSHarvey Harrison tsk->min_flt++; 137426178ec1SLinus Torvalds perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 1375d065bd81SMichel Lespinasse } 1376c61e211dSHarvey Harrison 13778c938f9fSIngo Molnar check_v8086_mode(regs, address, tsk); 1378c61e211dSHarvey Harrison } 13799326638cSMasami Hiramatsu NOKPROBE_SYMBOL(__do_page_fault); 13806ba3c97aSFrederic Weisbecker 13819326638cSMasami Hiramatsu dotraplinkage void notrace 13826ba3c97aSFrederic Weisbecker do_page_fault(struct pt_regs *regs, unsigned long error_code) 13836ba3c97aSFrederic Weisbecker { 1384d4078e23SPeter Zijlstra unsigned long address = read_cr2(); /* Get the faulting address */ 13856c1e0256SFrederic Weisbecker enum ctx_state prev_state; 1386d4078e23SPeter Zijlstra 1387d4078e23SPeter Zijlstra /* 1388d4078e23SPeter Zijlstra * We must have this function tagged with __kprobes, notrace and call 1389d4078e23SPeter Zijlstra * read_cr2() before calling anything else. To avoid calling any kind 1390d4078e23SPeter Zijlstra * of tracing machinery before we've observed the CR2 value. 1391d4078e23SPeter Zijlstra * 1392d4078e23SPeter Zijlstra * exception_{enter,exit}() contain all sorts of tracepoints. 1393d4078e23SPeter Zijlstra */ 13946c1e0256SFrederic Weisbecker 13956c1e0256SFrederic Weisbecker prev_state = exception_enter(); 13960ac09f9fSJiri Olsa __do_page_fault(regs, error_code, address); 13976c1e0256SFrederic Weisbecker exception_exit(prev_state); 13986ba3c97aSFrederic Weisbecker } 13999326638cSMasami Hiramatsu NOKPROBE_SYMBOL(do_page_fault); 140025c74b10SSeiji Aguchi 1401d4078e23SPeter Zijlstra #ifdef CONFIG_TRACING 14029326638cSMasami Hiramatsu static nokprobe_inline void 14039326638cSMasami Hiramatsu trace_page_fault_entries(unsigned long address, struct pt_regs *regs, 1404d34603b0SSeiji Aguchi unsigned long error_code) 1405d34603b0SSeiji Aguchi { 1406d34603b0SSeiji Aguchi if (user_mode(regs)) 1407d4078e23SPeter Zijlstra trace_page_fault_user(address, regs, error_code); 1408d34603b0SSeiji Aguchi else 1409d4078e23SPeter Zijlstra trace_page_fault_kernel(address, regs, error_code); 1410d34603b0SSeiji Aguchi } 1411d34603b0SSeiji Aguchi 14129326638cSMasami Hiramatsu dotraplinkage void notrace 141325c74b10SSeiji Aguchi trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) 141425c74b10SSeiji Aguchi { 14150ac09f9fSJiri Olsa /* 14160ac09f9fSJiri Olsa * The exception_enter and tracepoint processing could 14170ac09f9fSJiri Olsa * trigger another page faults (user space callchain 14180ac09f9fSJiri Olsa * reading) and destroy the original cr2 value, so read 14190ac09f9fSJiri Olsa * the faulting address now. 14200ac09f9fSJiri Olsa */ 14210ac09f9fSJiri Olsa unsigned long address = read_cr2(); 1422d4078e23SPeter Zijlstra enum ctx_state prev_state; 142325c74b10SSeiji Aguchi 142425c74b10SSeiji Aguchi prev_state = exception_enter(); 1425d4078e23SPeter Zijlstra trace_page_fault_entries(address, regs, error_code); 14260ac09f9fSJiri Olsa __do_page_fault(regs, error_code, address); 142725c74b10SSeiji Aguchi exception_exit(prev_state); 142825c74b10SSeiji Aguchi } 14299326638cSMasami Hiramatsu NOKPROBE_SYMBOL(trace_do_page_fault); 1430d4078e23SPeter Zijlstra #endif /* CONFIG_TRACING */ 1431