/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_START		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 == 0: no page found		1: protection fault
 *   bit 1 == 0: read access		1: write access
 *   bit 2 == 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		= 1 << 0,
	PF_WRITE	= 1 << 1,
	PF_USER		= 1 << 2,
	PF_RSVD		= 1 << 3,
	PF_INSTR	= 1 << 4,
};

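/*
 * Worked example, following the bit table above: a user-mode write to an
 * unmapped address arrives with error_code == 0x6 (PF_USER|PF_WRITE),
 * i.e. not-present, write, from user mode. A user-mode write to a present
 * but read-only page arrives as 0x7 (PF_PROT|PF_WRITE|PF_USER), and a
 * kernel-mode read of an unmapped kernel address shows up as 0x0.
 */
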
/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

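/*
 * For reference (illustrative encoding): the prefetches recognized above
 * are the AMD 3DNow! PREFETCH/PREFETCHW instructions (opcode 0F 0D) and
 * the SSE PREFETCHNTA/T0/T1/T2 family (opcode 0F 18). E.g. on 64-bit,
 * "prefetchnta (%rax)" assembles to 0F 18 00: is_prefetch() below skips
 * any leading prefix bytes one at a time, then case 0x00 matches the 0F
 * escape byte and peeks at the 0D/18 byte that follows.
 */
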
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	force_sig_info(si_signo, &info, tsk);
}

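/*
 * Note on si_addr_lsb: for hardware-poison (SIGBUS) faults it tells the
 * signal handler the granularity of the damaged region, as the position
 * of the least significant valid bit of si_addr -- PAGE_SHIFT (12, i.e.
 * 4 KiB) for a normal page, or the huge page shift (e.g. 21 for a 2 MiB
 * page) when the poison hit a huge page.
 */
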
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

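/*
 * Why the syncing above is needed: on 32-bit every process carries its
 * own top-level page table, but vmalloc()/ioremap() install their
 * mappings only in the reference table, init_mm.pgd. vmalloc_sync_all()
 * walks pgd_list and copies the shared kernel PMD entries into every
 * process pgd eagerly; the vmalloc_fault() path below performs the same
 * copy lazily, one PMD at a time, when a process first touches the
 * mapping.
 */
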
/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

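/*
 * Typical sequence this handles: a driver running in task A vmalloc()s a
 * buffer, which populates PMD entries in init_mm.pgd only; task B (whose
 * pgd predates the allocation) then dereferences the buffer, faults, and
 * lands here, where the missing PMD is copied from the reference table.
 * No locks are taken, which is what makes this safe from interrupt
 * context.
 */
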
/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd)) {
		set_pgd(pgd, *pgd_ref);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
	}

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

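/*
 * The four-level walk above mirrors the hardware's 4 KiB page walk: a
 * 48-bit virtual address splits 9+9+9+9+12, with bits 47-39 indexing the
 * PGD, 38-30 the PUD, 29-21 the PMD, 20-12 the PTE, and bits 11-0 the
 * offset within the page. Only the PGD level is per-process on 64-bit,
 * which is why only the first level may legitimately differ from the
 * reference table.
 */
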
#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

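/*
 * Worked example (illustrative address): the kernel is executing at
 * 0xffffffff810023a0 when the erratum strikes and the CPU reports a
 * fault with RIP == CR2 == 0x00000000810023a0. OR-ing 0xffffffff << 32
 * back in reconstructs an address inside kernel text, so is_errata93()
 * repairs regs->ip and resumes execution instead of oopsing.
 */
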
/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

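/*
 * When the NX check in show_fault_oops() below fires, the first oops
 * line therefore reads (with an illustrative uid):
 *
 *   kernel tried to execute NX-protected page - exploit attempt? (uid: 1000)
 */
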
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address, tsk, 0);
		}
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

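/*
 * A typical line produced above, with illustrative addresses -- "error 6"
 * decodes, per the bit table at the top of the file, as PF_USER|PF_WRITE,
 * i.e. a user-mode write to a not-present page (here a NULL store):
 *
 *   a.out[1234]: segfault at 0 ip 00000000004004f6 sp 00007fffc0a895f0
 *   error 6 in a.out[400000+1000]
 */
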
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_START))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif
		/* Kernel addresses are always protection faults: */
		if (address >= TASK_SIZE)
			error_code |= PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

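/*
 * si_code distinctions used above, for reference: SEGV_MAPERR means no
 * mapping existed for the address at all, while SEGV_ACCERR means a
 * mapping existed but its permissions forbade the access. For SIGBUS,
 * BUS_ADRERR is a generic bus error while BUS_MCEERR_AR marks an
 * action-required machine check (poisoned memory) -- in that case
 * si_addr_lsb, filled in by force_sig_info_fault(), tells the handler
 * how much memory is gone.
 */
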
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
		up_read(&current->mm->mmap_sem);
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

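/*
 * Concrete scenario spurious_fault() handles: CPU 0 upgrades a kernel
 * mapping from RO to RW (e.g. via set_memory_rw()) without flushing the
 * other CPUs' TLBs; CPU 1 still holds the stale read-only entry and
 * faults on a write. The page-table walk above finds the PTE already
 * writable, so the fault is reported as spurious, the faulting
 * instruction is restarted, and the TLB picks up the fresh entry.
 */
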
int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (error_code & PF_USER)
		return false;

	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}

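/*
 * Background on smap_violation(): with Supervisor Mode Access Prevention
 * enabled, the CPU faults whenever kernel-mode code touches a user-space
 * page while EFLAGS.AC is clear; the kernel deliberately sets AC (stac)
 * around its intended user accesses and clears it again (clac). A
 * kernel-mode fault on a user address with AC clear is therefore treated
 * as a genuine violation by the caller below.
 */
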
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
static void __kprobes
__do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (static_cpu_has(X86_FEATURE_SMAP)) {
		if (unlikely(smap_violation(error_code, regs))) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (error_code & PF_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

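	/*
	 * Deadlock example the trylock above guards against: a path that
	 * already holds mmap_sem (say, inside sys_brk()) dereferences a
	 * bad user pointer with no exception-table fixup. An unconditional
	 * down_read() here would block on the semaphore we ourselves hold;
	 * the trylock plus exception-table check lets us fail the fault
	 * instead.
	 */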
	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}

dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	__do_page_fault(regs, error_code);
	exception_exit(prev_state);
}

dotraplinkage void __kprobes
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	__do_page_fault(regs, error_code);
	exception_exit(prev_state);
}