1c61e211dSHarvey Harrison /* 2c61e211dSHarvey Harrison * Copyright (C) 1995 Linus Torvalds 3c61e211dSHarvey Harrison * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. 4f8eeb2e6SIngo Molnar * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar 5c61e211dSHarvey Harrison */ 6a2bcd473SIngo Molnar #include <linux/magic.h> /* STACK_END_MAGIC */ 7a2bcd473SIngo Molnar #include <linux/sched.h> /* test_thread_flag(), ... */ 8a2bcd473SIngo Molnar #include <linux/kdebug.h> /* oops_begin/end, ... */ 9a2bcd473SIngo Molnar #include <linux/module.h> /* search_exception_table */ 10a2bcd473SIngo Molnar #include <linux/bootmem.h> /* max_low_pfn */ 11a2bcd473SIngo Molnar #include <linux/kprobes.h> /* __kprobes, ... */ 12a2bcd473SIngo Molnar #include <linux/mmiotrace.h> /* kmmio_handler, ... */ 13*cdd6c482SIngo Molnar #include <linux/perf_event.h> /* perf_sw_event */ 14c61e211dSHarvey Harrison 15a2bcd473SIngo Molnar #include <asm/traps.h> /* dotraplinkage, ... */ 16a2bcd473SIngo Molnar #include <asm/pgalloc.h> /* pgd_*(), ... */ 17f8561296SVegard Nossum #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ 18c61e211dSHarvey Harrison 19c61e211dSHarvey Harrison /* 202d4a7167SIngo Molnar * Page fault error code bits: 212d4a7167SIngo Molnar * 222d4a7167SIngo Molnar * bit 0 == 0: no page found 1: protection fault 232d4a7167SIngo Molnar * bit 1 == 0: read access 1: write access 242d4a7167SIngo Molnar * bit 2 == 0: kernel-mode access 1: user-mode access 252d4a7167SIngo Molnar * bit 3 == 1: use of reserved bit detected 262d4a7167SIngo Molnar * bit 4 == 1: fault was an instruction fetch 27c61e211dSHarvey Harrison */ 282d4a7167SIngo Molnar enum x86_pf_error_code { 292d4a7167SIngo Molnar 302d4a7167SIngo Molnar PF_PROT = 1 << 0, 312d4a7167SIngo Molnar PF_WRITE = 1 << 1, 322d4a7167SIngo Molnar PF_USER = 1 << 2, 332d4a7167SIngo Molnar PF_RSVD = 1 << 3, 342d4a7167SIngo Molnar PF_INSTR = 1 << 4, 352d4a7167SIngo Molnar }; 36c61e211dSHarvey Harrison 37b814d41fSIngo Molnar /* 38b319eed0SIngo Molnar * Returns 0 if mmiotrace is disabled, or if the fault is not 39b319eed0SIngo Molnar * handled by mmiotrace: 40b814d41fSIngo Molnar */ 410fd0e3daSPekka Paalanen static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) 4286069782SPekka Paalanen { 430fd0e3daSPekka Paalanen if (unlikely(is_kmmio_active())) 440fd0e3daSPekka Paalanen if (kmmio_handler(regs, addr) == 1) 450fd0e3daSPekka Paalanen return -1; 460fd0e3daSPekka Paalanen return 0; 4786069782SPekka Paalanen } 4886069782SPekka Paalanen 49c61e211dSHarvey Harrison static inline int notify_page_fault(struct pt_regs *regs) 50c61e211dSHarvey Harrison { 51c61e211dSHarvey Harrison int ret = 0; 52c61e211dSHarvey Harrison 53c61e211dSHarvey Harrison /* kprobe_running() needs smp_processor_id() */ 54b1801812SIngo Molnar if (kprobes_built_in() && !user_mode_vm(regs)) { 55c61e211dSHarvey Harrison preempt_disable(); 56c61e211dSHarvey Harrison if (kprobe_running() && kprobe_fault_handler(regs, 14)) 57c61e211dSHarvey Harrison ret = 1; 58c61e211dSHarvey Harrison preempt_enable(); 59c61e211dSHarvey Harrison } 60c61e211dSHarvey Harrison 61c61e211dSHarvey Harrison return ret; 62c61e211dSHarvey Harrison } 63c61e211dSHarvey Harrison 64c61e211dSHarvey Harrison /* 652d4a7167SIngo Molnar * Prefetch quirks: 662d4a7167SIngo Molnar * 672d4a7167SIngo Molnar * 32-bit mode: 682d4a7167SIngo Molnar * 69c61e211dSHarvey Harrison * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. 70c61e211dSHarvey Harrison * Check that here and ignore it. 
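 *
 * (The decoder below recognizes the two prefetch encodings 0F 0D and
 *  0F 18 - AMD prefetch/prefetchw and prefetchnta/t0/t1/t2 -
 *  optionally preceded by the segment-override, operand/address-size,
 *  lock/rep and, in 64-bit mode, REX prefixes that it whitelists.)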
71c61e211dSHarvey Harrison * 722d4a7167SIngo Molnar * 64-bit mode: 732d4a7167SIngo Molnar * 74c61e211dSHarvey Harrison * Sometimes the CPU reports invalid exceptions on prefetch. 75c61e211dSHarvey Harrison * Check that here and ignore it. 76c61e211dSHarvey Harrison * 772d4a7167SIngo Molnar * Opcode checker based on code by Richard Brunner. 78c61e211dSHarvey Harrison */ 79107a0367SIngo Molnar static inline int 80107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, 81107a0367SIngo Molnar unsigned char opcode, int *prefetch) 82c61e211dSHarvey Harrison { 83107a0367SIngo Molnar unsigned char instr_hi = opcode & 0xf0; 84107a0367SIngo Molnar unsigned char instr_lo = opcode & 0x0f; 85c61e211dSHarvey Harrison 86c61e211dSHarvey Harrison switch (instr_hi) { 87c61e211dSHarvey Harrison case 0x20: 88c61e211dSHarvey Harrison case 0x30: 89c61e211dSHarvey Harrison /* 90c61e211dSHarvey Harrison * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. 91c61e211dSHarvey Harrison * In X86_64 long mode, the CPU will signal invalid 92c61e211dSHarvey Harrison * opcode if some of these prefixes are present so 93c61e211dSHarvey Harrison * X86_64 will never get here anyway 94c61e211dSHarvey Harrison */ 95107a0367SIngo Molnar return ((instr_lo & 7) == 0x6); 96c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 97c61e211dSHarvey Harrison case 0x40: 98c61e211dSHarvey Harrison /* 99c61e211dSHarvey Harrison * In AMD64 long mode 0x40..0x4F are valid REX prefixes 100c61e211dSHarvey Harrison * Need to figure out under what instruction mode the 101c61e211dSHarvey Harrison * instruction was issued. Could check the LDT for lm, 102c61e211dSHarvey Harrison * but for now it's good enough to assume that long 103c61e211dSHarvey Harrison * mode only uses well known segments or kernel. 104c61e211dSHarvey Harrison */ 105107a0367SIngo Molnar return (!user_mode(regs)) || (regs->cs == __USER_CS); 106c61e211dSHarvey Harrison #endif 107c61e211dSHarvey Harrison case 0x60: 108c61e211dSHarvey Harrison /* 0x64 thru 0x67 are valid prefixes in all modes. */ 109107a0367SIngo Molnar return (instr_lo & 0xC) == 0x4; 110c61e211dSHarvey Harrison case 0xF0: 111c61e211dSHarvey Harrison /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. 
*/ 112107a0367SIngo Molnar return !instr_lo || (instr_lo>>1) == 1; 113c61e211dSHarvey Harrison case 0x00: 114c61e211dSHarvey Harrison /* Prefetch instruction is 0x0F0D or 0x0F18 */ 115107a0367SIngo Molnar if (probe_kernel_address(instr, opcode)) 116107a0367SIngo Molnar return 0; 117107a0367SIngo Molnar 118107a0367SIngo Molnar *prefetch = (instr_lo == 0xF) && 119107a0367SIngo Molnar (opcode == 0x0D || opcode == 0x18); 120107a0367SIngo Molnar return 0; 121107a0367SIngo Molnar default: 122107a0367SIngo Molnar return 0; 123107a0367SIngo Molnar } 124107a0367SIngo Molnar } 125107a0367SIngo Molnar 126107a0367SIngo Molnar static int 127107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) 128107a0367SIngo Molnar { 129107a0367SIngo Molnar unsigned char *max_instr; 130107a0367SIngo Molnar unsigned char *instr; 131107a0367SIngo Molnar int prefetch = 0; 132107a0367SIngo Molnar 133107a0367SIngo Molnar /* 134107a0367SIngo Molnar * If it was a exec (instruction fetch) fault on NX page, then 135107a0367SIngo Molnar * do not ignore the fault: 136107a0367SIngo Molnar */ 137107a0367SIngo Molnar if (error_code & PF_INSTR) 138107a0367SIngo Molnar return 0; 139107a0367SIngo Molnar 140107a0367SIngo Molnar instr = (void *)convert_ip_to_linear(current, regs); 141107a0367SIngo Molnar max_instr = instr + 15; 142107a0367SIngo Molnar 143107a0367SIngo Molnar if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) 144107a0367SIngo Molnar return 0; 145107a0367SIngo Molnar 146107a0367SIngo Molnar while (instr < max_instr) { 147107a0367SIngo Molnar unsigned char opcode; 148c61e211dSHarvey Harrison 149c61e211dSHarvey Harrison if (probe_kernel_address(instr, opcode)) 150c61e211dSHarvey Harrison break; 151107a0367SIngo Molnar 152107a0367SIngo Molnar instr++; 153107a0367SIngo Molnar 154107a0367SIngo Molnar if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) 155c61e211dSHarvey Harrison break; 156c61e211dSHarvey Harrison } 157c61e211dSHarvey Harrison return prefetch; 158c61e211dSHarvey Harrison } 159c61e211dSHarvey Harrison 1602d4a7167SIngo Molnar static void 1612d4a7167SIngo Molnar force_sig_info_fault(int si_signo, int si_code, unsigned long address, 1622d4a7167SIngo Molnar struct task_struct *tsk) 163c61e211dSHarvey Harrison { 164c61e211dSHarvey Harrison siginfo_t info; 165c61e211dSHarvey Harrison 166c61e211dSHarvey Harrison info.si_signo = si_signo; 167c61e211dSHarvey Harrison info.si_errno = 0; 168c61e211dSHarvey Harrison info.si_code = si_code; 169c61e211dSHarvey Harrison info.si_addr = (void __user *)address; 1702d4a7167SIngo Molnar 171c61e211dSHarvey Harrison force_sig_info(si_signo, &info, tsk); 172c61e211dSHarvey Harrison } 173c61e211dSHarvey Harrison 174f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock); 175f2f13a85SIngo Molnar LIST_HEAD(pgd_list); 1762d4a7167SIngo Molnar 177f2f13a85SIngo Molnar #ifdef CONFIG_X86_32 178f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) 179f2f13a85SIngo Molnar { 180f2f13a85SIngo Molnar unsigned index = pgd_index(address); 181f2f13a85SIngo Molnar pgd_t *pgd_k; 182f2f13a85SIngo Molnar pud_t *pud, *pud_k; 183f2f13a85SIngo Molnar pmd_t *pmd, *pmd_k; 184f2f13a85SIngo Molnar 185f2f13a85SIngo Molnar pgd += index; 186f2f13a85SIngo Molnar pgd_k = init_mm.pgd + index; 187f2f13a85SIngo Molnar 188f2f13a85SIngo Molnar if (!pgd_present(*pgd_k)) 189f2f13a85SIngo Molnar return NULL; 190f2f13a85SIngo Molnar 191f2f13a85SIngo Molnar /* 192f2f13a85SIngo Molnar * set_pgd(pgd, *pgd_k); here would be useless on 
PAE 193f2f13a85SIngo Molnar * and redundant with the set_pmd() on non-PAE. As would 194f2f13a85SIngo Molnar * set_pud. 195f2f13a85SIngo Molnar */ 196f2f13a85SIngo Molnar pud = pud_offset(pgd, address); 197f2f13a85SIngo Molnar pud_k = pud_offset(pgd_k, address); 198f2f13a85SIngo Molnar if (!pud_present(*pud_k)) 199f2f13a85SIngo Molnar return NULL; 200f2f13a85SIngo Molnar 201f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 202f2f13a85SIngo Molnar pmd_k = pmd_offset(pud_k, address); 203f2f13a85SIngo Molnar if (!pmd_present(*pmd_k)) 204f2f13a85SIngo Molnar return NULL; 205f2f13a85SIngo Molnar 206b8bcfe99SJeremy Fitzhardinge if (!pmd_present(*pmd)) 207f2f13a85SIngo Molnar set_pmd(pmd, *pmd_k); 208b8bcfe99SJeremy Fitzhardinge else 209f2f13a85SIngo Molnar BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); 210f2f13a85SIngo Molnar 211f2f13a85SIngo Molnar return pmd_k; 212f2f13a85SIngo Molnar } 213f2f13a85SIngo Molnar 214f2f13a85SIngo Molnar void vmalloc_sync_all(void) 215f2f13a85SIngo Molnar { 216f2f13a85SIngo Molnar unsigned long address; 217f2f13a85SIngo Molnar 218f2f13a85SIngo Molnar if (SHARED_KERNEL_PMD) 219f2f13a85SIngo Molnar return; 220f2f13a85SIngo Molnar 221f2f13a85SIngo Molnar for (address = VMALLOC_START & PMD_MASK; 222f2f13a85SIngo Molnar address >= TASK_SIZE && address < FIXADDR_TOP; 223f2f13a85SIngo Molnar address += PMD_SIZE) { 224f2f13a85SIngo Molnar 225f2f13a85SIngo Molnar unsigned long flags; 226f2f13a85SIngo Molnar struct page *page; 227f2f13a85SIngo Molnar 228f2f13a85SIngo Molnar spin_lock_irqsave(&pgd_lock, flags); 229f2f13a85SIngo Molnar list_for_each_entry(page, &pgd_list, lru) { 230f2f13a85SIngo Molnar if (!vmalloc_sync_one(page_address(page), address)) 231f2f13a85SIngo Molnar break; 232f2f13a85SIngo Molnar } 233f2f13a85SIngo Molnar spin_unlock_irqrestore(&pgd_lock, flags); 234f2f13a85SIngo Molnar } 235f2f13a85SIngo Molnar } 236f2f13a85SIngo Molnar 237f2f13a85SIngo Molnar /* 238f2f13a85SIngo Molnar * 32-bit: 239f2f13a85SIngo Molnar * 240f2f13a85SIngo Molnar * Handle a fault on the vmalloc or module mapping area 241f2f13a85SIngo Molnar */ 242f2f13a85SIngo Molnar static noinline int vmalloc_fault(unsigned long address) 243f2f13a85SIngo Molnar { 244f2f13a85SIngo Molnar unsigned long pgd_paddr; 245f2f13a85SIngo Molnar pmd_t *pmd_k; 246f2f13a85SIngo Molnar pte_t *pte_k; 247f2f13a85SIngo Molnar 248f2f13a85SIngo Molnar /* Make sure we are in vmalloc area: */ 249f2f13a85SIngo Molnar if (!(address >= VMALLOC_START && address < VMALLOC_END)) 250f2f13a85SIngo Molnar return -1; 251f2f13a85SIngo Molnar 252f2f13a85SIngo Molnar /* 253f2f13a85SIngo Molnar * Synchronize this task's top level page-table 254f2f13a85SIngo Molnar * with the 'reference' page table. 255f2f13a85SIngo Molnar * 256f2f13a85SIngo Molnar * Do _not_ use "current" here. We might be inside 257f2f13a85SIngo Molnar * an interrupt in the middle of a task switch.. 258f2f13a85SIngo Molnar */ 259f2f13a85SIngo Molnar pgd_paddr = read_cr3(); 260f2f13a85SIngo Molnar pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); 261f2f13a85SIngo Molnar if (!pmd_k) 262f2f13a85SIngo Molnar return -1; 263f2f13a85SIngo Molnar 264f2f13a85SIngo Molnar pte_k = pte_offset_kernel(pmd_k, address); 265f2f13a85SIngo Molnar if (!pte_present(*pte_k)) 266f2f13a85SIngo Molnar return -1; 267f2f13a85SIngo Molnar 268f2f13a85SIngo Molnar return 0; 269f2f13a85SIngo Molnar } 270f2f13a85SIngo Molnar 271f2f13a85SIngo Molnar /* 272f2f13a85SIngo Molnar * Did it hit the DOS screen memory VA from vm86 mode? 
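 * (That is the 32 pages of the legacy VGA window at 0xA0000-0xBFFFF;
 *  the matching bit in tsk->thread.screen_bitmap is set to record
 *  which of those screen pages the vm86 task has touched.)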
273f2f13a85SIngo Molnar */ 274f2f13a85SIngo Molnar static inline void 275f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 276f2f13a85SIngo Molnar struct task_struct *tsk) 277f2f13a85SIngo Molnar { 278f2f13a85SIngo Molnar unsigned long bit; 279f2f13a85SIngo Molnar 280f2f13a85SIngo Molnar if (!v8086_mode(regs)) 281f2f13a85SIngo Molnar return; 282f2f13a85SIngo Molnar 283f2f13a85SIngo Molnar bit = (address - 0xA0000) >> PAGE_SHIFT; 284f2f13a85SIngo Molnar if (bit < 32) 285f2f13a85SIngo Molnar tsk->thread.screen_bitmap |= 1 << bit; 286f2f13a85SIngo Molnar } 287c61e211dSHarvey Harrison 288087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn) 289087975b0SAkinobu Mita { 290087975b0SAkinobu Mita return pfn < max_low_pfn; 291087975b0SAkinobu Mita } 292087975b0SAkinobu Mita 293cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address) 294c61e211dSHarvey Harrison { 295087975b0SAkinobu Mita pgd_t *base = __va(read_cr3()); 296087975b0SAkinobu Mita pgd_t *pgd = &base[pgd_index(address)]; 297087975b0SAkinobu Mita pmd_t *pmd; 298087975b0SAkinobu Mita pte_t *pte; 2992d4a7167SIngo Molnar 300c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE 301087975b0SAkinobu Mita printk("*pdpt = %016Lx ", pgd_val(*pgd)); 302087975b0SAkinobu Mita if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) 303087975b0SAkinobu Mita goto out; 304c61e211dSHarvey Harrison #endif 305087975b0SAkinobu Mita pmd = pmd_offset(pud_offset(pgd, address), address); 306087975b0SAkinobu Mita printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); 307c61e211dSHarvey Harrison 308c61e211dSHarvey Harrison /* 309c61e211dSHarvey Harrison * We must not directly access the pte in the highpte 310c61e211dSHarvey Harrison * case if the page table is located in highmem. 
311c61e211dSHarvey Harrison * And let's rather not kmap-atomic the pte, just in case 3122d4a7167SIngo Molnar * it's allocated already: 313c61e211dSHarvey Harrison */ 314087975b0SAkinobu Mita if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) 315087975b0SAkinobu Mita goto out; 3162d4a7167SIngo Molnar 317087975b0SAkinobu Mita pte = pte_offset_kernel(pmd, address); 318087975b0SAkinobu Mita printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); 319087975b0SAkinobu Mita out: 320c61e211dSHarvey Harrison printk("\n"); 321f2f13a85SIngo Molnar } 322f2f13a85SIngo Molnar 323f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */ 324f2f13a85SIngo Molnar 325f2f13a85SIngo Molnar void vmalloc_sync_all(void) 326f2f13a85SIngo Molnar { 327f2f13a85SIngo Molnar unsigned long address; 328f2f13a85SIngo Molnar 329f2f13a85SIngo Molnar for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; 330f2f13a85SIngo Molnar address += PGDIR_SIZE) { 331f2f13a85SIngo Molnar 332f2f13a85SIngo Molnar const pgd_t *pgd_ref = pgd_offset_k(address); 333f2f13a85SIngo Molnar unsigned long flags; 334f2f13a85SIngo Molnar struct page *page; 335f2f13a85SIngo Molnar 336f2f13a85SIngo Molnar if (pgd_none(*pgd_ref)) 337f2f13a85SIngo Molnar continue; 338f2f13a85SIngo Molnar 339f2f13a85SIngo Molnar spin_lock_irqsave(&pgd_lock, flags); 340f2f13a85SIngo Molnar list_for_each_entry(page, &pgd_list, lru) { 341f2f13a85SIngo Molnar pgd_t *pgd; 342f2f13a85SIngo Molnar pgd = (pgd_t *)page_address(page) + pgd_index(address); 343f2f13a85SIngo Molnar if (pgd_none(*pgd)) 344f2f13a85SIngo Molnar set_pgd(pgd, *pgd_ref); 345f2f13a85SIngo Molnar else 346f2f13a85SIngo Molnar BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); 347f2f13a85SIngo Molnar } 348f2f13a85SIngo Molnar spin_unlock_irqrestore(&pgd_lock, flags); 349f2f13a85SIngo Molnar } 350f2f13a85SIngo Molnar } 351f2f13a85SIngo Molnar 352f2f13a85SIngo Molnar /* 353f2f13a85SIngo Molnar * 64-bit: 354f2f13a85SIngo Molnar * 355f2f13a85SIngo Molnar * Handle a fault on the vmalloc area 356f2f13a85SIngo Molnar * 357f2f13a85SIngo Molnar * This assumes no large pages in there. 358f2f13a85SIngo Molnar */ 359f2f13a85SIngo Molnar static noinline int vmalloc_fault(unsigned long address) 360f2f13a85SIngo Molnar { 361f2f13a85SIngo Molnar pgd_t *pgd, *pgd_ref; 362f2f13a85SIngo Molnar pud_t *pud, *pud_ref; 363f2f13a85SIngo Molnar pmd_t *pmd, *pmd_ref; 364f2f13a85SIngo Molnar pte_t *pte, *pte_ref; 365f2f13a85SIngo Molnar 366f2f13a85SIngo Molnar /* Make sure we are in vmalloc area: */ 367f2f13a85SIngo Molnar if (!(address >= VMALLOC_START && address < VMALLOC_END)) 368f2f13a85SIngo Molnar return -1; 369f2f13a85SIngo Molnar 370f2f13a85SIngo Molnar /* 371f2f13a85SIngo Molnar * Copy kernel mappings over when needed. This can also 372f2f13a85SIngo Molnar * happen within a race in page table update. 
In the later 373f2f13a85SIngo Molnar * case just flush: 374f2f13a85SIngo Molnar */ 375f2f13a85SIngo Molnar pgd = pgd_offset(current->active_mm, address); 376f2f13a85SIngo Molnar pgd_ref = pgd_offset_k(address); 377f2f13a85SIngo Molnar if (pgd_none(*pgd_ref)) 378f2f13a85SIngo Molnar return -1; 379f2f13a85SIngo Molnar 380f2f13a85SIngo Molnar if (pgd_none(*pgd)) 381f2f13a85SIngo Molnar set_pgd(pgd, *pgd_ref); 382f2f13a85SIngo Molnar else 383f2f13a85SIngo Molnar BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); 384f2f13a85SIngo Molnar 385f2f13a85SIngo Molnar /* 386f2f13a85SIngo Molnar * Below here mismatches are bugs because these lower tables 387f2f13a85SIngo Molnar * are shared: 388f2f13a85SIngo Molnar */ 389f2f13a85SIngo Molnar 390f2f13a85SIngo Molnar pud = pud_offset(pgd, address); 391f2f13a85SIngo Molnar pud_ref = pud_offset(pgd_ref, address); 392f2f13a85SIngo Molnar if (pud_none(*pud_ref)) 393f2f13a85SIngo Molnar return -1; 394f2f13a85SIngo Molnar 395f2f13a85SIngo Molnar if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) 396f2f13a85SIngo Molnar BUG(); 397f2f13a85SIngo Molnar 398f2f13a85SIngo Molnar pmd = pmd_offset(pud, address); 399f2f13a85SIngo Molnar pmd_ref = pmd_offset(pud_ref, address); 400f2f13a85SIngo Molnar if (pmd_none(*pmd_ref)) 401f2f13a85SIngo Molnar return -1; 402f2f13a85SIngo Molnar 403f2f13a85SIngo Molnar if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) 404f2f13a85SIngo Molnar BUG(); 405f2f13a85SIngo Molnar 406f2f13a85SIngo Molnar pte_ref = pte_offset_kernel(pmd_ref, address); 407f2f13a85SIngo Molnar if (!pte_present(*pte_ref)) 408f2f13a85SIngo Molnar return -1; 409f2f13a85SIngo Molnar 410f2f13a85SIngo Molnar pte = pte_offset_kernel(pmd, address); 411f2f13a85SIngo Molnar 412f2f13a85SIngo Molnar /* 413f2f13a85SIngo Molnar * Don't use pte_page here, because the mappings can point 414f2f13a85SIngo Molnar * outside mem_map, and the NUMA hash lookup cannot handle 415f2f13a85SIngo Molnar * that: 416f2f13a85SIngo Molnar */ 417f2f13a85SIngo Molnar if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) 418f2f13a85SIngo Molnar BUG(); 419f2f13a85SIngo Molnar 420f2f13a85SIngo Molnar return 0; 421f2f13a85SIngo Molnar } 422f2f13a85SIngo Molnar 423f2f13a85SIngo Molnar static const char errata93_warning[] = 424ad361c98SJoe Perches KERN_ERR 425ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n" 426ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n" 427ad361c98SJoe Perches "******* Please consider a BIOS update.\n" 428ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n"; 429f2f13a85SIngo Molnar 430f2f13a85SIngo Molnar /* 431f2f13a85SIngo Molnar * No vm86 mode in 64-bit mode: 432f2f13a85SIngo Molnar */ 433f2f13a85SIngo Molnar static inline void 434f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address, 435f2f13a85SIngo Molnar struct task_struct *tsk) 436f2f13a85SIngo Molnar { 437f2f13a85SIngo Molnar } 438f2f13a85SIngo Molnar 439f2f13a85SIngo Molnar static int bad_address(void *p) 440f2f13a85SIngo Molnar { 441f2f13a85SIngo Molnar unsigned long dummy; 442f2f13a85SIngo Molnar 443f2f13a85SIngo Molnar return probe_kernel_address((unsigned long *)p, dummy); 444f2f13a85SIngo Molnar } 445f2f13a85SIngo Molnar 446f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address) 447f2f13a85SIngo Molnar { 448087975b0SAkinobu Mita pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); 449087975b0SAkinobu Mita pgd_t *pgd = base 
+ pgd_index(address); 450c61e211dSHarvey Harrison pud_t *pud; 451c61e211dSHarvey Harrison pmd_t *pmd; 452c61e211dSHarvey Harrison pte_t *pte; 453c61e211dSHarvey Harrison 4542d4a7167SIngo Molnar if (bad_address(pgd)) 4552d4a7167SIngo Molnar goto bad; 4562d4a7167SIngo Molnar 457c61e211dSHarvey Harrison printk("PGD %lx ", pgd_val(*pgd)); 4582d4a7167SIngo Molnar 4592d4a7167SIngo Molnar if (!pgd_present(*pgd)) 4602d4a7167SIngo Molnar goto out; 461c61e211dSHarvey Harrison 462c61e211dSHarvey Harrison pud = pud_offset(pgd, address); 4632d4a7167SIngo Molnar if (bad_address(pud)) 4642d4a7167SIngo Molnar goto bad; 4652d4a7167SIngo Molnar 466c61e211dSHarvey Harrison printk("PUD %lx ", pud_val(*pud)); 467b5360222SAndi Kleen if (!pud_present(*pud) || pud_large(*pud)) 4682d4a7167SIngo Molnar goto out; 469c61e211dSHarvey Harrison 470c61e211dSHarvey Harrison pmd = pmd_offset(pud, address); 4712d4a7167SIngo Molnar if (bad_address(pmd)) 4722d4a7167SIngo Molnar goto bad; 4732d4a7167SIngo Molnar 474c61e211dSHarvey Harrison printk("PMD %lx ", pmd_val(*pmd)); 4752d4a7167SIngo Molnar if (!pmd_present(*pmd) || pmd_large(*pmd)) 4762d4a7167SIngo Molnar goto out; 477c61e211dSHarvey Harrison 478c61e211dSHarvey Harrison pte = pte_offset_kernel(pmd, address); 4792d4a7167SIngo Molnar if (bad_address(pte)) 4802d4a7167SIngo Molnar goto bad; 4812d4a7167SIngo Molnar 482c61e211dSHarvey Harrison printk("PTE %lx", pte_val(*pte)); 4832d4a7167SIngo Molnar out: 484c61e211dSHarvey Harrison printk("\n"); 485c61e211dSHarvey Harrison return; 486c61e211dSHarvey Harrison bad: 487c61e211dSHarvey Harrison printk("BAD\n"); 488c61e211dSHarvey Harrison } 489c61e211dSHarvey Harrison 490f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */ 491c61e211dSHarvey Harrison 4922d4a7167SIngo Molnar /* 4932d4a7167SIngo Molnar * Workaround for K8 erratum #93 & buggy BIOS. 4942d4a7167SIngo Molnar * 4952d4a7167SIngo Molnar * BIOS SMM functions are required to use a specific workaround 4962d4a7167SIngo Molnar * to avoid corruption of the 64bit RIP register on C stepping K8. 4972d4a7167SIngo Molnar * 4982d4a7167SIngo Molnar * A lot of BIOS that didn't get tested properly miss this. 4992d4a7167SIngo Molnar * 5002d4a7167SIngo Molnar * The OS sees this as a page fault with the upper 32bits of RIP cleared. 5012d4a7167SIngo Molnar * Try to work around it here. 5022d4a7167SIngo Molnar * 5032d4a7167SIngo Molnar * Note we only handle faults in kernel here. 5042d4a7167SIngo Molnar * Does nothing on 32-bit. 
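 *
 * Example (addresses purely illustrative): an RIP of
 * 0xffffffff8102a4c0 whose upper half was cleared shows up here as a
 * fault at 0x8102a4c0 == regs->ip; OR-ing 0xffffffff00000000 back in
 * lands inside the kernel text range, so we repair regs->ip and resume.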
505c61e211dSHarvey Harrison */ 506c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address) 507c61e211dSHarvey Harrison { 508c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 509c61e211dSHarvey Harrison if (address != regs->ip) 510c61e211dSHarvey Harrison return 0; 5112d4a7167SIngo Molnar 512c61e211dSHarvey Harrison if ((address >> 32) != 0) 513c61e211dSHarvey Harrison return 0; 5142d4a7167SIngo Molnar 515c61e211dSHarvey Harrison address |= 0xffffffffUL << 32; 516c61e211dSHarvey Harrison if ((address >= (u64)_stext && address <= (u64)_etext) || 517c61e211dSHarvey Harrison (address >= MODULES_VADDR && address <= MODULES_END)) { 518a454ab31SIngo Molnar printk_once(errata93_warning); 519c61e211dSHarvey Harrison regs->ip = address; 520c61e211dSHarvey Harrison return 1; 521c61e211dSHarvey Harrison } 522c61e211dSHarvey Harrison #endif 523c61e211dSHarvey Harrison return 0; 524c61e211dSHarvey Harrison } 525c61e211dSHarvey Harrison 526c61e211dSHarvey Harrison /* 5272d4a7167SIngo Molnar * Work around K8 erratum #100 K8 in compat mode occasionally jumps 5282d4a7167SIngo Molnar * to illegal addresses >4GB. 5292d4a7167SIngo Molnar * 5302d4a7167SIngo Molnar * We catch this in the page fault handler because these addresses 5312d4a7167SIngo Molnar * are not reachable. Just detect this case and return. Any code 532c61e211dSHarvey Harrison * segment in LDT is compatibility mode. 533c61e211dSHarvey Harrison */ 534c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address) 535c61e211dSHarvey Harrison { 536c61e211dSHarvey Harrison #ifdef CONFIG_X86_64 5372d4a7167SIngo Molnar if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) 538c61e211dSHarvey Harrison return 1; 539c61e211dSHarvey Harrison #endif 540c61e211dSHarvey Harrison return 0; 541c61e211dSHarvey Harrison } 542c61e211dSHarvey Harrison 543c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address) 544c61e211dSHarvey Harrison { 545c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG 546c61e211dSHarvey Harrison unsigned long nr; 5472d4a7167SIngo Molnar 548c61e211dSHarvey Harrison /* 5492d4a7167SIngo Molnar * Pentium F0 0F C7 C8 bug workaround: 550c61e211dSHarvey Harrison */ 551c61e211dSHarvey Harrison if (boot_cpu_data.f00f_bug) { 552c61e211dSHarvey Harrison nr = (address - idt_descr.address) >> 3; 553c61e211dSHarvey Harrison 554c61e211dSHarvey Harrison if (nr == 6) { 555c61e211dSHarvey Harrison do_invalid_op(regs, 0); 556c61e211dSHarvey Harrison return 1; 557c61e211dSHarvey Harrison } 558c61e211dSHarvey Harrison } 559c61e211dSHarvey Harrison #endif 560c61e211dSHarvey Harrison return 0; 561c61e211dSHarvey Harrison } 562c61e211dSHarvey Harrison 5638f766149SIngo Molnar static const char nx_warning[] = KERN_CRIT 5648f766149SIngo Molnar "kernel tried to execute NX-protected page - exploit attempt? 
(uid: %d)\n"; 5658f766149SIngo Molnar 5662d4a7167SIngo Molnar static void 5672d4a7167SIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code, 568c61e211dSHarvey Harrison unsigned long address) 569c61e211dSHarvey Harrison { 570c61e211dSHarvey Harrison if (!oops_may_print()) 571c61e211dSHarvey Harrison return; 572c61e211dSHarvey Harrison 573c61e211dSHarvey Harrison if (error_code & PF_INSTR) { 57493809be8SHarvey Harrison unsigned int level; 5752d4a7167SIngo Molnar 576c61e211dSHarvey Harrison pte_t *pte = lookup_address(address, &level); 577c61e211dSHarvey Harrison 5788f766149SIngo Molnar if (pte && pte_present(*pte) && !pte_exec(*pte)) 5798f766149SIngo Molnar printk(nx_warning, current_uid()); 580c61e211dSHarvey Harrison } 581fd40d6e3SHarvey Harrison 582c61e211dSHarvey Harrison printk(KERN_ALERT "BUG: unable to handle kernel "); 583c61e211dSHarvey Harrison if (address < PAGE_SIZE) 584c61e211dSHarvey Harrison printk(KERN_CONT "NULL pointer dereference"); 585c61e211dSHarvey Harrison else 586c61e211dSHarvey Harrison printk(KERN_CONT "paging request"); 5872d4a7167SIngo Molnar 588f294a8ceSVegard Nossum printk(KERN_CONT " at %p\n", (void *) address); 589c61e211dSHarvey Harrison printk(KERN_ALERT "IP:"); 590c61e211dSHarvey Harrison printk_address(regs->ip, 1); 5912d4a7167SIngo Molnar 592c61e211dSHarvey Harrison dump_pagetable(address); 593c61e211dSHarvey Harrison } 594c61e211dSHarvey Harrison 5952d4a7167SIngo Molnar static noinline void 5962d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code, 5972d4a7167SIngo Molnar unsigned long address) 598c61e211dSHarvey Harrison { 5992d4a7167SIngo Molnar struct task_struct *tsk; 6002d4a7167SIngo Molnar unsigned long flags; 6012d4a7167SIngo Molnar int sig; 6022d4a7167SIngo Molnar 6032d4a7167SIngo Molnar flags = oops_begin(); 6042d4a7167SIngo Molnar tsk = current; 6052d4a7167SIngo Molnar sig = SIGKILL; 606c61e211dSHarvey Harrison 607c61e211dSHarvey Harrison printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", 60892181f19SNick Piggin tsk->comm, address); 609c61e211dSHarvey Harrison dump_pagetable(address); 6102d4a7167SIngo Molnar 611c61e211dSHarvey Harrison tsk->thread.cr2 = address; 612c61e211dSHarvey Harrison tsk->thread.trap_no = 14; 613c61e211dSHarvey Harrison tsk->thread.error_code = error_code; 6142d4a7167SIngo Molnar 615c61e211dSHarvey Harrison if (__die("Bad pagetable", regs, error_code)) 616874d93d1SAlexander van Heukelum sig = 0; 6172d4a7167SIngo Molnar 618874d93d1SAlexander van Heukelum oops_end(flags, regs, sig); 619c61e211dSHarvey Harrison } 620c61e211dSHarvey Harrison 6212d4a7167SIngo Molnar static noinline void 6222d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code, 6232d4a7167SIngo Molnar unsigned long address) 62492181f19SNick Piggin { 62592181f19SNick Piggin struct task_struct *tsk = current; 62619803078SIngo Molnar unsigned long *stackend; 62792181f19SNick Piggin unsigned long flags; 62892181f19SNick Piggin int sig; 62992181f19SNick Piggin 63092181f19SNick Piggin /* Are we prepared to handle this kernel fault? */ 63192181f19SNick Piggin if (fixup_exception(regs)) 63292181f19SNick Piggin return; 63392181f19SNick Piggin 63492181f19SNick Piggin /* 6352d4a7167SIngo Molnar * 32-bit: 6362d4a7167SIngo Molnar * 63792181f19SNick Piggin * Valid to do another page fault here, because if this fault 63892181f19SNick Piggin * had been triggered by is_prefetch fixup_exception would have 63992181f19SNick Piggin * handled it. 
64092181f19SNick Piggin * 6412d4a7167SIngo Molnar * 64-bit: 6422d4a7167SIngo Molnar * 64392181f19SNick Piggin * Hall of shame of CPU/BIOS bugs. 64492181f19SNick Piggin */ 64592181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 64692181f19SNick Piggin return; 64792181f19SNick Piggin 64892181f19SNick Piggin if (is_errata93(regs, address)) 64992181f19SNick Piggin return; 65092181f19SNick Piggin 65192181f19SNick Piggin /* 65292181f19SNick Piggin * Oops. The kernel tried to access some bad page. We'll have to 6532d4a7167SIngo Molnar * terminate things with extreme prejudice: 65492181f19SNick Piggin */ 65592181f19SNick Piggin flags = oops_begin(); 65692181f19SNick Piggin 65792181f19SNick Piggin show_fault_oops(regs, error_code, address); 65892181f19SNick Piggin 65919803078SIngo Molnar stackend = end_of_stack(tsk); 66019803078SIngo Molnar if (*stackend != STACK_END_MAGIC) 66119803078SIngo Molnar printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); 66219803078SIngo Molnar 66392181f19SNick Piggin tsk->thread.cr2 = address; 66492181f19SNick Piggin tsk->thread.trap_no = 14; 66592181f19SNick Piggin tsk->thread.error_code = error_code; 66692181f19SNick Piggin 66792181f19SNick Piggin sig = SIGKILL; 66892181f19SNick Piggin if (__die("Oops", regs, error_code)) 66992181f19SNick Piggin sig = 0; 6702d4a7167SIngo Molnar 67192181f19SNick Piggin /* Executive summary in case the body of the oops scrolled away */ 67292181f19SNick Piggin printk(KERN_EMERG "CR2: %016lx\n", address); 6732d4a7167SIngo Molnar 67492181f19SNick Piggin oops_end(flags, regs, sig); 67592181f19SNick Piggin } 67692181f19SNick Piggin 6772d4a7167SIngo Molnar /* 6782d4a7167SIngo Molnar * Print out info about fatal segfaults, if the show_unhandled_signals 6792d4a7167SIngo Molnar * sysctl is set: 6802d4a7167SIngo Molnar */ 6812d4a7167SIngo Molnar static inline void 6822d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code, 6832d4a7167SIngo Molnar unsigned long address, struct task_struct *tsk) 6842d4a7167SIngo Molnar { 6852d4a7167SIngo Molnar if (!unhandled_signal(tsk, SIGSEGV)) 6862d4a7167SIngo Molnar return; 6872d4a7167SIngo Molnar 6882d4a7167SIngo Molnar if (!printk_ratelimit()) 6892d4a7167SIngo Molnar return; 6902d4a7167SIngo Molnar 691a1a08d1cSRoland Dreier printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx", 6922d4a7167SIngo Molnar task_pid_nr(tsk) > 1 ? 
KERN_INFO : KERN_EMERG, 6932d4a7167SIngo Molnar tsk->comm, task_pid_nr(tsk), address, 6942d4a7167SIngo Molnar (void *)regs->ip, (void *)regs->sp, error_code); 6952d4a7167SIngo Molnar 6962d4a7167SIngo Molnar print_vma_addr(KERN_CONT " in ", regs->ip); 6972d4a7167SIngo Molnar 6982d4a7167SIngo Molnar printk(KERN_CONT "\n"); 6992d4a7167SIngo Molnar } 7002d4a7167SIngo Molnar 7012d4a7167SIngo Molnar static void 7022d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 7032d4a7167SIngo Molnar unsigned long address, int si_code) 70492181f19SNick Piggin { 70592181f19SNick Piggin struct task_struct *tsk = current; 70692181f19SNick Piggin 70792181f19SNick Piggin /* User mode accesses just cause a SIGSEGV */ 70892181f19SNick Piggin if (error_code & PF_USER) { 70992181f19SNick Piggin /* 7102d4a7167SIngo Molnar * It's possible to have interrupts off here: 71192181f19SNick Piggin */ 71292181f19SNick Piggin local_irq_enable(); 71392181f19SNick Piggin 71492181f19SNick Piggin /* 71592181f19SNick Piggin * Valid to do another page fault here because this one came 7162d4a7167SIngo Molnar * from user space: 71792181f19SNick Piggin */ 71892181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 71992181f19SNick Piggin return; 72092181f19SNick Piggin 72192181f19SNick Piggin if (is_errata100(regs, address)) 72292181f19SNick Piggin return; 72392181f19SNick Piggin 7242d4a7167SIngo Molnar if (unlikely(show_unhandled_signals)) 7252d4a7167SIngo Molnar show_signal_msg(regs, error_code, address, tsk); 72692181f19SNick Piggin 7272d4a7167SIngo Molnar /* Kernel addresses are always protection faults: */ 72892181f19SNick Piggin tsk->thread.cr2 = address; 72992181f19SNick Piggin tsk->thread.error_code = error_code | (address >= TASK_SIZE); 73092181f19SNick Piggin tsk->thread.trap_no = 14; 7312d4a7167SIngo Molnar 73292181f19SNick Piggin force_sig_info_fault(SIGSEGV, si_code, address, tsk); 7332d4a7167SIngo Molnar 73492181f19SNick Piggin return; 73592181f19SNick Piggin } 73692181f19SNick Piggin 73792181f19SNick Piggin if (is_f00f_bug(regs, address)) 73892181f19SNick Piggin return; 73992181f19SNick Piggin 74092181f19SNick Piggin no_context(regs, error_code, address); 74192181f19SNick Piggin } 74292181f19SNick Piggin 7432d4a7167SIngo Molnar static noinline void 7442d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 7452d4a7167SIngo Molnar unsigned long address) 74692181f19SNick Piggin { 74792181f19SNick Piggin __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); 74892181f19SNick Piggin } 74992181f19SNick Piggin 7502d4a7167SIngo Molnar static void 7512d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code, 7522d4a7167SIngo Molnar unsigned long address, int si_code) 75392181f19SNick Piggin { 75492181f19SNick Piggin struct mm_struct *mm = current->mm; 75592181f19SNick Piggin 75692181f19SNick Piggin /* 75792181f19SNick Piggin * Something tried to access memory that isn't in our memory map.. 75892181f19SNick Piggin * Fix it, but check if it's kernel or user first.. 
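 *
 * (We drop mmap_sem here first; __bad_area_nosemaphore() and the
 *  signal/oops paths below it are written to run without it held.)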
75992181f19SNick Piggin */ 76092181f19SNick Piggin up_read(&mm->mmap_sem); 76192181f19SNick Piggin 76292181f19SNick Piggin __bad_area_nosemaphore(regs, error_code, address, si_code); 76392181f19SNick Piggin } 76492181f19SNick Piggin 7652d4a7167SIngo Molnar static noinline void 7662d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) 76792181f19SNick Piggin { 76892181f19SNick Piggin __bad_area(regs, error_code, address, SEGV_MAPERR); 76992181f19SNick Piggin } 77092181f19SNick Piggin 7712d4a7167SIngo Molnar static noinline void 7722d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code, 7732d4a7167SIngo Molnar unsigned long address) 77492181f19SNick Piggin { 77592181f19SNick Piggin __bad_area(regs, error_code, address, SEGV_ACCERR); 77692181f19SNick Piggin } 77792181f19SNick Piggin 77892181f19SNick Piggin /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ 7792d4a7167SIngo Molnar static void 7802d4a7167SIngo Molnar out_of_memory(struct pt_regs *regs, unsigned long error_code, 7812d4a7167SIngo Molnar unsigned long address) 78292181f19SNick Piggin { 78392181f19SNick Piggin /* 78492181f19SNick Piggin * We ran out of memory, call the OOM killer, and return the userspace 7852d4a7167SIngo Molnar * (which will retry the fault, or kill us if we got oom-killed): 78692181f19SNick Piggin */ 78792181f19SNick Piggin up_read(¤t->mm->mmap_sem); 7882d4a7167SIngo Molnar 78992181f19SNick Piggin pagefault_out_of_memory(); 79092181f19SNick Piggin } 79192181f19SNick Piggin 7922d4a7167SIngo Molnar static void 7932d4a7167SIngo Molnar do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address) 79492181f19SNick Piggin { 79592181f19SNick Piggin struct task_struct *tsk = current; 79692181f19SNick Piggin struct mm_struct *mm = tsk->mm; 79792181f19SNick Piggin 79892181f19SNick Piggin up_read(&mm->mmap_sem); 79992181f19SNick Piggin 8002d4a7167SIngo Molnar /* Kernel mode? 
Handle exceptions or die: */ 80192181f19SNick Piggin if (!(error_code & PF_USER)) 80292181f19SNick Piggin no_context(regs, error_code, address); 8032d4a7167SIngo Molnar 804cd1b68f0SIngo Molnar /* User-space => ok to do another page fault: */ 80592181f19SNick Piggin if (is_prefetch(regs, error_code, address)) 80692181f19SNick Piggin return; 8072d4a7167SIngo Molnar 80892181f19SNick Piggin tsk->thread.cr2 = address; 80992181f19SNick Piggin tsk->thread.error_code = error_code; 81092181f19SNick Piggin tsk->thread.trap_no = 14; 8112d4a7167SIngo Molnar 81292181f19SNick Piggin force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); 81392181f19SNick Piggin } 81492181f19SNick Piggin 8152d4a7167SIngo Molnar static noinline void 8162d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code, 8172d4a7167SIngo Molnar unsigned long address, unsigned int fault) 81892181f19SNick Piggin { 8192d4a7167SIngo Molnar if (fault & VM_FAULT_OOM) { 82092181f19SNick Piggin out_of_memory(regs, error_code, address); 8212d4a7167SIngo Molnar } else { 8222d4a7167SIngo Molnar if (fault & VM_FAULT_SIGBUS) 82392181f19SNick Piggin do_sigbus(regs, error_code, address); 82492181f19SNick Piggin else 82592181f19SNick Piggin BUG(); 82692181f19SNick Piggin } 8272d4a7167SIngo Molnar } 82892181f19SNick Piggin 829d8b57bb7SThomas Gleixner static int spurious_fault_check(unsigned long error_code, pte_t *pte) 830d8b57bb7SThomas Gleixner { 831d8b57bb7SThomas Gleixner if ((error_code & PF_WRITE) && !pte_write(*pte)) 832d8b57bb7SThomas Gleixner return 0; 8332d4a7167SIngo Molnar 834d8b57bb7SThomas Gleixner if ((error_code & PF_INSTR) && !pte_exec(*pte)) 835d8b57bb7SThomas Gleixner return 0; 836d8b57bb7SThomas Gleixner 837d8b57bb7SThomas Gleixner return 1; 838d8b57bb7SThomas Gleixner } 839d8b57bb7SThomas Gleixner 840c61e211dSHarvey Harrison /* 8412d4a7167SIngo Molnar * Handle a spurious fault caused by a stale TLB entry. 8422d4a7167SIngo Molnar * 8432d4a7167SIngo Molnar * This allows us to lazily refresh the TLB when increasing the 8442d4a7167SIngo Molnar * permissions of a kernel page (RO -> RW or NX -> X). Doing it 8452d4a7167SIngo Molnar * eagerly is very expensive since that implies doing a full 8462d4a7167SIngo Molnar * cross-processor TLB flush, even if no stale TLB entries exist 8472d4a7167SIngo Molnar * on other processors. 8482d4a7167SIngo Molnar * 8495b727a3bSJeremy Fitzhardinge * There are no security implications to leaving a stale TLB when 8505b727a3bSJeremy Fitzhardinge * increasing the permissions on a page. 8515b727a3bSJeremy Fitzhardinge */ 8522d4a7167SIngo Molnar static noinline int 8532d4a7167SIngo Molnar spurious_fault(unsigned long error_code, unsigned long address) 8545b727a3bSJeremy Fitzhardinge { 8555b727a3bSJeremy Fitzhardinge pgd_t *pgd; 8565b727a3bSJeremy Fitzhardinge pud_t *pud; 8575b727a3bSJeremy Fitzhardinge pmd_t *pmd; 8585b727a3bSJeremy Fitzhardinge pte_t *pte; 8593c3e5694SSteven Rostedt int ret; 8605b727a3bSJeremy Fitzhardinge 8615b727a3bSJeremy Fitzhardinge /* Reserved-bit violation or user access to kernel space? 
*/ 8625b727a3bSJeremy Fitzhardinge if (error_code & (PF_USER | PF_RSVD)) 8635b727a3bSJeremy Fitzhardinge return 0; 8645b727a3bSJeremy Fitzhardinge 8655b727a3bSJeremy Fitzhardinge pgd = init_mm.pgd + pgd_index(address); 8665b727a3bSJeremy Fitzhardinge if (!pgd_present(*pgd)) 8675b727a3bSJeremy Fitzhardinge return 0; 8685b727a3bSJeremy Fitzhardinge 8695b727a3bSJeremy Fitzhardinge pud = pud_offset(pgd, address); 8705b727a3bSJeremy Fitzhardinge if (!pud_present(*pud)) 8715b727a3bSJeremy Fitzhardinge return 0; 8725b727a3bSJeremy Fitzhardinge 873d8b57bb7SThomas Gleixner if (pud_large(*pud)) 874d8b57bb7SThomas Gleixner return spurious_fault_check(error_code, (pte_t *) pud); 875d8b57bb7SThomas Gleixner 8765b727a3bSJeremy Fitzhardinge pmd = pmd_offset(pud, address); 8775b727a3bSJeremy Fitzhardinge if (!pmd_present(*pmd)) 8785b727a3bSJeremy Fitzhardinge return 0; 8795b727a3bSJeremy Fitzhardinge 880d8b57bb7SThomas Gleixner if (pmd_large(*pmd)) 881d8b57bb7SThomas Gleixner return spurious_fault_check(error_code, (pte_t *) pmd); 882d8b57bb7SThomas Gleixner 8835b727a3bSJeremy Fitzhardinge pte = pte_offset_kernel(pmd, address); 8845b727a3bSJeremy Fitzhardinge if (!pte_present(*pte)) 8855b727a3bSJeremy Fitzhardinge return 0; 8865b727a3bSJeremy Fitzhardinge 8873c3e5694SSteven Rostedt ret = spurious_fault_check(error_code, pte); 8883c3e5694SSteven Rostedt if (!ret) 8893c3e5694SSteven Rostedt return 0; 8903c3e5694SSteven Rostedt 8913c3e5694SSteven Rostedt /* 8922d4a7167SIngo Molnar * Make sure we have permissions in PMD. 8932d4a7167SIngo Molnar * If not, then there's a bug in the page tables: 8943c3e5694SSteven Rostedt */ 8953c3e5694SSteven Rostedt ret = spurious_fault_check(error_code, (pte_t *) pmd); 8963c3e5694SSteven Rostedt WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); 8972d4a7167SIngo Molnar 8983c3e5694SSteven Rostedt return ret; 8995b727a3bSJeremy Fitzhardinge } 9005b727a3bSJeremy Fitzhardinge 901c61e211dSHarvey Harrison int show_unhandled_signals = 1; 902c61e211dSHarvey Harrison 9032d4a7167SIngo Molnar static inline int 9042d4a7167SIngo Molnar access_error(unsigned long error_code, int write, struct vm_area_struct *vma) 90592181f19SNick Piggin { 90692181f19SNick Piggin if (write) { 9072d4a7167SIngo Molnar /* write, present and write, not present: */ 90892181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_WRITE))) 90992181f19SNick Piggin return 1; 9102d4a7167SIngo Molnar return 0; 9112d4a7167SIngo Molnar } 9122d4a7167SIngo Molnar 9132d4a7167SIngo Molnar /* read, present: */ 9142d4a7167SIngo Molnar if (unlikely(error_code & PF_PROT)) 91592181f19SNick Piggin return 1; 9162d4a7167SIngo Molnar 9172d4a7167SIngo Molnar /* read, not present: */ 91892181f19SNick Piggin if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) 91992181f19SNick Piggin return 1; 92092181f19SNick Piggin 92192181f19SNick Piggin return 0; 92292181f19SNick Piggin } 92392181f19SNick Piggin 9240973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address) 9250973a06cSHiroshi Shimamoto { 926d9517346SIngo Molnar return address >= TASK_SIZE_MAX; 9270973a06cSHiroshi Shimamoto } 9280973a06cSHiroshi Shimamoto 929c61e211dSHarvey Harrison /* 930c61e211dSHarvey Harrison * This routine handles page faults. It determines the address, 931c61e211dSHarvey Harrison * and the problem, and then passes it off to one of the appropriate 932c61e211dSHarvey Harrison * routines. 
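 *
 * A worked example of the hardware error code (see the PF_* bits at
 * the top of this file): error_code == 0x7 == PF_PROT|PF_WRITE|PF_USER
 * is a user-mode write to a present but write-protected page, e.g. the
 * first write to a private copy-on-write mapping.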
933c61e211dSHarvey Harrison */ 934c3731c68SIngo Molnar dotraplinkage void __kprobes 935c3731c68SIngo Molnar do_page_fault(struct pt_regs *regs, unsigned long error_code) 936c61e211dSHarvey Harrison { 937c61e211dSHarvey Harrison struct vm_area_struct *vma; 9382d4a7167SIngo Molnar struct task_struct *tsk; 9392d4a7167SIngo Molnar unsigned long address; 9402d4a7167SIngo Molnar struct mm_struct *mm; 94192181f19SNick Piggin int write; 942c61e211dSHarvey Harrison int fault; 943c61e211dSHarvey Harrison 944c61e211dSHarvey Harrison tsk = current; 945c61e211dSHarvey Harrison mm = tsk->mm; 9462d4a7167SIngo Molnar 9472d4a7167SIngo Molnar /* Get the faulting address: */ 948c61e211dSHarvey Harrison address = read_cr2(); 949c61e211dSHarvey Harrison 950f8561296SVegard Nossum /* 951f8561296SVegard Nossum * Detect and handle instructions that would cause a page fault for 952f8561296SVegard Nossum * both a tracked kernel page and a userspace page. 953f8561296SVegard Nossum */ 954f8561296SVegard Nossum if (kmemcheck_active(regs)) 955f8561296SVegard Nossum kmemcheck_hide(regs); 9565dfaf90fSIngo Molnar prefetchw(&mm->mmap_sem); 957f8561296SVegard Nossum 9580fd0e3daSPekka Paalanen if (unlikely(kmmio_fault(regs, address))) 95986069782SPekka Paalanen return; 960c61e211dSHarvey Harrison 961c61e211dSHarvey Harrison /* 962c61e211dSHarvey Harrison * We fault-in kernel-space virtual memory on-demand. The 963c61e211dSHarvey Harrison * 'reference' page table is init_mm.pgd. 964c61e211dSHarvey Harrison * 965c61e211dSHarvey Harrison * NOTE! We MUST NOT take any locks for this case. We may 966c61e211dSHarvey Harrison * be in an interrupt or a critical region, and should 967c61e211dSHarvey Harrison * only copy the information from the master page table, 968c61e211dSHarvey Harrison * nothing more. 969c61e211dSHarvey Harrison * 970c61e211dSHarvey Harrison * This verifies that the fault happens in kernel space 971c61e211dSHarvey Harrison * (error_code & 4) == 0, and that the fault was not a 972c61e211dSHarvey Harrison * protection error (error_code & 9) == 0. 973c61e211dSHarvey Harrison */ 9740973a06cSHiroshi Shimamoto if (unlikely(fault_in_kernel_space(address))) { 975f8561296SVegard Nossum if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { 976f8561296SVegard Nossum if (vmalloc_fault(address) >= 0) 977c61e211dSHarvey Harrison return; 9785b727a3bSJeremy Fitzhardinge 979f8561296SVegard Nossum if (kmemcheck_fault(regs, address, error_code)) 980f8561296SVegard Nossum return; 981f8561296SVegard Nossum } 982f8561296SVegard Nossum 9832d4a7167SIngo Molnar /* Can handle a stale RO->RW TLB: */ 98492181f19SNick Piggin if (spurious_fault(error_code, address)) 9855b727a3bSJeremy Fitzhardinge return; 9865b727a3bSJeremy Fitzhardinge 9872d4a7167SIngo Molnar /* kprobes don't want to hook the spurious faults: */ 9889be260a6SMasami Hiramatsu if (notify_page_fault(regs)) 9899be260a6SMasami Hiramatsu return; 990c61e211dSHarvey Harrison /* 991c61e211dSHarvey Harrison * Don't take the mm semaphore here. 
If we fixup a prefetch 9922d4a7167SIngo Molnar * fault we could otherwise deadlock: 993c61e211dSHarvey Harrison */ 99492181f19SNick Piggin bad_area_nosemaphore(regs, error_code, address); 9952d4a7167SIngo Molnar 99692181f19SNick Piggin return; 997c61e211dSHarvey Harrison } 998c61e211dSHarvey Harrison 9992d4a7167SIngo Molnar /* kprobes don't want to hook the spurious faults: */ 1000f8a6b2b9SIngo Molnar if (unlikely(notify_page_fault(regs))) 10019be260a6SMasami Hiramatsu return; 1002c61e211dSHarvey Harrison /* 1003891cffbdSLinus Torvalds * It's safe to allow irq's after cr2 has been saved and the 1004891cffbdSLinus Torvalds * vmalloc fault has been handled. 1005891cffbdSLinus Torvalds * 1006891cffbdSLinus Torvalds * User-mode registers count as a user access even for any 10072d4a7167SIngo Molnar * potential system fault or CPU buglet: 1008c61e211dSHarvey Harrison */ 1009891cffbdSLinus Torvalds if (user_mode_vm(regs)) { 1010891cffbdSLinus Torvalds local_irq_enable(); 1011891cffbdSLinus Torvalds error_code |= PF_USER; 10122d4a7167SIngo Molnar } else { 10132d4a7167SIngo Molnar if (regs->flags & X86_EFLAGS_IF) 1014c61e211dSHarvey Harrison local_irq_enable(); 10152d4a7167SIngo Molnar } 1016c61e211dSHarvey Harrison 1017c61e211dSHarvey Harrison if (unlikely(error_code & PF_RSVD)) 101892181f19SNick Piggin pgtable_bad(regs, error_code, address); 1019c61e211dSHarvey Harrison 1020*cdd6c482SIngo Molnar perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 10217dd1fcc2SPeter Zijlstra 1022c61e211dSHarvey Harrison /* 10232d4a7167SIngo Molnar * If we're in an interrupt, have no user context or are running 10242d4a7167SIngo Molnar * in an atomic region then we must not take the fault: 1025c61e211dSHarvey Harrison */ 102692181f19SNick Piggin if (unlikely(in_atomic() || !mm)) { 102792181f19SNick Piggin bad_area_nosemaphore(regs, error_code, address); 102892181f19SNick Piggin return; 102992181f19SNick Piggin } 1030c61e211dSHarvey Harrison 10313a1dfe6eSIngo Molnar /* 10323a1dfe6eSIngo Molnar * When running in the kernel we expect faults to occur only to 10332d4a7167SIngo Molnar * addresses in user space. All other faults represent errors in 10342d4a7167SIngo Molnar * the kernel and should generate an OOPS. Unfortunately, in the 10352d4a7167SIngo Molnar * case of an erroneous fault occurring in a code path which already 10362d4a7167SIngo Molnar * holds mmap_sem we will deadlock attempting to validate the fault 10372d4a7167SIngo Molnar * against the address space. Luckily the kernel only validly 10382d4a7167SIngo Molnar * references user space from well defined areas of code, which are 10392d4a7167SIngo Molnar * listed in the exceptions table. 1040c61e211dSHarvey Harrison * 1041c61e211dSHarvey Harrison * As the vast majority of faults will be valid we will only perform 10422d4a7167SIngo Molnar * the source reference check when there is a possibility of a 10432d4a7167SIngo Molnar * deadlock. Attempt to lock the address space, if we cannot we then 10442d4a7167SIngo Molnar * validate the source. 
If this is invalid we can skip the address 10452d4a7167SIngo Molnar * space check, thus avoiding the deadlock: 1046c61e211dSHarvey Harrison */ 104792181f19SNick Piggin if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 1048c61e211dSHarvey Harrison if ((error_code & PF_USER) == 0 && 104992181f19SNick Piggin !search_exception_tables(regs->ip)) { 105092181f19SNick Piggin bad_area_nosemaphore(regs, error_code, address); 105192181f19SNick Piggin return; 105292181f19SNick Piggin } 1053c61e211dSHarvey Harrison down_read(&mm->mmap_sem); 105401006074SPeter Zijlstra } else { 105501006074SPeter Zijlstra /* 10562d4a7167SIngo Molnar * The above down_read_trylock() might have succeeded in 10572d4a7167SIngo Molnar * which case we'll have missed the might_sleep() from 10582d4a7167SIngo Molnar * down_read(): 105901006074SPeter Zijlstra */ 106001006074SPeter Zijlstra might_sleep(); 1061c61e211dSHarvey Harrison } 1062c61e211dSHarvey Harrison 1063c61e211dSHarvey Harrison vma = find_vma(mm, address); 106492181f19SNick Piggin if (unlikely(!vma)) { 106592181f19SNick Piggin bad_area(regs, error_code, address); 106692181f19SNick Piggin return; 106792181f19SNick Piggin } 106892181f19SNick Piggin if (likely(vma->vm_start <= address)) 1069c61e211dSHarvey Harrison goto good_area; 107092181f19SNick Piggin if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { 107192181f19SNick Piggin bad_area(regs, error_code, address); 107292181f19SNick Piggin return; 107392181f19SNick Piggin } 1074c61e211dSHarvey Harrison if (error_code & PF_USER) { 1075c61e211dSHarvey Harrison /* 1076c61e211dSHarvey Harrison * Accessing the stack below %sp is always a bug. 1077c61e211dSHarvey Harrison * The large cushion allows instructions like enter 1078c61e211dSHarvey Harrison * and pusha to work. ("enter $65535, $31" pushes 1079c61e211dSHarvey Harrison * 32 pointers and then decrements %sp by 65535.) 1080c61e211dSHarvey Harrison */ 108192181f19SNick Piggin if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { 108292181f19SNick Piggin bad_area(regs, error_code, address); 108392181f19SNick Piggin return; 1084c61e211dSHarvey Harrison } 108592181f19SNick Piggin } 108692181f19SNick Piggin if (unlikely(expand_stack(vma, address))) { 108792181f19SNick Piggin bad_area(regs, error_code, address); 108892181f19SNick Piggin return; 108992181f19SNick Piggin } 109092181f19SNick Piggin 1091c61e211dSHarvey Harrison /* 1092c61e211dSHarvey Harrison * Ok, we have a good vm_area for this memory access, so 1093c61e211dSHarvey Harrison * we can handle it.. 1094c61e211dSHarvey Harrison */ 1095c61e211dSHarvey Harrison good_area: 109692181f19SNick Piggin write = error_code & PF_WRITE; 10972d4a7167SIngo Molnar 109892181f19SNick Piggin if (unlikely(access_error(error_code, write, vma))) { 109992181f19SNick Piggin bad_area_access_error(regs, error_code, address); 110092181f19SNick Piggin return; 1101c61e211dSHarvey Harrison } 1102c61e211dSHarvey Harrison 1103c61e211dSHarvey Harrison /* 1104c61e211dSHarvey Harrison * If for any reason at all we couldn't handle the fault, 1105c61e211dSHarvey Harrison * make sure we exit gracefully rather than endlessly redo 11062d4a7167SIngo Molnar * the fault: 1107c61e211dSHarvey Harrison */ 1108d06063ccSLinus Torvalds fault = handle_mm_fault(mm, vma, address, write ? 
FAULT_FLAG_WRITE : 0); 11092d4a7167SIngo Molnar 1110c61e211dSHarvey Harrison if (unlikely(fault & VM_FAULT_ERROR)) { 111192181f19SNick Piggin mm_fault_error(regs, error_code, address, fault); 111292181f19SNick Piggin return; 1113c61e211dSHarvey Harrison } 11142d4a7167SIngo Molnar 1115ac17dc8eSPeter Zijlstra if (fault & VM_FAULT_MAJOR) { 1116c61e211dSHarvey Harrison tsk->maj_flt++; 1117*cdd6c482SIngo Molnar perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 111878f13e95SPeter Zijlstra regs, address); 1119ac17dc8eSPeter Zijlstra } else { 1120c61e211dSHarvey Harrison tsk->min_flt++; 1121*cdd6c482SIngo Molnar perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 112278f13e95SPeter Zijlstra regs, address); 1123ac17dc8eSPeter Zijlstra } 1124c61e211dSHarvey Harrison 11258c938f9fSIngo Molnar check_v8086_mode(regs, address, tsk); 11268c938f9fSIngo Molnar 1127c61e211dSHarvey Harrison up_read(&mm->mmap_sem); 1128c61e211dSHarvey Harrison } 1129