/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/interrupt.h>
#include <linux/mmiotrace.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vt_kern.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/errno.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/tty.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/perf_counter.h>

#include <asm-generic/sections.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/proto.h>
#include <asm/traps.h>
#include <asm/desc.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	}

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page), address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static void dump_pagetable(unsigned long address)
{
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							& (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {

		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							& (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	unsigned long address;

	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE) {

		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);

	pgd += pgd_index(address);
	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	static int once;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!once) {
			printk(errata93_warning);
			once = 1;
		}
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch, fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (*stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2 = address;
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER))
		no_context(regs, error_code, address);

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		out_of_memory(regs, error_code, address);
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int write;
	int fault;

	tsk = current;
	mm = tsk->mm;

	prefetchw(&mm->mmap_sem);

	/* Get the faulting address: */
	address = read_cr2();

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in
	 * the kernel and should generate an OOPS. Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space. Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space; if we cannot, we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	write = error_code & PF_WRITE;

	if (unlikely(access_error(error_code, write, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, write);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}