/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address, current);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
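 *
 * If no mm is supplied, the walk starts from the TTB (or from
 * swapper_pg_dir as a fallback), so this is also usable from
 * kernel-mode oops paths where there is no user context.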
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

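	/* Now repeat the sync one level down, at the pmd: */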
	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
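	 *
	 * get_TTB() returns the page-table base in use for the current
	 * context, i.e. the pgd the TLB miss handlers are walking.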
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

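		/*
		 * si_code tells userspace why: SEGV_MAPERR for an
		 * unmapped address, SEGV_ACCERR for a protection
		 * violation on a mapped one.
		 */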
		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * The page fault was interrupted by SIGKILL. We have no reason
	 * to continue the fault.
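	 *
	 * Note that mmap_sem is only still held if the core VM did not
	 * already drop it for a retry, hence the VM_FAULT_RETRY check.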
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
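 *
 * error_code carries the FAULT_CODE_* bits (write access, ITLB miss)
 * that the low-level exception entry code recorded for this fault.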
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
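	 *
	 * mmap_sem is still held here; it is dropped again on every
	 * path below, either directly, in the error helpers, or by
	 * the core VM when it asks for a retry.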
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}