// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>

/*
 * Handle a fault on a kernel virtual address: such addresses are needed
 * to implement vmalloc/pkmap/fixmap (refer to asm/processor.h for the
 * System Memory Map).
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

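	/*
	 * pgd is the faulting task's table (via active_mm, so this also
	 * works for kernel threads running with a borrowed mm); pgd_k is
	 * the kernel master table (swapper pgdir) against which vmalloc
	 * and friends make their updates.
	 */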
	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		goto bad_area;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

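	/*
	 * Copy the kernel PMD into the task's table: the 2nd level table
	 * (or hugepage) is now shared, and the faulting access can be
	 * satisfied by a regular TLB refill once it re-executes.
	 */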
	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}
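	/*
	 * A user-mode access to a kernel address deliberately falls through
	 * to the regular path: no vma covers it, so it ends up as a plain
	 * SEGV_MAPERR below. Illustrative scenario for the branch above:
	 * one task vmalloc()s a buffer, and kernel code running in another
	 * task's context touches it before that task's pgd has picked up
	 * the new kernel mapping.
	 */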

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

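	/*
	 * Decode the cause: a store (or EX, the atomic exchange insn) is a
	 * write fault; an instruction-fetch protection violation is an
	 * exec fault; anything else is treated as a read.
	 */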
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
	         (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

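	/*
	 * FAULT_FLAG_DEFAULT is ALLOW_RETRY | KILLABLE | INTERRUPTIBLE:
	 * the core mm may drop mmap_lock and ask us to retry, and signals
	 * can interrupt the wait for the fault to be serviced.
	 */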
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
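	/*
	 * find_vma() returns the first vma ending above the address, so an
	 * address below vma->vm_start is only valid if this is a grows-down
	 * (stack) vma that expand_stack() can extend to cover it.
	 */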
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

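	/*
	 * A vma exists but lacks the needed permission: report SEGV_ACCERR
	 * instead of the default SEGV_MAPERR (no mapping at all).
	 */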
	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags);

	/*
	 * Quick path to respond to signals. fault_signal_pending() is only
	 * true when VM_FAULT_RETRY was returned, in which case the core mm
	 * has already dropped mmap_lock, so there is nothing to unlock.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/*
	 * Fault retry nuances: mmap_lock was already relinquished by the
	 * core mm, so just loop back, retake it and retry with
	 * FAULT_FLAG_TRIED set.
	 */
	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

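	/*
	 * Both outcomes converge here: the success path falls through to
	 * the bad_area label as well, sharing the unlock and the fault
	 * accounting below; fault itself tells the two cases apart.
	 */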
bad_area:
	mmap_read_unlock(mm);

	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}

		/* Normal return path: fault handled gracefully */
		return;
	}

	if (!user_mode(regs))
		goto no_context;

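	/*
	 * For a user-mode OOM, hand over to the OOM machinery, which may
	 * pick a better victim than the faulting task.
	 */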
	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

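	/*
	 * Kernel-mode fault (or no user context): fixup_exception()
	 * consults the exception tables, where uaccess helpers register
	 * their faulting instructions so that a bad user pointer makes
	 * them return -EFAULT rather than crash. With no fixup entry,
	 * the kernel oopses.
	 */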
no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}