xref: /openbmc/linux/arch/arc/mm/fault.c (revision 7883017b)
// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>

/*
 * Handle a fault on a kernel virtual address: such addresses are needed
 * to implement vmalloc/pkmap/fixmap (refer to asm/processor.h for the
 * System Memory Map).
 *
 * It simply copies the PMD entry (a pointer to the 2nd level page table,
 * or a hugepage) from the swapper pgdir into the task pgdir; the 2nd
 * level table/page is thus shared between them.
 */
static noinline int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

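	/*
	 * Note: with ARC's default two-level tables the p4d/pud (and
	 * possibly pmd) levels are folded, so the offset helpers below
	 * return the level above and the intermediate set_*() copies
	 * collapse to no-ops; the walk is written generically so it
	 * also works when more levels are configured.
	 */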
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (pmd_none(*pmd_k))
		goto bad_area;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

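/*
 * Top-level page fault handler, reached from the TLB miss slow path and
 * from the ProtV exception handler (see arch/arc/mm/tlbex.S and
 * arch/arc/kernel/entry.S) with the faulting address in @address.
 */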
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

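	/*
	 * Classify the access from the Exception Cause Register: a store
	 * cause code (ST/EX) means a write fault, a ProtV instruction
	 * fetch means an exec fault, anything else is treated as a read.
	 */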
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
	         (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

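	/*
	 * FAULT_FLAG_DEFAULT includes FAULT_FLAG_ALLOW_RETRY and
	 * FAULT_FLAG_KILLABLE, so handle_mm_fault() may drop mmap_lock
	 * and ask us to retry, and a fatal signal can interrupt the wait.
	 */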
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

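	/*
	 * find_vma() returns the first vma ending above @address; an
	 * address below its start is unmapped, unless a VM_GROWSDOWN
	 * (stack) vma can be expanded down to cover it.
	 */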
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

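	/*
	 * A mapping exists with sufficient permissions, so let the core
	 * mm service the fault (allocation, CoW, swap-in, ...). si_code
	 * distinguishes SEGV_MAPERR (no mapping) from the SEGV_ACCERR
	 * set above (mapping present, access not permitted).
	 */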
	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/*
	 * Fault retry nuances, mmap_lock already relinquished by core mm
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
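	/*
	 * Once FAULT_FLAG_TRIED is set the core mm typically waits for
	 * the page synchronously rather than returning VM_FAULT_RETRY
	 * again, so the retry loop above is effectively bounded.
	 */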

bad_area:
	mmap_read_unlock(mm);

	/*
	 * Major/minor page fault accounting is handled by the core mm
	 * (via the @regs argument to handle_mm_fault()); in case of a
	 * retry we only land here once.
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;

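	/*
	 * From here on the fault is a user-mode error (kernel mode took
	 * no_context above). For OOM, defer to the core OOM machinery,
	 * which may pick a victim other than the current task.
	 */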
	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

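	/*
	 * Fault taken in kernel context (or with no mm): try an exception
	 * table fixup, e.g. for a faulting copy_{to,from}_user(); if none
	 * exists, the kernel itself dereferenced a bad address, so die().
	 */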
no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}