/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"


/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for stage-2 data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2
     * or for stage-1 faults.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     *
     * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_LS64_ACCDATA: Translation,
     * Access Flag, and Permission faults caused by LD64B, ST64B,
     * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
     * faults and regardless of the target EL.
     */
    if (template_syn & ARM_EL_VNCR) {
        /*
         * FEAT_NV2 faults on accesses via VNCR_EL2 are a special case:
         * they are always reported as "same EL", even though we are going
         * from EL1 to EL2.
         */
        assert(!fi->stage2);
        syn = syn_data_abort_vncr(fi->ea, is_write, fsc);
    } else if (!(template_syn & ARM_EL_ISV) || target_el != 2
               || fi->s1ptw || !fi->stage2) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    /*
     * For M-profile there is no guest-facing FSR. We compute a
     * short-form value for env->exception.fsr which we will then
     * examine in arm_v7m_cpu_do_interrupt(). In theory we could
     * use the LPAE format instead as long as both bits of code agree
     * (and arm_fi_to_lfsc() handled the M-profile specific
     * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases).
     */
    if (!arm_feature(env, ARM_FEATURE_M) &&
        (target_el == 2 || arm_el_is_aa64(env, target_el) ||
         arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) {
        /*
         * LPAE format fault status register: bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
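        /*
         * E.g. a level-2 translation fault yields FSC 0b000110, the
         * same DFSC/IFSC value that appears in the syndrome.
         */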
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR: this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}

static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
                                    ARMMMUFaultInfo *fi)
{
    bool ret;

    switch (fi->gpcf) {
    case GPCF_None:
        return false;
    case GPCF_AddressSize:
    case GPCF_Walk:
    case GPCF_EABT:
        /* R_PYTGX: GPT faults are reported as GPC. */
        ret = true;
        break;
    case GPCF_Fail:
        /*
         * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
         * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
         * if SCR_EL3.GPF is set, otherwise an insn or data abort.
         */
        ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
        break;
    default:
        g_assert_not_reached();
    }

    assert(cpu_isar_feature(aa64_rme, cpu));
    assert(fi->type == ARMFault_GPCFOnWalk ||
           fi->type == ARMFault_GPCFOnOutput);
    if (fi->gpcf == GPCF_AddressSize) {
        assert(fi->level == 0);
    } else {
        assert(fi->level >= 0 && fi->level <= 1);
    }

    return ret;
}

static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
{
    static uint8_t const gpcsc[] = {
        [GPCF_AddressSize] = 0b000000,
        [GPCF_Walk]        = 0b000100,
        [GPCF_Fail]        = 0b001100,
        [GPCF_EABT]        = 0b010100,
    };
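    /*
     * The low bits of GPCSC encode the faulting GPT level, e.g. a GPT
     * walk fault at level 1 encodes as 0b000100 | 1 == 0b000101.
     */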

    /* Note that we've validated fi->gpcf and fi->level above. */
    return gpcsc[fi->gpcf] | fi->level;
}

static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el = exception_target_el(env);
    int current_el = arm_current_el(env);
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    /*
     * We know this must be a data or insn abort, and that
     * env->exception.syndrome contains the template syndrome set
     * up at translate time. So we can check only the VNCR bit
     * (and indeed syndrome does not have the EC field in it,
     * because we masked that out in disas_set_insn_syndrome())
     */
    bool is_vncr = (access_type != MMU_INST_FETCH) &&
                   (env->exception.syndrome & ARM_EL_VNCR);

    if (is_vncr) {
        /* FEAT_NV2 faults on accesses via VNCR_EL2 go to EL2 */
        target_el = 2;
    }

    if (report_as_gpc_exception(cpu, current_el, fi)) {
        target_el = 3;

        fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

        syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
                      access_type == MMU_INST_FETCH,
                      encode_gpcsc(fi), is_vncr,
                      0, fi->s1ptw,
                      access_type == MMU_DATA_STORE, fsc);

        env->cp15.mfar_el3 = fi->paddr;
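        /*
         * MFAR_EL3.{NSE,NS} encode the physical address space of the
         * faulting access: Secure 0b00, NonSecure 0b01, Root 0b10,
         * Realm 0b11.
         */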
        switch (fi->paddr_space) {
        case ARMSS_Secure:
            break;
        case ARMSS_NonSecure:
            env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
            break;
        case ARMSS_Root:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
            break;
        case ARMSS_Realm:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
            break;
        default:
            g_assert_not_reached();
        }

        exc = EXCP_GPC;
        goto do_raise;
    }

    /* If SCR_EL3.GPF is unset, GPF may still be routed to EL2. */
    if (fi->gpcf == GPCF_Fail && target_el < 2) {
        if (arm_hcr_el2_eff(env) & HCR_GPF) {
            target_el = 2;
        }
    }

    if (fi->stage2) {
        target_el = 2;
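        /*
         * HPFAR_EL2.FIPA reports the faulting IPA: bits [58:12] of the
         * stage-2 address are placed at bits [50:4] of the register.
         */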
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }

    same_el = current_el == target_el;
    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
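        /*
         * From ARMv6 the short-format FSR reports the access type in
         * the WnR bit (bit 11), set here for stores.
         */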
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

 do_raise:
    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = arm_env_mmu_index(env);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment, not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
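        /* memset returns its destination, so fi points at the zeroed local. */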
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Per R_XCHFJ, an alignment fault not due to memory type has
     * highest precedence. Otherwise, walk the page table and
     * collect the page description.
     */
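    /*
     * E.g. an access requiring 8-byte alignment has
     * memop_alignment_bits() == 3, so any of the low three address
     * bits being set is an alignment fault.
     */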
    if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
        fi->type = ARMFault_Alignment;
    } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
                              core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                              &res, fi)) {
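        /*
         * Stash the walk's memory attributes and shareability in the
         * TLB entry's arch-specific fields for later consumers.
         */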
        res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
        res.f.extra.arm.shareability = res.cacheattrs.shareability;
        *out = res.f;
        return true;
    }
    if (probe) {
        return false;
    }

    /* Now we have a real cpu fault. */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */