xref: /openbmc/qemu/target/arm/tcg/hflags.c (revision 25657fc6)
/*
 * ARM hflags
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "cpregs.h"

static inline bool fgt_svc(CPUARMState *env, int el)
{
    /*
     * Assuming fine-grained traps are active, return true if we
     * should be trapping on SVC instructions. Only AArch64 can
     * trap on an SVC at EL1, but we don't need to special-case this
     * because if this is AArch32 EL1 then arm_fgt_active() is false.
     * We also know el is 0 or 1.
     */
    return el == 0 ?
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
}
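
/*
 * Illustrative sketch (editorial, not part of the original file):
 * FIELD_EX64() extracts a named bitfield from a 64-bit value, so the
 * EL0 arm of the check above is conceptually
 *
 *     uint64_t hfgitr = env->cp15.fgt_exec[FGTREG_HFGITR];
 *     bool trap = (hfgitr >> R_HFGITR_EL2_SVC_EL0_SHIFT) & 1;
 *
 * where the shift constant is assumed to come from the FIELD()
 * layout of HFGITR_EL2 in cpregs.h.
 */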

static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }

    return flags;
}
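
/*
 * Illustrative sketch (editorial, not part of the original file):
 * the DP_TBFLAG_* macros deposit a field into the packed
 * CPUARMTBFlags words, and EX_TBFLAG_* extracts it again.
 * Conceptually, for a field FOO laid out with FIELD() in cpu.h:
 *
 *     flags.flags = deposit32(flags.flags,
 *                             R_TBFLAG_ANY_FOO_SHIFT,
 *                             R_TBFLAG_ANY_FOO_LENGTH, value);
 *
 * FOO and the exact shift/length names here are hypothetical, for
 * illustration only; the real layouts live in cpu.h.
 */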

static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
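
/*
 * Editorial note (hedged): SCTLR.B is the legacy pre-v7 BE32 bit,
 * while arm_cpu_data_is_big_endian_a32() also accounts for the
 * CPSR.E/SCTLR.EE byte-swapped data scheme, so BE_DATA covers both
 * big-endian configurations.
 */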

static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
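
/*
 * Editorial note (hedged): ARM_MMU_IDX_M_NEGPRI marks the M-profile
 * MMU index variants used while the execution priority is negative
 * (e.g. in HardFault/NMI or with FAULTMASK raised), so the test
 * above clears STACKCHECK exactly when CCR.STKOFHFNMIGN asks for
 * stack limit faults to be ignored at negative priority.
 */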

/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}
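
/*
 * Editorial note (hedged): IsFullA64Enabled() is an AND-reduction
 * over every EL that controls the current one. For example, at EL0
 * outside the EL2&0 host regime, with EL2 enabled and EL3
 * implemented, the result is effectively
 *
 *     SMCR_EL1.FA64 && SMCR_EL2.FA64 && SMCR_EL3.FA64
 *
 * which is what the early-return chain above computes.
 */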

static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE is disabled, the translator does not need
         * the vector length.  If SVE EL > FP EL, the FP exception has
         * precedence, and the translator does not need the SVE EL.  Save
         * potential re-translations by forcing the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled. */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above. */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /*
             * Note that E20_2 is gated by HCR_EL2.E2H == 1, but E20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
            DP_TBFLAG_A64(flags, FGT_ERET, 1);
        }
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
            }
        }
        /* And again for unprivileged accesses, if required.  */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
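
/*
 * Editorial note (hedged): the TBII/TBID split above means the TBID
 * flag caches the raw TCR_ELx.TBIn bits (tag bytes ignored for data
 * addresses), while TBII keeps only those TBIn bits whose matching
 * TBIDn bit is clear, i.e. the halves of the address space where
 * instruction addresses are tagged as well. For example, with
 * TBI0 = 1 and TBID0 = 1, bit 0 of TBID is set but bit 0 of TBII is
 * clear: low-half data addresses carry tags, PC values do not.
 */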

static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}
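
/*
 * Illustrative usage (editorial, not part of the original file):
 * code that changes any state cached in hflags outside of
 * translation, e.g. a system-register write that flips
 * SCTLR_EL1.A, must be followed by
 *
 *     arm_rebuild_hflags(env);
 *
 * so the next translation block picks up the new ALIGN_MEM value;
 * under CONFIG_DEBUG_TCG, assert_hflags_rebuild_correctly() below
 * catches any missed update.
 */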

/*
 * If we have triggered an EL state change, we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change, we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx "))\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}