/*
 * ARM hflags
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "cpregs.h"

static inline bool fgt_svc(CPUARMState *env, int el)
{
    /*
     * Assuming fine-grained-traps are active, return true if we
     * should be trapping on SVC instructions. Only AArch64 can
     * trap on an SVC at EL1, but we don't need to special-case this
     * because if this is AArch32 EL1 then arm_fgt_active() is false.
     * We also know el is 0 or 1.
     */
    return el == 0 ?
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
}

/* Return true if memory alignment should be enforced. */
static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr)
{
#ifdef CONFIG_USER_ONLY
    return false;
#else
    /* Check the alignment enable bit. */
    if (sctlr & SCTLR_A) {
        return true;
    }

    /*
     * With PMSA, when the MPU is disabled, all memory types in the
     * default map are Normal, so we don't need to enforce alignment.
     */
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        return false;
    }

    /*
     * With VMSA, if translation is disabled, then the default memory type
     * is Device(-nGnRnE) instead of Normal, which requires that alignment
     * be enforced.  Since this affects all RAM, it is most efficient
     * to handle this during translation.
     */
    if (sctlr & SCTLR_M) {
        /* Translation enabled: memory type in PTE via MAIR_ELx. */
        return false;
    }
    if (el < 2 && (arm_hcr_el2_eff(env) & (HCR_DC | HCR_VM))) {
        /* Stage 2 translation enabled: memory type in PTE. */
        return false;
    }
    return true;
#endif
}

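/*
 * Fill in the TB flag bits that are common to all translation regimes:
 * the FP exception EL, the core MMU index, and whether architectural
 * single-step is active.
 */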
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }

    return flags;
}

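/*
 * Add the TB flag bits common to all AArch32 regimes (SCTLR.B byte
 * ordering, big-endian data accesses, and the NS bit) before falling
 * through to the shared rebuild_hflags_common().
 */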
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

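/*
 * Rebuild the TB flags for M-profile (ARMv7M/v8M) CPUs: alignment
 * trapping via CCR.UNALIGN_TRP, Handler mode, v8M stack-limit checking
 * and the Secure state bit.
 */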
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}

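/*
 * Rebuild the TB flags for A-profile AArch32 state: alignment, VFP
 * enable, HSTR_EL2 trapping, fine-grained traps, PSTATE.IL, SME
 * non-streaming traps and the Secure PL1&0 regime.
 */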
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);
    uint64_t sctlr = arm_sctlr(env, el);

    if (aprofile_require_alignment(env, el, sctlr)) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

    if (arm_aa32_secure_pl1_0(env)) {
        DP_TBFLAG_A32(flags, S_PL1_0, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

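/*
 * Rebuild the TB flags for AArch64 state: TBI/TBID, SVE/SME vector
 * lengths and exception ELs, alignment and endianness, pauth/BTI/LSE2,
 * the LDTR unprivileged-access condition, fine-grained and FEAT_NV
 * trapping, and MTE.
 */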
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t hcr = arm_hcr_el2_eff(env);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE is disabled, the translator does not need
         * the vector length.  If SVE EL > FP EL, the FP exception has
         * precedence, and the translator does not need SVE EL.  Save
         * potential re-translations by forcing the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled. */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above. */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (aprofile_require_alignment(env, el, sctlr)) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    if (cpu_isar_feature(aa64_lse2, env_archcpu(env))) {
        if (sctlr & SCTLR_nAA) {
            DP_TBFLAG_A64(flags, NAA, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            /* FEAT_NV: NV,NV1 == 1,1 means we don't do UNPRIV accesses */
            if ((hcr & (HCR_NV | HCR_NV1)) != (HCR_NV | HCR_NV1)) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
            DP_TBFLAG_A64(flags, TRAP_ERET, 1);
        }
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    /*
     * ERET can also be trapped for FEAT_NV. arm_hcr_el2_eff() takes care
     * of "is EL2 enabled" and the NV bit can only be set if FEAT_NV is present.
     */
    if (el == 1 && (hcr & HCR_NV)) {
        DP_TBFLAG_A64(flags, TRAP_ERET, 1);
        DP_TBFLAG_A64(flags, NV, 1);
        if (hcr & HCR_NV1) {
            DP_TBFLAG_A64(flags, NV1, 1);
        }
        if (hcr & HCR_NV2) {
            DP_TBFLAG_A64(flags, NV2, 1);
            if (hcr & HCR_E2H) {
                DP_TBFLAG_A64(flags, NV2_MEM_E20, 1);
            }
            if (env->cp15.sctlr_el[2] & SCTLR_EE) {
                DP_TBFLAG_A64(flags, NV2_MEM_BE, 1);
            }
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
                if (!EX_TBFLAG_A64(flags, UNPRIV)) {
                    /*
                     * In non-unpriv contexts (eg EL0), unpriv load/stores
                     * act like normal ones; duplicate the MTE info to
                     * avoid translate-a64.c having to check UNPRIV to see
                     * whether it is OK to index into MTE_ACTIVE[].
                     */
                    DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
                }
            }
        }
        /* And again for unprivileged accesses, if required.  */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /*
         * For unpriv tag-setting accesses we also need ATA0. Again, in
         * contexts where unpriv and normal insns are the same we
         * duplicate the ATA bit to save effort for translate-a64.c.
         */
        if (EX_TBFLAG_A64(flags, UNPRIV)) {
            if (allocation_tag_access_enabled(env, 0, sctlr)) {
                DP_TBFLAG_A64(flags, ATA0, 1);
            }
        } else {
            DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

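/*
 * Recompute the TB flags for the current CPU state, dispatching to the
 * AArch64, M-profile or AArch32 rebuild routine as appropriate.
 */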
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

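/* Recompute env->hflags from scratch for the current CPU state. */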
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

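/* Recompute the M-profile hflags for the EL supplied by the translator. */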
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

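/* Recompute the AArch32 hflags for the EL supplied by the translator. */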
void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

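/* Recompute the AArch64 hflags for the EL supplied by the translator. */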
void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

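/*
 * Debug check (CONFIG_DEBUG_TCG only): verify that the cached env->hflags
 * still match a freshly rebuilt set, and abort with a diagnostic if not.
 */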
void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}