xref: /openbmc/qemu/target/arm/tcg/translate-m-nocp.c (revision 7d87775f)
/*
 *  ARM translation: M-profile NOCP special-case instructions
 *
 *  Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "translate.h"
#include "translate-a32.h"

#include "decode-m-nocp.c.inc"

/*
 * Decoding VLLDM and VLSTM is nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->op) {
        /*
         * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
         * to take the IMPDEF option to make memory accesses to the stack
         * slots that correspond to the D16-D31 registers (discarding
         * read data and writing UNKNOWN values), so for us the T2
         * encoding behaves identically to the T1 encoding.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
        }
    } else {
        /*
         * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
         * This is currently architecturally impossible, but we add the
         * check to stay in line with the pseudocode. Note that we must
         * emit code for the UNDEF so it takes precedence over the NOCP.
         */
        if (dc_isar_feature(aa32_simd_r32, s)) {
            unallocated_encoding(s);
            return true;
        }
    }

    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        clear_eci_state(s);
        return true;
    }

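    /* The helpers do the whole load/store operation, using Rn as the base address */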
    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(tcg_env, fptr);
    } else {
        gen_helper_v7m_vlstm(tcg_env, fptr);
    }

    clear_eci_state(s);

    /*
     * End the TB, because we have updated FP control bits,
     * and possibly VPR or LTPSIZE.
     */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
{
    int btmreg, topreg;
    TCGv_i64 zero;
    TCGv_i32 aspen, sfpa;

    if (!dc_isar_feature(aa32_m_sec_state, s)) {
        /* Before v8.1M, fall through in decode to NOCP check */
        return false;
    }

    /* Explicitly UNDEF because this takes precedence over NOCP */
    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    if (!dc_isar_feature(aa32_vfp_simd, s)) {
        /* NOP if we have neither FP nor MVE */
        clear_eci_state(s);
        return true;
    }

    /*
     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
     * active floating point context so we must NOP (without doing
     * any lazy state preservation or the NOCP check).
     */
    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
    sfpa = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
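    /* aspen is now zero if FPCCR_S.ASPEN is set, nonzero if it is clear */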
    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
    tcg_gen_or_i32(sfpa, sfpa, aspen);
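    /*
     * The OR is zero only when ASPEN == 1 && SFPA == 0, ie when there is
     * no active FP context; in that case branch past the insn body (NOP).
     */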
    arm_gen_condlabel(s);
    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel.label);

    if (s->fp_excp_el != 0) {
        gen_exception_insn_el(s, 0, EXCP_NOCP,
                              syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    topreg = a->vd + a->imm - 1;
    btmreg = a->vd;

    /* Convert to Sreg numbers if the insn specified the registers as Dregs */
    if (a->size == 3) {
        topreg = topreg * 2 + 1;
        btmreg *= 2;
    }

    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
        /* UNPREDICTABLE: we choose to undef */
        unallocated_encoding(s);
        return true;
    }

    /* Silently ignore requests to clear D16-D31 if they don't exist */
    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
        topreg = 31;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Zero the Sregs from btmreg to topreg inclusive. */
    zero = tcg_constant_i64(0);
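    /*
     * An odd-numbered starting Sreg is cleared on its own so that the
     * loop below can clear whole Dregs with 64-bit stores; a leftover
     * single Sreg at the top is handled after the loop.
     */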
    if (btmreg & 1) {
        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
        btmreg++;
    }
    for (; btmreg + 1 <= topreg; btmreg += 2) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
    }
    if (btmreg == topreg) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
        btmreg++;
    }
    assert(btmreg == topreg + 1);
    if (dc_isar_feature(aa32_mve, s)) {
        store_cpu_field(tcg_constant_i32(0), v7m.vpr);
    }

    clear_eci_state(s);
    return true;
}

/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed. do_access is true to do the store,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
                               bool do_access);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary. do_access is true to do the load,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
                                  bool do_access);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;

static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}

static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    tcg_gen_or_i32(fpca, fpca, aspen);
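    /* After the OR, fpca is zero exactly in the fpInactive case */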
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
}

static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque, true);
        gen_helper_vfp_set_fpscr(tcg_env, tmp);
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque, true);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPSR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
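        /* Only the NZCV bits of the value go into the FPSR */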
        tcg_gen_andi_i32(tmp, tmp, FPSR_NZCV_MASK);
        fpscr = load_cpu_field_low32(vfp.fpsr);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPSR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field_low32(fpscr, vfp.fpsr);
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGLabel *lab_active = gen_new_label();

        lab_end = gen_new_label();
        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /*
         * fpInactive case: write is a NOP, so only do side effects
         * like register writeback before we branch to end
         */
        loadfn(s, opaque, false);
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS writes
         * behave the same as FPCXT_S writes.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn_el()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
    }
    /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque, true);
        sfpa = tcg_temp_new_i32();
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPSR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(tcg_env, tmp);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            loadfn(s, opaque, false);
            break;
        }
        tmp = loadfn(s, opaque, true);
        store_cpu_field(tmp, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
    case ARM_VFP_P0:
    {
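        /* P0 is a bitfield within VPR, so do a read-modify-write of VPR */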
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque, true);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
    }
    default:
        g_assert_not_reached();
    }
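    /* lab_end is only set by the FPCXT_NS case */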
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}

static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, tcg_env);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, tcg_env);
        tcg_gen_andi_i32(tmp, tmp, FPSR_NZCVQC_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field_low32(vfp.fpsr);
        tcg_gen_andi_i32(tmp, tmp, FPSR_NZCV_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, tcg_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPSR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp, true);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(tcg_env, fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp, true);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS
         * reads the same as FPCXT_S.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn_el()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, tcg_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPSR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp, true);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0),
                            fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(tcg_env, fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            storefn(s, opaque, NULL, false);
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp, true);
        break;
    default:
        g_assert_not_reached();
    }

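    /* As in the write case, lab_end is only set for FPCXT_NS */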
    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}

static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
                             bool do_access)
{
    arg_VMSR_VMRS *a = opaque;

    if (!do_access) {
        return;
    }

    if (a->rt == 15) {
        /* Set the 4 flag bits in the CPSR */
        gen_set_nzcv(value);
    } else {
        store_reg(s, a->rt, value);
    }
}

static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
{
    arg_VMSR_VMRS *a = opaque;

    if (!do_access) {
        return NULL;
    }
    return load_reg(s, a->rt);
}

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    /*
     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
     * FPSCR -> r15 is a special case which writes to the PSR flags;
     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
     * we only care about the top 4 bits of FPSCR there.
     */
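    /*
     * (In assembler syntax, the Rt == 15 form of the FPSCR read is
     * written as "VMRS APSR_nzcv, FPSCR".)
     */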
    if (a->rt == 15) {
        if (a->l && a->reg == ARM_VFP_FPSCR) {
            a->reg = QEMU_VFP_FPSCR_NZCV;
        } else {
            return false;
        }
    }

    if (a->l) {
        /* VMRS, move FP system register to gp register */
        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
    } else {
        /* VMSR, move gp register to FP system register */
        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
    }
}

static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
                                bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

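    /* No store and no base register writeback: nothing to emit */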
    if (!do_access && !a->w) {
        return;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(tcg_env, addr);
    }

    if (do_access) {
        gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    }
}

static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
                                    bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = NULL;

    if (!a->a) {
        offset = -offset;
    }

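    /* No load and no base register writeback: nothing to emit */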
    if (!do_access && !a->w) {
        return NULL;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(tcg_env, addr);
    }

    if (do_access) {
        value = tcg_temp_new_i32();
        gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    }
    return value;
}

static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
}

static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
}

static bool trans_NOCP(DisasContext *s, arg_nocp *a)
{
    /*
     * Handle M-profile early check for disabled coprocessor:
     * all we need to do here is emit the NOCP exception if
     * the coprocessor is disabled. Otherwise we return false
     * and the real VFP/etc decode will handle the insn.
     */
    assert(arm_dc_feature(s, ARM_FEATURE_M));

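    /* cp10 and cp11 together form the FP coprocessor, so treat cp11 as cp10 */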
    if (a->cp == 11) {
        a->cp = 10;
    }
    if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
        (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
        /* In v8.1M, cp 8, 9, 14 and 15 are also governed by the cp10 enable */
        a->cp = 10;
    }

    if (a->cp != 10) {
        gen_exception_insn(s, 0, EXCP_NOCP, syn_uncategorized());
        return true;
    }

    if (s->fp_excp_el != 0) {
        gen_exception_insn_el(s, 0, EXCP_NOCP,
                              syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    return false;
}

static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
{
    /* This range needs a coprocessor check for v8.1M and later only */
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    return trans_NOCP(s, a);
}
767