/*
 *  ARM translation: M-profile NOCP special-case instructions
 *
 *  Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 
20 #include "qemu/osdep.h"
21 #include "tcg/tcg-op.h"
22 #include "tcg/tcg-op-gvec.h"
23 #include "translate.h"
24 #include "translate-a32.h"
25 
26 #include "decode-m-nocp.c.inc"
27 
28 /*
29  * Decode VLLDM and VLSTM are nonstandard because:
30  *  * if there is no FPU then these insns must NOP in
31  *    Secure state and UNDEF in Nonsecure state
32  *  * if there is an FPU then these insns do not have
33  *    the usual behaviour that vfp_access_check() provides of
34  *    being controlled by CPACR/NSACR enable bits or the
35  *    lazy-stacking logic.
36  */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    /* These encodings exist only for v8 M-profile */
    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->op) {
        /*
         * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
         * to take the IMPDEF option to make memory accesses to the stack
         * slots that correspond to the D16-D31 registers (discarding
         * read data and writing UNKNOWN values), so for us the T2
         * encoding behaves identically to the T1 encoding.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
        }
    } else {
        /*
         * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
         * This is currently architecturally impossible, but we add the
         * check to stay in line with the pseudocode. Note that we must
         * emit code for the UNDEF so it takes precedence over the NOCP.
         */
        if (dc_isar_feature(aa32_simd_r32, s)) {
            unallocated_encoding(s);
            return true;
        }
    }

    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        clear_eci_state(s);
        return true;
    }

    /* The helper performs the architected operation using the address in Rn */
    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }
    tcg_temp_free_i32(fptr);

    clear_eci_state(s);

    /*
     * End the TB, because we have updated FP control bits,
     * and possibly VPR or LTPSIZE.
     */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}
105 
/*
 * VSCCLRM: zero the S/D registers named by the reglist (and, if MVE is
 * present, VPR). Only available in Secure state; explicitly UNDEFs in
 * several situations so that it takes precedence over the NOCP fallback.
 */
static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
{
    int btmreg, topreg;
    TCGv_i64 zero;
    TCGv_i32 aspen, sfpa;

    if (!dc_isar_feature(aa32_m_sec_state, s)) {
        /* Before v8.1M, fall through in decode to NOCP check */
        return false;
    }

    /* Explicitly UNDEF because this takes precedence over NOCP */
    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    if (!dc_isar_feature(aa32_vfp_simd, s)) {
        /* NOP if we have neither FP nor MVE */
        clear_eci_state(s);
        return true;
    }

    /*
     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
     * active floating point context so we must NOP (without doing
     * any lazy state preservation or the NOCP check).
     */
    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
    sfpa = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    /* After the XOR, aspen == 0 iff FPCCR.ASPEN was 1 */
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
    /* sfpa is 0 here exactly in the "no active FP context" case above */
    tcg_gen_or_i32(sfpa, sfpa, aspen);
    arm_gen_condlabel(s);
    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel.label);

    if (s->fp_excp_el != 0) {
        gen_exception_insn_el(s, 0, EXCP_NOCP,
                              syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    topreg = a->vd + a->imm - 1;
    btmreg = a->vd;

    /* Convert to Sreg numbers if the insn specified in Dregs */
    if (a->size == 3) {
        topreg = topreg * 2 + 1;
        btmreg *= 2;
    }

    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
        /* UNPREDICTABLE: we choose to undef */
        unallocated_encoding(s);
        return true;
    }

    /* Silently ignore requests to clear D16-D31 if they don't exist */
    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
        topreg = 31;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Zero the Sregs from btmreg to topreg inclusive. */
    zero = tcg_constant_i64(0);
    if (btmreg & 1) {
        /* Odd-numbered start: clear the high half of the first Dreg */
        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
        btmreg++;
    }
    for (; btmreg + 1 <= topreg; btmreg += 2) {
        /* Aligned pairs of Sregs cleared a whole Dreg at a time */
        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
    }
    if (btmreg == topreg) {
        /* One trailing Sreg left over: clear just the low half */
        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
        btmreg++;
    }
    assert(btmreg == topreg + 1);
    if (dc_isar_feature(aa32_mve, s)) {
        store_cpu_field(tcg_constant_i32(0), v7m.vpr);
    }

    clear_eci_state(s);
    return true;
}
196 
/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */
209 
/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed. do_access is true to do the store,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
                               bool do_access);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary. do_access is true to do the load,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
                                  bool do_access);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;
233 
/*
 * Perform the common decode and FP-access checks for an access to FP
 * sysreg 'regno'. This may emit exception-raising code as a side effect
 * (via vfp_access_check()); the return value tells the caller whether
 * to return false (decode failure), return true (access fully handled),
 * or continue generating code for the access.
 */
static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        /* FPCXT_S and FPCXT_NS: v8.1M only, and Secure state only */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}
279 
static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    /* After the XOR, aspen == 0 iff FPCCR_NS.ASPEN was 1 */
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    /* The OR of the two terms is 0 exactly when fpInactive is true */
    tcg_gen_or_i32(fpca, fpca, aspen);
    /* cond is in terms of fpInactive, so invert it for the ==0 comparison */
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
    tcg_temp_free_i32(aspen);
    tcg_temp_free_i32(fpca);
}
309 
/*
 * Do a write to an M-profile floating point system register. The new
 * value is produced by the 'loadfn' callback (with 'opaque' passed
 * through to it). Returns true if the access was handled (including by
 * emitting an exception), false to let decode fall through (e.g. to the
 * NOCP check).
 */
static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque, true);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque, true);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
        /* Merge only the NZCV bits of the new value into FPSCR */
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGLabel *lab_active = gen_new_label();

        lab_end = gen_new_label();
        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /*
         * fpInactive case: write is a NOP, so only do side effects
         * like register writeback before we branch to end
         */
        loadfn(s, opaque, false);
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS writes
         * behave the same as FPCXT_S writes.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn_el()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
    }
    /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque, true);
        sfpa = tcg_temp_new_i32();
        /* Bit [31] of the value becomes CONTROL.SFPA */
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            loadfn(s, opaque, false);
            break;
        }
        tmp = loadfn(s, opaque, true);
        store_cpu_field(tmp, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        /* P0 is the low field of VPR; leave the other VPR bits alone */
        tmp = loadfn(s, opaque, true);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}
437 
/*
 * Do a read from an M-profile floating point system register. The value
 * read is delivered to the 'storefn' callback (with 'opaque' passed
 * through to it). Returns true if the access was handled (including by
 * emitting an exception), false to let decode fall through (e.g. to the
 * NOCP check).
 */
static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp, true);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp, true);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS
         * reads the same as FPCXT_S.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn_el()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp, true);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0),
                            fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            storefn(s, opaque, NULL, false);
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_P0:
        /* Extract just the P0 field from VPR */
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp, true);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}
588 
589 static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
590                              bool do_access)
591 {
592     arg_VMSR_VMRS *a = opaque;
593 
594     if (!do_access) {
595         return;
596     }
597 
598     if (a->rt == 15) {
599         /* Set the 4 flag bits in the CPSR */
600         gen_set_nzcv(value);
601         tcg_temp_free_i32(value);
602     } else {
603         store_reg(s, a->rt, value);
604     }
605 }
606 
607 static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
608 {
609     arg_VMSR_VMRS *a = opaque;
610 
611     if (!do_access) {
612         return NULL;
613     }
614     return load_reg(s, a->rt);
615 }
616 
617 static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
618 {
619     /*
620      * Accesses to R15 are UNPREDICTABLE; we choose to undef.
621      * FPSCR -> r15 is a special case which writes to the PSR flags;
622      * set a->reg to a special value to tell gen_M_fp_sysreg_read()
623      * we only care about the top 4 bits of FPSCR there.
624      */
625     if (a->rt == 15) {
626         if (a->l && a->reg == ARM_VFP_FPSCR) {
627             a->reg = QEMU_VFP_FPSCR_NZCV;
628         } else {
629             return false;
630         }
631     }
632 
633     if (a->l) {
634         /* VMRS, move FP system register to gp register */
635         return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
636     } else {
637         /* VMSR, move gp register to FP system register */
638         return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
639     }
640 }
641 
/*
 * fp_sysreg_storefn for VSTR sysreg: store 'value' to memory at
 * [Rn +/- imm] (offset added for a->a, subtracted otherwise; applied
 * before the access if a->p), with v8M stack-limit checking and
 * optional base register writeback (a->w). Frees 'value'. When
 * do_access is false only the writeback side effects are performed.
 */
static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
                                bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

    /* Nothing at all to do if we're skipping the access with no writeback */
    if (!do_access && !a->w) {
        return;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
        tcg_temp_free_i32(value);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}
682 
/*
 * fp_sysreg_loadfn for VLDR sysreg: load the new sysreg value from
 * memory at [Rn +/- imm] (offset added for a->a, subtracted otherwise;
 * applied before the access if a->p), with v8M stack-limit checking
 * and optional base register writeback (a->w). Returns a new TCG temp
 * holding the loaded value, or NULL when do_access is false (only the
 * writeback side effects are performed).
 */
static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
                                    bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = NULL;

    if (!a->a) {
        offset = -offset;
    }

    /* Nothing at all to do if we're skipping the access with no writeback */
    if (!do_access && !a->w) {
        return NULL;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        value = tcg_temp_new_i32();
        gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}
725 
726 static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
727 {
728     if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
729         return false;
730     }
731     if (a->rn == 15) {
732         return false;
733     }
734     return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
735 }
736 
737 static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
738 {
739     if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
740         return false;
741     }
742     if (a->rn == 15) {
743         return false;
744     }
745     return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
746 }
747 
748 static bool trans_NOCP(DisasContext *s, arg_nocp *a)
749 {
750     /*
751      * Handle M-profile early check for disabled coprocessor:
752      * all we need to do here is emit the NOCP exception if
753      * the coprocessor is disabled. Otherwise we return false
754      * and the real VFP/etc decode will handle the insn.
755      */
756     assert(arm_dc_feature(s, ARM_FEATURE_M));
757 
758     if (a->cp == 11) {
759         a->cp = 10;
760     }
761     if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
762         (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
763         /* in v8.1M cp 8, 9, 14, 15 also are governed by the cp10 enable */
764         a->cp = 10;
765     }
766 
767     if (a->cp != 10) {
768         gen_exception_insn(s, 0, EXCP_NOCP, syn_uncategorized());
769         return true;
770     }
771 
772     if (s->fp_excp_el != 0) {
773         gen_exception_insn_el(s, 0, EXCP_NOCP,
774                               syn_uncategorized(), s->fp_excp_el);
775         return true;
776     }
777 
778     return false;
779 }
780 
781 static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
782 {
783     /* This range needs a coprocessor check for v8.1M and later only */
784     if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
785         return false;
786     }
787     return trans_NOCP(s, a);
788 }
789