xref: /openbmc/qemu/target/arm/tcg/m_helper.c (revision ce7325c1)
/*
 * ARM v7M (M-profile) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
#if !defined(CONFIG_USER_ONLY)
#include "hw/intc/armv7m_nvic.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}
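
/*
 * For reference, the SYSm encodings handled above follow the pattern used
 * by MRS/MSR on M-profile: for SYSm values 0..7, bit 0 set means "include
 * IPSR" and bit 2 set means "exclude APSR", giving roughly
 *   0 APSR   1 IAPSR   2 EAPSR   3 xPSR   5 IPSR   6 EPSR   7 IEPSR
 * The APSR fields are NZCV (bits [31:28]), Q (bit [27]) and, with the DSP
 * extension, GE (bits [19:16]); the mask bits tested in v7m_msr_xpsr()
 * correspond to the APSR_nzcvq and APSR_g field groups of MSR. EPSR
 * (IT/ICI state and the T bit) always reads as zero via MRS.
 */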

uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}
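
/*
 * CONTROL bit assignments, for reference: nPRIV is bit [0], SPSEL bit [1],
 * FPCA bit [2] and SFPA bit [3]. nPRIV and SPSEL are banked between
 * security states, while FPCA and SFPA exist only once architecturally;
 * QEMU keeps the latter two in the M_REG_S bank, which is why the
 * non-secure view above folds FPCA in by hand (and SFPA stays hidden,
 * being RAZ/WI from non-secure state).
 */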

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return arm_v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads of other registers return zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    return ARMMMUIdx_MUser;
}

#else /* !CONFIG_USER_ONLY */

static ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                                     bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
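
/*
 * The result is the M-profile base index with up to three flag bits ORed
 * in, giving the eight ARMMMUIdx_M* values (MUser, MPriv, MUserNegPri,
 * MPrivNegPri and their Secure equivalents). "NegPri" (negative priority)
 * covers execution at priorities below 0, e.g. in HardFault or NMI
 * handlers or with FAULTMASK set, where the MPU can behave differently
 * (compare the MPU_CTRL.HFNMIENA control).
 */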

static ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                       bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_v7m_is_handler_mode(env) ||
        !(env->v7m.control[secstate] & 1);

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;
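
/*
 * These correspond approximately to the pseudocode access types:
 * STACK_NORMAL is AccType_STACK and STACK_LAZYFP is AccType_LAZYFP, while
 * STACK_IGNFAULTS is stacking done while already processing a derived
 * exception, where further faults update the fault status registers but
 * must not pend yet another derived exception (see v7m_stack_write()).
 */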

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
                         value, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
                              res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the BQL as we are going to touch the NVIC */
    bql_lock();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR/VPR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, fpcar + 0x44,
                                env->v7m.vpr, mmu_idx, STACK_LAZYFP);
        }
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    bql_unlock();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR and VPR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = 0;
        }
    }
    /*
     * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}
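
/*
 * Note for readers: this helper is reached when the translator sees an FP
 * instruction while FPCCR.LSPACT is set, i.e. there is FP state belonging
 * to an earlier context that was never pushed because lazy state
 * preservation deferred it. The frame address used above was recorded in
 * FPCAR by v7m_update_fpccr() at exception entry.
 */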

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
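
/*
 * A sketch of how the four architected stack pointers are stored may help
 * here: the live SP is always env->regs[13]; env->v7m.other_sp holds the
 * inactive SP (MSP or PSP) of the *current* security state; and
 * env->v7m.other_ss_msp/other_ss_psp hold both SPs of the inactive
 * security state. Switching security state therefore shuffles three
 * locations, as above, while switching MSP/PSP within one state simply
 * swaps regs[13] with other_sp.
 */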

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = true;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}
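
/*
 * The magic ranges tested above are, for reference, FNC_RETURN_MIN_MAGIC
 * (0xfefffffe) and EXC_RETURN_MIN_MAGIC (0xff000000): function returns use
 * 0xfefffffe/0xfeffffff and all EXC_RETURN values carry 0xff in bits
 * [31:24], so a single ">= min_magic" comparison covers whichever set the
 * core implements.
 */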

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = true;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = true;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}
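
/*
 * The two-word frame pushed above is the secure function-call frame: the
 * return address (with bit 0 set, since we return to Thumb code) at [sp]
 * and the saved partial PSR at [sp+4]. LR is then loaded with 0xfeffffff,
 * i.e. FNC_RETURN with bit 0 set, so that a later BX LR from the
 * non-secure callee takes the magic function-return path handled
 * elsewhere in this file.
 */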

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    qemu_log_mask(CPU_LOG_INT,
                  "...loading from element %d of %s vector table at 0x%x\n",
                  exc, targets_secure ? "secure" : "non-secure", addr);

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targets_secure, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
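
/*
 * For reference, the vector table consists of 32-bit entries at
 * vecbase + 4 * exception_number, with entry 0 holding the initial main
 * stack pointer rather than a vector, so real vectors start at entry 1
 * (Reset). Bit 0 of a fetched vector becomes the Thumb bit of the new PC,
 * which is why v7m_exception_taken() masks it off below.
 */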

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}
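
/*
 * So the signature stored in the callee-saves frame is 0xfefa125b when the
 * frame contains no FP state (no FPU, or FType set in LR) and 0xfefa125a
 * when it does. On exception return the word popped from that slot must
 * match, or the return is treated as an integrity failure and a
 * SecureFault is raised.
 */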

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode,
                                        lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
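
/*
 * The 0x28-byte callee-saves frame written above looks like this
 * (offsets from the final SP):
 *   0x00 integrity signature   0x04 reserved
 *   0x08 r4    0x0c r5    0x10 r6    0x14 r7
 *   0x18 r8    0x1c r9    0x20 r10   0x24 r11
 * Note the slot at 0x04 is skipped by the writes above; its contents are
 * left as whatever was already in memory there.
 */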

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}
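
/*
 * A reference map of the EXC_RETURN (LR payload) bits manipulated above,
 * as defined for v8M: bit [0] ES (exception secure), bit [2] SPSEL,
 * bit [3] MODE (0 handler, 1 thread), bit [4] FTYPE (0 means an FP frame
 * was pushed), bit [5] DCRS (default callee-register stacking), bit [6] S
 * (background state secure), and the 0xff prefix in bits [31:24]. Pre-v8
 * cores recognize only a handful of fixed low-bit patterns, as the v7M
 * path in do_v7m_exception_exit() shows.
 */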

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    NVICState *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    ARMCPU *cpu = env_archcpu(env);
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
        }

        /*
         * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    ARMCPU *cpu = env_archcpu(env);
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
        }
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}
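
/*
 * Usage sketch: secure code brackets a call into non-secure code with
 * these instructions to protect its FP state without paying for an
 * unconditional save, e.g. roughly:
 *
 *     vlstm   r0        @ arm lazy preservation, or store FP state to [r0]
 *     blxns   r1        @ call a non-secure function
 *     vlldm   r0        @ reload FP state, or just disarm if LSPACT set
 *
 * In the lazy (LSPEN) case, if the callee never touched the FPU then
 * LSPACT is still set on return and VLLDM simply clears it: the register
 * file was never actually written out or reloaded.
 */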

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR and VPR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpu_isar_feature(aa32_mve, cpu)) {
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, frameptr + 0x64,
                                        env->v7m.vpr, mmu_idx, STACK_NORMAL);
                }
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                    if (cpu_isar_feature(aa32_mve, cpu)) {
                        env->v7m.vpr = 0;
                    }
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}
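
/*
 * For reference, the frame sizes used above correspond to these layouts
 * (offsets from the final frame pointer):
 *   0x20 basic frame:     r0-r3, r12, lr, return address, xPSR
 *   0x68 FP frame:        basic frame, then s0-s15 at 0x20..0x5c and
 *                         FPSCR at 0x60 (VPR at 0x64 with MVE)
 *   0xa8 secure FP frame: as above plus s16-s31 at 0x68..0xa4
 * The "skip the slot for the FPSCR and VPR" adjustment in the loops
 * accounts for s16 onwards living above the FPSCR/VPR words.
 */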

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31 = false;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
1383     if (!arm_v7m_is_handler_mode(env)) {
1384         return;
1385     }
1386 
1387     /*
1388      * In the spec pseudocode ExceptionReturn() is called directly
1389      * from BXWritePC() and gets the full target PC value including
1390      * bit zero. In QEMU's implementation we treat it as a normal
1391      * jump-to-register (which is then caught later on), and so split
1392      * the target value up between env->regs[15] and env->thumb in
1393      * gen_bx(). Reconstitute it.
1394      */
1395     excret = env->regs[15];
1396     if (env->thumb) {
1397         excret |= 1;
1398     }
1399 
1400     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
1401                   " previous exception %d\n",
1402                   excret, env->v7m.exception);
1403 
1404     if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
1405         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
1406                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
1407                       excret);
1408     }
1409 
1410     ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1411 
1412     if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
1413         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
1414                       "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
1415                       "if FPU not present\n",
1416                       excret);
1417         ftype = true;
1418     }
1419 
1420     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1421         /*
1422          * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1423          * we pick which FAULTMASK to clear.
1424          */
1425         if (!env->v7m.secure &&
1426             ((excret & R_V7M_EXCRET_ES_MASK) ||
1427              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
1428             sfault = 1;
1429             /* For all other purposes, treat ES as 0 (R_HXSR) */
1430             excret &= ~R_V7M_EXCRET_ES_MASK;
1431         }
1432         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1433     }
1434 
1435     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1436         /*
1437          * Auto-clear FAULTMASK on return from other than NMI.
1438          * If the security extension is implemented then this only
1439          * happens if the raw execution priority is >= 0; the
1440          * value of the ES bit in the exception return value indicates
1441          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1442          */
1443         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1444             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1445                 env->v7m.faultmask[exc_secure] = 0;
1446             }
1447         } else {
1448             env->v7m.faultmask[M_REG_NS] = 0;
1449         }
1450     }
1451 
1452     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1453                                      exc_secure)) {
1454     case -1:
1455         /* attempt to exit an exception that isn't active */
1456         ufault = true;
1457         break;
1458     case 0:
1459         /* still an irq active now */
1460         break;
1461     case 1:
1462         /*
1463          * We returned to base exception level, no nesting.
1464          * (In the pseudocode this is written using "NestedActivation != 1"
1465          * where we have 'rettobase == false'.)
1466          */
1467         rettobase = true;
1468         break;
1469     default:
1470         g_assert_not_reached();
1471     }
1472 
1473     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1474     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1475     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1476         (excret & R_V7M_EXCRET_S_MASK);
1477 
1478     if (arm_feature(env, ARM_FEATURE_V8)) {
1479         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1480             /*
1481              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1482              * we choose to take the UsageFault.
1483              */
1484             if ((excret & R_V7M_EXCRET_S_MASK) ||
1485                 (excret & R_V7M_EXCRET_ES_MASK) ||
1486                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1487                 ufault = true;
1488             }
1489         }
1490         if (excret & R_V7M_EXCRET_RES0_MASK) {
1491             ufault = true;
1492         }
1493     } else {
1494         /* For v7M we only recognize certain combinations of the low bits */
1495         switch (excret & 0xf) {
1496         case 1: /* Return to Handler */
1497             break;
1498         case 13: /* Return to Thread using Process stack */
1499         case 9: /* Return to Thread using Main stack */
1500             /*
1501              * We only need to check NONBASETHRDENA for v7M, because in
1502              * v8M this bit does not exist (it is RES1).
1503              */
1504             if (!rettobase &&
1505                 !(env->v7m.ccr[env->v7m.secure] &
1506                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
1507                 ufault = true;
1508             }
1509             break;
1510         default:
1511             ufault = true;
1512         }
1513     }
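
         /*
          * Worked example (assuming FTYPE is set, i.e. no FP state): the
          * classic magic values decode as 0xfffffff1 (return to Handler,
          * Main stack), 0xfffffff9 (Thread, Main stack) and 0xfffffffd
          * (Thread, Process stack), matching the low-nibble cases above.
          */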
1514 
1515     /*
1516      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1517      * Handler mode (and will be until we write the new XPSR.Interrupt
1518      * field) this does not switch around the current stack pointer.
1519      * We must do this before we do any kind of tailchaining, including
1520      * for the derived exceptions on integrity check failures, or we will
1521      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1522      */
1523     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1524 
1525     /*
1526      * Clear scratch FP values left in caller saved registers; this
1527      * must happen before any kind of tail chaining.
1528      */
1529     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1530         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1531         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1532             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1533             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1534             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1535                           "stackframe: error during lazy state deactivation\n");
1536             v7m_exception_taken(cpu, excret, true, false);
1537             return;
1538         } else {
1539             if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1540                 /* v8.1M adds this NOCP check */
1541                 bool nsacr_pass = exc_secure ||
1542                     extract32(env->v7m.nsacr, 10, 1);
1543                 bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
1544                 if (!nsacr_pass) {
1545                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1546                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1547                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1548                         "stackframe: NSACR prevents clearing FPU registers\n");
1549                     v7m_exception_taken(cpu, excret, true, false);
1550                     return;
1551                 } else if (!cpacr_pass) {
1552                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1553                                             exc_secure);
1554                     env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
1555                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1556                         "stackframe: CPACR prevents clearing FPU registers\n");
1557                     v7m_exception_taken(cpu, excret, true, false);
1558                     return;
1559                 }
1560             }
1561             /* Clear s0..s15, FPSCR and VPR */
1562             int i;
1563 
1564             for (i = 0; i < 16; i += 2) {
1565                 *aa32_vfp_dreg(env, i / 2) = 0;
1566             }
1567             vfp_set_fpscr(env, 0);
1568             if (cpu_isar_feature(aa32_mve, cpu)) {
1569                 env->v7m.vpr = 0;
1570             }
1571         }
1572     }
1573 
1574     if (sfault) {
1575         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1576         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1577         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1578                       "stackframe: failed EXC_RETURN.ES validity check\n");
1579         v7m_exception_taken(cpu, excret, true, false);
1580         return;
1581     }
1582 
1583     if (ufault) {
1584         /*
1585          * Bad exception return: instead of popping the exception
1586          * stack, directly take a usage fault on the current stack.
1587          */
1588         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1589         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1590         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1591                       "stackframe: failed exception return integrity check\n");
1592         v7m_exception_taken(cpu, excret, true, false);
1593         return;
1594     }
1595 
1596     /*
1597      * Tailchaining: if there is currently a pending exception that
1598      * is high enough priority to preempt execution at the level we're
1599      * about to return to, then just directly take that exception now,
1600      * avoiding an unstack-and-then-stack. Note that now that we have
1601      * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
1602      * our current execution priority is already the execution priority we are
1603      * returning to -- none of the state we would unstack or set based on
1604      * the EXCRET value affects it.
1605      */
1606     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1607         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1608         v7m_exception_taken(cpu, excret, true, false);
1609         return;
1610     }
1611 
1612     switch_v7m_security_state(env, return_to_secure);
1613 
1614     {
1615         /*
1616          * The stack pointer we should be reading the exception frame from
1617          * depends on bits in the magic exception return type value (and
1618          * for v8M isn't necessarily the stack pointer we will eventually
1619          * end up resuming execution with). Get a pointer to the location
1620          * in the CPU state struct where the SP we need is currently being
1621          * stored; we will use and modify it in place.
1622          * We use this limited C variable scope so we don't accidentally
1623          * use 'frame_sp_p' after we do something that makes it invalid.
1624          */
1625         bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
1626         uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure,
1627                                                   !return_to_handler, spsel);
1628         uint32_t frameptr = *frame_sp_p;
1629         bool pop_ok = true;
1630         ARMMMUIdx mmu_idx;
1631         bool return_to_priv = return_to_handler ||
1632             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1633 
1634         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1635                                                         return_to_priv);
1636 
1637         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1638             arm_feature(env, ARM_FEATURE_V8)) {
1639             qemu_log_mask(LOG_GUEST_ERROR,
1640                           "M profile exception return with non-8-aligned SP "
1641                           "for destination state is UNPREDICTABLE\n");
1642         }
1643 
1644         /* Do we need to pop callee-saved registers? */
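             /*
              * Frame sketch for this section (non-normative): [0x00] the
              * integrity signature, [0x04] reserved, [0x08..0x24] R4-R11,
              * 0x28 bytes in total, as the reads below assume.
              */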
1645         if (return_to_secure &&
1646             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1647              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1648             uint32_t actual_sig;
1649 
1650             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1651 
1652             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1653                 /* Take a SecureFault on the current stack */
1654                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1655                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1656                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1657                               "stackframe: failed exception return integrity "
1658                               "signature check\n");
1659                 v7m_exception_taken(cpu, excret, true, false);
1660                 return;
1661             }
1662 
1663             pop_ok = pop_ok &&
1664                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1665                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1666                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1667                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1668                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1669                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1670                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1671                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1672 
1673             frameptr += 0x28;
1674         }
1675 
1676         /* Pop registers */
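             /*
              * Basic frame layout, for reference: [0x00] R0, [0x04] R1,
              * [0x08] R2, [0x0c] R3, [0x10] R12, [0x14] LR, [0x18] return
              * address, [0x1c] xPSR -- the offsets used in the reads below.
              */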
1677         pop_ok = pop_ok &&
1678             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1679             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1680             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1681             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1682             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1683             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1684             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1685             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1686 
1687         if (!pop_ok) {
1688             /*
1689              * v7m_stack_read() pended a fault, so take it (as a tail
1690              * chained exception on the same stack frame)
1691              */
1692             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1693             v7m_exception_taken(cpu, excret, true, false);
1694             return;
1695         }
1696 
1697         /*
1698          * Returning from an exception with a PC with bit 0 set is defined
1699          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1700          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1701          * the lsbit, and there are several RTOSes out there which incorrectly
1702          * assume the r15 in the stack frame should be a Thumb-style "lsbit
1703          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1704          * complain about the badly behaved guest.
1705          */
1706         if (env->regs[15] & 1) {
1707             env->regs[15] &= ~1U;
1708             if (!arm_feature(env, ARM_FEATURE_V8)) {
1709                 qemu_log_mask(LOG_GUEST_ERROR,
1710                               "M profile return from interrupt with misaligned "
1711                               "PC is UNPREDICTABLE on v7M\n");
1712             }
1713         }
1714 
1715         if (arm_feature(env, ARM_FEATURE_V8)) {
1716             /*
1717              * For v8M we have to check whether the xPSR exception field
1718              * matches the EXCRET value for return to handler/thread
1719              * before we commit to changing the SP and xPSR.
1720              */
1721             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1722             if (return_to_handler != will_be_handler) {
1723                 /*
1724                  * Take an INVPC UsageFault on the current stack.
1725                  * By this point we will have switched to the security state
1726                  * for the background state, so this UsageFault will target
1727                  * that state.
1728                  */
1729                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1730                                         env->v7m.secure);
1731                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1732                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1733                               "stackframe: failed exception return integrity "
1734                               "check\n");
1735                 v7m_exception_taken(cpu, excret, true, false);
1736                 return;
1737             }
1738         }
1739 
1740         if (!ftype) {
1741             /* FP present and we need to handle it */
1742             if (!return_to_secure &&
1743                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1744                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1745                 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1746                 qemu_log_mask(CPU_LOG_INT,
1747                               "...taking SecureFault on existing stackframe: "
1748                               "Secure LSPACT set but exception return is "
1749                               "not to secure state\n");
1750                 v7m_exception_taken(cpu, excret, true, false);
1751                 return;
1752             }
1753 
1754             restore_s16_s31 = return_to_secure &&
1755                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1756 
1757             if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1758                 /* State in FPU is still valid, just clear LSPACT */
1759                 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1760             } else {
1761                 int i;
1762                 uint32_t fpscr;
1763                 bool cpacr_pass, nsacr_pass;
1764 
1765                 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1766                                             return_to_priv);
1767                 nsacr_pass = return_to_secure ||
1768                     extract32(env->v7m.nsacr, 10, 1);
1769 
1770                 if (!cpacr_pass) {
1771                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1772                                             return_to_secure);
1773                     env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1774                     qemu_log_mask(CPU_LOG_INT,
1775                                   "...taking UsageFault on existing "
1776                                   "stackframe: CPACR.CP10 prevents unstacking "
1777                                   "FP regs\n");
1778                     v7m_exception_taken(cpu, excret, true, false);
1779                     return;
1780                 } else if (!nsacr_pass) {
1781                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1782                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1783                     qemu_log_mask(CPU_LOG_INT,
1784                                   "...taking Secure UsageFault on existing "
1785                                   "stackframe: NSACR.CP10 prevents unstacking "
1786                                   "FP regs\n");
1787                     v7m_exception_taken(cpu, excret, true, false);
1788                     return;
1789                 }
1790 
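                     /*
                      * FP frame sketch, for reference: S0-S15 at
                      * [0x20..0x5c], FPSCR at [0x60], VPR at [0x64], and
                      * for TS frames S16-S31 at [0x68..0xa4] -- hence the
                      * 8-byte skip below for the FPSCR/VPR slots.
                      */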
1791                 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1792                     uint32_t slo, shi;
1793                     uint64_t dn;
1794                     uint32_t faddr = frameptr + 0x20 + 4 * i;
1795 
1796                     if (i >= 16) {
1797                         faddr += 8; /* Skip the slot for the FPSCR and VPR */
1798                     }
1799 
1800                     pop_ok = pop_ok &&
1801                         v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1802                         v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1803 
1804                     if (!pop_ok) {
1805                         break;
1806                     }
1807 
1808                     dn = (uint64_t)shi << 32 | slo;
1809                     *aa32_vfp_dreg(env, i / 2) = dn;
1810                 }
1811                 pop_ok = pop_ok &&
1812                     v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1813                 if (pop_ok) {
1814                     vfp_set_fpscr(env, fpscr);
1815                 }
1816                 if (cpu_isar_feature(aa32_mve, cpu)) {
1817                     pop_ok = pop_ok &&
1818                         v7m_stack_read(cpu, &env->v7m.vpr,
1819                                        frameptr + 0x64, mmu_idx);
1820                 }
1821                 if (!pop_ok) {
1822                     /*
1823                      * These regs are 0 if the security extension is present;
1824                      * otherwise they are merely UNKNOWN. We always zero them.
1825                      */
1826                     for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1827                         *aa32_vfp_dreg(env, i / 2) = 0;
1828                     }
1829                     vfp_set_fpscr(env, 0);
1830                     if (cpu_isar_feature(aa32_mve, cpu)) {
1831                         env->v7m.vpr = 0;
1832                     }
1833                 }
1834             }
1835         }
1836         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1837                                                V7M_CONTROL, FPCA, !ftype);
1838 
1839         /* Commit to consuming the stack frame */
1840         frameptr += 0x20;
1841         if (!ftype) {
1842             frameptr += 0x48;
1843             if (restore_s16_s31) {
1844                 frameptr += 0x40;
1845             }
1846         }
1847         /*
1848          * Undo stack alignment: the SPREALIGN bit indicates that the original
1849          * pre-exception SP was not 8-aligned and we added a padding word to
1850          * align it, so we undo this by ORing in the bit that increases it
1851          * from the current 8-aligned value to the 8-unaligned value. (Adding 4
1852          * would work too, but a logical OR is how the pseudocode specifies it.)
1853          */
1854         if (xpsr & XPSR_SPREALIGN) {
1855             frameptr |= 4;
1856         }
1857         *frame_sp_p = frameptr;
1858     }
1859 
1860     xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1861     if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1862         xpsr_mask &= ~XPSR_GE;
1863     }
1864     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1865     xpsr_write(env, xpsr, xpsr_mask);
1866 
1867     if (env->v7m.secure) {
1868         bool sfpa = xpsr & XPSR_SFPA;
1869 
1870         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1871                                                V7M_CONTROL, SFPA, sfpa);
1872     }
1873 
1874     /*
1875      * The restored xPSR exception field will be zero if we're
1876      * resuming in Thread mode. If that doesn't match what the
1877      * exception return excret specified then this is a UsageFault.
1878      * v7M requires we make this check here; v8M did it earlier.
1879      */
1880     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1881         /*
1882          * Take an INVPC UsageFault by pushing the stack again;
1883          * we know we're v7M so this is never a Secure UsageFault.
1884          */
1885         bool ignore_stackfaults;
1886 
1887         assert(!arm_feature(env, ARM_FEATURE_V8));
1888         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1889         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1890         ignore_stackfaults = v7m_push_stack(cpu);
1891         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1892                       "failed exception return integrity check\n");
1893         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1894         return;
1895     }
1896 
1897     /* Otherwise, we have a successful exception exit. */
1898     arm_clear_exclusive(env);
1899     arm_rebuild_hflags(env);
1900     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1901 }
1902 
1903 static bool do_v7m_function_return(ARMCPU *cpu)
1904 {
1905     /*
1906      * v8M security extensions magic function return.
1907      * We may either:
1908      *  (1) throw an exception (longjump)
1909      *  (2) return true if we successfully handled the function return
1910      *  (3) return false if we failed a consistency check and have
1911      *      pended a UsageFault that needs to be taken now
1912      *
1913      * At this point the magic return value is split between env->regs[15]
1914      * and env->thumb. We don't bother to reconstitute it because we don't
1915      * need it (all values are handled the same way).
1916      */
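
         /*
          * For reference: the magic values in question are 0xfefffffe and
          * 0xfeffffff (FNC_RETURN_MIN_MAGIC upwards); since both are
          * handled identically we never need the full value here.
          */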
1917     CPUARMState *env = &cpu->env;
1918     uint32_t newpc, newpsr, newpsr_exc;
1919 
1920     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1921 
1922     {
1923         bool threadmode, spsel;
1924         MemOpIdx oi;
1925         ARMMMUIdx mmu_idx;
1926         uint32_t *frame_sp_p;
1927         uint32_t frameptr;
1928 
1929         /* Pull the return address and IPSR from the Secure stack */
1930         threadmode = !arm_v7m_is_handler_mode(env);
1931         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1932 
1933         frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel);
1934         frameptr = *frame_sp_p;
1935 
1936         /*
1937          * These loads may throw an exception (for MPU faults). We want to
1938          * do them as secure, so work out what MMU index that is.
1939          */
1940         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1941         oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
1942         newpc = cpu_ldl_mmu(env, frameptr, oi, 0);
1943         newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0);
1944 
1945         /* Consistency checks on new IPSR */
1946         newpsr_exc = newpsr & XPSR_EXCP;
1947         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1948               (env->v7m.exception == 1 && newpsr_exc != 0))) {
1949             /* Pend the fault and tell our caller to take it */
1950             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1951             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1952                                     env->v7m.secure);
1953             qemu_log_mask(CPU_LOG_INT,
1954                           "...taking INVPC UsageFault: "
1955                           "IPSR consistency check failed\n");
1956             return false;
1957         }
1958 
1959         *frame_sp_p = frameptr + 8;
1960     }
1961 
1962     /* This invalidates frame_sp_p */
1963     switch_v7m_security_state(env, true);
1964     env->v7m.exception = newpsr_exc;
1965     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1966     if (newpsr & XPSR_SFPA) {
1967         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1968     }
1969     xpsr_write(env, 0, XPSR_IT);
1970     env->thumb = newpc & 1;
1971     env->regs[15] = newpc & ~1;
1972     arm_rebuild_hflags(env);
1973 
1974     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1975     return true;
1976 }
1977 
1978 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
1979                                uint32_t addr, uint16_t *insn)
1980 {
1981     /*
1982      * Load a 16-bit portion of a v7M instruction, returning true on success,
1983      * or false on failure (in which case we will have pended the appropriate
1984      * exception).
1985      * We need to do the instruction fetch's MPU and SAU checks
1986      * like this because there is no MMU index that would allow
1987      * doing the load with a single function call. Instead we must
1988      * first check that the security attributes permit the load
1989      * and that they don't mismatch on the two halves of the instruction,
1990      * and then we do the load as a secure load (ie using the security
1991      * attributes of the address, not the CPU, as architecturally required).
1992      */
1993     CPUState *cs = CPU(cpu);
1994     CPUARMState *env = &cpu->env;
1995     V8M_SAttributes sattrs = {};
1996     GetPhysAddrResult res = {};
1997     ARMMMUFaultInfo fi = {};
1998     MemTxResult txres;
1999 
2000     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
2001     if (!sattrs.nsc || sattrs.ns) {
2002         /*
2003          * This must be the second half of the insn, and it straddles a
2004          * region boundary with the second half not being S&NSC.
2005          */
2006         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2007         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2008         qemu_log_mask(CPU_LOG_INT,
2009                       "...really SecureFault with SFSR.INVEP\n");
2010         return false;
2011     }
2012     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
2013         /* the MPU lookup failed */
2014         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2015         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
2016         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
2017         return false;
2018     }
2019     *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
2020                                   res.f.phys_addr, res.f.attrs, &txres);
2021     if (txres != MEMTX_OK) {
2022         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2023         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2024         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
2025         return false;
2026     }
2027     return true;
2028 }
2029 
2030 static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2031                                    uint32_t addr, uint32_t *spdata)
2032 {
2033     /*
2034      * Read a word of data from the stack for the SG instruction,
2035      * writing the value into *spdata. If the load succeeds, return
2036      * true; otherwise pend an appropriate exception and return false.
2037      * (We can't use data load helpers here that throw an exception
2038      * because of the context we're called in, which is halfway through
2039      * arm_v7m_cpu_do_interrupt().)
2040      */
2041     CPUState *cs = CPU(cpu);
2042     CPUARMState *env = &cpu->env;
2043     MemTxResult txres;
2044     GetPhysAddrResult res = {};
2045     ARMMMUFaultInfo fi = {};
2046     uint32_t value;
2047 
2048     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
2049         /* MPU/SAU lookup failed */
2050         if (fi.type == ARMFault_QEMU_SFault) {
2051             qemu_log_mask(CPU_LOG_INT,
2052                           "...SecureFault during stack word read\n");
2053             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2054             env->v7m.sfar = addr;
2055             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2056         } else {
2057             qemu_log_mask(CPU_LOG_INT,
2058                           "...MemManageFault during stack word read\n");
2059             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
2060                 R_V7M_CFSR_MMARVALID_MASK;
2061             env->v7m.mmfar[M_REG_S] = addr;
2062             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
2063         }
2064         return false;
2065     }
2066     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
2067                               res.f.phys_addr, res.f.attrs, &txres);
2068     if (txres != MEMTX_OK) {
2069         /* BusFault trying to read the data */
2070         qemu_log_mask(CPU_LOG_INT,
2071                       "...BusFault during stack word read\n");
2072         env->v7m.cfsr[M_REG_NS] |=
2073             (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2074         env->v7m.bfar = addr;
2075         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2076         return false;
2077     }
2078 
2079     *spdata = value;
2080     return true;
2081 }
2082 
2083 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
2084 {
2085     /*
2086      * Check whether this attempt to execute code in a Secure & NS-Callable
2087      * memory region is for an SG instruction; if so, then emulate the
2088      * effect of the SG instruction and return true. Otherwise pend
2089      * the correct kind of exception and return false.
2090      */
2091     CPUARMState *env = &cpu->env;
2092     ARMMMUIdx mmu_idx;
2093     uint16_t insn;
2094 
2095     /*
2096      * We should never get here unless get_phys_addr_pmsav8() caused
2097      * an exception for NS executing in S&NSC memory.
2098      */
2099     assert(!env->v7m.secure);
2100     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2101 
2102     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
2103     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
2104 
2105     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
2106         return false;
2107     }
2108 
2109     if (!env->thumb) {
2110         goto gen_invep;
2111     }
2112 
2113     if (insn != 0xe97f) {
2114         /*
2115          * Not an SG instruction first half (we choose the IMPDEF
2116          * early-SG-check option).
2117          */
2118         goto gen_invep;
2119     }
2120 
2121     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
2122         return false;
2123     }
2124 
2125     if (insn != 0xe97f) {
2126         /*
2127          * Not an SG instruction second half (yes, both halves of the SG
2128          * insn have the same hex value)
2129          */
2130         goto gen_invep;
2131     }
2132 
2133     /*
2134      * OK, we have confirmed that we really have an SG instruction.
2135      * We know we're NS in S memory so don't need to repeat those checks.
2136      */
2137     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2138                   ", executing it\n", env->regs[15]);
2139 
2140     if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
2141         !arm_v7m_is_handler_mode(env)) {
2142         /*
2143          * v8.1M exception stack frame integrity check. Note that we
2144          * must perform the memory access even if CCR_S.TRD is zero
2145          * and we aren't going to check what the data loaded is.
2146          */
2147         uint32_t spdata, sp;
2148 
2149         /*
2150          * We know we are currently NS, so the S stack pointers must be
2151          * in other_ss_{psp,msp}, not in regs[13]/other_sp.
2152          */
2153         sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
2154         if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
2155             /* Stack access failed and an exception has been pended */
2156             return false;
2157         }
2158 
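             /*
              * Sketch of the intent below: fault if the stacked word is the
              * frame integrity signature (0xfefa125a, with bit 0 a
              * don't-care) or if secure Thread mode is privileged, as
              * either suggests this SG entry is forging a return.
              */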
2159         if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
2160             if (((spdata & ~1) == 0xfefa125a) ||
2161                 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK)) {
2162                 goto gen_invep;
2163             }
2164         }
2165     }
2166 
2167     env->regs[14] &= ~1;
2168     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2169     switch_v7m_security_state(env, true);
2170     xpsr_write(env, 0, XPSR_IT);
2171     env->regs[15] += 4;
2172     arm_rebuild_hflags(env);
2173     return true;
2174 
2175 gen_invep:
2176     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2177     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2178     qemu_log_mask(CPU_LOG_INT,
2179                   "...really SecureFault with SFSR.INVEP\n");
2180     return false;
2181 }
2182 
2183 void arm_v7m_cpu_do_interrupt(CPUState *cs)
2184 {
2185     ARMCPU *cpu = ARM_CPU(cs);
2186     CPUARMState *env = &cpu->env;
2187     uint32_t lr;
2188     bool ignore_stackfaults;
2189 
2190     arm_log_exception(cs);
2191 
2192     /*
2193      * For exceptions we just mark as pending on the NVIC, and let that
2194      * handle it.
2195      */
2196     switch (cs->exception_index) {
2197     case EXCP_UDEF:
2198         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2199         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2200         break;
2201     case EXCP_NOCP:
2202     {
2203         /*
2204          * NOCP might be directed to something other than the current
2205          * security state if this fault is because of NSACR; we indicate
2206          * the target security state using exception.target_el.
2207          */
2208         int target_secstate;
2209 
2210         if (env->exception.target_el == 3) {
2211             target_secstate = M_REG_S;
2212         } else {
2213             target_secstate = env->v7m.secure;
2214         }
2215         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2216         env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2217         break;
2218     }
2219     case EXCP_INVSTATE:
2220         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2221         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2222         break;
2223     case EXCP_STKOF:
2224         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2225         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2226         break;
2227     case EXCP_LSERR:
2228         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2229         env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2230         break;
2231     case EXCP_UNALIGNED:
2232         /* Unaligned faults reported by M-profile aware code */
2233         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2234         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2235         break;
2236     case EXCP_DIVBYZERO:
2237         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2238         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
2239         break;
2240     case EXCP_SWI:
2241         /* The PC already points to the next instruction.  */
2242         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2243         break;
2244     case EXCP_PREFETCH_ABORT:
2245     case EXCP_DATA_ABORT:
2246         /*
2247          * Note that for M profile we don't have a guest facing FSR, but
2248          * the env->exception.fsr will be populated by the code that
2249          * raises the fault, in the A profile short-descriptor format.
2250          *
2251          * Log the exception.vaddress now regardless of subtype, because
2252          * logging below only logs it when it goes into a guest visible
2253          * register.
2254          */
2255         qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
2256                       (uint32_t)env->exception.vaddress);
2257         switch (env->exception.fsr & 0xf) {
2258         case M_FAKE_FSR_NSC_EXEC:
2259             /*
2260              * Exception generated when we try to execute code at an address
2261              * which is marked as Secure & Non-Secure Callable and the CPU
2262              * is in the Non-Secure state. The only instruction which can
2263              * be executed like this is SG (and that only if both halves of
2264              * the SG instruction have the same security attributes.)
2265              * Everything else must generate an INVEP SecureFault, so we
2266              * emulate the SG instruction here.
2267              */
2268             if (v7m_handle_execute_nsc(cpu)) {
2269                 return;
2270             }
2271             break;
2272         case M_FAKE_FSR_SFAULT:
2273             /*
2274              * Various flavours of SecureFault for attempts to execute or
2275              * access data in the wrong security state.
2276              */
2277             switch (cs->exception_index) {
2278             case EXCP_PREFETCH_ABORT:
2279                 if (env->v7m.secure) {
2280                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2281                     qemu_log_mask(CPU_LOG_INT,
2282                                   "...really SecureFault with SFSR.INVTRAN\n");
2283                 } else {
2284                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2285                     qemu_log_mask(CPU_LOG_INT,
2286                                   "...really SecureFault with SFSR.INVEP\n");
2287                 }
2288                 break;
2289             case EXCP_DATA_ABORT:
2290                 /* This must be an NS access to S memory */
2291                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2292                 qemu_log_mask(CPU_LOG_INT,
2293                               "...really SecureFault with SFSR.AUVIOL\n");
2294                 break;
2295             }
2296             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2297             break;
2298         case 0x8: /* External Abort */
2299             switch (cs->exception_index) {
2300             case EXCP_PREFETCH_ABORT:
2301                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2302                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2303                 break;
2304             case EXCP_DATA_ABORT:
2305                 env->v7m.cfsr[M_REG_NS] |=
2306                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2307                 env->v7m.bfar = env->exception.vaddress;
2308                 qemu_log_mask(CPU_LOG_INT,
2309                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
2310                               env->v7m.bfar);
2311                 break;
2312             }
2313             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2314             break;
2315         case 0x1: /* Alignment fault reported by generic code */
2316             qemu_log_mask(CPU_LOG_INT,
2317                           "...really UsageFault with UFSR.UNALIGNED\n");
2318             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2319             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
2320                                     env->v7m.secure);
2321             break;
2322         default:
2323             /*
2324              * All other FSR values are either MPU faults or "can't happen
2325              * for M profile" cases.
2326              */
2327             switch (cs->exception_index) {
2328             case EXCP_PREFETCH_ABORT:
2329                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2330                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2331                 break;
2332             case EXCP_DATA_ABORT:
2333                 env->v7m.cfsr[env->v7m.secure] |=
2334                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2335                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2336                 qemu_log_mask(CPU_LOG_INT,
2337                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2338                               env->v7m.mmfar[env->v7m.secure]);
2339                 break;
2340             }
2341             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2342                                     env->v7m.secure);
2343             break;
2344         }
2345         break;
2346     case EXCP_SEMIHOST:
2347         qemu_log_mask(CPU_LOG_INT,
2348                       "...handling as semihosting call 0x%x\n",
2349                       env->regs[0]);
2350 #ifdef CONFIG_TCG
2351         do_common_semihosting(cs);
2352 #else
2353         g_assert_not_reached();
2354 #endif
2355         env->regs[15] += env->thumb ? 2 : 4;
2356         return;
2357     case EXCP_BKPT:
2358         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2359         break;
2360     case EXCP_IRQ:
2361         break;
2362     case EXCP_EXCEPTION_EXIT:
2363         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2364             /* Must be v8M security extension function return */
2365             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2366             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
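
                 /*
                  * For reference: FNC_RETURN magic values (0xfefffffe and
                  * up) sit just below the EXC_RETURN range (0xff000000 and
                  * up), which is what this comparison relies on.
                  */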
2367             if (do_v7m_function_return(cpu)) {
2368                 return;
2369             }
2370         } else {
2371             do_v7m_exception_exit(cpu);
2372             return;
2373         }
2374         break;
2375     case EXCP_LAZYFP:
2376         /*
2377          * We already pended the specific exception in the NVIC in the
2378          * v7m_preserve_fp_state() helper function.
2379          */
2380         break;
2381     default:
2382         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2383         return; /* Never happens.  Keep compiler happy.  */
2384     }
2385 
2386     if (arm_feature(env, ARM_FEATURE_V8)) {
2387         lr = R_V7M_EXCRET_RES1_MASK |
2388             R_V7M_EXCRET_DCRS_MASK;
2389         /*
2390          * The S bit indicates whether we should return to Secure
2391          * or NonSecure (ie our current state).
2392          * The ES bit indicates whether we're taking this exception
2393          * to Secure or NonSecure (ie our target state). We set it
2394          * later, in v7m_exception_taken().
2395          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2396          * This corresponds to the ARM ARM pseudocode for v8M setting
2397          * some LR bits in PushStack() and some in ExceptionTaken();
2398          * the distinction matters for the tailchain cases where we
2399          * can take an exception without pushing the stack.
2400          */
2401         if (env->v7m.secure) {
2402             lr |= R_V7M_EXCRET_S_MASK;
2403         }
2404     } else {
2405         lr = R_V7M_EXCRET_RES1_MASK |
2406             R_V7M_EXCRET_S_MASK |
2407             R_V7M_EXCRET_DCRS_MASK |
2408             R_V7M_EXCRET_ES_MASK;
2409         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2410             lr |= R_V7M_EXCRET_SPSEL_MASK;
2411         }
2412     }
2413     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2414         lr |= R_V7M_EXCRET_FTYPE_MASK;
2415     }
2416     if (!arm_v7m_is_handler_mode(env)) {
2417         lr |= R_V7M_EXCRET_MODE_MASK;
2418     }
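
         /*
          * Worked example (v7M, no FP, Thread mode on the process stack):
          * the code above yields RES1 | S | DCRS | ES | FTYPE | MODE |
          * SPSEL, i.e. the familiar LR value 0xfffffffd.
          */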
2419 
2420     ignore_stackfaults = v7m_push_stack(cpu);
2421     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2422 }
2423 
2424 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2425 {
2426     unsigned el = arm_current_el(env);
2427 
2428     /* First handle registers which unprivileged can read */
2429     switch (reg) {
2430     case 0 ... 7: /* xPSR sub-fields */
2431         return v7m_mrs_xpsr(env, reg, el);
2432     case 20: /* CONTROL */
2433         return arm_v7m_mrs_control(env, env->v7m.secure);
2434     case 0x94: /* CONTROL_NS */
2435         /*
2436          * We have to handle this here because unprivileged Secure code
2437          * can read the NS CONTROL register.
2438          */
2439         if (!env->v7m.secure) {
2440             return 0;
2441         }
2442         return env->v7m.control[M_REG_NS] |
2443             (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2444     }
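
     /*
      * Note on the encodings below: SYSm values with bit 7 set (0x88,
      * 0x89, ...) are the NS aliases of the corresponding banked
      * registers (8 = MSP, 9 = PSP, ...), readable only from Secure
      * state.
      */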
2445 
2446     if (el == 0) {
2447         return 0; /* unprivileged reads others as zero */
2448     }
2449 
2450     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2451         switch (reg) {
2452         case 0x88: /* MSP_NS */
2453             if (!env->v7m.secure) {
2454                 return 0;
2455             }
2456             return env->v7m.other_ss_msp;
2457         case 0x89: /* PSP_NS */
2458             if (!env->v7m.secure) {
2459                 return 0;
2460             }
2461             return env->v7m.other_ss_psp;
2462         case 0x8a: /* MSPLIM_NS */
2463             if (!env->v7m.secure) {
2464                 return 0;
2465             }
2466             return env->v7m.msplim[M_REG_NS];
2467         case 0x8b: /* PSPLIM_NS */
2468             if (!env->v7m.secure) {
2469                 return 0;
2470             }
2471             return env->v7m.psplim[M_REG_NS];
2472         case 0x90: /* PRIMASK_NS */
2473             if (!env->v7m.secure) {
2474                 return 0;
2475             }
2476             return env->v7m.primask[M_REG_NS];
2477         case 0x91: /* BASEPRI_NS */
2478             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2479                 goto bad_reg;
2480             }
2481             if (!env->v7m.secure) {
2482                 return 0;
2483             }
2484             return env->v7m.basepri[M_REG_NS];
2485         case 0x93: /* FAULTMASK_NS */
2486             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2487                 goto bad_reg;
2488             }
2489             if (!env->v7m.secure) {
2490                 return 0;
2491             }
2492             return env->v7m.faultmask[M_REG_NS];
2493         case 0x98: /* SP_NS */
2494         {
2495             /*
2496              * This gives the non-secure SP selected based on whether we're
2497              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2498              */
2499             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2500 
2501             if (!env->v7m.secure) {
2502                 return 0;
2503             }
2504             if (!arm_v7m_is_handler_mode(env) && spsel) {
2505                 return env->v7m.other_ss_psp;
2506             } else {
2507                 return env->v7m.other_ss_msp;
2508             }
2509         }
2510         default:
2511             break;
2512         }
2513     }
2514 
2515     switch (reg) {
2516     case 8: /* MSP */
2517         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2518     case 9: /* PSP */
2519         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2520     case 10: /* MSPLIM */
2521         if (!arm_feature(env, ARM_FEATURE_V8)) {
2522             goto bad_reg;
2523         }
2524         return env->v7m.msplim[env->v7m.secure];
2525     case 11: /* PSPLIM */
2526         if (!arm_feature(env, ARM_FEATURE_V8)) {
2527             goto bad_reg;
2528         }
2529         return env->v7m.psplim[env->v7m.secure];
2530     case 16: /* PRIMASK */
2531         return env->v7m.primask[env->v7m.secure];
2532     case 17: /* BASEPRI */
2533     case 18: /* BASEPRI_MAX */
2534         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2535             goto bad_reg;
2536         }
2537         return env->v7m.basepri[env->v7m.secure];
2538     case 19: /* FAULTMASK */
2539         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2540             goto bad_reg;
2541         }
2542         return env->v7m.faultmask[env->v7m.secure];
2543     default:
2544     bad_reg:
2545         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2546                                        " register %d\n", reg);
2547         return 0;
2548     }
2549 }
2550 
2551 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2552 {
2553     /*
2554      * We're passed bits [11..0] of the instruction; extract
2555      * SYSm and the mask bits.
2556      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2557      * we choose to treat them as if the mask bits were valid.
2558      * NB that the pseudocode 'mask' variable is bits [11..10],
2559      * whereas ours is [11..8].
2560      */
2561     uint32_t mask = extract32(maskreg, 8, 4);
2562     uint32_t reg = extract32(maskreg, 0, 8);
2563     int cur_el = arm_current_el(env);
2564 
2565     if (cur_el == 0 && reg > 7 && reg != 20) {
2566         /*
2567          * only xPSR sub-fields and CONTROL.SFPA may be written by
2568          * unprivileged code
2569          */
2570         return;
2571     }
2572 
2573     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2574         switch (reg) {
2575         case 0x88: /* MSP_NS */
2576             if (!env->v7m.secure) {
2577                 return;
2578             }
2579             env->v7m.other_ss_msp = val & ~3;
2580             return;
2581         case 0x89: /* PSP_NS */
2582             if (!env->v7m.secure) {
2583                 return;
2584             }
2585             env->v7m.other_ss_psp = val & ~3;
2586             return;
2587         case 0x8a: /* MSPLIM_NS */
2588             if (!env->v7m.secure) {
2589                 return;
2590             }
2591             env->v7m.msplim[M_REG_NS] = val & ~7;
2592             return;
2593         case 0x8b: /* PSPLIM_NS */
2594             if (!env->v7m.secure) {
2595                 return;
2596             }
2597             env->v7m.psplim[M_REG_NS] = val & ~7;
2598             return;
2599         case 0x90: /* PRIMASK_NS */
2600             if (!env->v7m.secure) {
2601                 return;
2602             }
2603             env->v7m.primask[M_REG_NS] = val & 1;
2604             return;
2605         case 0x91: /* BASEPRI_NS */
2606             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2607                 goto bad_reg;
2608             }
2609             if (!env->v7m.secure) {
2610                 return;
2611             }
2612             env->v7m.basepri[M_REG_NS] = val & 0xff;
2613             return;
2614         case 0x93: /* FAULTMASK_NS */
2615             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2616                 goto bad_reg;
2617             }
2618             if (!env->v7m.secure) {
2619                 return;
2620             }
2621             env->v7m.faultmask[M_REG_NS] = val & 1;
2622             return;
2623         case 0x94: /* CONTROL_NS */
2624             if (!env->v7m.secure) {
2625                 return;
2626             }
2627             write_v7m_control_spsel_for_secstate(env,
2628                                                  val & R_V7M_CONTROL_SPSEL_MASK,
2629                                                  M_REG_NS);
2630             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2631                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2632                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2633             }
2634             /*
2635              * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2636              * RES0 if the FPU is not present, and is stored in the S bank
2637              */
2638             if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2639                 extract32(env->v7m.nsacr, 10, 1)) {
2640                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2641                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2642             }
2643             return;
2644         case 0x98: /* SP_NS */
2645         {
2646             /*
2647              * This gives the non-secure SP selected based on whether we're
2648              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2649              */
2650             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2651             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2652             uint32_t limit;
2653 
2654             if (!env->v7m.secure) {
2655                 return;
2656             }
2657 
2658             limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
2659 
2660             val &= ~0x3;
2661 
2662             if (val < limit) {
2663                 raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
2664             }
2665 
2666             if (is_psp) {
2667                 env->v7m.other_ss_psp = val;
2668             } else {
2669                 env->v7m.other_ss_msp = val;
2670             }
2671             return;
2672         }
2673         default:
2674             break;
2675         }
2676     }
2677 
2678     switch (reg) {
2679     case 0 ... 7: /* xPSR sub-fields */
2680         v7m_msr_xpsr(env, mask, reg, val);
2681         break;
2682     case 8: /* MSP */
2683         if (v7m_using_psp(env)) {
2684             env->v7m.other_sp = val & ~3;
2685         } else {
2686             env->regs[13] = val & ~3;
2687         }
2688         break;
2689     case 9: /* PSP */
2690         if (v7m_using_psp(env)) {
2691             env->regs[13] = val & ~3;
2692         } else {
2693             env->v7m.other_sp = val & ~3;
2694         }
2695         break;
2696     case 10: /* MSPLIM */
2697         if (!arm_feature(env, ARM_FEATURE_V8)) {
2698             goto bad_reg;
2699         }
2700         env->v7m.msplim[env->v7m.secure] = val & ~7;
2701         break;
2702     case 11: /* PSPLIM */
2703         if (!arm_feature(env, ARM_FEATURE_V8)) {
2704             goto bad_reg;
2705         }
2706         env->v7m.psplim[env->v7m.secure] = val & ~7;
2707         break;
2708     case 16: /* PRIMASK */
2709         env->v7m.primask[env->v7m.secure] = val & 1;
2710         break;
2711     case 17: /* BASEPRI */
2712         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2713             goto bad_reg;
2714         }
2715         env->v7m.basepri[env->v7m.secure] = val & 0xff;
2716         break;
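
     /*
      * BASEPRI_MAX (below) only ever raises priority: e.g. with BASEPRI
      * currently 0x40, writing 0x20 takes effect, while writing 0x60 or
      * 0 is ignored.
      */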
2717     case 18: /* BASEPRI_MAX */
2718         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2719             goto bad_reg;
2720         }
2721         val &= 0xff;
2722         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2723                          || env->v7m.basepri[env->v7m.secure] == 0)) {
2724             env->v7m.basepri[env->v7m.secure] = val;
2725         }
2726         break;
2727     case 19: /* FAULTMASK */
2728         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2729             goto bad_reg;
2730         }
2731         env->v7m.faultmask[env->v7m.secure] = val & 1;
2732         break;
2733     case 20: /* CONTROL */
2734         /*
2735          * Writing to the SPSEL bit only has an effect if we are in
2736          * thread mode; other bits can be updated by any privileged code.
2737          * write_v7m_control_spsel() deals with updating the SPSEL bit in
2738          * env->v7m.control, so we only need to update the others.
2739          * For v7M, we must just ignore explicit writes to SPSEL in handler
2740          * mode; for v8M the write is permitted but will have no effect.
2741          * All these bits are writes-ignored from non-privileged code,
2742          * except for SFPA.
2743          */
        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
                           !arm_v7m_is_handler_mode(env))) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /*
             * SFPA is RAZ/WI from NS or if no FPU.
             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
             * Both are stored in the S bank.
             */
            if (env->v7m.secure) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
            }
            if (cur_el > 0 &&
                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
                 extract32(env->v7m.nsacr, 10, 1))) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
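    /* op encodes the variant: 0 TT, 1 TTT, 2 TTA, 3 TTAT */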
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;

    /*
     * Work out which security state and privilege level we're
     * interested in...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * For our purposes the MPU and SAU don't care about the access
     * type, except that we must not claim to be an insn fetch, so we
     * arbitrarily call this a read.
     */

    /*
     * MPU region info is only available to privileged code, or when
     * inspecting the other security state's MPU.
     */
    if (arm_current_el(env) != 0 || alt) {
        GetPhysAddrResult res = {};
        ARMMMUFaultInfo fi = {};

        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
                          &res, &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = res.f.prot & PAGE_READ;
        rw = res.f.prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targetsec, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
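
/*
 * A minimal sketch of how a consumer could unpack the TT response word
 * built above.  Illustrative only: it assumes nothing beyond the bit
 * layout used by the packing code; the TTResp type and tt_resp_unpack()
 * are hypothetical names, not QEMU API.
 */
typedef struct TTResp {
    uint8_t mregion;    /* [7:0]   MPU region number */
    uint8_t sregion;    /* [15:8]  SAU region number */
    bool mrvalid;       /* [16]    MPU region number valid */
    bool srvalid;       /* [17]    SAU region number valid */
    bool r;             /* [18]    readable */
    bool rw;            /* [19]    read/write */
    bool nsr;           /* [20]    readable from Non-secure */
    bool nsrw;          /* [21]    read/write from Non-secure */
    bool s;             /* [22]    target address is Secure */
    bool irvalid;       /* [23]    IDAU region number valid */
    uint8_t iregion;    /* [31:24] IDAU region number */
} TTResp;

static inline TTResp tt_resp_unpack(uint32_t tt_resp)
{
    TTResp t = {
        .mregion = extract32(tt_resp, 0, 8),
        .sregion = extract32(tt_resp, 8, 8),
        .mrvalid = extract32(tt_resp, 16, 1),
        .srvalid = extract32(tt_resp, 17, 1),
        .r       = extract32(tt_resp, 18, 1),
        .rw      = extract32(tt_resp, 19, 1),
        .nsr     = extract32(tt_resp, 20, 1),
        .nsrw    = extract32(tt_resp, 21, 1),
        .s       = extract32(tt_resp, 22, 1),
        .irvalid = extract32(tt_resp, 23, 1),
        .iregion = extract32(tt_resp, 24, 8),
    };
    return t;
}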

#endif /* !CONFIG_USER_ONLY */

uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                             bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (e.g. by a
     * change to the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass in the
     * SPSEL control bit value; this is because we also use this
     * function when pushing the callee-saves registers that form part
     * of the v8M stack frame (pseudocode PushCalleeStack()), and in
     * the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value of the previous exception. The pseudocode
     * open-codes the stack selection in PushCalleeStack(), but we
     * prefer to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
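
/*
 * Usage sketch (illustrative only; excret_thread_sp() is a hypothetical
 * name, not QEMU API): read the stack pointer that Thread mode in
 * security state "secure" would use on an exception return, taking the
 * SPSEL bit from the EXC_RETURN value as the tailchain code does.
 * This assumes the R_V7M_EXCRET_SPSEL_MASK field definition from
 * internals.h.
 */
static inline uint32_t excret_thread_sp(CPUARMState *env, bool secure,
                                        uint32_t excret)
{
    /* EXC_RETURN bit 2 (SPSEL) selects PSP vs MSP for Thread mode */
    bool spsel = excret & R_V7M_EXCRET_SPSEL_MASK;

    return *arm_v7m_get_sp_ptr(env, secure, true, spsel);
}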