xref: /openbmc/qemu/hw/intc/armv7m_nvic.c (revision 24496b8d)
1 /*
2  * ARM Nested Vectored Interrupt Controller
3  *
4  * Copyright (c) 2006-2007 CodeSourcery.
5  * Written by Paul Brook
6  *
7  * This code is licensed under the GPL.
8  *
9  * The ARMv7M System controller is fairly tightly tied in with the
10  * NVIC.  Much of that is also implemented here.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "qemu-common.h"
16 #include "cpu.h"
17 #include "hw/sysbus.h"
18 #include "qemu/timer.h"
19 #include "hw/arm/arm.h"
20 #include "hw/intc/armv7m_nvic.h"
21 #include "target/arm/cpu.h"
22 #include "exec/exec-all.h"
23 #include "qemu/log.h"
24 #include "trace.h"
25 
26 /* IRQ number counting:
27  *
28  * the num-irq property counts the number of external IRQ lines
29  *
30  * NVICState::num_irq counts the total number of exceptions
31  * (external IRQs, the 15 internal exceptions including reset,
32  * and one for the unused exception number 0).
33  *
34  * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
35  *
36  * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
37  *
38  * Iterating through all exceptions should typically be done with
39  * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
40  *
41  * The external qemu_irq lines are the NVIC's external IRQ lines,
42  * so line 0 is exception 16.
43  *
44  * In the terminology of the architecture manual, "interrupts" are
45  * a subcategory of exception referring to the external interrupts
46  * (which are exception numbers NVIC_FIRST_IRQ and upward).
47  * For historical reasons QEMU tends to use "interrupt" and
48  * "exception" more or less interchangeably.
49  */
50 #define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
51 #define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
52 
53 /* Effective running priority of the CPU when no exception is active
54  * (higher than the highest possible priority value)
55  */
56 #define NVIC_NOEXC_PRIO 0x100
57 /* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
58 #define NVIC_NS_PRIO_LIMIT 0x80
59 
/* ID register bytes exposed in the System Control Space ID area.
 * NOTE(review): presumably the standard peripheral/component ID values
 * for the NVIC -- confirm against the register read handler that
 * serves the 0xfe0..0xfff offsets.
 */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
63 
64 static int nvic_pending_prio(NVICState *s)
65 {
66     /* return the group priority of the current pending interrupt,
67      * or NVIC_NOEXC_PRIO if no interrupt is pending
68      */
69     return s->vectpending_prio;
70 }
71 
72 /* Return the value of the ISCR RETTOBASE bit:
73  * 1 if there is exactly one active exception
74  * 0 if there is more than one active exception
75  * UNKNOWN if there are no active exceptions (we choose 1,
76  * which matches the choice Cortex-M3 is documented as making).
77  *
78  * NB: some versions of the documentation talk about this
79  * counting "active exceptions other than the one shown by IPSR";
80  * this is only different in the obscure corner case where guest
81  * code has manually deactivated an exception and is about
82  * to fail an exception-return integrity check. The definition
83  * above is the one from the v8M ARM ARM and is also in line
84  * with the behaviour documented for the Cortex-M3.
85  */
86 static bool nvic_rettobase(NVICState *s)
87 {
88     int irq, nhand = 0;
89     bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
90 
91     for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
92         if (s->vectors[irq].active ||
93             (check_sec && irq < NVIC_INTERNAL_VECTORS &&
94              s->sec_vectors[irq].active)) {
95             nhand++;
96             if (nhand == 2) {
97                 return 0;
98             }
99         }
100     }
101 
102     return 1;
103 }
104 
105 /* Return the value of the ISCR ISRPENDING bit:
106  * 1 if an external interrupt is pending
107  * 0 if no external interrupt is pending
108  */
109 static bool nvic_isrpending(NVICState *s)
110 {
111     int irq;
112 
113     /* We can shortcut if the highest priority pending interrupt
114      * happens to be external or if there is nothing pending.
115      */
116     if (s->vectpending > NVIC_FIRST_IRQ) {
117         return true;
118     }
119     if (s->vectpending == 0) {
120         return false;
121     }
122 
123     for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
124         if (s->vectors[irq].pending) {
125             return true;
126         }
127     }
128     return false;
129 }
130 
131 static bool exc_is_banked(int exc)
132 {
133     /* Return true if this is one of the limited set of exceptions which
134      * are banked (and thus have state in sec_vectors[])
135      */
136     return exc == ARMV7M_EXCP_HARD ||
137         exc == ARMV7M_EXCP_MEM ||
138         exc == ARMV7M_EXCP_USAGE ||
139         exc == ARMV7M_EXCP_SVC ||
140         exc == ARMV7M_EXCP_PENDSV ||
141         exc == ARMV7M_EXCP_SYSTICK;
142 }
143 
144 /* Return a mask word which clears the subpriority bits from
145  * a priority value for an M-profile exception, leaving only
146  * the group priority.
147  */
148 static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
149 {
150     return ~0U << (s->prigroup[secure] + 1);
151 }
152 
153 static bool exc_targets_secure(NVICState *s, int exc)
154 {
155     /* Return true if this non-banked exception targets Secure state. */
156     if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
157         return false;
158     }
159 
160     if (exc >= NVIC_FIRST_IRQ) {
161         return !s->itns[exc];
162     }
163 
164     /* Function shouldn't be called for banked exceptions. */
165     assert(!exc_is_banked(exc));
166 
167     switch (exc) {
168     case ARMV7M_EXCP_NMI:
169     case ARMV7M_EXCP_BUS:
170         return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
171     case ARMV7M_EXCP_SECURE:
172         return true;
173     case ARMV7M_EXCP_DEBUG:
174         /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
175         return false;
176     default:
177         /* reset, and reserved (unused) low exception numbers.
178          * We'll get called by code that loops through all the exception
179          * numbers, but it doesn't matter what we return here as these
180          * non-existent exceptions will never be pended or active.
181          */
182         return true;
183     }
184 }
185 
186 static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
187 {
188     /* Return the group priority for this exception, given its raw
189      * (group-and-subgroup) priority value and whether it is targeting
190      * secure state or not.
191      */
192     if (rawprio < 0) {
193         return rawprio;
194     }
195     rawprio &= nvic_gprio_mask(s, targets_secure);
196     /* AIRCR.PRIS causes us to squash all NS priorities into the
197      * lower half of the total range
198      */
199     if (!targets_secure &&
200         (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
201         rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
202     }
203     return rawprio;
204 }
205 
/* Recompute vectpending and exception_prio for a CPU which implements
 * the Security extension
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;   /* best pending group priority */
    int active_prio = NVIC_NOEXC_PRIO; /* best active group priority */
    int pend_irq = 0;                  /* exception number of best pending */
    bool pending_is_s_banked = false;

    /* R_CQRV: precedence is by:
     *  - lowest group priority; if both the same then
     *  - lowest subpriority; if both the same then
     *  - lowest exception number; if both the same (ie banked) then
     *  - secure exception takes precedence
     * Compare pseudocode RawExecutionPriority.
     * Annoyingly, now we have two prigroup values (for S and NS)
     * we can't do the loop comparison on raw priority values.
     *
     * NOTE(review): the strict '<' comparisons below operate on the
     * group priority only (exc_group_prio() masks out subpriority), so
     * the "lowest subpriority" tie-break listed above is not actually
     * applied here -- confirm whether a guest can observe this.
     */
    for (i = 1; i < s->num_irq; i++) {
        /* Visit the Secure bank before the NonSecure one: with the
         * strict '<' comparisons, the first candidate seen wins any
         * tie, which implements "secure exception takes precedence".
         */
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    /* Only banked exceptions have sec_vectors[] state */
                    continue;
                }
                vec = &s->sec_vectors[i];
                /* The S bank of a banked exception always targets Secure */
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                /* The NS bank of a banked exception targets NonSecure;
                 * non-banked exceptions may target either state.
                 */
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            prio = exc_group_prio(s, vec->prio, targets_secure);
            if (vec->enabled && vec->pending && prio < pend_prio) {
                pend_prio = prio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}
265 
/* Recompute vectpending and exception_prio */
static void nvic_recompute_state(NVICState *s)
{
    int i;
    int pend_prio = NVIC_NOEXC_PRIO;   /* best pending priority found */
    int active_prio = NVIC_NOEXC_PRIO; /* best active priority found */
    int pend_irq = 0;                  /* exception number of best pending */

    /* In theory we could write one function that handled both
     * the "security extension present" and "not present"; however
     * the security related changes significantly complicate the
     * recomputation just by themselves and mixing both cases together
     * would be even worse, so we retain a separate non-secure-only
     * version for CPUs which don't implement the security extension.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        nvic_recompute_state_secure(s);
        return;
    }

    /* Lowest raw priority value wins; ties go to the lowest exception
     * number because of the strict '<' comparisons.
     */
    for (i = 1; i < s->num_irq; i++) {
        VecInfo *vec = &s->vectors[i];

        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
            pend_prio = vec->prio;
            pend_irq = i;
        }
        if (vec->active && vec->prio < active_prio) {
            active_prio = vec->prio;
        }
    }

    /* Reduce raw priorities to group priorities. Negative (fixed)
     * priorities are skipped; the mask clears only the low subpriority
     * bits, so the NVIC_NOEXC_PRIO sentinel (0x100) passes unchanged.
     */
    if (active_prio > 0) {
        active_prio &= nvic_gprio_mask(s, false);
    }

    if (pend_prio > 0) {
        pend_prio &= nvic_gprio_mask(s, false);
    }

    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state(s->vectpending,
                               s->vectpending_prio,
                               s->exception_prio);
}
314 
/* Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This is a value between -3 (the lowest value this code can
 * produce, see the FAULTMASK_S case below) and NVIC_NOEXC_PRIO.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    /* Only a nonzero BASEPRI masks anything, hence the > 0 checks */
    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    /* PRIMASK_NS raises the priority to 0, except that AIRCR.PRIS
     * limits how far a NonSecure mask may raise it.
     */
    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    /* PRIMASK_S raises the priority to 0 unconditionally */
    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    /* FAULTMASK_NS raises to -1 when AIRCR.BFHFNMINS is set, otherwise
     * to 0, again subject to the AIRCR.PRIS limit for NonSecure.
     */
    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    /* FAULTMASK_S raises to -3 when AIRCR.BFHFNMINS is set, else -1 */
    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}
370 
371 bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
372 {
373     /* Return true if the requested execution priority is negative
374      * for the specified security state, ie that security state
375      * has an active NMI or HardFault or has set its FAULTMASK.
376      * Note that this is not the same as whether the execution
377      * priority is actually negative (for instance AIRCR.PRIS may
378      * mean we don't allow FAULTMASK_NS to actually make the execution
379      * priority negative). Compare pseudocode IsReqExcPriNeg().
380      */
381     NVICState *s = opaque;
382 
383     if (s->cpu->env.v7m.faultmask[secure]) {
384         return true;
385     }
386 
387     if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
388         s->vectors[ARMV7M_EXCP_HARD].active) {
389         return true;
390     }
391 
392     if (s->vectors[ARMV7M_EXCP_NMI].active &&
393         exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
394         return true;
395     }
396 
397     return false;
398 }
399 
400 bool armv7m_nvic_can_take_pending_exception(void *opaque)
401 {
402     NVICState *s = opaque;
403 
404     return nvic_exec_prio(s) > nvic_pending_prio(s);
405 }
406 
407 int armv7m_nvic_raw_execution_priority(void *opaque)
408 {
409     NVICState *s = opaque;
410 
411     return s->exception_prio;
412 }
413 
414 /* caller must call nvic_irq_update() after this.
415  * secure indicates the bank to use for banked exceptions (we assert if
416  * we are passed secure=true for a non-banked exception).
417  */
418 static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
419 {
420     assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
421     assert(irq < s->num_irq);
422 
423     prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);
424 
425     if (secure) {
426         assert(exc_is_banked(irq));
427         s->sec_vectors[irq].prio = prio;
428     } else {
429         s->vectors[irq].prio = prio;
430     }
431 
432     trace_nvic_set_prio(irq, secure, prio);
433 }
434 
435 /* Return the current raw priority register value.
436  * secure indicates the bank to use for banked exceptions (we assert if
437  * we are passed secure=true for a non-banked exception).
438  */
439 static int get_prio(NVICState *s, unsigned irq, bool secure)
440 {
441     assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
442     assert(irq < s->num_irq);
443 
444     if (secure) {
445         assert(exc_is_banked(irq));
446         return s->sec_vectors[irq].prio;
447     } else {
448         return s->vectors[irq].prio;
449     }
450 }
451 
452 /* Recompute state and assert irq line accordingly.
453  * Must be called after changes to:
454  *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
455  *  prigroup
456  */
457 static void nvic_irq_update(NVICState *s)
458 {
459     int lvl;
460     int pend_prio;
461 
462     nvic_recompute_state(s);
463     pend_prio = nvic_pending_prio(s);
464 
465     /* Raise NVIC output if this IRQ would be taken, except that we
466      * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
467      * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
468      * to those CPU registers don't cause us to recalculate the NVIC
469      * pending info.
470      */
471     lvl = (pend_prio < s->exception_prio);
472     trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
473     qemu_set_irq(s->excpout, lvl);
474 }
475 
476 /**
477  * armv7m_nvic_clear_pending: mark the specified exception as not pending
478  * @opaque: the NVIC
479  * @irq: the exception number to mark as not pending
480  * @secure: false for non-banked exceptions or for the nonsecure
481  * version of a banked exception, true for the secure version of a banked
482  * exception.
483  *
484  * Marks the specified exception as not pending. Note that we will assert()
485  * if @secure is true and @irq does not specify one of the fixed set
486  * of architecturally banked exceptions.
487  */
488 static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
489 {
490     NVICState *s = (NVICState *)opaque;
491     VecInfo *vec;
492 
493     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
494 
495     if (secure) {
496         assert(exc_is_banked(irq));
497         vec = &s->sec_vectors[irq];
498     } else {
499         vec = &s->vectors[irq];
500     }
501     trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
502     if (vec->pending) {
503         vec->pending = 0;
504         nvic_irq_update(s);
505     }
506 }
507 
/* Pend an exception, including possibly escalating it to HardFault.
 *
 * @opaque: the NVIC
 * @irq: the exception number to pend
 * @secure: bank selector for banked exceptions (we assert if true
 * for a non-banked exception)
 * @derived: true if this is a derived exception (see below)
 *
 * This function handles both "normal" pending of interrupts and
 * exceptions, and also derived exceptions (ones which occur as
 * a result of trying to take some other exception).
 *
 * If derived == true, the caller guarantees that we are part way through
 * trying to take an exception (but have not yet called
 * armv7m_nvic_acknowledge_irq() to make it active), and so:
 *  - s->vectpending is the "original exception" we were trying to take
 *  - irq is the "derived exception"
 *  - nvic_exec_prio(s) gives the priority before exception entry
 * Here we handle the prioritization logic which the pseudocode puts
 * in the DerivedLateArrival() function.
 */
static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    /* For banked exceptions the bank itself determines the target state */
    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /* DebugMonitorFault, but its priority is lower than the
             * preempted exception priority: just ignore it.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /* If this is a terminal exception (one which means we cannot
             * take the original exception, like a failure to read its
             * vector table entry), then we must take the derived exception.
             * If the derived exception can't take priority over the
             * original exception, then we go into Lockup.
             *
             * For QEMU, we rely on the fact that a derived exception is
             * terminal if and only if it's reported to us as HardFault,
             * which saves having to have an extra argument is_terminal
             * that we'd only use in one place.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }
        /* We now continue with the same code as for a normal pending
         * exception, which will cause us to pend the derived exception.
         * We'll then take either the original or the derived exception
         * based on which is higher priority by the usual mechanism
         * for selecting the highest priority pending interrupt.
         */
    }

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            /* Priority too low to preempt: escalate */
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            /* Exception disabled: escalate */
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {

            /* We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HF for
             * the target security state of the original exception; otherwise
             * we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /* We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}
647 
/* Pend an exception in the ordinary (non-derived) way */
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}
652 
/* Pend a derived exception (one raised while trying to take another) */
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}
657 
/* Pend an exception during lazy FP stacking.
 *
 * @opaque: the NVIC
 * @irq: the exception number to pend
 * @secure: bank selector for banked exceptions (we assert if true
 * for a non-banked exception)
 *
 * This differs from the usual exception pending because the logic for
 * whether we should escalate depends on the saved context
 * in the FPCCR register, not on the current state of the CPU/NVIC.
 */
void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;
    bool escalate = false;
    /*
     * We will only look at bits in fpccr if this is a banked exception
     * (in which case 'secure' tells us whether it is the S or NS version).
     * All the bits for the non-banked exceptions are in fpccr_s.
     */
    uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
    uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    /* The per-exception *RDY bit in FPCCR records whether the context
     * the FP state belongs to allowed that exception to be taken; if
     * not ready, we must escalate (or, for DebugMonitor, ignore).
     */
    switch (irq) {
    case ARMV7M_EXCP_DEBUG:
        if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
            /* Ignore DebugMonitor exception */
            return;
        }
        break;
    case ARMV7M_EXCP_MEM:
        escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
        break;
    case ARMV7M_EXCP_USAGE:
        escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
        break;
    case ARMV7M_EXCP_BUS:
        escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
        break;
    case ARMV7M_EXCP_SECURE:
        escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
        break;
    default:
        /* No other exception can be pended during lazy FP stacking */
        g_assert_not_reached();
    }

    if (escalate) {
        /*
         * Escalate to HardFault: faults that initially targeted Secure
         * continue to do so, even if HF normally targets NonSecure.
         */
        irq = ARMV7M_EXCP_HARD;
        if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
            (targets_secure ||
             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
            vec = &s->sec_vectors[irq];
        } else {
            vec = &s->vectors[irq];
        }
    }

    if (!vec->enabled ||
        nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
        if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
            /*
             * We want to escalate to HardFault but the context the
             * FP state belongs to prevents the exception pre-empting.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't escalate to HardFault during "
                      "lazy FP register stacking\n");
        }
    }

    if (escalate) {
        /* HF may be banked but there is only one shared HFSR */
        s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    if (!vec->pending) {
        vec->pending = 1;
        /*
         * We do not call nvic_irq_update(), because we know our caller
         * is going to handle causing us to take the exception by
         * raising EXCP_LAZYFP, so raising the IRQ line would be
         * pointless extra work. We just need to recompute the
         * priorities so that armv7m_nvic_can_take_pending_exception()
         * returns the right answer.
         */
        nvic_recompute_state(s);
    }
}
753 
/* Make pending IRQ active: mark s->vectpending active and not pending,
 * and write the new exception number into the IPSR. The caller must
 * have already determined that the exception can be taken; we assert
 * that its priority beats the current execution priority.
 */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    /* vectpending_is_s_banked tells us which bank the pending
     * exception's state lives in.
     */
    if (s->vectpending_is_s_banked) {
        vec = &s->sec_vectors[pending];
    } else {
        vec = &s->vectors[pending];
    }

    assert(vec->enabled);
    assert(vec->pending);

    /* The pending exception must actually be able to preempt */
    assert(s->vectpending_prio < running);

    trace_nvic_acknowledge_irq(pending, s->vectpending_prio);

    vec->active = 1;
    vec->pending = 0;

    write_v7m_exception(env, s->vectpending);

    nvic_irq_update(s);
}
785 
786 void armv7m_nvic_get_pending_irq_info(void *opaque,
787                                       int *pirq, bool *ptargets_secure)
788 {
789     NVICState *s = (NVICState *)opaque;
790     const int pending = s->vectpending;
791     bool targets_secure;
792 
793     assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
794 
795     if (s->vectpending_is_s_banked) {
796         targets_secure = true;
797     } else {
798         targets_secure = !exc_is_banked(pending) &&
799             exc_targets_secure(s, pending);
800     }
801 
802     trace_nvic_get_pending_irq_info(pending, targets_secure);
803 
804     *ptargets_secure = targets_secure;
805     *pirq = pending;
806 }
807 
/* Complete (deactivate) an exception on exception return.
 *
 * @opaque: the NVIC
 * @irq: the exception number being returned from
 * @secure: bank selector for banked exceptions
 *
 * Returns -1 if the exception was not active (an illegal exception
 * return), otherwise the RETTOBASE value (see nvic_rettobase())
 * computed before deactivation.
 */
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;
    int ret;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    if (secure && exc_is_banked(irq)) {
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }

    trace_nvic_complete_irq(irq, secure);

    if (!vec->active) {
        /* Tell the caller this was an illegal exception return */
        return -1;
    }

    /* RETTOBASE must be computed while the exception is still active */
    ret = nvic_rettobase(s);

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}
844 
845 bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
846 {
847     /*
848      * Return whether an exception is "ready", i.e. it is enabled and is
849      * configured at a priority which would allow it to interrupt the
850      * current execution priority.
851      *
852      * irq and secure have the same semantics as for armv7m_nvic_set_pending():
853      * for non-banked exceptions secure is always false; for banked exceptions
854      * it indicates which of the exceptions is required.
855      */
856     NVICState *s = (NVICState *)opaque;
857     bool banked = exc_is_banked(irq);
858     VecInfo *vec;
859     int running = nvic_exec_prio(s);
860 
861     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
862     assert(!secure || banked);
863 
864     /*
865      * HardFault is an odd special case: we always check against -1,
866      * even if we're secure and HardFault has priority -3; we never
867      * need to check for enabled state.
868      */
869     if (irq == ARMV7M_EXCP_HARD) {
870         return running > -1;
871     }
872 
873     vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
874 
875     return vec->enabled &&
876         exc_group_prio(s, vec->prio, secure) < running;
877 }
878 
879 /* callback when external interrupt line is changed */
880 static void set_irq_level(void *opaque, int n, int level)
881 {
882     NVICState *s = opaque;
883     VecInfo *vec;
884 
885     n += NVIC_FIRST_IRQ;
886 
887     assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
888 
889     trace_nvic_set_irq_level(n, level);
890 
891     /* The pending status of an external interrupt is
892      * latched on rising edge and exception handler return.
893      *
894      * Pulsing the IRQ will always run the handler
895      * once, and the handler will re-run until the
896      * level is low when the handler completes.
897      */
898     vec = &s->vectors[n];
899     if (level != vec->level) {
900         vec->level = level;
901         if (level) {
902             armv7m_nvic_set_pending(s, n, false);
903         }
904     }
905 }
906 
907 /* callback when external NMI line is changed */
908 static void nvic_nmi_trigger(void *opaque, int n, int level)
909 {
910     NVICState *s = opaque;
911 
912     trace_nvic_set_nmi_level(level);
913 
914     /*
915      * The architecture doesn't specify whether NMI should share
916      * the normal-interrupt behaviour of being resampled on
917      * exception handler return. We choose not to, so just
918      * set NMI pending here and don't track the current level.
919      */
920     if (level) {
921         armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
922     }
923 }
924 
925 static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
926 {
927     ARMCPU *cpu = s->cpu;
928     uint32_t val;
929 
930     switch (offset) {
931     case 4: /* Interrupt Control Type.  */
932         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
933             goto bad_offset;
934         }
935         return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
936     case 0xc: /* CPPWR */
937         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
938             goto bad_offset;
939         }
940         /* We make the IMPDEF choice that nothing can ever go into a
941          * non-retentive power state, which allows us to RAZ/WI this.
942          */
943         return 0;
944     case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
945     {
946         int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
947         int i;
948 
949         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
950             goto bad_offset;
951         }
952         if (!attrs.secure) {
953             return 0;
954         }
955         val = 0;
956         for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
957             if (s->itns[startvec + i]) {
958                 val |= (1 << i);
959             }
960         }
961         return val;
962     }
963     case 0xd00: /* CPUID Base.  */
964         return cpu->midr;
965     case 0xd04: /* Interrupt Control State (ICSR) */
966         /* VECTACTIVE */
967         val = cpu->env.v7m.exception;
968         /* VECTPENDING */
969         val |= (s->vectpending & 0xff) << 12;
970         /* ISRPENDING - set if any external IRQ is pending */
971         if (nvic_isrpending(s)) {
972             val |= (1 << 22);
973         }
974         /* RETTOBASE - set if only one handler is active */
975         if (nvic_rettobase(s)) {
976             val |= (1 << 11);
977         }
978         if (attrs.secure) {
979             /* PENDSTSET */
980             if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
981                 val |= (1 << 26);
982             }
983             /* PENDSVSET */
984             if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
985                 val |= (1 << 28);
986             }
987         } else {
988             /* PENDSTSET */
989             if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
990                 val |= (1 << 26);
991             }
992             /* PENDSVSET */
993             if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
994                 val |= (1 << 28);
995             }
996         }
997         /* NMIPENDSET */
998         if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
999             && s->vectors[ARMV7M_EXCP_NMI].pending) {
1000             val |= (1 << 31);
1001         }
1002         /* ISRPREEMPT: RES0 when halting debug not implemented */
1003         /* STTNS: RES0 for the Main Extension */
1004         return val;
1005     case 0xd08: /* Vector Table Offset.  */
1006         return cpu->env.v7m.vecbase[attrs.secure];
1007     case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1008         val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
1009         if (attrs.secure) {
1010             /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
1011             val |= cpu->env.v7m.aircr;
1012         } else {
1013             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1014                 /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
1015                  * security isn't supported then BFHFNMINS is RAO (and
1016                  * the bit in env.v7m.aircr is always set).
1017                  */
1018                 val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
1019             }
1020         }
1021         return val;
1022     case 0xd10: /* System Control.  */
1023         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1024             goto bad_offset;
1025         }
1026         return cpu->env.v7m.scr[attrs.secure];
1027     case 0xd14: /* Configuration Control.  */
1028         /* The BFHFNMIGN bit is the only non-banked bit; we
1029          * keep it in the non-secure copy of the register.
1030          */
1031         val = cpu->env.v7m.ccr[attrs.secure];
1032         val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
1033         return val;
1034     case 0xd24: /* System Handler Control and State (SHCSR) */
1035         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1036             goto bad_offset;
1037         }
1038         val = 0;
1039         if (attrs.secure) {
1040             if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
1041                 val |= (1 << 0);
1042             }
1043             if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
1044                 val |= (1 << 2);
1045             }
1046             if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
1047                 val |= (1 << 3);
1048             }
1049             if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
1050                 val |= (1 << 7);
1051             }
1052             if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
1053                 val |= (1 << 10);
1054             }
1055             if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
1056                 val |= (1 << 11);
1057             }
1058             if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
1059                 val |= (1 << 12);
1060             }
1061             if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
1062                 val |= (1 << 13);
1063             }
1064             if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
1065                 val |= (1 << 15);
1066             }
1067             if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
1068                 val |= (1 << 16);
1069             }
1070             if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
1071                 val |= (1 << 18);
1072             }
1073             if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
1074                 val |= (1 << 21);
1075             }
1076             /* SecureFault is not banked but is always RAZ/WI to NS */
1077             if (s->vectors[ARMV7M_EXCP_SECURE].active) {
1078                 val |= (1 << 4);
1079             }
1080             if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
1081                 val |= (1 << 19);
1082             }
1083             if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
1084                 val |= (1 << 20);
1085             }
1086         } else {
1087             if (s->vectors[ARMV7M_EXCP_MEM].active) {
1088                 val |= (1 << 0);
1089             }
1090             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1091                 /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
1092                 if (s->vectors[ARMV7M_EXCP_HARD].active) {
1093                     val |= (1 << 2);
1094                 }
1095                 if (s->vectors[ARMV7M_EXCP_HARD].pending) {
1096                     val |= (1 << 21);
1097                 }
1098             }
1099             if (s->vectors[ARMV7M_EXCP_USAGE].active) {
1100                 val |= (1 << 3);
1101             }
1102             if (s->vectors[ARMV7M_EXCP_SVC].active) {
1103                 val |= (1 << 7);
1104             }
1105             if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
1106                 val |= (1 << 10);
1107             }
1108             if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
1109                 val |= (1 << 11);
1110             }
1111             if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
1112                 val |= (1 << 12);
1113             }
1114             if (s->vectors[ARMV7M_EXCP_MEM].pending) {
1115                 val |= (1 << 13);
1116             }
1117             if (s->vectors[ARMV7M_EXCP_SVC].pending) {
1118                 val |= (1 << 15);
1119             }
1120             if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
1121                 val |= (1 << 16);
1122             }
1123             if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
1124                 val |= (1 << 18);
1125             }
1126         }
1127         if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1128             if (s->vectors[ARMV7M_EXCP_BUS].active) {
1129                 val |= (1 << 1);
1130             }
1131             if (s->vectors[ARMV7M_EXCP_BUS].pending) {
1132                 val |= (1 << 14);
1133             }
1134             if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
1135                 val |= (1 << 17);
1136             }
1137             if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
1138                 s->vectors[ARMV7M_EXCP_NMI].active) {
1139                 /* NMIACT is not present in v7M */
1140                 val |= (1 << 5);
1141             }
1142         }
1143 
1144         /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1145         if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
1146             val |= (1 << 8);
1147         }
1148         return val;
1149     case 0xd2c: /* Hard Fault Status.  */
1150         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1151             goto bad_offset;
1152         }
1153         return cpu->env.v7m.hfsr;
1154     case 0xd30: /* Debug Fault Status.  */
1155         return cpu->env.v7m.dfsr;
1156     case 0xd34: /* MMFAR MemManage Fault Address */
1157         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1158             goto bad_offset;
1159         }
1160         return cpu->env.v7m.mmfar[attrs.secure];
1161     case 0xd38: /* Bus Fault Address.  */
1162         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1163             goto bad_offset;
1164         }
1165         return cpu->env.v7m.bfar;
1166     case 0xd3c: /* Aux Fault Status.  */
1167         /* TODO: Implement fault status registers.  */
1168         qemu_log_mask(LOG_UNIMP,
1169                       "Aux Fault status registers unimplemented\n");
1170         return 0;
1171     case 0xd40: /* PFR0.  */
1172         return cpu->id_pfr0;
1173     case 0xd44: /* PFR1.  */
1174         return cpu->id_pfr1;
1175     case 0xd48: /* DFR0.  */
1176         return cpu->id_dfr0;
1177     case 0xd4c: /* AFR0.  */
1178         return cpu->id_afr0;
1179     case 0xd50: /* MMFR0.  */
1180         return cpu->id_mmfr0;
1181     case 0xd54: /* MMFR1.  */
1182         return cpu->id_mmfr1;
1183     case 0xd58: /* MMFR2.  */
1184         return cpu->id_mmfr2;
1185     case 0xd5c: /* MMFR3.  */
1186         return cpu->id_mmfr3;
1187     case 0xd60: /* ISAR0.  */
1188         return cpu->isar.id_isar0;
1189     case 0xd64: /* ISAR1.  */
1190         return cpu->isar.id_isar1;
1191     case 0xd68: /* ISAR2.  */
1192         return cpu->isar.id_isar2;
1193     case 0xd6c: /* ISAR3.  */
1194         return cpu->isar.id_isar3;
1195     case 0xd70: /* ISAR4.  */
1196         return cpu->isar.id_isar4;
1197     case 0xd74: /* ISAR5.  */
1198         return cpu->isar.id_isar5;
1199     case 0xd78: /* CLIDR */
1200         return cpu->clidr;
1201     case 0xd7c: /* CTR */
1202         return cpu->ctr;
1203     case 0xd80: /* CSSIDR */
1204     {
1205         int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
1206         return cpu->ccsidr[idx];
1207     }
1208     case 0xd84: /* CSSELR */
1209         return cpu->env.v7m.csselr[attrs.secure];
1210     case 0xd88: /* CPACR */
1211         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1212             return 0;
1213         }
1214         return cpu->env.v7m.cpacr[attrs.secure];
1215     case 0xd8c: /* NSACR */
1216         if (!attrs.secure || !arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1217             return 0;
1218         }
1219         return cpu->env.v7m.nsacr;
1220     /* TODO: Implement debug registers.  */
1221     case 0xd90: /* MPU_TYPE */
1222         /* Unified MPU; if the MPU is not present this value is zero */
1223         return cpu->pmsav7_dregion << 8;
1224         break;
1225     case 0xd94: /* MPU_CTRL */
1226         return cpu->env.v7m.mpu_ctrl[attrs.secure];
1227     case 0xd98: /* MPU_RNR */
1228         return cpu->env.pmsav7.rnr[attrs.secure];
1229     case 0xd9c: /* MPU_RBAR */
1230     case 0xda4: /* MPU_RBAR_A1 */
1231     case 0xdac: /* MPU_RBAR_A2 */
1232     case 0xdb4: /* MPU_RBAR_A3 */
1233     {
1234         int region = cpu->env.pmsav7.rnr[attrs.secure];
1235 
1236         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1237             /* PMSAv8M handling of the aliases is different from v7M:
1238              * aliases A1, A2, A3 override the low two bits of the region
1239              * number in MPU_RNR, and there is no 'region' field in the
1240              * RBAR register.
1241              */
1242             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1243             if (aliasno) {
1244                 region = deposit32(region, 0, 2, aliasno);
1245             }
1246             if (region >= cpu->pmsav7_dregion) {
1247                 return 0;
1248             }
1249             return cpu->env.pmsav8.rbar[attrs.secure][region];
1250         }
1251 
1252         if (region >= cpu->pmsav7_dregion) {
1253             return 0;
1254         }
1255         return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
1256     }
1257     case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1258     case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1259     case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1260     case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1261     {
1262         int region = cpu->env.pmsav7.rnr[attrs.secure];
1263 
1264         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1265             /* PMSAv8M handling of the aliases is different from v7M:
1266              * aliases A1, A2, A3 override the low two bits of the region
1267              * number in MPU_RNR.
1268              */
1269             int aliasno = (offset - 0xda0) / 8; /* 0..3 */
1270             if (aliasno) {
1271                 region = deposit32(region, 0, 2, aliasno);
1272             }
1273             if (region >= cpu->pmsav7_dregion) {
1274                 return 0;
1275             }
1276             return cpu->env.pmsav8.rlar[attrs.secure][region];
1277         }
1278 
1279         if (region >= cpu->pmsav7_dregion) {
1280             return 0;
1281         }
1282         return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
1283             (cpu->env.pmsav7.drsr[region] & 0xffff);
1284     }
1285     case 0xdc0: /* MPU_MAIR0 */
1286         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1287             goto bad_offset;
1288         }
1289         return cpu->env.pmsav8.mair0[attrs.secure];
1290     case 0xdc4: /* MPU_MAIR1 */
1291         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1292             goto bad_offset;
1293         }
1294         return cpu->env.pmsav8.mair1[attrs.secure];
1295     case 0xdd0: /* SAU_CTRL */
1296         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1297             goto bad_offset;
1298         }
1299         if (!attrs.secure) {
1300             return 0;
1301         }
1302         return cpu->env.sau.ctrl;
1303     case 0xdd4: /* SAU_TYPE */
1304         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1305             goto bad_offset;
1306         }
1307         if (!attrs.secure) {
1308             return 0;
1309         }
1310         return cpu->sau_sregion;
1311     case 0xdd8: /* SAU_RNR */
1312         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1313             goto bad_offset;
1314         }
1315         if (!attrs.secure) {
1316             return 0;
1317         }
1318         return cpu->env.sau.rnr;
1319     case 0xddc: /* SAU_RBAR */
1320     {
1321         int region = cpu->env.sau.rnr;
1322 
1323         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1324             goto bad_offset;
1325         }
1326         if (!attrs.secure) {
1327             return 0;
1328         }
1329         if (region >= cpu->sau_sregion) {
1330             return 0;
1331         }
1332         return cpu->env.sau.rbar[region];
1333     }
1334     case 0xde0: /* SAU_RLAR */
1335     {
1336         int region = cpu->env.sau.rnr;
1337 
1338         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1339             goto bad_offset;
1340         }
1341         if (!attrs.secure) {
1342             return 0;
1343         }
1344         if (region >= cpu->sau_sregion) {
1345             return 0;
1346         }
1347         return cpu->env.sau.rlar[region];
1348     }
1349     case 0xde4: /* SFSR */
1350         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1351             goto bad_offset;
1352         }
1353         if (!attrs.secure) {
1354             return 0;
1355         }
1356         return cpu->env.v7m.sfsr;
1357     case 0xde8: /* SFAR */
1358         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1359             goto bad_offset;
1360         }
1361         if (!attrs.secure) {
1362             return 0;
1363         }
1364         return cpu->env.v7m.sfar;
1365     case 0xf34: /* FPCCR */
1366         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1367             return 0;
1368         }
1369         if (attrs.secure) {
1370             return cpu->env.v7m.fpccr[M_REG_S];
1371         } else {
1372             /*
1373              * NS can read LSPEN, CLRONRET and MONRDY. It can read
1374              * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0;
1375              * other non-banked bits RAZ.
1376              * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set.
1377              */
1378             uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
1379             uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
1380                 R_V7M_FPCCR_CLRONRET_MASK |
1381                 R_V7M_FPCCR_MONRDY_MASK;
1382 
1383             if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1384                 mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
1385             }
1386 
1387             value &= mask;
1388 
1389             value |= cpu->env.v7m.fpccr[M_REG_NS];
1390             return value;
1391         }
1392     case 0xf38: /* FPCAR */
1393         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1394             return 0;
1395         }
1396         return cpu->env.v7m.fpcar[attrs.secure];
1397     case 0xf3c: /* FPDSCR */
1398         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1399             return 0;
1400         }
1401         return cpu->env.v7m.fpdscr[attrs.secure];
1402     case 0xf40: /* MVFR0 */
1403         return cpu->isar.mvfr0;
1404     case 0xf44: /* MVFR1 */
1405         return cpu->isar.mvfr1;
1406     case 0xf48: /* MVFR2 */
1407         return cpu->isar.mvfr2;
1408     default:
1409     bad_offset:
1410         qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
1411         return 0;
1412     }
1413 }
1414 
1415 static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
1416                         MemTxAttrs attrs)
1417 {
1418     ARMCPU *cpu = s->cpu;
1419 
1420     switch (offset) {
1421     case 0xc: /* CPPWR */
1422         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1423             goto bad_offset;
1424         }
1425         /* Make the IMPDEF choice to RAZ/WI this. */
1426         break;
1427     case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
1428     {
1429         int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1430         int i;
1431 
1432         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1433             goto bad_offset;
1434         }
1435         if (!attrs.secure) {
1436             break;
1437         }
1438         for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1439             s->itns[startvec + i] = (value >> i) & 1;
1440         }
1441         nvic_irq_update(s);
1442         break;
1443     }
1444     case 0xd04: /* Interrupt Control State (ICSR) */
1445         if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1446             if (value & (1 << 31)) {
1447                 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
1448             } else if (value & (1 << 30) &&
1449                        arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1450                 /* PENDNMICLR didn't exist in v7M */
1451                 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
1452             }
1453         }
1454         if (value & (1 << 28)) {
1455             armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1456         } else if (value & (1 << 27)) {
1457             armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1458         }
1459         if (value & (1 << 26)) {
1460             armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1461         } else if (value & (1 << 25)) {
1462             armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1463         }
1464         break;
1465     case 0xd08: /* Vector Table Offset.  */
1466         cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
1467         break;
1468     case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1469         if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
1470             if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
1471                 if (attrs.secure ||
1472                     !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
1473                     qemu_irq_pulse(s->sysresetreq);
1474                 }
1475             }
1476             if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
1477                 qemu_log_mask(LOG_GUEST_ERROR,
1478                               "Setting VECTCLRACTIVE when not in DEBUG mode "
1479                               "is UNPREDICTABLE\n");
1480             }
1481             if (value & R_V7M_AIRCR_VECTRESET_MASK) {
1482                 /* NB: this bit is RES0 in v8M */
1483                 qemu_log_mask(LOG_GUEST_ERROR,
1484                               "Setting VECTRESET when not in DEBUG mode "
1485                               "is UNPREDICTABLE\n");
1486             }
1487             if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1488                 s->prigroup[attrs.secure] =
1489                     extract32(value,
1490                               R_V7M_AIRCR_PRIGROUP_SHIFT,
1491                               R_V7M_AIRCR_PRIGROUP_LENGTH);
1492             }
1493             if (attrs.secure) {
1494                 /* These bits are only writable by secure */
1495                 cpu->env.v7m.aircr = value &
1496                     (R_V7M_AIRCR_SYSRESETREQS_MASK |
1497                      R_V7M_AIRCR_BFHFNMINS_MASK |
1498                      R_V7M_AIRCR_PRIS_MASK);
1499                 /* BFHFNMINS changes the priority of Secure HardFault, and
1500                  * allows a pending Non-secure HardFault to preempt (which
1501                  * we implement by marking it enabled).
1502                  */
1503                 if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1504                     s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
1505                     s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
1506                 } else {
1507                     s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
1508                     s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1509                 }
1510             }
1511             nvic_irq_update(s);
1512         }
1513         break;
1514     case 0xd10: /* System Control.  */
1515         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1516             goto bad_offset;
1517         }
1518         /* We don't implement deep-sleep so these bits are RAZ/WI.
1519          * The other bits in the register are banked.
1520          * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
1521          * is architecturally permitted.
1522          */
1523         value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
1524         cpu->env.v7m.scr[attrs.secure] = value;
1525         break;
1526     case 0xd14: /* Configuration Control.  */
1527         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1528             goto bad_offset;
1529         }
1530 
1531         /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
1532         value &= (R_V7M_CCR_STKALIGN_MASK |
1533                   R_V7M_CCR_BFHFNMIGN_MASK |
1534                   R_V7M_CCR_DIV_0_TRP_MASK |
1535                   R_V7M_CCR_UNALIGN_TRP_MASK |
1536                   R_V7M_CCR_USERSETMPEND_MASK |
1537                   R_V7M_CCR_NONBASETHRDENA_MASK);
1538 
1539         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1540             /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
1541             value |= R_V7M_CCR_NONBASETHRDENA_MASK
1542                 | R_V7M_CCR_STKALIGN_MASK;
1543         }
1544         if (attrs.secure) {
1545             /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
1546             cpu->env.v7m.ccr[M_REG_NS] =
1547                 (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
1548                 | (value & R_V7M_CCR_BFHFNMIGN_MASK);
1549             value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1550         }
1551 
1552         cpu->env.v7m.ccr[attrs.secure] = value;
1553         break;
1554     case 0xd24: /* System Handler Control and State (SHCSR) */
1555         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1556             goto bad_offset;
1557         }
1558         if (attrs.secure) {
1559             s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1560             /* Secure HardFault active bit cannot be written */
1561             s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1562             s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1563             s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
1564                 (value & (1 << 10)) != 0;
1565             s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
1566                 (value & (1 << 11)) != 0;
1567             s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
1568                 (value & (1 << 12)) != 0;
1569             s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1570             s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1571             s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1572             s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1573             s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
1574                 (value & (1 << 18)) != 0;
1575             s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1576             /* SecureFault not banked, but RAZ/WI to NS */
1577             s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
1578             s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
1579             s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
1580         } else {
1581             s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1582             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1583                 /* HARDFAULTPENDED is not present in v7M */
1584                 s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1585             }
1586             s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1587             s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1588             s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
1589             s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
1590             s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
1591             s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1592             s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1593             s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1594             s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
1595         }
1596         if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1597             s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
1598             s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
1599             s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1600         }
1601         /* NMIACT can only be written if the write is of a zero, with
1602          * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
1603          */
1604         if (!attrs.secure && cpu->env.v7m.secure &&
1605             (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1606             (value & (1 << 5)) == 0) {
1607             s->vectors[ARMV7M_EXCP_NMI].active = 0;
1608         }
1609         /* HARDFAULTACT can only be written if the write is of a zero
1610          * to the non-secure HardFault state by the CPU in secure state.
1611          * The only case where we can be targeting the non-secure HF state
1612          * when in secure state is if this is a write via the NS alias
1613          * and BFHFNMINS is 1.
1614          */
1615         if (!attrs.secure && cpu->env.v7m.secure &&
1616             (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1617             (value & (1 << 2)) == 0) {
1618             s->vectors[ARMV7M_EXCP_HARD].active = 0;
1619         }
1620 
1621         /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1622         s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
1623         nvic_irq_update(s);
1624         break;
1625     case 0xd2c: /* Hard Fault Status.  */
1626         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1627             goto bad_offset;
1628         }
1629         cpu->env.v7m.hfsr &= ~value; /* W1C */
1630         break;
1631     case 0xd30: /* Debug Fault Status.  */
1632         cpu->env.v7m.dfsr &= ~value; /* W1C */
1633         break;
1634     case 0xd34: /* Mem Manage Address.  */
1635         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1636             goto bad_offset;
1637         }
1638         cpu->env.v7m.mmfar[attrs.secure] = value;
1639         return;
1640     case 0xd38: /* Bus Fault Address.  */
1641         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1642             goto bad_offset;
1643         }
1644         cpu->env.v7m.bfar = value;
1645         return;
1646     case 0xd3c: /* Aux Fault Status.  */
1647         qemu_log_mask(LOG_UNIMP,
1648                       "NVIC: Aux fault status registers unimplemented\n");
1649         break;
1650     case 0xd84: /* CSSELR */
1651         if (!arm_v7m_csselr_razwi(cpu)) {
1652             cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
1653         }
1654         break;
1655     case 0xd88: /* CPACR */
1656         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1657             /* We implement only the Floating Point extension's CP10/CP11 */
1658             cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
1659         }
1660         break;
1661     case 0xd8c: /* NSACR */
1662         if (attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1663             /* We implement only the Floating Point extension's CP10/CP11 */
1664             cpu->env.v7m.nsacr = value & (3 << 10);
1665         }
1666         break;
1667     case 0xd90: /* MPU_TYPE */
1668         return; /* RO */
1669     case 0xd94: /* MPU_CTRL */
1670         if ((value &
1671              (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
1672             == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
1673             qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
1674                           "UNPREDICTABLE\n");
1675         }
1676         cpu->env.v7m.mpu_ctrl[attrs.secure]
1677             = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
1678                        R_V7M_MPU_CTRL_HFNMIENA_MASK |
1679                        R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
1680         tlb_flush(CPU(cpu));
1681         break;
1682     case 0xd98: /* MPU_RNR */
1683         if (value >= cpu->pmsav7_dregion) {
1684             qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
1685                           PRIu32 "/%" PRIu32 "\n",
1686                           value, cpu->pmsav7_dregion);
1687         } else {
1688             cpu->env.pmsav7.rnr[attrs.secure] = value;
1689         }
1690         break;
1691     case 0xd9c: /* MPU_RBAR */
1692     case 0xda4: /* MPU_RBAR_A1 */
1693     case 0xdac: /* MPU_RBAR_A2 */
1694     case 0xdb4: /* MPU_RBAR_A3 */
1695     {
1696         int region;
1697 
1698         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1699             /* PMSAv8M handling of the aliases is different from v7M:
1700              * aliases A1, A2, A3 override the low two bits of the region
1701              * number in MPU_RNR, and there is no 'region' field in the
1702              * RBAR register.
1703              */
1704             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1705 
1706             region = cpu->env.pmsav7.rnr[attrs.secure];
1707             if (aliasno) {
1708                 region = deposit32(region, 0, 2, aliasno);
1709             }
1710             if (region >= cpu->pmsav7_dregion) {
1711                 return;
1712             }
1713             cpu->env.pmsav8.rbar[attrs.secure][region] = value;
1714             tlb_flush(CPU(cpu));
1715             return;
1716         }
1717 
1718         if (value & (1 << 4)) {
1719             /* VALID bit means use the region number specified in this
1720              * value and also update MPU_RNR.REGION with that value.
1721              */
1722             region = extract32(value, 0, 4);
1723             if (region >= cpu->pmsav7_dregion) {
1724                 qemu_log_mask(LOG_GUEST_ERROR,
1725                               "MPU region out of range %u/%" PRIu32 "\n",
1726                               region, cpu->pmsav7_dregion);
1727                 return;
1728             }
1729             cpu->env.pmsav7.rnr[attrs.secure] = region;
1730         } else {
1731             region = cpu->env.pmsav7.rnr[attrs.secure];
1732         }
1733 
1734         if (region >= cpu->pmsav7_dregion) {
1735             return;
1736         }
1737 
1738         cpu->env.pmsav7.drbar[region] = value & ~0x1f;
1739         tlb_flush(CPU(cpu));
1740         break;
1741     }
1742     case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1743     case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1744     case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1745     case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1746     {
1747         int region = cpu->env.pmsav7.rnr[attrs.secure];
1748 
1749         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1750             /* PMSAv8M handling of the aliases is different from v7M:
1751              * aliases A1, A2, A3 override the low two bits of the region
1752              * number in MPU_RNR.
1753              */
1754             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1755 
1756             region = cpu->env.pmsav7.rnr[attrs.secure];
1757             if (aliasno) {
1758                 region = deposit32(region, 0, 2, aliasno);
1759             }
1760             if (region >= cpu->pmsav7_dregion) {
1761                 return;
1762             }
1763             cpu->env.pmsav8.rlar[attrs.secure][region] = value;
1764             tlb_flush(CPU(cpu));
1765             return;
1766         }
1767 
1768         if (region >= cpu->pmsav7_dregion) {
1769             return;
1770         }
1771 
1772         cpu->env.pmsav7.drsr[region] = value & 0xff3f;
1773         cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
1774         tlb_flush(CPU(cpu));
1775         break;
1776     }
1777     case 0xdc0: /* MPU_MAIR0 */
1778         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1779             goto bad_offset;
1780         }
1781         if (cpu->pmsav7_dregion) {
1782             /* Register is RES0 if no MPU regions are implemented */
1783             cpu->env.pmsav8.mair0[attrs.secure] = value;
1784         }
1785         /* We don't need to do anything else because memory attributes
1786          * only affect cacheability, and we don't implement caching.
1787          */
1788         break;
1789     case 0xdc4: /* MPU_MAIR1 */
1790         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1791             goto bad_offset;
1792         }
1793         if (cpu->pmsav7_dregion) {
1794             /* Register is RES0 if no MPU regions are implemented */
1795             cpu->env.pmsav8.mair1[attrs.secure] = value;
1796         }
1797         /* We don't need to do anything else because memory attributes
1798          * only affect cacheability, and we don't implement caching.
1799          */
1800         break;
1801     case 0xdd0: /* SAU_CTRL */
1802         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1803             goto bad_offset;
1804         }
1805         if (!attrs.secure) {
1806             return;
1807         }
1808         cpu->env.sau.ctrl = value & 3;
1809         break;
1810     case 0xdd4: /* SAU_TYPE */
1811         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1812             goto bad_offset;
1813         }
1814         break;
1815     case 0xdd8: /* SAU_RNR */
1816         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1817             goto bad_offset;
1818         }
1819         if (!attrs.secure) {
1820             return;
1821         }
1822         if (value >= cpu->sau_sregion) {
1823             qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
1824                           PRIu32 "/%" PRIu32 "\n",
1825                           value, cpu->sau_sregion);
1826         } else {
1827             cpu->env.sau.rnr = value;
1828         }
1829         break;
1830     case 0xddc: /* SAU_RBAR */
1831     {
1832         int region = cpu->env.sau.rnr;
1833 
1834         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1835             goto bad_offset;
1836         }
1837         if (!attrs.secure) {
1838             return;
1839         }
1840         if (region >= cpu->sau_sregion) {
1841             return;
1842         }
1843         cpu->env.sau.rbar[region] = value & ~0x1f;
1844         tlb_flush(CPU(cpu));
1845         break;
1846     }
1847     case 0xde0: /* SAU_RLAR */
1848     {
1849         int region = cpu->env.sau.rnr;
1850 
1851         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1852             goto bad_offset;
1853         }
1854         if (!attrs.secure) {
1855             return;
1856         }
1857         if (region >= cpu->sau_sregion) {
1858             return;
1859         }
1860         cpu->env.sau.rlar[region] = value & ~0x1c;
1861         tlb_flush(CPU(cpu));
1862         break;
1863     }
1864     case 0xde4: /* SFSR */
1865         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1866             goto bad_offset;
1867         }
1868         if (!attrs.secure) {
1869             return;
1870         }
1871         cpu->env.v7m.sfsr &= ~value; /* W1C */
1872         break;
1873     case 0xde8: /* SFAR */
1874         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1875             goto bad_offset;
1876         }
1877         if (!attrs.secure) {
1878             return;
1879         }
1880         cpu->env.v7m.sfsr = value;
1881         break;
1882     case 0xf00: /* Software Triggered Interrupt Register */
1883     {
1884         int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
1885 
1886         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1887             goto bad_offset;
1888         }
1889 
1890         if (excnum < s->num_irq) {
1891             armv7m_nvic_set_pending(s, excnum, false);
1892         }
1893         break;
1894     }
1895     case 0xf34: /* FPCCR */
1896         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1897             /* Not all bits here are banked. */
1898             uint32_t fpccr_s;
1899 
1900             if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1901                 /* Don't allow setting of bits not present in v7M */
1902                 value &= (R_V7M_FPCCR_LSPACT_MASK |
1903                           R_V7M_FPCCR_USER_MASK |
1904                           R_V7M_FPCCR_THREAD_MASK |
1905                           R_V7M_FPCCR_HFRDY_MASK |
1906                           R_V7M_FPCCR_MMRDY_MASK |
1907                           R_V7M_FPCCR_BFRDY_MASK |
1908                           R_V7M_FPCCR_MONRDY_MASK |
1909                           R_V7M_FPCCR_LSPEN_MASK |
1910                           R_V7M_FPCCR_ASPEN_MASK);
1911             }
1912             value &= ~R_V7M_FPCCR_RES0_MASK;
1913 
1914             if (!attrs.secure) {
1915                 /* Some non-banked bits are configurably writable by NS */
1916                 fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
1917                 if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
1918                     uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
1919                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
1920                 }
1921                 if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
1922                     uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
1923                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
1924                 }
1925                 if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1926                     uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
1927                     uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
1928                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
1929                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
1930                 }
1931                 /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */
1932                 {
1933                     uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
1934                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
1935                 }
1936 
1937                 /*
1938                  * All other non-banked bits are RAZ/WI from NS; write
1939                  * just the banked bits to fpccr[M_REG_NS].
1940                  */
1941                 value &= R_V7M_FPCCR_BANKED_MASK;
1942                 cpu->env.v7m.fpccr[M_REG_NS] = value;
1943             } else {
1944                 fpccr_s = value;
1945             }
1946             cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
1947         }
1948         break;
1949     case 0xf38: /* FPCAR */
1950         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1951             value &= ~7;
1952             cpu->env.v7m.fpcar[attrs.secure] = value;
1953         }
1954         break;
1955     case 0xf3c: /* FPDSCR */
1956         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1957             value &= 0x07c00000;
1958             cpu->env.v7m.fpdscr[attrs.secure] = value;
1959         }
1960         break;
1961     case 0xf50: /* ICIALLU */
1962     case 0xf58: /* ICIMVAU */
1963     case 0xf5c: /* DCIMVAC */
1964     case 0xf60: /* DCISW */
1965     case 0xf64: /* DCCMVAU */
1966     case 0xf68: /* DCCMVAC */
1967     case 0xf6c: /* DCCSW */
1968     case 0xf70: /* DCCIMVAC */
1969     case 0xf74: /* DCCISW */
1970     case 0xf78: /* BPIALL */
1971         /* Cache and branch predictor maintenance: for QEMU these always NOP */
1972         break;
1973     default:
1974     bad_offset:
1975         qemu_log_mask(LOG_GUEST_ERROR,
1976                       "NVIC: Bad write offset 0x%x\n", offset);
1977     }
1978 }
1979 
1980 static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
1981 {
1982     /* Return true if unprivileged access to this register is permitted. */
1983     switch (offset) {
1984     case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
1985         /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
1986          * controls access even though the CPU is in Secure state (I_QDKX).
1987          */
1988         return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
1989     default:
1990         /* All other user accesses cause a BusFault unconditionally */
1991         return false;
1992     }
1993 }
1994 
1995 static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
1996 {
1997     /* Behaviour for the SHPR register field for this exception:
1998      * return M_REG_NS to use the nonsecure vector (including for
1999      * non-banked exceptions), M_REG_S for the secure version of
2000      * a banked exception, and -1 if this field should RAZ/WI.
2001      */
2002     switch (exc) {
2003     case ARMV7M_EXCP_MEM:
2004     case ARMV7M_EXCP_USAGE:
2005     case ARMV7M_EXCP_SVC:
2006     case ARMV7M_EXCP_PENDSV:
2007     case ARMV7M_EXCP_SYSTICK:
2008         /* Banked exceptions */
2009         return attrs.secure;
2010     case ARMV7M_EXCP_BUS:
2011         /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
2012         if (!attrs.secure &&
2013             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2014             return -1;
2015         }
2016         return M_REG_NS;
2017     case ARMV7M_EXCP_SECURE:
2018         /* Not banked, RAZ/WI from nonsecure */
2019         if (!attrs.secure) {
2020             return -1;
2021         }
2022         return M_REG_NS;
2023     case ARMV7M_EXCP_DEBUG:
2024         /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
2025         return M_REG_NS;
2026     case 8 ... 10:
2027     case 13:
2028         /* RES0 */
2029         return -1;
2030     default:
2031         /* Not reachable due to decode of SHPR register addresses */
2032         g_assert_not_reached();
2033     }
2034 }
2035 
2036 static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
2037                                     uint64_t *data, unsigned size,
2038                                     MemTxAttrs attrs)
2039 {
2040     NVICState *s = (NVICState *)opaque;
2041     uint32_t offset = addr;
2042     unsigned i, startvec, end;
2043     uint32_t val;
2044 
2045     if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
2046         /* Generate BusFault for unprivileged accesses */
2047         return MEMTX_ERROR;
2048     }
2049 
2050     switch (offset) {
2051     /* reads of set and clear both return the status */
2052     case 0x100 ... 0x13f: /* NVIC Set enable */
2053         offset += 0x80;
2054         /* fall through */
2055     case 0x180 ... 0x1bf: /* NVIC Clear enable */
2056         val = 0;
2057         startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */
2058 
2059         for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2060             if (s->vectors[startvec + i].enabled &&
2061                 (attrs.secure || s->itns[startvec + i])) {
2062                 val |= (1 << i);
2063             }
2064         }
2065         break;
2066     case 0x200 ... 0x23f: /* NVIC Set pend */
2067         offset += 0x80;
2068         /* fall through */
2069     case 0x280 ... 0x2bf: /* NVIC Clear pend */
2070         val = 0;
2071         startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
2072         for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2073             if (s->vectors[startvec + i].pending &&
2074                 (attrs.secure || s->itns[startvec + i])) {
2075                 val |= (1 << i);
2076             }
2077         }
2078         break;
2079     case 0x300 ... 0x33f: /* NVIC Active */
2080         val = 0;
2081 
2082         if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
2083             break;
2084         }
2085 
2086         startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
2087 
2088         for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2089             if (s->vectors[startvec + i].active &&
2090                 (attrs.secure || s->itns[startvec + i])) {
2091                 val |= (1 << i);
2092             }
2093         }
2094         break;
2095     case 0x400 ... 0x5ef: /* NVIC Priority */
2096         val = 0;
2097         startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
2098 
2099         for (i = 0; i < size && startvec + i < s->num_irq; i++) {
2100             if (attrs.secure || s->itns[startvec + i]) {
2101                 val |= s->vectors[startvec + i].prio << (8 * i);
2102             }
2103         }
2104         break;
2105     case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
2106         if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2107             val = 0;
2108             break;
2109         }
2110         /* fall through */
2111     case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
2112         val = 0;
2113         for (i = 0; i < size; i++) {
2114             unsigned hdlidx = (offset - 0xd14) + i;
2115             int sbank = shpr_bank(s, hdlidx, attrs);
2116 
2117             if (sbank < 0) {
2118                 continue;
2119             }
2120             val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
2121         }
2122         break;
2123     case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
2124         if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2125             val = 0;
2126             break;
2127         };
2128         /* The BFSR bits [15:8] are shared between security states
2129          * and we store them in the NS copy
2130          */
2131         val = s->cpu->env.v7m.cfsr[attrs.secure];
2132         val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
2133         val = extract32(val, (offset - 0xd28) * 8, size * 8);
2134         break;
2135     case 0xfe0 ... 0xfff: /* ID.  */
2136         if (offset & 3) {
2137             val = 0;
2138         } else {
2139             val = nvic_id[(offset - 0xfe0) >> 2];
2140         }
2141         break;
2142     default:
2143         if (size == 4) {
2144             val = nvic_readl(s, offset, attrs);
2145         } else {
2146             qemu_log_mask(LOG_GUEST_ERROR,
2147                           "NVIC: Bad read of size %d at offset 0x%x\n",
2148                           size, offset);
2149             val = 0;
2150         }
2151     }
2152 
2153     trace_nvic_sysreg_read(addr, val, size);
2154     *data = val;
2155     return MEMTX_OK;
2156 }
2157 
static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    /*
     * MMIO write handler for the System Control Space; counterpart of
     * nvic_sysreg_read().  The byte/halfword-accessible banks are
     * decoded in the switch below: each Set-* case aliases itself onto
     * the matching Clear-* decode by adding 0x80 and setting setval.
     * Offsets the switch does not claim fall out of it and must be
     * 32-bit writes, which are forwarded to nvic_writel().
     * Unprivileged accesses rejected by nvic_user_access_ok() BusFault.
     */
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    unsigned setval = 0; /* value stored into enabled/pending: Set=1, Clear=0 */

    trace_nvic_sysreg_write(addr, value, size);

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        /* One bit per IRQ; NS writes affect only IRQs with ITNS set */
        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x300 ... 0x33f: /* NVIC Active */
        return MEMTX_OK; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        /* One priority byte per IRQ */
        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            /* WI without the Main Extension */
            return MEMTX_OK;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int newprio = extract32(value, i * 8, 8);
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                /* This byte of the register is WI for this access */
                continue;
            }
            set_prio(s, hdlidx, sbank, newprio);
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            return MEMTX_OK;
        }
        /* All bits are W1C, so construct 32 bit value with 0s in
         * the parts not written by the access size
         */
        value <<= ((offset - 0xd28) * 8);

        s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
        if (attrs.secure) {
            /* The BFSR bits [15:8] are shared between security states
             * and we store them in the NS copy.
             */
            s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
        }
        return MEMTX_OK;
    }
    if (size == 4) {
        nvic_writel(s, offset, value, attrs);
        return MEMTX_OK;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; treat as RAZ/WI */
    return MEMTX_OK;
}
2265 
2266 static const MemoryRegionOps nvic_sysreg_ops = {
2267     .read_with_attrs = nvic_sysreg_read,
2268     .write_with_attrs = nvic_sysreg_write,
2269     .endianness = DEVICE_NATIVE_ENDIAN,
2270 };
2271 
2272 static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
2273                                         uint64_t value, unsigned size,
2274                                         MemTxAttrs attrs)
2275 {
2276     MemoryRegion *mr = opaque;
2277 
2278     if (attrs.secure) {
2279         /* S accesses to the alias act like NS accesses to the real region */
2280         attrs.secure = 0;
2281         return memory_region_dispatch_write(mr, addr, value, size, attrs);
2282     } else {
2283         /* NS attrs are RAZ/WI for privileged, and BusFault for user */
2284         if (attrs.user) {
2285             return MEMTX_ERROR;
2286         }
2287         return MEMTX_OK;
2288     }
2289 }
2290 
2291 static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
2292                                        uint64_t *data, unsigned size,
2293                                        MemTxAttrs attrs)
2294 {
2295     MemoryRegion *mr = opaque;
2296 
2297     if (attrs.secure) {
2298         /* S accesses to the alias act like NS accesses to the real region */
2299         attrs.secure = 0;
2300         return memory_region_dispatch_read(mr, addr, data, size, attrs);
2301     } else {
2302         /* NS attrs are RAZ/WI for privileged, and BusFault for user */
2303         if (attrs.user) {
2304             return MEMTX_ERROR;
2305         }
2306         *data = 0;
2307         return MEMTX_OK;
2308     }
2309 }
2310 
2311 static const MemoryRegionOps nvic_sysreg_ns_ops = {
2312     .read_with_attrs = nvic_sysreg_ns_read,
2313     .write_with_attrs = nvic_sysreg_ns_write,
2314     .endianness = DEVICE_NATIVE_ENDIAN,
2315 };
2316 
2317 static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
2318                                       uint64_t value, unsigned size,
2319                                       MemTxAttrs attrs)
2320 {
2321     NVICState *s = opaque;
2322     MemoryRegion *mr;
2323 
2324     /* Direct the access to the correct systick */
2325     mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2326     return memory_region_dispatch_write(mr, addr, value, size, attrs);
2327 }
2328 
2329 static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
2330                                      uint64_t *data, unsigned size,
2331                                      MemTxAttrs attrs)
2332 {
2333     NVICState *s = opaque;
2334     MemoryRegion *mr;
2335 
2336     /* Direct the access to the correct systick */
2337     mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2338     return memory_region_dispatch_read(mr, addr, data, size, attrs);
2339 }
2340 
2341 static const MemoryRegionOps nvic_systick_ops = {
2342     .read_with_attrs = nvic_systick_read,
2343     .write_with_attrs = nvic_systick_write,
2344     .endianness = DEVICE_NATIVE_ENDIAN,
2345 };
2346 
2347 static int nvic_post_load(void *opaque, int version_id)
2348 {
2349     NVICState *s = opaque;
2350     unsigned i;
2351     int resetprio;
2352 
2353     /* Check for out of range priority settings */
2354     resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2355 
2356     if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
2357         s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
2358         s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
2359         return 1;
2360     }
2361     for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
2362         if (s->vectors[i].prio & ~0xff) {
2363             return 1;
2364         }
2365     }
2366 
2367     nvic_recompute_state(s);
2368 
2369     return 0;
2370 }
2371 
/* Migration state for one exception/IRQ vector.  The field order
 * defines the wire format and must not be changed.
 */
static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),   /* signed: fixed-prio excps < 0 */
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};
2385 
2386 static bool nvic_security_needed(void *opaque)
2387 {
2388     NVICState *s = opaque;
2389 
2390     return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
2391 }
2392 
2393 static int nvic_security_post_load(void *opaque, int version_id)
2394 {
2395     NVICState *s = opaque;
2396     int i;
2397 
2398     /* Check for out of range priority settings */
2399     if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
2400         && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
2401         /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
2402          * if the CPU state has been migrated yet; a mismatch won't
2403          * cause the emulation to blow up, though.
2404          */
2405         return 1;
2406     }
2407     for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
2408         if (s->sec_vectors[i].prio & ~0xff) {
2409             return 1;
2410         }
2411     }
2412     return 0;
2413 }
2414 
/* Migration subsection for M-profile Security Extension state:
 * the secure-banked vectors, the secure PRIGROUP and the ITNS bits.
 * Present only when nvic_security_needed() reports the feature.
 * Field order defines the wire format and must not be changed.
 */
static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (VMStateField[]) {
        /* sec_vectors covers only the internal exception range */
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};
2429 
/* Top-level migration state for the NVIC.  Security Extension state
 * travels in the optional "m-security" subsection.  Field order
 * defines the wire format and must not be changed.
 */
static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_nvic_security,
        NULL
    }
};
2446 
/* QOM properties: only the external IRQ line count is configurable */
static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};
2452 
2453 static void armv7m_nvic_reset(DeviceState *dev)
2454 {
2455     int resetprio;
2456     NVICState *s = NVIC(dev);
2457 
2458     memset(s->vectors, 0, sizeof(s->vectors));
2459     memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
2460     s->prigroup[M_REG_NS] = 0;
2461     s->prigroup[M_REG_S] = 0;
2462 
2463     s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
2464     /* MEM, BUS, and USAGE are enabled through
2465      * the System Handler Control register
2466      */
2467     s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
2468     s->vectors[ARMV7M_EXCP_DEBUG].enabled = 1;
2469     s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
2470     s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
2471 
2472     resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2473     s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
2474     s->vectors[ARMV7M_EXCP_NMI].prio = -2;
2475     s->vectors[ARMV7M_EXCP_HARD].prio = -1;
2476 
2477     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
2478         s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
2479         s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
2480         s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
2481         s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
2482 
2483         /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
2484         s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
2485         /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
2486         s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
2487     } else {
2488         s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
2489     }
2490 
2491     /* Strictly speaking the reset handler should be enabled.
2492      * However, we don't simulate soft resets through the NVIC,
2493      * and the reset vector should never be pended.
2494      * So we leave it disabled to catch logic errors.
2495      */
2496 
2497     s->exception_prio = NVIC_NOEXC_PRIO;
2498     s->vectpending = 0;
2499     s->vectpending_is_s_banked = false;
2500     s->vectpending_prio = NVIC_NOEXC_PRIO;
2501 
2502     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
2503         memset(s->itns, 0, sizeof(s->itns));
2504     } else {
2505         /* This state is constant and not guest accessible in a non-security
2506          * NVIC; we set the bits to true to avoid having to do a feature
2507          * bit check in the NVIC enable/pend/etc register accessors.
2508          */
2509         int i;
2510 
2511         for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
2512             s->itns[i] = true;
2513         }
2514     }
2515 }
2516 
2517 static void nvic_systick_trigger(void *opaque, int n, int level)
2518 {
2519     NVICState *s = opaque;
2520 
2521     if (level) {
2522         /* SysTick just asked us to pend its exception.
2523          * (This is different from an external interrupt line's
2524          * behaviour.)
2525          * n == 0 : NonSecure systick
2526          * n == 1 : Secure systick
2527          */
2528         armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
2529     }
2530 }
2531 
2532 static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
2533 {
2534     NVICState *s = NVIC(dev);
2535     Error *err = NULL;
2536     int regionlen;
2537 
2538     /* The armv7m container object will have set our CPU pointer */
2539     if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
2540         error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
2541         return;
2542     }
2543 
2544     if (s->num_irq > NVIC_MAX_IRQ) {
2545         error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
2546         return;
2547     }
2548 
2549     qdev_init_gpio_in(dev, set_irq_level, s->num_irq);
2550 
2551     /* include space for internal exception vectors */
2552     s->num_irq += NVIC_FIRST_IRQ;
2553 
2554     s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;
2555 
2556     object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true,
2557                              "realized", &err);
2558     if (err != NULL) {
2559         error_propagate(errp, err);
2560         return;
2561     }
2562     sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
2563                        qdev_get_gpio_in_named(dev, "systick-trigger",
2564                                               M_REG_NS));
2565 
2566     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
2567         /* We couldn't init the secure systick device in instance_init
2568          * as we didn't know then if the CPU had the security extensions;
2569          * so we have to do it here.
2570          */
2571         object_initialize(&s->systick[M_REG_S], sizeof(s->systick[M_REG_S]),
2572                           TYPE_SYSTICK);
2573         qdev_set_parent_bus(DEVICE(&s->systick[M_REG_S]), sysbus_get_default());
2574 
2575         object_property_set_bool(OBJECT(&s->systick[M_REG_S]), true,
2576                                  "realized", &err);
2577         if (err != NULL) {
2578             error_propagate(errp, err);
2579             return;
2580         }
2581         sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
2582                            qdev_get_gpio_in_named(dev, "systick-trigger",
2583                                                   M_REG_S));
2584     }
2585 
2586     /* The NVIC and System Control Space (SCS) starts at 0xe000e000
2587      * and looks like this:
2588      *  0x004 - ICTR
2589      *  0x010 - 0xff - systick
2590      *  0x100..0x7ec - NVIC
2591      *  0x7f0..0xcff - Reserved
2592      *  0xd00..0xd3c - SCS registers
2593      *  0xd40..0xeff - Reserved or Not implemented
2594      *  0xf00 - STIR
2595      *
2596      * Some registers within this space are banked between security states.
2597      * In v8M there is a second range 0xe002e000..0xe002efff which is the
2598      * NonSecure alias SCS; secure accesses to this behave like NS accesses
2599      * to the main SCS range, and non-secure accesses (including when
2600      * the security extension is not implemented) are RAZ/WI.
2601      * Note that both the main SCS range and the alias range are defined
2602      * to be exempt from memory attribution (R_BLJT) and so the memory
2603      * transaction attribute always matches the current CPU security
2604      * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops
2605      * wrappers we change attrs.secure to indicate the NS access; so
2606      * generally code determining which banked register to use should
2607      * use attrs.secure; code determining actual behaviour of the system
2608      * should use env->v7m.secure.
2609      */
2610     regionlen = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? 0x21000 : 0x1000;
2611     memory_region_init(&s->container, OBJECT(s), "nvic", regionlen);
2612     /* The system register region goes at the bottom of the priority
2613      * stack as it covers the whole page.
2614      */
2615     memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
2616                           "nvic_sysregs", 0x1000);
2617     memory_region_add_subregion(&s->container, 0, &s->sysregmem);
2618 
2619     memory_region_init_io(&s->systickmem, OBJECT(s),
2620                           &nvic_systick_ops, s,
2621                           "nvic_systick", 0xe0);
2622 
2623     memory_region_add_subregion_overlap(&s->container, 0x10,
2624                                         &s->systickmem, 1);
2625 
2626     if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
2627         memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
2628                               &nvic_sysreg_ns_ops, &s->sysregmem,
2629                               "nvic_sysregs_ns", 0x1000);
2630         memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
2631         memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
2632                               &nvic_sysreg_ns_ops, &s->systickmem,
2633                               "nvic_systick_ns", 0xe0);
2634         memory_region_add_subregion_overlap(&s->container, 0x20010,
2635                                             &s->systick_ns_mem, 1);
2636     }
2637 
2638     sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
2639 }
2640 
static void armv7m_nvic_instance_init(Object *obj)
{
    /* Create the always-present NonSecure SysTick child device and set
     * up the NVIC's IRQ and GPIO connections.
     * (NOTE(review): the previous comment here about overriding a
     * "num-irq" default in "the GICState struct" described nothing this
     * function does and looked like stale copy-paste from the GIC code.)
     */
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    sysbus_init_child_obj(obj, "systick-reg-ns", &nvic->systick[M_REG_NS],
                          sizeof(nvic->systick[M_REG_NS]), TYPE_SYSTICK);
    /* We can't initialize the secure systick here, as we don't know
     * yet if we need it.
     */

    /* Exception-out line, SYSRESETREQ output, and the SysTick/NMI inputs */
    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
                            M_REG_NUM_BANKS);
    qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
}
2665 
2666 static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
2667 {
2668     DeviceClass *dc = DEVICE_CLASS(klass);
2669 
2670     dc->vmsd  = &vmstate_nvic;
2671     dc->props = props_nvic;
2672     dc->reset = armv7m_nvic_reset;
2673     dc->realize = armv7m_nvic_realize;
2674 }
2675 
/* QOM type description for the NVIC sysbus device */
static const TypeInfo armv7m_nvic_info = {
    .name          = TYPE_NVIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init    = armv7m_nvic_class_init,
    .class_size    = sizeof(SysBusDeviceClass),
};
2684 
/* Register TYPE_NVIC with the QOM type system at module-load time */
static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)
2691