xref: /openbmc/qemu/hw/intc/armv7m_nvic.c (revision b01e2f0284a2df11aef990219104e3f52c317061)
1 /*
2  * ARM Nested Vectored Interrupt Controller
3  *
4  * Copyright (c) 2006-2007 CodeSourcery.
5  * Written by Paul Brook
6  *
7  * This code is licensed under the GPL.
8  *
9  * The ARMv7M System controller is fairly tightly tied in with the
10  * NVIC.  Much of that is also implemented here.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "qemu-common.h"
16 #include "cpu.h"
17 #include "hw/sysbus.h"
18 #include "qemu/timer.h"
19 #include "hw/arm/arm.h"
20 #include "hw/intc/armv7m_nvic.h"
21 #include "target/arm/cpu.h"
22 #include "exec/exec-all.h"
23 #include "qemu/log.h"
24 #include "trace.h"
25 
26 /* IRQ number counting:
27  *
28  * the num-irq property counts the number of external IRQ lines
29  *
30  * NVICState::num_irq counts the total number of exceptions
31  * (external IRQs, the 15 internal exceptions including reset,
32  * and one for the unused exception number 0).
33  *
34  * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
35  *
36  * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
37  *
38  * Iterating through all exceptions should typically be done with
39  * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
40  *
41  * The external qemu_irq lines are the NVIC's external IRQ lines,
42  * so line 0 is exception 16.
43  *
44  * In the terminology of the architecture manual, "interrupts" are
45  * a subcategory of exception referring to the external interrupts
46  * (which are exception numbers NVIC_FIRST_IRQ and upward).
47  * For historical reasons QEMU tends to use "interrupt" and
48  * "exception" more or less interchangeably.
49  */
50 #define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
51 #define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
52 
53 /* Effective running priority of the CPU when no exception is active
54  * (higher than the highest possible priority value)
55  */
56 #define NVIC_NOEXC_PRIO 0x100
57 /* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
58 #define NVIC_NS_PRIO_LIMIT 0x80
59 
/* ID register byte values for the NVIC; presumably returned by reads of
 * the peripheral/component ID register space — confirm against the users
 * of nvic_id[] in the MMIO read path.
 */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
63 
64 static int nvic_pending_prio(NVICState *s)
65 {
66     /* return the group priority of the current pending interrupt,
67      * or NVIC_NOEXC_PRIO if no interrupt is pending
68      */
69     return s->vectpending_prio;
70 }
71 
72 /* Return the value of the ISCR RETTOBASE bit:
73  * 1 if there is exactly one active exception
74  * 0 if there is more than one active exception
75  * UNKNOWN if there are no active exceptions (we choose 1,
76  * which matches the choice Cortex-M3 is documented as making).
77  *
78  * NB: some versions of the documentation talk about this
79  * counting "active exceptions other than the one shown by IPSR";
80  * this is only different in the obscure corner case where guest
81  * code has manually deactivated an exception and is about
82  * to fail an exception-return integrity check. The definition
83  * above is the one from the v8M ARM ARM and is also in line
84  * with the behaviour documented for the Cortex-M3.
85  */
86 static bool nvic_rettobase(NVICState *s)
87 {
88     int irq, nhand = 0;
89     bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
90 
91     for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
92         if (s->vectors[irq].active ||
93             (check_sec && irq < NVIC_INTERNAL_VECTORS &&
94              s->sec_vectors[irq].active)) {
95             nhand++;
96             if (nhand == 2) {
97                 return 0;
98             }
99         }
100     }
101 
102     return 1;
103 }
104 
105 /* Return the value of the ISCR ISRPENDING bit:
106  * 1 if an external interrupt is pending
107  * 0 if no external interrupt is pending
108  */
109 static bool nvic_isrpending(NVICState *s)
110 {
111     int irq;
112 
113     /* We can shortcut if the highest priority pending interrupt
114      * happens to be external or if there is nothing pending.
115      */
116     if (s->vectpending > NVIC_FIRST_IRQ) {
117         return true;
118     }
119     if (s->vectpending == 0) {
120         return false;
121     }
122 
123     for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
124         if (s->vectors[irq].pending) {
125             return true;
126         }
127     }
128     return false;
129 }
130 
131 static bool exc_is_banked(int exc)
132 {
133     /* Return true if this is one of the limited set of exceptions which
134      * are banked (and thus have state in sec_vectors[])
135      */
136     return exc == ARMV7M_EXCP_HARD ||
137         exc == ARMV7M_EXCP_MEM ||
138         exc == ARMV7M_EXCP_USAGE ||
139         exc == ARMV7M_EXCP_SVC ||
140         exc == ARMV7M_EXCP_PENDSV ||
141         exc == ARMV7M_EXCP_SYSTICK;
142 }
143 
144 /* Return a mask word which clears the subpriority bits from
145  * a priority value for an M-profile exception, leaving only
146  * the group priority.
147  */
static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
{
    /* prigroup[] holds the PRIGROUP setting for the selected bank:
     * bits [7:prigroup+1] of a raw priority are the group priority,
     * the rest are subpriority.
     */
    return ~0U << (s->prigroup[secure] + 1);
}
152 
153 static bool exc_targets_secure(NVICState *s, int exc)
154 {
155     /* Return true if this non-banked exception targets Secure state. */
156     if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
157         return false;
158     }
159 
160     if (exc >= NVIC_FIRST_IRQ) {
161         return !s->itns[exc];
162     }
163 
164     /* Function shouldn't be called for banked exceptions. */
165     assert(!exc_is_banked(exc));
166 
167     switch (exc) {
168     case ARMV7M_EXCP_NMI:
169     case ARMV7M_EXCP_BUS:
170         return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
171     case ARMV7M_EXCP_SECURE:
172         return true;
173     case ARMV7M_EXCP_DEBUG:
174         /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
175         return false;
176     default:
177         /* reset, and reserved (unused) low exception numbers.
178          * We'll get called by code that loops through all the exception
179          * numbers, but it doesn't matter what we return here as these
180          * non-existent exceptions will never be pended or active.
181          */
182         return true;
183     }
184 }
185 
186 static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
187 {
188     /* Return the group priority for this exception, given its raw
189      * (group-and-subgroup) priority value and whether it is targeting
190      * secure state or not.
191      */
192     if (rawprio < 0) {
193         return rawprio;
194     }
195     rawprio &= nvic_gprio_mask(s, targets_secure);
196     /* AIRCR.PRIS causes us to squash all NS priorities into the
197      * lower half of the total range
198      */
199     if (!targets_secure &&
200         (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
201         rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
202     }
203     return rawprio;
204 }
205 
206 /* Recompute vectpending and exception_prio for a CPU which implements
207  * the Security extension
208  */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;
    int pend_subprio = 0;

    /* R_CQRV: precedence is by:
     *  - lowest group priority; if both the same then
     *  - lowest subpriority; if both the same then
     *  - lowest exception number; if both the same (ie banked) then
     *  - secure exception takes precedence
     * Compare pseudocode RawExecutionPriority.
     * Annoyingly, now we have two prigroup values (for S and NS)
     * we can't do the loop comparison on raw priority values.
     */
    for (i = 1; i < s->num_irq; i++) {
        /* Visit the Secure bank before the NonSecure one: since the
         * pending choice below is only updated on a strict improvement,
         * a Secure banked exception beats its NonSecure twin when group
         * priority and subpriority are equal, as required above.
         */
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio, subprio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    /* Non-banked exceptions only have state in vectors[] */
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            /* Split the raw priority into group and subpriority parts */
            prio = exc_group_prio(s, vec->prio, targets_secure);
            subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
            if (vec->enabled && vec->pending &&
                ((prio < pend_prio) ||
                 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
                pend_prio = prio;
                pend_subprio = subprio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}
270 
271 /* Recompute vectpending and exception_prio */
272 static void nvic_recompute_state(NVICState *s)
273 {
274     int i;
275     int pend_prio = NVIC_NOEXC_PRIO;
276     int active_prio = NVIC_NOEXC_PRIO;
277     int pend_irq = 0;
278 
279     /* In theory we could write one function that handled both
280      * the "security extension present" and "not present"; however
281      * the security related changes significantly complicate the
282      * recomputation just by themselves and mixing both cases together
283      * would be even worse, so we retain a separate non-secure-only
284      * version for CPUs which don't implement the security extension.
285      */
286     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
287         nvic_recompute_state_secure(s);
288         return;
289     }
290 
291     for (i = 1; i < s->num_irq; i++) {
292         VecInfo *vec = &s->vectors[i];
293 
294         if (vec->enabled && vec->pending && vec->prio < pend_prio) {
295             pend_prio = vec->prio;
296             pend_irq = i;
297         }
298         if (vec->active && vec->prio < active_prio) {
299             active_prio = vec->prio;
300         }
301     }
302 
303     if (active_prio > 0) {
304         active_prio &= nvic_gprio_mask(s, false);
305     }
306 
307     if (pend_prio > 0) {
308         pend_prio &= nvic_gprio_mask(s, false);
309     }
310 
311     s->vectpending = pend_irq;
312     s->vectpending_prio = pend_prio;
313     s->exception_prio = active_prio;
314 
315     trace_nvic_recompute_state(s->vectpending,
316                                s->vectpending_prio,
317                                s->exception_prio);
318 }
319 
320 /* Return the current execution priority of the CPU
321  * (equivalent to the pseudocode ExecutionPriority function).
322  * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO.
323  */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    /* Each priority-boosting register is applied in turn, keeping the
     * numerically lowest (i.e. highest) resulting priority. The order
     * matters: BASEPRI first, then PRIMASK, then FAULTMASK, so the
     * strongest mask wins.
     */
    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    /* PRIMASK_NS boosts to 0, but AIRCR.PRIS caps the NS boost at
     * NVIC_NS_PRIO_LIMIT (NS cannot mask out Secure priorities).
     */
    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    /* FAULTMASK_NS boosts to -1 only when AIRCR.BFHFNMINS is set;
     * otherwise it behaves like PRIMASK_NS (with the same PRIS cap).
     */
    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}
375 
376 bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
377 {
378     /* Return true if the requested execution priority is negative
379      * for the specified security state, ie that security state
380      * has an active NMI or HardFault or has set its FAULTMASK.
381      * Note that this is not the same as whether the execution
382      * priority is actually negative (for instance AIRCR.PRIS may
383      * mean we don't allow FAULTMASK_NS to actually make the execution
384      * priority negative). Compare pseudocode IsReqExcPriNeg().
385      */
386     NVICState *s = opaque;
387 
388     if (s->cpu->env.v7m.faultmask[secure]) {
389         return true;
390     }
391 
392     if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
393         s->vectors[ARMV7M_EXCP_HARD].active) {
394         return true;
395     }
396 
397     if (s->vectors[ARMV7M_EXCP_NMI].active &&
398         exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
399         return true;
400     }
401 
402     return false;
403 }
404 
405 bool armv7m_nvic_can_take_pending_exception(void *opaque)
406 {
407     NVICState *s = opaque;
408 
409     return nvic_exec_prio(s) > nvic_pending_prio(s);
410 }
411 
/* Return the "raw" execution priority: the priority of the highest
 * priority active exception, ignoring the masking effects of
 * PRIMASK/FAULTMASK/BASEPRI (contrast nvic_exec_prio()).
 */
int armv7m_nvic_raw_execution_priority(void *opaque)
{
    NVICState *s = opaque;

    return s->exception_prio;
}
418 
419 /* caller must call nvic_irq_update() after this.
420  * secure indicates the bank to use for banked exceptions (we assert if
421  * we are passed secure=true for a non-banked exception).
422  */
423 static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
424 {
425     assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
426     assert(irq < s->num_irq);
427 
428     prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);
429 
430     if (secure) {
431         assert(exc_is_banked(irq));
432         s->sec_vectors[irq].prio = prio;
433     } else {
434         s->vectors[irq].prio = prio;
435     }
436 
437     trace_nvic_set_prio(irq, secure, prio);
438 }
439 
440 /* Return the current raw priority register value.
441  * secure indicates the bank to use for banked exceptions (we assert if
442  * we are passed secure=true for a non-banked exception).
443  */
444 static int get_prio(NVICState *s, unsigned irq, bool secure)
445 {
446     assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
447     assert(irq < s->num_irq);
448 
449     if (secure) {
450         assert(exc_is_banked(irq));
451         return s->sec_vectors[irq].prio;
452     } else {
453         return s->vectors[irq].prio;
454     }
455 }
456 
457 /* Recompute state and assert irq line accordingly.
458  * Must be called after changes to:
459  *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
460  *  prigroup
461  */
462 static void nvic_irq_update(NVICState *s)
463 {
464     int lvl;
465     int pend_prio;
466 
467     nvic_recompute_state(s);
468     pend_prio = nvic_pending_prio(s);
469 
470     /* Raise NVIC output if this IRQ would be taken, except that we
471      * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
472      * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
473      * to those CPU registers don't cause us to recalculate the NVIC
474      * pending info.
475      */
476     lvl = (pend_prio < s->exception_prio);
477     trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
478     qemu_set_irq(s->excpout, lvl);
479 }
480 
481 /**
482  * armv7m_nvic_clear_pending: mark the specified exception as not pending
483  * @opaque: the NVIC
484  * @irq: the exception number to mark as not pending
485  * @secure: false for non-banked exceptions or for the nonsecure
486  * version of a banked exception, true for the secure version of a banked
487  * exception.
488  *
489  * Marks the specified exception as not pending. Note that we will assert()
490  * if @secure is true and @irq does not specify one of the fixed set
491  * of architecturally banked exceptions.
492  */
493 static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
494 {
495     NVICState *s = (NVICState *)opaque;
496     VecInfo *vec;
497 
498     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
499 
500     if (secure) {
501         assert(exc_is_banked(irq));
502         vec = &s->sec_vectors[irq];
503     } else {
504         vec = &s->vectors[irq];
505     }
506     trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
507     if (vec->pending) {
508         vec->pending = 0;
509         nvic_irq_update(s);
510     }
511 }
512 
static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    /* Pend an exception, including possibly escalating it to HardFault.
     *
     * This function handles both "normal" pending of interrupts and
     * exceptions, and also derived exceptions (ones which occur as
     * a result of trying to take some other exception).
     *
     * If derived == true, the caller guarantees that we are part way through
     * trying to take an exception (but have not yet called
     * armv7m_nvic_acknowledge_irq() to make it active), and so:
     *  - s->vectpending is the "original exception" we were trying to take
     *  - irq is the "derived exception"
     *  - nvic_exec_prio(s) gives the priority before exception entry
     * Here we handle the prioritization logic which the pseudocode puts
     * in the DerivedLateArrival() function.
     */

    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    /* secure==true is only meaningful for banked exceptions */
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /* DebugMonitorFault, but its priority is lower than the
             * preempted exception priority: just ignore it.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /* If this is a terminal exception (one which means we cannot
             * take the original exception, like a failure to read its
             * vector table entry), then we must take the derived exception.
             * If the derived exception can't take priority over the
             * original exception, then we go into Lockup.
             *
             * For QEMU, we rely on the fact that a derived exception is
             * terminal if and only if it's reported to us as HardFault,
             * which saves having to have an extra argument is_terminal
             * that we'd only use in one place.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }
        /* We now continue with the same code as for a normal pending
         * exception, which will cause us to pend the derived exception.
         * We'll then take either the original or the derived exception
         * based on which is higher priority by the usual mechanism
         * for selecting the highest priority pending interrupt.
         */
    }

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {

            /* We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HF for
             * the target security state of the original exception; otherwise
             * we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /* We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    /* Pend the exception (possibly redirected to HardFault above) and
     * re-derive the NVIC output state.
     */
    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}
652 
/* Pend exception @irq (using the @secure bank for banked exceptions) */
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}
657 
/* Pend a derived exception: one raised part-way through taking
 * another exception (see do_armv7m_nvic_set_pending() for details).
 */
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}
662 
void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
{
    /*
     * Pend an exception during lazy FP stacking. This differs
     * from the usual exception pending because the logic for
     * whether we should escalate depends on the saved context
     * in the FPCCR register, not on the current state of the CPU/NVIC.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;
    bool escalate = false;
    /*
     * We will only look at bits in fpccr if this is a banked exception
     * (in which case 'secure' tells us whether it is the S or NS version).
     * All the bits for the non-banked exceptions are in fpccr_s.
     */
    uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
    uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    /* Consult the per-exception "ready" bit in FPCCR: if it is clear,
     * the exception cannot be pended normally and must be escalated
     * (or, for DebugMonitor, simply dropped).
     */
    switch (irq) {
    case ARMV7M_EXCP_DEBUG:
        if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
            /* Ignore DebugMonitor exception */
            return;
        }
        break;
    case ARMV7M_EXCP_MEM:
        escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
        break;
    case ARMV7M_EXCP_USAGE:
        escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
        break;
    case ARMV7M_EXCP_BUS:
        escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
        break;
    case ARMV7M_EXCP_SECURE:
        escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
        break;
    default:
        g_assert_not_reached();
    }

    if (escalate) {
        /*
         * Escalate to HardFault: faults that initially targeted Secure
         * continue to do so, even if HF normally targets NonSecure.
         */
        irq = ARMV7M_EXCP_HARD;
        if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
            (targets_secure ||
             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
            vec = &s->sec_vectors[irq];
        } else {
            vec = &s->vectors[irq];
        }
    }

    if (!vec->enabled ||
        nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
        if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
            /*
             * We want to escalate to HardFault but the context the
             * FP state belongs to prevents the exception pre-empting.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't escalate to HardFault during "
                      "lazy FP register stacking\n");
        }
    }

    if (escalate) {
        /* HF may be banked but there is only one shared HFSR */
        s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    if (!vec->pending) {
        vec->pending = 1;
        /*
         * We do not call nvic_irq_update(), because we know our caller
         * is going to handle causing us to take the exception by
         * raising EXCP_LAZYFP, so raising the IRQ line would be
         * pointless extra work. We just need to recompute the
         * priorities so that armv7m_nvic_can_take_pending_exception()
         * returns the right answer.
         */
        nvic_recompute_state(s);
    }
}
758 
/* Make the currently pending IRQ (s->vectpending) active. */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    /* vectpending_is_s_banked selects which bank holds the pending
     * exception's state.
     */
    if (s->vectpending_is_s_banked) {
        vec = &s->sec_vectors[pending];
    } else {
        vec = &s->vectors[pending];
    }

    assert(vec->enabled);
    assert(vec->pending);

    /* The pending exception must be able to preempt current execution */
    assert(s->vectpending_prio < running);

    trace_nvic_acknowledge_irq(pending, s->vectpending_prio);

    vec->active = 1;
    vec->pending = 0;

    /* Record the taken exception number in the CPU state
     * (presumably updates IPSR — see write_v7m_exception()).
     */
    write_v7m_exception(env, s->vectpending);

    /* Pending/active sets changed: re-derive NVIC state and output */
    nvic_irq_update(s);
}
790 
791 void armv7m_nvic_get_pending_irq_info(void *opaque,
792                                       int *pirq, bool *ptargets_secure)
793 {
794     NVICState *s = (NVICState *)opaque;
795     const int pending = s->vectpending;
796     bool targets_secure;
797 
798     assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
799 
800     if (s->vectpending_is_s_banked) {
801         targets_secure = true;
802     } else {
803         targets_secure = !exc_is_banked(pending) &&
804             exc_targets_secure(s, pending);
805     }
806 
807     trace_nvic_get_pending_irq_info(pending, targets_secure);
808 
809     *ptargets_secure = targets_secure;
810     *pirq = pending;
811 }
812 
813 int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
814 {
815     NVICState *s = (NVICState *)opaque;
816     VecInfo *vec;
817     int ret;
818 
819     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
820 
821     if (secure && exc_is_banked(irq)) {
822         vec = &s->sec_vectors[irq];
823     } else {
824         vec = &s->vectors[irq];
825     }
826 
827     trace_nvic_complete_irq(irq, secure);
828 
829     if (!vec->active) {
830         /* Tell the caller this was an illegal exception return */
831         return -1;
832     }
833 
834     ret = nvic_rettobase(s);
835 
836     vec->active = 0;
837     if (vec->level) {
838         /* Re-pend the exception if it's still held high; only
839          * happens for extenal IRQs
840          */
841         assert(irq >= NVIC_FIRST_IRQ);
842         vec->pending = 1;
843     }
844 
845     nvic_irq_update(s);
846 
847     return ret;
848 }
849 
850 bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
851 {
852     /*
853      * Return whether an exception is "ready", i.e. it is enabled and is
854      * configured at a priority which would allow it to interrupt the
855      * current execution priority.
856      *
857      * irq and secure have the same semantics as for armv7m_nvic_set_pending():
858      * for non-banked exceptions secure is always false; for banked exceptions
859      * it indicates which of the exceptions is required.
860      */
861     NVICState *s = (NVICState *)opaque;
862     bool banked = exc_is_banked(irq);
863     VecInfo *vec;
864     int running = nvic_exec_prio(s);
865 
866     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
867     assert(!secure || banked);
868 
869     /*
870      * HardFault is an odd special case: we always check against -1,
871      * even if we're secure and HardFault has priority -3; we never
872      * need to check for enabled state.
873      */
874     if (irq == ARMV7M_EXCP_HARD) {
875         return running > -1;
876     }
877 
878     vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
879 
880     return vec->enabled &&
881         exc_group_prio(s, vec->prio, secure) < running;
882 }
883 
884 /* callback when external interrupt line is changed */
885 static void set_irq_level(void *opaque, int n, int level)
886 {
887     NVICState *s = opaque;
888     VecInfo *vec;
889 
890     n += NVIC_FIRST_IRQ;
891 
892     assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
893 
894     trace_nvic_set_irq_level(n, level);
895 
896     /* The pending status of an external interrupt is
897      * latched on rising edge and exception handler return.
898      *
899      * Pulsing the IRQ will always run the handler
900      * once, and the handler will re-run until the
901      * level is low when the handler completes.
902      */
903     vec = &s->vectors[n];
904     if (level != vec->level) {
905         vec->level = level;
906         if (level) {
907             armv7m_nvic_set_pending(s, n, false);
908         }
909     }
910 }
911 
912 /* callback when external NMI line is changed */
913 static void nvic_nmi_trigger(void *opaque, int n, int level)
914 {
915     NVICState *s = opaque;
916 
917     trace_nvic_set_nmi_level(level);
918 
919     /*
920      * The architecture doesn't specify whether NMI should share
921      * the normal-interrupt behaviour of being resampled on
922      * exception handler return. We choose not to, so just
923      * set NMI pending here and don't track the current level.
924      */
925     if (level) {
926         armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
927     }
928 }
929 
930 static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
931 {
932     ARMCPU *cpu = s->cpu;
933     uint32_t val;
934 
935     switch (offset) {
936     case 4: /* Interrupt Control Type.  */
937         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
938             goto bad_offset;
939         }
940         return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
941     case 0xc: /* CPPWR */
942         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
943             goto bad_offset;
944         }
945         /* We make the IMPDEF choice that nothing can ever go into a
946          * non-retentive power state, which allows us to RAZ/WI this.
947          */
948         return 0;
949     case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
950     {
951         int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
952         int i;
953 
954         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
955             goto bad_offset;
956         }
957         if (!attrs.secure) {
958             return 0;
959         }
960         val = 0;
961         for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
962             if (s->itns[startvec + i]) {
963                 val |= (1 << i);
964             }
965         }
966         return val;
967     }
968     case 0xd00: /* CPUID Base.  */
969         return cpu->midr;
970     case 0xd04: /* Interrupt Control State (ICSR) */
971         /* VECTACTIVE */
972         val = cpu->env.v7m.exception;
973         /* VECTPENDING */
974         val |= (s->vectpending & 0xff) << 12;
975         /* ISRPENDING - set if any external IRQ is pending */
976         if (nvic_isrpending(s)) {
977             val |= (1 << 22);
978         }
979         /* RETTOBASE - set if only one handler is active */
980         if (nvic_rettobase(s)) {
981             val |= (1 << 11);
982         }
983         if (attrs.secure) {
984             /* PENDSTSET */
985             if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
986                 val |= (1 << 26);
987             }
988             /* PENDSVSET */
989             if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
990                 val |= (1 << 28);
991             }
992         } else {
993             /* PENDSTSET */
994             if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
995                 val |= (1 << 26);
996             }
997             /* PENDSVSET */
998             if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
999                 val |= (1 << 28);
1000             }
1001         }
1002         /* NMIPENDSET */
1003         if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
1004             && s->vectors[ARMV7M_EXCP_NMI].pending) {
1005             val |= (1 << 31);
1006         }
1007         /* ISRPREEMPT: RES0 when halting debug not implemented */
1008         /* STTNS: RES0 for the Main Extension */
1009         return val;
1010     case 0xd08: /* Vector Table Offset.  */
1011         return cpu->env.v7m.vecbase[attrs.secure];
1012     case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1013         val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
1014         if (attrs.secure) {
1015             /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
1016             val |= cpu->env.v7m.aircr;
1017         } else {
1018             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1019                 /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
1020                  * security isn't supported then BFHFNMINS is RAO (and
1021                  * the bit in env.v7m.aircr is always set).
1022                  */
1023                 val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
1024             }
1025         }
1026         return val;
1027     case 0xd10: /* System Control.  */
1028         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1029             goto bad_offset;
1030         }
1031         return cpu->env.v7m.scr[attrs.secure];
1032     case 0xd14: /* Configuration Control.  */
1033         /* The BFHFNMIGN bit is the only non-banked bit; we
1034          * keep it in the non-secure copy of the register.
1035          */
1036         val = cpu->env.v7m.ccr[attrs.secure];
1037         val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
1038         return val;
1039     case 0xd24: /* System Handler Control and State (SHCSR) */
1040         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1041             goto bad_offset;
1042         }
1043         val = 0;
1044         if (attrs.secure) {
1045             if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
1046                 val |= (1 << 0);
1047             }
1048             if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
1049                 val |= (1 << 2);
1050             }
1051             if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
1052                 val |= (1 << 3);
1053             }
1054             if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
1055                 val |= (1 << 7);
1056             }
1057             if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
1058                 val |= (1 << 10);
1059             }
1060             if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
1061                 val |= (1 << 11);
1062             }
1063             if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
1064                 val |= (1 << 12);
1065             }
1066             if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
1067                 val |= (1 << 13);
1068             }
1069             if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
1070                 val |= (1 << 15);
1071             }
1072             if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
1073                 val |= (1 << 16);
1074             }
1075             if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
1076                 val |= (1 << 18);
1077             }
1078             if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
1079                 val |= (1 << 21);
1080             }
1081             /* SecureFault is not banked but is always RAZ/WI to NS */
1082             if (s->vectors[ARMV7M_EXCP_SECURE].active) {
1083                 val |= (1 << 4);
1084             }
1085             if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
1086                 val |= (1 << 19);
1087             }
1088             if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
1089                 val |= (1 << 20);
1090             }
1091         } else {
1092             if (s->vectors[ARMV7M_EXCP_MEM].active) {
1093                 val |= (1 << 0);
1094             }
1095             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1096                 /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
1097                 if (s->vectors[ARMV7M_EXCP_HARD].active) {
1098                     val |= (1 << 2);
1099                 }
1100                 if (s->vectors[ARMV7M_EXCP_HARD].pending) {
1101                     val |= (1 << 21);
1102                 }
1103             }
1104             if (s->vectors[ARMV7M_EXCP_USAGE].active) {
1105                 val |= (1 << 3);
1106             }
1107             if (s->vectors[ARMV7M_EXCP_SVC].active) {
1108                 val |= (1 << 7);
1109             }
1110             if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
1111                 val |= (1 << 10);
1112             }
1113             if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
1114                 val |= (1 << 11);
1115             }
1116             if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
1117                 val |= (1 << 12);
1118             }
1119             if (s->vectors[ARMV7M_EXCP_MEM].pending) {
1120                 val |= (1 << 13);
1121             }
1122             if (s->vectors[ARMV7M_EXCP_SVC].pending) {
1123                 val |= (1 << 15);
1124             }
1125             if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
1126                 val |= (1 << 16);
1127             }
1128             if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
1129                 val |= (1 << 18);
1130             }
1131         }
1132         if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1133             if (s->vectors[ARMV7M_EXCP_BUS].active) {
1134                 val |= (1 << 1);
1135             }
1136             if (s->vectors[ARMV7M_EXCP_BUS].pending) {
1137                 val |= (1 << 14);
1138             }
1139             if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
1140                 val |= (1 << 17);
1141             }
1142             if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
1143                 s->vectors[ARMV7M_EXCP_NMI].active) {
1144                 /* NMIACT is not present in v7M */
1145                 val |= (1 << 5);
1146             }
1147         }
1148 
1149         /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1150         if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
1151             val |= (1 << 8);
1152         }
1153         return val;
1154     case 0xd2c: /* Hard Fault Status.  */
1155         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1156             goto bad_offset;
1157         }
1158         return cpu->env.v7m.hfsr;
1159     case 0xd30: /* Debug Fault Status.  */
1160         return cpu->env.v7m.dfsr;
1161     case 0xd34: /* MMFAR MemManage Fault Address */
1162         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1163             goto bad_offset;
1164         }
1165         return cpu->env.v7m.mmfar[attrs.secure];
1166     case 0xd38: /* Bus Fault Address.  */
1167         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1168             goto bad_offset;
1169         }
1170         return cpu->env.v7m.bfar;
1171     case 0xd3c: /* Aux Fault Status.  */
1172         /* TODO: Implement fault status registers.  */
1173         qemu_log_mask(LOG_UNIMP,
1174                       "Aux Fault status registers unimplemented\n");
1175         return 0;
1176     case 0xd40: /* PFR0.  */
1177         return cpu->id_pfr0;
1178     case 0xd44: /* PFR1.  */
1179         return cpu->id_pfr1;
1180     case 0xd48: /* DFR0.  */
1181         return cpu->id_dfr0;
1182     case 0xd4c: /* AFR0.  */
1183         return cpu->id_afr0;
1184     case 0xd50: /* MMFR0.  */
1185         return cpu->id_mmfr0;
1186     case 0xd54: /* MMFR1.  */
1187         return cpu->id_mmfr1;
1188     case 0xd58: /* MMFR2.  */
1189         return cpu->id_mmfr2;
1190     case 0xd5c: /* MMFR3.  */
1191         return cpu->id_mmfr3;
1192     case 0xd60: /* ISAR0.  */
1193         return cpu->isar.id_isar0;
1194     case 0xd64: /* ISAR1.  */
1195         return cpu->isar.id_isar1;
1196     case 0xd68: /* ISAR2.  */
1197         return cpu->isar.id_isar2;
1198     case 0xd6c: /* ISAR3.  */
1199         return cpu->isar.id_isar3;
1200     case 0xd70: /* ISAR4.  */
1201         return cpu->isar.id_isar4;
1202     case 0xd74: /* ISAR5.  */
1203         return cpu->isar.id_isar5;
1204     case 0xd78: /* CLIDR */
1205         return cpu->clidr;
1206     case 0xd7c: /* CTR */
1207         return cpu->ctr;
1208     case 0xd80: /* CSSIDR */
1209     {
1210         int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
1211         return cpu->ccsidr[idx];
1212     }
1213     case 0xd84: /* CSSELR */
1214         return cpu->env.v7m.csselr[attrs.secure];
1215     case 0xd88: /* CPACR */
1216         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1217             return 0;
1218         }
1219         return cpu->env.v7m.cpacr[attrs.secure];
1220     case 0xd8c: /* NSACR */
1221         if (!attrs.secure || !arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1222             return 0;
1223         }
1224         return cpu->env.v7m.nsacr;
1225     /* TODO: Implement debug registers.  */
1226     case 0xd90: /* MPU_TYPE */
1227         /* Unified MPU; if the MPU is not present this value is zero */
1228         return cpu->pmsav7_dregion << 8;
1229         break;
1230     case 0xd94: /* MPU_CTRL */
1231         return cpu->env.v7m.mpu_ctrl[attrs.secure];
1232     case 0xd98: /* MPU_RNR */
1233         return cpu->env.pmsav7.rnr[attrs.secure];
1234     case 0xd9c: /* MPU_RBAR */
1235     case 0xda4: /* MPU_RBAR_A1 */
1236     case 0xdac: /* MPU_RBAR_A2 */
1237     case 0xdb4: /* MPU_RBAR_A3 */
1238     {
1239         int region = cpu->env.pmsav7.rnr[attrs.secure];
1240 
1241         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1242             /* PMSAv8M handling of the aliases is different from v7M:
1243              * aliases A1, A2, A3 override the low two bits of the region
1244              * number in MPU_RNR, and there is no 'region' field in the
1245              * RBAR register.
1246              */
1247             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1248             if (aliasno) {
1249                 region = deposit32(region, 0, 2, aliasno);
1250             }
1251             if (region >= cpu->pmsav7_dregion) {
1252                 return 0;
1253             }
1254             return cpu->env.pmsav8.rbar[attrs.secure][region];
1255         }
1256 
1257         if (region >= cpu->pmsav7_dregion) {
1258             return 0;
1259         }
1260         return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
1261     }
1262     case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1263     case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1264     case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1265     case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1266     {
1267         int region = cpu->env.pmsav7.rnr[attrs.secure];
1268 
1269         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1270             /* PMSAv8M handling of the aliases is different from v7M:
1271              * aliases A1, A2, A3 override the low two bits of the region
1272              * number in MPU_RNR.
1273              */
1274             int aliasno = (offset - 0xda0) / 8; /* 0..3 */
1275             if (aliasno) {
1276                 region = deposit32(region, 0, 2, aliasno);
1277             }
1278             if (region >= cpu->pmsav7_dregion) {
1279                 return 0;
1280             }
1281             return cpu->env.pmsav8.rlar[attrs.secure][region];
1282         }
1283 
1284         if (region >= cpu->pmsav7_dregion) {
1285             return 0;
1286         }
1287         return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
1288             (cpu->env.pmsav7.drsr[region] & 0xffff);
1289     }
1290     case 0xdc0: /* MPU_MAIR0 */
1291         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1292             goto bad_offset;
1293         }
1294         return cpu->env.pmsav8.mair0[attrs.secure];
1295     case 0xdc4: /* MPU_MAIR1 */
1296         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1297             goto bad_offset;
1298         }
1299         return cpu->env.pmsav8.mair1[attrs.secure];
1300     case 0xdd0: /* SAU_CTRL */
1301         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1302             goto bad_offset;
1303         }
1304         if (!attrs.secure) {
1305             return 0;
1306         }
1307         return cpu->env.sau.ctrl;
1308     case 0xdd4: /* SAU_TYPE */
1309         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1310             goto bad_offset;
1311         }
1312         if (!attrs.secure) {
1313             return 0;
1314         }
1315         return cpu->sau_sregion;
1316     case 0xdd8: /* SAU_RNR */
1317         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1318             goto bad_offset;
1319         }
1320         if (!attrs.secure) {
1321             return 0;
1322         }
1323         return cpu->env.sau.rnr;
1324     case 0xddc: /* SAU_RBAR */
1325     {
1326         int region = cpu->env.sau.rnr;
1327 
1328         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1329             goto bad_offset;
1330         }
1331         if (!attrs.secure) {
1332             return 0;
1333         }
1334         if (region >= cpu->sau_sregion) {
1335             return 0;
1336         }
1337         return cpu->env.sau.rbar[region];
1338     }
1339     case 0xde0: /* SAU_RLAR */
1340     {
1341         int region = cpu->env.sau.rnr;
1342 
1343         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1344             goto bad_offset;
1345         }
1346         if (!attrs.secure) {
1347             return 0;
1348         }
1349         if (region >= cpu->sau_sregion) {
1350             return 0;
1351         }
1352         return cpu->env.sau.rlar[region];
1353     }
1354     case 0xde4: /* SFSR */
1355         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1356             goto bad_offset;
1357         }
1358         if (!attrs.secure) {
1359             return 0;
1360         }
1361         return cpu->env.v7m.sfsr;
1362     case 0xde8: /* SFAR */
1363         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1364             goto bad_offset;
1365         }
1366         if (!attrs.secure) {
1367             return 0;
1368         }
1369         return cpu->env.v7m.sfar;
1370     case 0xf34: /* FPCCR */
1371         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1372             return 0;
1373         }
1374         if (attrs.secure) {
1375             return cpu->env.v7m.fpccr[M_REG_S];
1376         } else {
1377             /*
1378              * NS can read LSPEN, CLRONRET and MONRDY. It can read
1379              * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0;
1380              * other non-banked bits RAZ.
1381              * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set.
1382              */
1383             uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
1384             uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
1385                 R_V7M_FPCCR_CLRONRET_MASK |
1386                 R_V7M_FPCCR_MONRDY_MASK;
1387 
1388             if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1389                 mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
1390             }
1391 
1392             value &= mask;
1393 
1394             value |= cpu->env.v7m.fpccr[M_REG_NS];
1395             return value;
1396         }
1397     case 0xf38: /* FPCAR */
1398         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1399             return 0;
1400         }
1401         return cpu->env.v7m.fpcar[attrs.secure];
1402     case 0xf3c: /* FPDSCR */
1403         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1404             return 0;
1405         }
1406         return cpu->env.v7m.fpdscr[attrs.secure];
1407     case 0xf40: /* MVFR0 */
1408         return cpu->isar.mvfr0;
1409     case 0xf44: /* MVFR1 */
1410         return cpu->isar.mvfr1;
1411     case 0xf48: /* MVFR2 */
1412         return cpu->isar.mvfr2;
1413     default:
1414     bad_offset:
1415         qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
1416         return 0;
1417     }
1418 }
1419 
1420 static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
1421                         MemTxAttrs attrs)
1422 {
1423     ARMCPU *cpu = s->cpu;
1424 
1425     switch (offset) {
1426     case 0xc: /* CPPWR */
1427         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1428             goto bad_offset;
1429         }
1430         /* Make the IMPDEF choice to RAZ/WI this. */
1431         break;
1432     case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
1433     {
1434         int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1435         int i;
1436 
1437         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1438             goto bad_offset;
1439         }
1440         if (!attrs.secure) {
1441             break;
1442         }
1443         for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1444             s->itns[startvec + i] = (value >> i) & 1;
1445         }
1446         nvic_irq_update(s);
1447         break;
1448     }
1449     case 0xd04: /* Interrupt Control State (ICSR) */
1450         if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1451             if (value & (1 << 31)) {
1452                 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
1453             } else if (value & (1 << 30) &&
1454                        arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1455                 /* PENDNMICLR didn't exist in v7M */
1456                 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
1457             }
1458         }
1459         if (value & (1 << 28)) {
1460             armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1461         } else if (value & (1 << 27)) {
1462             armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1463         }
1464         if (value & (1 << 26)) {
1465             armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1466         } else if (value & (1 << 25)) {
1467             armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1468         }
1469         break;
1470     case 0xd08: /* Vector Table Offset.  */
1471         cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
1472         break;
1473     case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1474         if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
1475             if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
1476                 if (attrs.secure ||
1477                     !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
1478                     qemu_irq_pulse(s->sysresetreq);
1479                 }
1480             }
1481             if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
1482                 qemu_log_mask(LOG_GUEST_ERROR,
1483                               "Setting VECTCLRACTIVE when not in DEBUG mode "
1484                               "is UNPREDICTABLE\n");
1485             }
1486             if (value & R_V7M_AIRCR_VECTRESET_MASK) {
1487                 /* NB: this bit is RES0 in v8M */
1488                 qemu_log_mask(LOG_GUEST_ERROR,
1489                               "Setting VECTRESET when not in DEBUG mode "
1490                               "is UNPREDICTABLE\n");
1491             }
1492             if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1493                 s->prigroup[attrs.secure] =
1494                     extract32(value,
1495                               R_V7M_AIRCR_PRIGROUP_SHIFT,
1496                               R_V7M_AIRCR_PRIGROUP_LENGTH);
1497             }
1498             if (attrs.secure) {
1499                 /* These bits are only writable by secure */
1500                 cpu->env.v7m.aircr = value &
1501                     (R_V7M_AIRCR_SYSRESETREQS_MASK |
1502                      R_V7M_AIRCR_BFHFNMINS_MASK |
1503                      R_V7M_AIRCR_PRIS_MASK);
1504                 /* BFHFNMINS changes the priority of Secure HardFault, and
1505                  * allows a pending Non-secure HardFault to preempt (which
1506                  * we implement by marking it enabled).
1507                  */
1508                 if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1509                     s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
1510                     s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
1511                 } else {
1512                     s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
1513                     s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1514                 }
1515             }
1516             nvic_irq_update(s);
1517         }
1518         break;
1519     case 0xd10: /* System Control.  */
1520         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1521             goto bad_offset;
1522         }
1523         /* We don't implement deep-sleep so these bits are RAZ/WI.
1524          * The other bits in the register are banked.
1525          * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
1526          * is architecturally permitted.
1527          */
1528         value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
1529         cpu->env.v7m.scr[attrs.secure] = value;
1530         break;
1531     case 0xd14: /* Configuration Control.  */
1532         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1533             goto bad_offset;
1534         }
1535 
1536         /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
1537         value &= (R_V7M_CCR_STKALIGN_MASK |
1538                   R_V7M_CCR_BFHFNMIGN_MASK |
1539                   R_V7M_CCR_DIV_0_TRP_MASK |
1540                   R_V7M_CCR_UNALIGN_TRP_MASK |
1541                   R_V7M_CCR_USERSETMPEND_MASK |
1542                   R_V7M_CCR_NONBASETHRDENA_MASK);
1543 
1544         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1545             /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
1546             value |= R_V7M_CCR_NONBASETHRDENA_MASK
1547                 | R_V7M_CCR_STKALIGN_MASK;
1548         }
1549         if (attrs.secure) {
1550             /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
1551             cpu->env.v7m.ccr[M_REG_NS] =
1552                 (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
1553                 | (value & R_V7M_CCR_BFHFNMIGN_MASK);
1554             value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1555         }
1556 
1557         cpu->env.v7m.ccr[attrs.secure] = value;
1558         break;
1559     case 0xd24: /* System Handler Control and State (SHCSR) */
1560         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1561             goto bad_offset;
1562         }
1563         if (attrs.secure) {
1564             s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1565             /* Secure HardFault active bit cannot be written */
1566             s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1567             s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1568             s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
1569                 (value & (1 << 10)) != 0;
1570             s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
1571                 (value & (1 << 11)) != 0;
1572             s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
1573                 (value & (1 << 12)) != 0;
1574             s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1575             s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1576             s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1577             s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1578             s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
1579                 (value & (1 << 18)) != 0;
1580             s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1581             /* SecureFault not banked, but RAZ/WI to NS */
1582             s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
1583             s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
1584             s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
1585         } else {
1586             s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1587             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1588                 /* HARDFAULTPENDED is not present in v7M */
1589                 s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1590             }
1591             s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1592             s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1593             s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
1594             s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
1595             s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
1596             s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1597             s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1598             s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1599             s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
1600         }
1601         if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1602             s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
1603             s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
1604             s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1605         }
1606         /* NMIACT can only be written if the write is of a zero, with
1607          * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
1608          */
1609         if (!attrs.secure && cpu->env.v7m.secure &&
1610             (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1611             (value & (1 << 5)) == 0) {
1612             s->vectors[ARMV7M_EXCP_NMI].active = 0;
1613         }
1614         /* HARDFAULTACT can only be written if the write is of a zero
1615          * to the non-secure HardFault state by the CPU in secure state.
1616          * The only case where we can be targeting the non-secure HF state
1617          * when in secure state is if this is a write via the NS alias
1618          * and BFHFNMINS is 1.
1619          */
1620         if (!attrs.secure && cpu->env.v7m.secure &&
1621             (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1622             (value & (1 << 2)) == 0) {
1623             s->vectors[ARMV7M_EXCP_HARD].active = 0;
1624         }
1625 
1626         /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1627         s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
1628         nvic_irq_update(s);
1629         break;
1630     case 0xd2c: /* Hard Fault Status.  */
1631         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1632             goto bad_offset;
1633         }
1634         cpu->env.v7m.hfsr &= ~value; /* W1C */
1635         break;
1636     case 0xd30: /* Debug Fault Status.  */
1637         cpu->env.v7m.dfsr &= ~value; /* W1C */
1638         break;
1639     case 0xd34: /* Mem Manage Address.  */
1640         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1641             goto bad_offset;
1642         }
1643         cpu->env.v7m.mmfar[attrs.secure] = value;
1644         return;
1645     case 0xd38: /* Bus Fault Address.  */
1646         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1647             goto bad_offset;
1648         }
1649         cpu->env.v7m.bfar = value;
1650         return;
1651     case 0xd3c: /* Aux Fault Status.  */
1652         qemu_log_mask(LOG_UNIMP,
1653                       "NVIC: Aux fault status registers unimplemented\n");
1654         break;
1655     case 0xd84: /* CSSELR */
1656         if (!arm_v7m_csselr_razwi(cpu)) {
1657             cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
1658         }
1659         break;
1660     case 0xd88: /* CPACR */
1661         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1662             /* We implement only the Floating Point extension's CP10/CP11 */
1663             cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
1664         }
1665         break;
1666     case 0xd8c: /* NSACR */
1667         if (attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1668             /* We implement only the Floating Point extension's CP10/CP11 */
1669             cpu->env.v7m.nsacr = value & (3 << 10);
1670         }
1671         break;
1672     case 0xd90: /* MPU_TYPE */
1673         return; /* RO */
1674     case 0xd94: /* MPU_CTRL */
1675         if ((value &
1676              (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
1677             == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
1678             qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
1679                           "UNPREDICTABLE\n");
1680         }
1681         cpu->env.v7m.mpu_ctrl[attrs.secure]
1682             = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
1683                        R_V7M_MPU_CTRL_HFNMIENA_MASK |
1684                        R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
1685         tlb_flush(CPU(cpu));
1686         break;
1687     case 0xd98: /* MPU_RNR */
1688         if (value >= cpu->pmsav7_dregion) {
1689             qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
1690                           PRIu32 "/%" PRIu32 "\n",
1691                           value, cpu->pmsav7_dregion);
1692         } else {
1693             cpu->env.pmsav7.rnr[attrs.secure] = value;
1694         }
1695         break;
1696     case 0xd9c: /* MPU_RBAR */
1697     case 0xda4: /* MPU_RBAR_A1 */
1698     case 0xdac: /* MPU_RBAR_A2 */
1699     case 0xdb4: /* MPU_RBAR_A3 */
1700     {
1701         int region;
1702 
1703         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1704             /* PMSAv8M handling of the aliases is different from v7M:
1705              * aliases A1, A2, A3 override the low two bits of the region
1706              * number in MPU_RNR, and there is no 'region' field in the
1707              * RBAR register.
1708              */
1709             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1710 
1711             region = cpu->env.pmsav7.rnr[attrs.secure];
1712             if (aliasno) {
1713                 region = deposit32(region, 0, 2, aliasno);
1714             }
1715             if (region >= cpu->pmsav7_dregion) {
1716                 return;
1717             }
1718             cpu->env.pmsav8.rbar[attrs.secure][region] = value;
1719             tlb_flush(CPU(cpu));
1720             return;
1721         }
1722 
1723         if (value & (1 << 4)) {
1724             /* VALID bit means use the region number specified in this
1725              * value and also update MPU_RNR.REGION with that value.
1726              */
1727             region = extract32(value, 0, 4);
1728             if (region >= cpu->pmsav7_dregion) {
1729                 qemu_log_mask(LOG_GUEST_ERROR,
1730                               "MPU region out of range %u/%" PRIu32 "\n",
1731                               region, cpu->pmsav7_dregion);
1732                 return;
1733             }
1734             cpu->env.pmsav7.rnr[attrs.secure] = region;
1735         } else {
1736             region = cpu->env.pmsav7.rnr[attrs.secure];
1737         }
1738 
1739         if (region >= cpu->pmsav7_dregion) {
1740             return;
1741         }
1742 
1743         cpu->env.pmsav7.drbar[region] = value & ~0x1f;
1744         tlb_flush(CPU(cpu));
1745         break;
1746     }
1747     case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1748     case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1749     case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1750     case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1751     {
1752         int region = cpu->env.pmsav7.rnr[attrs.secure];
1753 
1754         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1755             /* PMSAv8M handling of the aliases is different from v7M:
1756              * aliases A1, A2, A3 override the low two bits of the region
1757              * number in MPU_RNR.
1758              */
1759             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1760 
1761             region = cpu->env.pmsav7.rnr[attrs.secure];
1762             if (aliasno) {
1763                 region = deposit32(region, 0, 2, aliasno);
1764             }
1765             if (region >= cpu->pmsav7_dregion) {
1766                 return;
1767             }
1768             cpu->env.pmsav8.rlar[attrs.secure][region] = value;
1769             tlb_flush(CPU(cpu));
1770             return;
1771         }
1772 
1773         if (region >= cpu->pmsav7_dregion) {
1774             return;
1775         }
1776 
1777         cpu->env.pmsav7.drsr[region] = value & 0xff3f;
1778         cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
1779         tlb_flush(CPU(cpu));
1780         break;
1781     }
1782     case 0xdc0: /* MPU_MAIR0 */
1783         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1784             goto bad_offset;
1785         }
1786         if (cpu->pmsav7_dregion) {
1787             /* Register is RES0 if no MPU regions are implemented */
1788             cpu->env.pmsav8.mair0[attrs.secure] = value;
1789         }
1790         /* We don't need to do anything else because memory attributes
1791          * only affect cacheability, and we don't implement caching.
1792          */
1793         break;
1794     case 0xdc4: /* MPU_MAIR1 */
1795         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1796             goto bad_offset;
1797         }
1798         if (cpu->pmsav7_dregion) {
1799             /* Register is RES0 if no MPU regions are implemented */
1800             cpu->env.pmsav8.mair1[attrs.secure] = value;
1801         }
1802         /* We don't need to do anything else because memory attributes
1803          * only affect cacheability, and we don't implement caching.
1804          */
1805         break;
1806     case 0xdd0: /* SAU_CTRL */
1807         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1808             goto bad_offset;
1809         }
1810         if (!attrs.secure) {
1811             return;
1812         }
1813         cpu->env.sau.ctrl = value & 3;
1814         break;
1815     case 0xdd4: /* SAU_TYPE */
1816         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1817             goto bad_offset;
1818         }
1819         break;
1820     case 0xdd8: /* SAU_RNR */
1821         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1822             goto bad_offset;
1823         }
1824         if (!attrs.secure) {
1825             return;
1826         }
1827         if (value >= cpu->sau_sregion) {
1828             qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
1829                           PRIu32 "/%" PRIu32 "\n",
1830                           value, cpu->sau_sregion);
1831         } else {
1832             cpu->env.sau.rnr = value;
1833         }
1834         break;
1835     case 0xddc: /* SAU_RBAR */
1836     {
1837         int region = cpu->env.sau.rnr;
1838 
1839         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1840             goto bad_offset;
1841         }
1842         if (!attrs.secure) {
1843             return;
1844         }
1845         if (region >= cpu->sau_sregion) {
1846             return;
1847         }
1848         cpu->env.sau.rbar[region] = value & ~0x1f;
1849         tlb_flush(CPU(cpu));
1850         break;
1851     }
1852     case 0xde0: /* SAU_RLAR */
1853     {
1854         int region = cpu->env.sau.rnr;
1855 
1856         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1857             goto bad_offset;
1858         }
1859         if (!attrs.secure) {
1860             return;
1861         }
1862         if (region >= cpu->sau_sregion) {
1863             return;
1864         }
1865         cpu->env.sau.rlar[region] = value & ~0x1c;
1866         tlb_flush(CPU(cpu));
1867         break;
1868     }
1869     case 0xde4: /* SFSR */
1870         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1871             goto bad_offset;
1872         }
1873         if (!attrs.secure) {
1874             return;
1875         }
1876         cpu->env.v7m.sfsr &= ~value; /* W1C */
1877         break;
1878     case 0xde8: /* SFAR */
1879         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1880             goto bad_offset;
1881         }
1882         if (!attrs.secure) {
1883             return;
1884         }
1885         cpu->env.v7m.sfsr = value;
1886         break;
1887     case 0xf00: /* Software Triggered Interrupt Register */
1888     {
1889         int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
1890 
1891         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1892             goto bad_offset;
1893         }
1894 
1895         if (excnum < s->num_irq) {
1896             armv7m_nvic_set_pending(s, excnum, false);
1897         }
1898         break;
1899     }
1900     case 0xf34: /* FPCCR */
1901         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1902             /* Not all bits here are banked. */
1903             uint32_t fpccr_s;
1904 
1905             if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1906                 /* Don't allow setting of bits not present in v7M */
1907                 value &= (R_V7M_FPCCR_LSPACT_MASK |
1908                           R_V7M_FPCCR_USER_MASK |
1909                           R_V7M_FPCCR_THREAD_MASK |
1910                           R_V7M_FPCCR_HFRDY_MASK |
1911                           R_V7M_FPCCR_MMRDY_MASK |
1912                           R_V7M_FPCCR_BFRDY_MASK |
1913                           R_V7M_FPCCR_MONRDY_MASK |
1914                           R_V7M_FPCCR_LSPEN_MASK |
1915                           R_V7M_FPCCR_ASPEN_MASK);
1916             }
1917             value &= ~R_V7M_FPCCR_RES0_MASK;
1918 
1919             if (!attrs.secure) {
1920                 /* Some non-banked bits are configurably writable by NS */
1921                 fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
1922                 if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
1923                     uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
1924                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
1925                 }
1926                 if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
1927                     uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
1928                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
1929                 }
1930                 if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1931                     uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
1932                     uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
1933                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
1934                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
1935                 }
1936                 /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */
1937                 {
1938                     uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
1939                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
1940                 }
1941 
1942                 /*
1943                  * All other non-banked bits are RAZ/WI from NS; write
1944                  * just the banked bits to fpccr[M_REG_NS].
1945                  */
1946                 value &= R_V7M_FPCCR_BANKED_MASK;
1947                 cpu->env.v7m.fpccr[M_REG_NS] = value;
1948             } else {
1949                 fpccr_s = value;
1950             }
1951             cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
1952         }
1953         break;
1954     case 0xf38: /* FPCAR */
1955         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1956             value &= ~7;
1957             cpu->env.v7m.fpcar[attrs.secure] = value;
1958         }
1959         break;
1960     case 0xf3c: /* FPDSCR */
1961         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1962             value &= 0x07c00000;
1963             cpu->env.v7m.fpdscr[attrs.secure] = value;
1964         }
1965         break;
1966     case 0xf50: /* ICIALLU */
1967     case 0xf58: /* ICIMVAU */
1968     case 0xf5c: /* DCIMVAC */
1969     case 0xf60: /* DCISW */
1970     case 0xf64: /* DCCMVAU */
1971     case 0xf68: /* DCCMVAC */
1972     case 0xf6c: /* DCCSW */
1973     case 0xf70: /* DCCIMVAC */
1974     case 0xf74: /* DCCISW */
1975     case 0xf78: /* BPIALL */
1976         /* Cache and branch predictor maintenance: for QEMU these always NOP */
1977         break;
1978     default:
1979     bad_offset:
1980         qemu_log_mask(LOG_GUEST_ERROR,
1981                       "NVIC: Bad write offset 0x%x\n", offset);
1982     }
1983 }
1984 
1985 static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
1986 {
1987     /* Return true if unprivileged access to this register is permitted. */
1988     switch (offset) {
1989     case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
1990         /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
1991          * controls access even though the CPU is in Secure state (I_QDKX).
1992          */
1993         return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
1994     default:
1995         /* All other user accesses cause a BusFault unconditionally */
1996         return false;
1997     }
1998 }
1999 
2000 static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
2001 {
2002     /* Behaviour for the SHPR register field for this exception:
2003      * return M_REG_NS to use the nonsecure vector (including for
2004      * non-banked exceptions), M_REG_S for the secure version of
2005      * a banked exception, and -1 if this field should RAZ/WI.
2006      */
2007     switch (exc) {
2008     case ARMV7M_EXCP_MEM:
2009     case ARMV7M_EXCP_USAGE:
2010     case ARMV7M_EXCP_SVC:
2011     case ARMV7M_EXCP_PENDSV:
2012     case ARMV7M_EXCP_SYSTICK:
2013         /* Banked exceptions */
2014         return attrs.secure;
2015     case ARMV7M_EXCP_BUS:
2016         /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
2017         if (!attrs.secure &&
2018             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2019             return -1;
2020         }
2021         return M_REG_NS;
2022     case ARMV7M_EXCP_SECURE:
2023         /* Not banked, RAZ/WI from nonsecure */
2024         if (!attrs.secure) {
2025             return -1;
2026         }
2027         return M_REG_NS;
2028     case ARMV7M_EXCP_DEBUG:
2029         /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
2030         return M_REG_NS;
2031     case 8 ... 10:
2032     case 13:
2033         /* RES0 */
2034         return -1;
2035     default:
2036         /* Not reachable due to decode of SHPR register addresses */
2037         g_assert_not_reached();
2038     }
2039 }
2040 
2041 static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
2042                                     uint64_t *data, unsigned size,
2043                                     MemTxAttrs attrs)
2044 {
2045     NVICState *s = (NVICState *)opaque;
2046     uint32_t offset = addr;
2047     unsigned i, startvec, end;
2048     uint32_t val;
2049 
2050     if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
2051         /* Generate BusFault for unprivileged accesses */
2052         return MEMTX_ERROR;
2053     }
2054 
2055     switch (offset) {
2056     /* reads of set and clear both return the status */
2057     case 0x100 ... 0x13f: /* NVIC Set enable */
2058         offset += 0x80;
2059         /* fall through */
2060     case 0x180 ... 0x1bf: /* NVIC Clear enable */
2061         val = 0;
2062         startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */
2063 
2064         for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2065             if (s->vectors[startvec + i].enabled &&
2066                 (attrs.secure || s->itns[startvec + i])) {
2067                 val |= (1 << i);
2068             }
2069         }
2070         break;
2071     case 0x200 ... 0x23f: /* NVIC Set pend */
2072         offset += 0x80;
2073         /* fall through */
2074     case 0x280 ... 0x2bf: /* NVIC Clear pend */
2075         val = 0;
2076         startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
2077         for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2078             if (s->vectors[startvec + i].pending &&
2079                 (attrs.secure || s->itns[startvec + i])) {
2080                 val |= (1 << i);
2081             }
2082         }
2083         break;
2084     case 0x300 ... 0x33f: /* NVIC Active */
2085         val = 0;
2086 
2087         if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
2088             break;
2089         }
2090 
2091         startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
2092 
2093         for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2094             if (s->vectors[startvec + i].active &&
2095                 (attrs.secure || s->itns[startvec + i])) {
2096                 val |= (1 << i);
2097             }
2098         }
2099         break;
2100     case 0x400 ... 0x5ef: /* NVIC Priority */
2101         val = 0;
2102         startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
2103 
2104         for (i = 0; i < size && startvec + i < s->num_irq; i++) {
2105             if (attrs.secure || s->itns[startvec + i]) {
2106                 val |= s->vectors[startvec + i].prio << (8 * i);
2107             }
2108         }
2109         break;
2110     case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
2111         if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2112             val = 0;
2113             break;
2114         }
2115         /* fall through */
2116     case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
2117         val = 0;
2118         for (i = 0; i < size; i++) {
2119             unsigned hdlidx = (offset - 0xd14) + i;
2120             int sbank = shpr_bank(s, hdlidx, attrs);
2121 
2122             if (sbank < 0) {
2123                 continue;
2124             }
2125             val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
2126         }
2127         break;
2128     case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
2129         if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2130             val = 0;
2131             break;
2132         };
2133         /* The BFSR bits [15:8] are shared between security states
2134          * and we store them in the NS copy
2135          */
2136         val = s->cpu->env.v7m.cfsr[attrs.secure];
2137         val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
2138         val = extract32(val, (offset - 0xd28) * 8, size * 8);
2139         break;
2140     case 0xfe0 ... 0xfff: /* ID.  */
2141         if (offset & 3) {
2142             val = 0;
2143         } else {
2144             val = nvic_id[(offset - 0xfe0) >> 2];
2145         }
2146         break;
2147     default:
2148         if (size == 4) {
2149             val = nvic_readl(s, offset, attrs);
2150         } else {
2151             qemu_log_mask(LOG_GUEST_ERROR,
2152                           "NVIC: Bad read of size %d at offset 0x%x\n",
2153                           size, offset);
2154             val = 0;
2155         }
2156     }
2157 
2158     trace_nvic_sysreg_read(addr, val, size);
2159     *data = val;
2160     return MEMTX_OK;
2161 }
2162 
/*
 * MMIO write handler for the NVIC/System Control Space register block.
 * Registers that support byte/halfword access are decoded inline here;
 * any other word-sized access falls through to nvic_writel().  Returns
 * MEMTX_ERROR (BusFault) for disallowed unprivileged accesses.
 */
static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    unsigned setval = 0;

    trace_nvic_sysreg_write(addr, value, size);

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        /* NS accesses can only touch interrupts targeted at NS (s->itns) */
        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x300 ... 0x33f: /* NVIC Active */
        return MEMTX_OK; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        /* One priority byte per vector covered by this access */
        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            return MEMTX_OK;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int newprio = extract32(value, i * 8, 8);
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                /* RAZ/WI field for this access; ignore the byte */
                continue;
            }
            set_prio(s, hdlidx, sbank, newprio);
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            return MEMTX_OK;
        }
        /* All bits are W1C, so construct 32 bit value with 0s in
         * the parts not written by the access size
         */
        value <<= ((offset - 0xd28) * 8);

        s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
        if (attrs.secure) {
            /* The BFSR bits [15:8] are shared between security states
             * and we store them in the NS copy.
             */
            s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
        }
        return MEMTX_OK;
    }
    if (size == 4) {
        nvic_writel(s, offset, value, attrs);
        return MEMTX_OK;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; treat as RAZ/WI */
    return MEMTX_OK;
}
2270 
/* MMIO ops for the main NVIC/SCS register region */
static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2276 
2277 static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
2278                                         uint64_t value, unsigned size,
2279                                         MemTxAttrs attrs)
2280 {
2281     MemoryRegion *mr = opaque;
2282 
2283     if (attrs.secure) {
2284         /* S accesses to the alias act like NS accesses to the real region */
2285         attrs.secure = 0;
2286         return memory_region_dispatch_write(mr, addr, value, size, attrs);
2287     } else {
2288         /* NS attrs are RAZ/WI for privileged, and BusFault for user */
2289         if (attrs.user) {
2290             return MEMTX_ERROR;
2291         }
2292         return MEMTX_OK;
2293     }
2294 }
2295 
2296 static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
2297                                        uint64_t *data, unsigned size,
2298                                        MemTxAttrs attrs)
2299 {
2300     MemoryRegion *mr = opaque;
2301 
2302     if (attrs.secure) {
2303         /* S accesses to the alias act like NS accesses to the real region */
2304         attrs.secure = 0;
2305         return memory_region_dispatch_read(mr, addr, data, size, attrs);
2306     } else {
2307         /* NS attrs are RAZ/WI for privileged, and BusFault for user */
2308         if (attrs.user) {
2309             return MEMTX_ERROR;
2310         }
2311         *data = 0;
2312         return MEMTX_OK;
2313     }
2314 }
2315 
/* MMIO ops for the NS alias of the NVIC/SCS register region */
static const MemoryRegionOps nvic_sysreg_ns_ops = {
    .read_with_attrs = nvic_sysreg_ns_read,
    .write_with_attrs = nvic_sysreg_ns_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2321 
2322 static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
2323                                       uint64_t value, unsigned size,
2324                                       MemTxAttrs attrs)
2325 {
2326     NVICState *s = opaque;
2327     MemoryRegion *mr;
2328 
2329     /* Direct the access to the correct systick */
2330     mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2331     return memory_region_dispatch_write(mr, addr, value, size, attrs);
2332 }
2333 
2334 static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
2335                                      uint64_t *data, unsigned size,
2336                                      MemTxAttrs attrs)
2337 {
2338     NVICState *s = opaque;
2339     MemoryRegion *mr;
2340 
2341     /* Direct the access to the correct systick */
2342     mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2343     return memory_region_dispatch_read(mr, addr, data, size, attrs);
2344 }
2345 
/* MMIO ops routing systick accesses to the correct banked systick device */
static const MemoryRegionOps nvic_systick_ops = {
    .read_with_attrs = nvic_systick_read,
    .write_with_attrs = nvic_systick_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2351 
2352 static int nvic_post_load(void *opaque, int version_id)
2353 {
2354     NVICState *s = opaque;
2355     unsigned i;
2356     int resetprio;
2357 
2358     /* Check for out of range priority settings */
2359     resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2360 
2361     if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
2362         s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
2363         s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
2364         return 1;
2365     }
2366     for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
2367         if (s->vectors[i].prio & ~0xff) {
2368             return 1;
2369         }
2370     }
2371 
2372     nvic_recompute_state(s);
2373 
2374     return 0;
2375 }
2376 
/* Migration state for a single exception vector (VecInfo) */
static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};
2390 
/* Migration subsection predicate: secure-bank state is only sent when
 * the CPU implements the M-profile Security Extension.
 */
static bool nvic_security_needed(void *opaque)
{
    NVICState *s = opaque;

    return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
}
2397 
2398 static int nvic_security_post_load(void *opaque, int version_id)
2399 {
2400     NVICState *s = opaque;
2401     int i;
2402 
2403     /* Check for out of range priority settings */
2404     if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
2405         && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
2406         /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
2407          * if the CPU state has been migrated yet; a mismatch won't
2408          * cause the emulation to blow up, though.
2409          */
2410         return 1;
2411     }
2412     for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
2413         if (s->sec_vectors[i].prio & ~0xff) {
2414             return 1;
2415         }
2416     }
2417     return 0;
2418 }
2419 
/* Migration subsection for the secure bank of NVIC state (sec_vectors,
 * secure PRIGROUP, and the interrupt-targets-nonsecure bits).
 */
static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};
2434 
/* Top-level migration state for the NVIC device */
static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_nvic_security,
        NULL
    }
};
2451 
/* qdev properties for the NVIC device */
static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};
2457 
2458 static void armv7m_nvic_reset(DeviceState *dev)
2459 {
2460     int resetprio;
2461     NVICState *s = NVIC(dev);
2462 
2463     memset(s->vectors, 0, sizeof(s->vectors));
2464     memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
2465     s->prigroup[M_REG_NS] = 0;
2466     s->prigroup[M_REG_S] = 0;
2467 
2468     s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
2469     /* MEM, BUS, and USAGE are enabled through
2470      * the System Handler Control register
2471      */
2472     s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
2473     s->vectors[ARMV7M_EXCP_DEBUG].enabled = 1;
2474     s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
2475     s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
2476 
2477     resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2478     s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
2479     s->vectors[ARMV7M_EXCP_NMI].prio = -2;
2480     s->vectors[ARMV7M_EXCP_HARD].prio = -1;
2481 
2482     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
2483         s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
2484         s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
2485         s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
2486         s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
2487 
2488         /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
2489         s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
2490         /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
2491         s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
2492     } else {
2493         s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
2494     }
2495 
2496     /* Strictly speaking the reset handler should be enabled.
2497      * However, we don't simulate soft resets through the NVIC,
2498      * and the reset vector should never be pended.
2499      * So we leave it disabled to catch logic errors.
2500      */
2501 
2502     s->exception_prio = NVIC_NOEXC_PRIO;
2503     s->vectpending = 0;
2504     s->vectpending_is_s_banked = false;
2505     s->vectpending_prio = NVIC_NOEXC_PRIO;
2506 
2507     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
2508         memset(s->itns, 0, sizeof(s->itns));
2509     } else {
2510         /* This state is constant and not guest accessible in a non-security
2511          * NVIC; we set the bits to true to avoid having to do a feature
2512          * bit check in the NVIC enable/pend/etc register accessors.
2513          */
2514         int i;
2515 
2516         for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
2517             s->itns[i] = true;
2518         }
2519     }
2520 }
2521 
2522 static void nvic_systick_trigger(void *opaque, int n, int level)
2523 {
2524     NVICState *s = opaque;
2525 
2526     if (level) {
2527         /* SysTick just asked us to pend its exception.
2528          * (This is different from an external interrupt line's
2529          * behaviour.)
2530          * n == 0 : NonSecure systick
2531          * n == 1 : Secure systick
2532          */
2533         armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
2534     }
2535 }
2536 
2537 static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
2538 {
2539     NVICState *s = NVIC(dev);
2540     Error *err = NULL;
2541     int regionlen;
2542 
2543     /* The armv7m container object will have set our CPU pointer */
2544     if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
2545         error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
2546         return;
2547     }
2548 
2549     if (s->num_irq > NVIC_MAX_IRQ) {
2550         error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
2551         return;
2552     }
2553 
2554     qdev_init_gpio_in(dev, set_irq_level, s->num_irq);
2555 
2556     /* include space for internal exception vectors */
2557     s->num_irq += NVIC_FIRST_IRQ;
2558 
2559     s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;
2560 
2561     object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true,
2562                              "realized", &err);
2563     if (err != NULL) {
2564         error_propagate(errp, err);
2565         return;
2566     }
2567     sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
2568                        qdev_get_gpio_in_named(dev, "systick-trigger",
2569                                               M_REG_NS));
2570 
2571     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
2572         /* We couldn't init the secure systick device in instance_init
2573          * as we didn't know then if the CPU had the security extensions;
2574          * so we have to do it here.
2575          */
2576         object_initialize(&s->systick[M_REG_S], sizeof(s->systick[M_REG_S]),
2577                           TYPE_SYSTICK);
2578         qdev_set_parent_bus(DEVICE(&s->systick[M_REG_S]), sysbus_get_default());
2579 
2580         object_property_set_bool(OBJECT(&s->systick[M_REG_S]), true,
2581                                  "realized", &err);
2582         if (err != NULL) {
2583             error_propagate(errp, err);
2584             return;
2585         }
2586         sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
2587                            qdev_get_gpio_in_named(dev, "systick-trigger",
2588                                                   M_REG_S));
2589     }
2590 
2591     /* The NVIC and System Control Space (SCS) starts at 0xe000e000
2592      * and looks like this:
2593      *  0x004 - ICTR
2594      *  0x010 - 0xff - systick
2595      *  0x100..0x7ec - NVIC
2596      *  0x7f0..0xcff - Reserved
2597      *  0xd00..0xd3c - SCS registers
2598      *  0xd40..0xeff - Reserved or Not implemented
2599      *  0xf00 - STIR
2600      *
2601      * Some registers within this space are banked between security states.
2602      * In v8M there is a second range 0xe002e000..0xe002efff which is the
2603      * NonSecure alias SCS; secure accesses to this behave like NS accesses
2604      * to the main SCS range, and non-secure accesses (including when
2605      * the security extension is not implemented) are RAZ/WI.
2606      * Note that both the main SCS range and the alias range are defined
2607      * to be exempt from memory attribution (R_BLJT) and so the memory
2608      * transaction attribute always matches the current CPU security
2609      * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops
2610      * wrappers we change attrs.secure to indicate the NS access; so
2611      * generally code determining which banked register to use should
2612      * use attrs.secure; code determining actual behaviour of the system
2613      * should use env->v7m.secure.
2614      */
2615     regionlen = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? 0x21000 : 0x1000;
2616     memory_region_init(&s->container, OBJECT(s), "nvic", regionlen);
2617     /* The system register region goes at the bottom of the priority
2618      * stack as it covers the whole page.
2619      */
2620     memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
2621                           "nvic_sysregs", 0x1000);
2622     memory_region_add_subregion(&s->container, 0, &s->sysregmem);
2623 
2624     memory_region_init_io(&s->systickmem, OBJECT(s),
2625                           &nvic_systick_ops, s,
2626                           "nvic_systick", 0xe0);
2627 
2628     memory_region_add_subregion_overlap(&s->container, 0x10,
2629                                         &s->systickmem, 1);
2630 
2631     if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
2632         memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
2633                               &nvic_sysreg_ns_ops, &s->sysregmem,
2634                               "nvic_sysregs_ns", 0x1000);
2635         memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
2636         memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
2637                               &nvic_sysreg_ns_ops, &s->systickmem,
2638                               "nvic_systick_ns", 0xe0);
2639         memory_region_add_subregion_overlap(&s->container, 0x20010,
2640                                             &s->systick_ns_mem, 1);
2641     }
2642 
2643     sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
2644 }
2645 
static void armv7m_nvic_instance_init(Object *obj)
{
    /* Create the always-present NonSecure SysTick child object and set
     * up our external IRQ/GPIO connections. (The earlier comment here
     * about overriding a num-irq default in "the GICState struct" was
     * stale copy-paste: the num-irq default lives in props_nvic and is
     * not touched by this function.)
     */
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    sysbus_init_child_obj(obj, "systick-reg-ns", &nvic->systick[M_REG_NS],
                          sizeof(nvic->systick[M_REG_NS]), TYPE_SYSTICK);
    /* We can't initialize the secure systick here, as we don't know
     * yet if we need it.
     */

    /* Exception output to the CPU, SYSRESETREQ out, and the inbound
     * "systick-trigger" (one per security bank) and "NMI" lines.
     */
    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
                            M_REG_NUM_BANKS);
    qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
}
2670 
2671 static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
2672 {
2673     DeviceClass *dc = DEVICE_CLASS(klass);
2674 
2675     dc->vmsd  = &vmstate_nvic;
2676     dc->props = props_nvic;
2677     dc->reset = armv7m_nvic_reset;
2678     dc->realize = armv7m_nvic_realize;
2679 }
2680 
/* QOM type registration record for the NVIC sysbus device. */
static const TypeInfo armv7m_nvic_info = {
    .name          = TYPE_NVIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init    = armv7m_nvic_class_init,
    .class_size    = sizeof(SysBusDeviceClass),
};
2689 
/* Register the NVIC type with QOM at module init time. */
static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)
2696