xref: /openbmc/qemu/hw/intc/armv7m_nvic.c (revision 8e6fe6b8)
1 /*
2  * ARM Nested Vectored Interrupt Controller
3  *
4  * Copyright (c) 2006-2007 CodeSourcery.
5  * Written by Paul Brook
6  *
7  * This code is licensed under the GPL.
8  *
9  * The ARMv7M System controller is fairly tightly tied in with the
10  * NVIC.  Much of that is also implemented here.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "cpu.h"
16 #include "hw/sysbus.h"
17 #include "qemu/timer.h"
18 #include "hw/intc/armv7m_nvic.h"
19 #include "target/arm/cpu.h"
20 #include "exec/exec-all.h"
21 #include "qemu/log.h"
22 #include "qemu/module.h"
23 #include "trace.h"
24 
25 /* IRQ number counting:
26  *
27  * the num-irq property counts the number of external IRQ lines
28  *
29  * NVICState::num_irq counts the total number of exceptions
30  * (external IRQs, the 15 internal exceptions including reset,
31  * and one for the unused exception number 0).
32  *
33  * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
34  *
35  * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
36  *
37  * Iterating through all exceptions should typically be done with
38  * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
39  *
40  * The external qemu_irq lines are the NVIC's external IRQ lines,
41  * so line 0 is exception 16.
42  *
43  * In the terminology of the architecture manual, "interrupts" are
44  * a subcategory of exception referring to the external interrupts
45  * (which are exception numbers NVIC_FIRST_IRQ and upward).
46  * For historical reasons QEMU tends to use "interrupt" and
47  * "exception" more or less interchangeably.
48  */
49 #define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
50 #define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
51 
52 /* Effective running priority of the CPU when no exception is active
53  * (higher than the highest possible priority value)
54  */
55 #define NVIC_NOEXC_PRIO 0x100
56 /* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
57 #define NVIC_NS_PRIO_LIMIT 0x80
58 
/*
 * ID register byte values. Presumably these are the CoreSight-style
 * peripheral/component ID bytes returned for reads at the top of the
 * System Control Space register map (the read path is outside this
 * view) -- TODO confirm against the register read handler.
 */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
62 
63 static int nvic_pending_prio(NVICState *s)
64 {
65     /* return the group priority of the current pending interrupt,
66      * or NVIC_NOEXC_PRIO if no interrupt is pending
67      */
68     return s->vectpending_prio;
69 }
70 
71 /* Return the value of the ISCR RETTOBASE bit:
72  * 1 if there is exactly one active exception
73  * 0 if there is more than one active exception
74  * UNKNOWN if there are no active exceptions (we choose 1,
75  * which matches the choice Cortex-M3 is documented as making).
76  *
77  * NB: some versions of the documentation talk about this
78  * counting "active exceptions other than the one shown by IPSR";
79  * this is only different in the obscure corner case where guest
80  * code has manually deactivated an exception and is about
81  * to fail an exception-return integrity check. The definition
82  * above is the one from the v8M ARM ARM and is also in line
83  * with the behaviour documented for the Cortex-M3.
84  */
85 static bool nvic_rettobase(NVICState *s)
86 {
87     int irq, nhand = 0;
88     bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
89 
90     for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
91         if (s->vectors[irq].active ||
92             (check_sec && irq < NVIC_INTERNAL_VECTORS &&
93              s->sec_vectors[irq].active)) {
94             nhand++;
95             if (nhand == 2) {
96                 return 0;
97             }
98         }
99     }
100 
101     return 1;
102 }
103 
104 /* Return the value of the ISCR ISRPENDING bit:
105  * 1 if an external interrupt is pending
106  * 0 if no external interrupt is pending
107  */
108 static bool nvic_isrpending(NVICState *s)
109 {
110     int irq;
111 
112     /* We can shortcut if the highest priority pending interrupt
113      * happens to be external or if there is nothing pending.
114      */
115     if (s->vectpending > NVIC_FIRST_IRQ) {
116         return true;
117     }
118     if (s->vectpending == 0) {
119         return false;
120     }
121 
122     for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
123         if (s->vectors[irq].pending) {
124             return true;
125         }
126     }
127     return false;
128 }
129 
130 static bool exc_is_banked(int exc)
131 {
132     /* Return true if this is one of the limited set of exceptions which
133      * are banked (and thus have state in sec_vectors[])
134      */
135     return exc == ARMV7M_EXCP_HARD ||
136         exc == ARMV7M_EXCP_MEM ||
137         exc == ARMV7M_EXCP_USAGE ||
138         exc == ARMV7M_EXCP_SVC ||
139         exc == ARMV7M_EXCP_PENDSV ||
140         exc == ARMV7M_EXCP_SYSTICK;
141 }
142 
143 /* Return a mask word which clears the subpriority bits from
144  * a priority value for an M-profile exception, leaving only
145  * the group priority.
146  */
147 static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
148 {
149     return ~0U << (s->prigroup[secure] + 1);
150 }
151 
152 static bool exc_targets_secure(NVICState *s, int exc)
153 {
154     /* Return true if this non-banked exception targets Secure state. */
155     if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
156         return false;
157     }
158 
159     if (exc >= NVIC_FIRST_IRQ) {
160         return !s->itns[exc];
161     }
162 
163     /* Function shouldn't be called for banked exceptions. */
164     assert(!exc_is_banked(exc));
165 
166     switch (exc) {
167     case ARMV7M_EXCP_NMI:
168     case ARMV7M_EXCP_BUS:
169         return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
170     case ARMV7M_EXCP_SECURE:
171         return true;
172     case ARMV7M_EXCP_DEBUG:
173         /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
174         return false;
175     default:
176         /* reset, and reserved (unused) low exception numbers.
177          * We'll get called by code that loops through all the exception
178          * numbers, but it doesn't matter what we return here as these
179          * non-existent exceptions will never be pended or active.
180          */
181         return true;
182     }
183 }
184 
185 static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
186 {
187     /* Return the group priority for this exception, given its raw
188      * (group-and-subgroup) priority value and whether it is targeting
189      * secure state or not.
190      */
191     if (rawprio < 0) {
192         return rawprio;
193     }
194     rawprio &= nvic_gprio_mask(s, targets_secure);
195     /* AIRCR.PRIS causes us to squash all NS priorities into the
196      * lower half of the total range
197      */
198     if (!targets_secure &&
199         (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
200         rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
201     }
202     return rawprio;
203 }
204 
/* Recompute vectpending and exception_prio for a CPU which implements
 * the Security extension
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;
    int pend_subprio = 0;

    /* R_CQRV: precedence is by:
     *  - lowest group priority; if both the same then
     *  - lowest subpriority; if both the same then
     *  - lowest exception number; if both the same (ie banked) then
     *  - secure exception takes precedence
     * Compare pseudocode RawExecutionPriority.
     * Annoyingly, now we have two prigroup values (for S and NS)
     * we can't do the loop comparison on raw priority values.
     */
    for (i = 1; i < s->num_irq; i++) {
        /* Scan the Secure bank before the NonSecure bank for each
         * exception number: together with the strict '<' comparisons
         * below, this implements the "secure takes precedence on a
         * total tie" rule.
         */
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio, subprio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    /* Only banked exceptions have a Secure-bank entry */
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            prio = exc_group_prio(s, vec->prio, targets_secure);
            subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
            /* The subpriority tie-break only applies to configurable
             * (non-negative) priorities, hence the prio >= 0 guard.
             */
            if (vec->enabled && vec->pending &&
                ((prio < pend_prio) ||
                 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
                pend_prio = prio;
                pend_subprio = subprio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}
269 
270 /* Recompute vectpending and exception_prio */
271 static void nvic_recompute_state(NVICState *s)
272 {
273     int i;
274     int pend_prio = NVIC_NOEXC_PRIO;
275     int active_prio = NVIC_NOEXC_PRIO;
276     int pend_irq = 0;
277 
278     /* In theory we could write one function that handled both
279      * the "security extension present" and "not present"; however
280      * the security related changes significantly complicate the
281      * recomputation just by themselves and mixing both cases together
282      * would be even worse, so we retain a separate non-secure-only
283      * version for CPUs which don't implement the security extension.
284      */
285     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
286         nvic_recompute_state_secure(s);
287         return;
288     }
289 
290     for (i = 1; i < s->num_irq; i++) {
291         VecInfo *vec = &s->vectors[i];
292 
293         if (vec->enabled && vec->pending && vec->prio < pend_prio) {
294             pend_prio = vec->prio;
295             pend_irq = i;
296         }
297         if (vec->active && vec->prio < active_prio) {
298             active_prio = vec->prio;
299         }
300     }
301 
302     if (active_prio > 0) {
303         active_prio &= nvic_gprio_mask(s, false);
304     }
305 
306     if (pend_prio > 0) {
307         pend_prio &= nvic_gprio_mask(s, false);
308     }
309 
310     s->vectpending = pend_irq;
311     s->vectpending_prio = pend_prio;
312     s->exception_prio = active_prio;
313 
314     trace_nvic_recompute_state(s->vectpending,
315                                s->vectpending_prio,
316                                s->exception_prio);
317 }
318 
/* Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This is a value between -3 (Secure HardFault priority when
 * AIRCR.BFHFNMINS is set) and NVIC_NOEXC_PRIO.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    /* Each of the BASEPRI, PRIMASK and FAULTMASK registers (in both
     * security states) can only lower 'running'; we apply them in turn
     * and keep the minimum. Where a NonSecure mask is applied with
     * AIRCR.PRIS set, its effect is limited to NVIC_NS_PRIO_LIMIT.
     */
    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            /* NS faults can be taken at NMI-adjacent priority */
            running = -1;
        } else {
            /* BFHFNMINS clear: FAULTMASK_NS behaves like PRIMASK_NS */
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}
374 
375 bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
376 {
377     /* Return true if the requested execution priority is negative
378      * for the specified security state, ie that security state
379      * has an active NMI or HardFault or has set its FAULTMASK.
380      * Note that this is not the same as whether the execution
381      * priority is actually negative (for instance AIRCR.PRIS may
382      * mean we don't allow FAULTMASK_NS to actually make the execution
383      * priority negative). Compare pseudocode IsReqExcPriNeg().
384      */
385     NVICState *s = opaque;
386 
387     if (s->cpu->env.v7m.faultmask[secure]) {
388         return true;
389     }
390 
391     if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
392         s->vectors[ARMV7M_EXCP_HARD].active) {
393         return true;
394     }
395 
396     if (s->vectors[ARMV7M_EXCP_NMI].active &&
397         exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
398         return true;
399     }
400 
401     return false;
402 }
403 
404 bool armv7m_nvic_can_take_pending_exception(void *opaque)
405 {
406     NVICState *s = opaque;
407 
408     return nvic_exec_prio(s) > nvic_pending_prio(s);
409 }
410 
411 int armv7m_nvic_raw_execution_priority(void *opaque)
412 {
413     NVICState *s = opaque;
414 
415     return s->exception_prio;
416 }
417 
418 /* caller must call nvic_irq_update() after this.
419  * secure indicates the bank to use for banked exceptions (we assert if
420  * we are passed secure=true for a non-banked exception).
421  */
422 static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
423 {
424     assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
425     assert(irq < s->num_irq);
426 
427     prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);
428 
429     if (secure) {
430         assert(exc_is_banked(irq));
431         s->sec_vectors[irq].prio = prio;
432     } else {
433         s->vectors[irq].prio = prio;
434     }
435 
436     trace_nvic_set_prio(irq, secure, prio);
437 }
438 
439 /* Return the current raw priority register value.
440  * secure indicates the bank to use for banked exceptions (we assert if
441  * we are passed secure=true for a non-banked exception).
442  */
443 static int get_prio(NVICState *s, unsigned irq, bool secure)
444 {
445     assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
446     assert(irq < s->num_irq);
447 
448     if (secure) {
449         assert(exc_is_banked(irq));
450         return s->sec_vectors[irq].prio;
451     } else {
452         return s->vectors[irq].prio;
453     }
454 }
455 
456 /* Recompute state and assert irq line accordingly.
457  * Must be called after changes to:
458  *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
459  *  prigroup
460  */
461 static void nvic_irq_update(NVICState *s)
462 {
463     int lvl;
464     int pend_prio;
465 
466     nvic_recompute_state(s);
467     pend_prio = nvic_pending_prio(s);
468 
469     /* Raise NVIC output if this IRQ would be taken, except that we
470      * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
471      * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
472      * to those CPU registers don't cause us to recalculate the NVIC
473      * pending info.
474      */
475     lvl = (pend_prio < s->exception_prio);
476     trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
477     qemu_set_irq(s->excpout, lvl);
478 }
479 
480 /**
481  * armv7m_nvic_clear_pending: mark the specified exception as not pending
482  * @opaque: the NVIC
483  * @irq: the exception number to mark as not pending
484  * @secure: false for non-banked exceptions or for the nonsecure
485  * version of a banked exception, true for the secure version of a banked
486  * exception.
487  *
488  * Marks the specified exception as not pending. Note that we will assert()
489  * if @secure is true and @irq does not specify one of the fixed set
490  * of architecturally banked exceptions.
491  */
492 static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
493 {
494     NVICState *s = (NVICState *)opaque;
495     VecInfo *vec;
496 
497     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
498 
499     if (secure) {
500         assert(exc_is_banked(irq));
501         vec = &s->sec_vectors[irq];
502     } else {
503         vec = &s->vectors[irq];
504     }
505     trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
506     if (vec->pending) {
507         vec->pending = 0;
508         nvic_irq_update(s);
509     }
510 }
511 
static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    /* Pend an exception, including possibly escalating it to HardFault.
     *
     * This function handles both "normal" pending of interrupts and
     * exceptions, and also derived exceptions (ones which occur as
     * a result of trying to take some other exception).
     *
     * If derived == true, the caller guarantees that we are part way through
     * trying to take an exception (but have not yet called
     * armv7m_nvic_acknowledge_irq() to make it active), and so:
     *  - s->vectpending is the "original exception" we were trying to take
     *  - irq is the "derived exception"
     *  - nvic_exec_prio(s) gives the priority before exception entry
     * Here we handle the prioritization logic which the pseudocode puts
     * in the DerivedLateArrival() function.
     */

    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    /* secure=true is only meaningful for banked exceptions */
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /* DebugMonitorFault, but its priority is lower than the
             * preempted exception priority: just ignore it.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /* If this is a terminal exception (one which means we cannot
             * take the original exception, like a failure to read its
             * vector table entry), then we must take the derived exception.
             * If the derived exception can't take priority over the
             * original exception, then we go into Lockup.
             *
             * For QEMU, we rely on the fact that a derived exception is
             * terminal if and only if it's reported to us as HardFault,
             * which saves having to have an extra argument is_terminal
             * that we'd only use in one place.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }
        /* We now continue with the same code as for a normal pending
         * exception, which will cause us to pend the derived exception.
         * We'll then take either the original or the derived exception
         * based on which is higher priority by the usual mechanism
         * for selecting the highest priority pending interrupt.
         */
    }

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {

            /* We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HF for
             * the target security state of the original exception; otherwise
             * we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /* We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        /* Pending state changed: recompute and update the irq line */
        nvic_irq_update(s);
    }
}
651 
/*
 * Pend an exception via the normal (non-derived) path; see
 * do_armv7m_nvic_set_pending() for the semantics of @irq and @secure.
 */
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}
656 
/*
 * Pend a derived exception (one raised part-way through taking another
 * exception); see do_armv7m_nvic_set_pending() for details.
 */
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}
661 
void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
{
    /*
     * Pend an exception during lazy FP stacking. This differs
     * from the usual exception pending because the logic for
     * whether we should escalate depends on the saved context
     * in the FPCCR register, not on the current state of the CPU/NVIC.
     *
     * @irq is the exception number; @secure selects the bank for banked
     * exceptions (asserted otherwise), as for armv7m_nvic_set_pending().
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;
    bool escalate = false;
    /*
     * We will only look at bits in fpccr if this is a banked exception
     * (in which case 'secure' tells us whether it is the S or NS version).
     * All the bits for the non-banked exceptions are in fpccr_s.
     */
    uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
    uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    /* The per-exception FPCCR *RDY bits record whether the context the
     * FP state belongs to could take this exception; a clear bit means
     * the exception must be escalated (or, for DebugMonitor, dropped).
     */
    switch (irq) {
    case ARMV7M_EXCP_DEBUG:
        if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
            /* Ignore DebugMonitor exception */
            return;
        }
        break;
    case ARMV7M_EXCP_MEM:
        escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
        break;
    case ARMV7M_EXCP_USAGE:
        escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
        break;
    case ARMV7M_EXCP_BUS:
        escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
        break;
    case ARMV7M_EXCP_SECURE:
        escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
        break;
    default:
        g_assert_not_reached();
    }

    if (escalate) {
        /*
         * Escalate to HardFault: faults that initially targeted Secure
         * continue to do so, even if HF normally targets NonSecure.
         */
        irq = ARMV7M_EXCP_HARD;
        if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
            (targets_secure ||
             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
            vec = &s->sec_vectors[irq];
        } else {
            vec = &s->vectors[irq];
        }
    }

    if (!vec->enabled ||
        nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
        if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
            /*
             * We want to escalate to HardFault but the context the
             * FP state belongs to prevents the exception pre-empting.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't escalate to HardFault during "
                      "lazy FP register stacking\n");
        }
    }

    if (escalate) {
        /* HF may be banked but there is only one shared HFSR */
        s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    if (!vec->pending) {
        vec->pending = 1;
        /*
         * We do not call nvic_irq_update(), because we know our caller
         * is going to handle causing us to take the exception by
         * raising EXCP_LAZYFP, so raising the IRQ line would be
         * pointless extra work. We just need to recompute the
         * priorities so that armv7m_nvic_can_take_pending_exception()
         * returns the right answer.
         */
        nvic_recompute_state(s);
    }
}
757 
758 /* Make pending IRQ active.  */
759 void armv7m_nvic_acknowledge_irq(void *opaque)
760 {
761     NVICState *s = (NVICState *)opaque;
762     CPUARMState *env = &s->cpu->env;
763     const int pending = s->vectpending;
764     const int running = nvic_exec_prio(s);
765     VecInfo *vec;
766 
767     assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
768 
769     if (s->vectpending_is_s_banked) {
770         vec = &s->sec_vectors[pending];
771     } else {
772         vec = &s->vectors[pending];
773     }
774 
775     assert(vec->enabled);
776     assert(vec->pending);
777 
778     assert(s->vectpending_prio < running);
779 
780     trace_nvic_acknowledge_irq(pending, s->vectpending_prio);
781 
782     vec->active = 1;
783     vec->pending = 0;
784 
785     write_v7m_exception(env, s->vectpending);
786 
787     nvic_irq_update(s);
788 }
789 
790 void armv7m_nvic_get_pending_irq_info(void *opaque,
791                                       int *pirq, bool *ptargets_secure)
792 {
793     NVICState *s = (NVICState *)opaque;
794     const int pending = s->vectpending;
795     bool targets_secure;
796 
797     assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
798 
799     if (s->vectpending_is_s_banked) {
800         targets_secure = true;
801     } else {
802         targets_secure = !exc_is_banked(pending) &&
803             exc_targets_secure(s, pending);
804     }
805 
806     trace_nvic_get_pending_irq_info(pending, targets_secure);
807 
808     *ptargets_secure = targets_secure;
809     *pirq = pending;
810 }
811 
/*
 * Complete (deactivate) an exception at exception-return time.
 * @irq/@secure identify the exception as for armv7m_nvic_set_pending().
 * Returns -1 if the exception was not active (an illegal exception
 * return), otherwise the pre-deactivation RETTOBASE value (see
 * nvic_rettobase()).
 */
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;
    int ret;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    if (secure && exc_is_banked(irq)) {
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }

    trace_nvic_complete_irq(irq, secure);

    if (!vec->active) {
        /* Tell the caller this was an illegal exception return */
        return -1;
    }

    /* Compute RETTOBASE before we clear this exception's active bit */
    ret = nvic_rettobase(s);

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}
848 
849 bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
850 {
851     /*
852      * Return whether an exception is "ready", i.e. it is enabled and is
853      * configured at a priority which would allow it to interrupt the
854      * current execution priority.
855      *
856      * irq and secure have the same semantics as for armv7m_nvic_set_pending():
857      * for non-banked exceptions secure is always false; for banked exceptions
858      * it indicates which of the exceptions is required.
859      */
860     NVICState *s = (NVICState *)opaque;
861     bool banked = exc_is_banked(irq);
862     VecInfo *vec;
863     int running = nvic_exec_prio(s);
864 
865     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
866     assert(!secure || banked);
867 
868     /*
869      * HardFault is an odd special case: we always check against -1,
870      * even if we're secure and HardFault has priority -3; we never
871      * need to check for enabled state.
872      */
873     if (irq == ARMV7M_EXCP_HARD) {
874         return running > -1;
875     }
876 
877     vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
878 
879     return vec->enabled &&
880         exc_group_prio(s, vec->prio, secure) < running;
881 }
882 
883 /* callback when external interrupt line is changed */
884 static void set_irq_level(void *opaque, int n, int level)
885 {
886     NVICState *s = opaque;
887     VecInfo *vec;
888 
889     n += NVIC_FIRST_IRQ;
890 
891     assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
892 
893     trace_nvic_set_irq_level(n, level);
894 
895     /* The pending status of an external interrupt is
896      * latched on rising edge and exception handler return.
897      *
898      * Pulsing the IRQ will always run the handler
899      * once, and the handler will re-run until the
900      * level is low when the handler completes.
901      */
902     vec = &s->vectors[n];
903     if (level != vec->level) {
904         vec->level = level;
905         if (level) {
906             armv7m_nvic_set_pending(s, n, false);
907         }
908     }
909 }
910 
911 /* callback when external NMI line is changed */
912 static void nvic_nmi_trigger(void *opaque, int n, int level)
913 {
914     NVICState *s = opaque;
915 
916     trace_nvic_set_nmi_level(level);
917 
918     /*
919      * The architecture doesn't specify whether NMI should share
920      * the normal-interrupt behaviour of being resampled on
921      * exception handler return. We choose not to, so just
922      * set NMI pending here and don't track the current level.
923      */
924     if (level) {
925         armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
926     }
927 }
928 
929 static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
930 {
931     ARMCPU *cpu = s->cpu;
932     uint32_t val;
933 
934     switch (offset) {
935     case 4: /* Interrupt Control Type.  */
936         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
937             goto bad_offset;
938         }
939         return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
940     case 0xc: /* CPPWR */
941         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
942             goto bad_offset;
943         }
944         /* We make the IMPDEF choice that nothing can ever go into a
945          * non-retentive power state, which allows us to RAZ/WI this.
946          */
947         return 0;
948     case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
949     {
950         int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
951         int i;
952 
953         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
954             goto bad_offset;
955         }
956         if (!attrs.secure) {
957             return 0;
958         }
959         val = 0;
960         for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
961             if (s->itns[startvec + i]) {
962                 val |= (1 << i);
963             }
964         }
965         return val;
966     }
967     case 0xd00: /* CPUID Base.  */
968         return cpu->midr;
969     case 0xd04: /* Interrupt Control State (ICSR) */
970         /* VECTACTIVE */
971         val = cpu->env.v7m.exception;
972         /* VECTPENDING */
973         val |= (s->vectpending & 0xff) << 12;
974         /* ISRPENDING - set if any external IRQ is pending */
975         if (nvic_isrpending(s)) {
976             val |= (1 << 22);
977         }
978         /* RETTOBASE - set if only one handler is active */
979         if (nvic_rettobase(s)) {
980             val |= (1 << 11);
981         }
982         if (attrs.secure) {
983             /* PENDSTSET */
984             if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
985                 val |= (1 << 26);
986             }
987             /* PENDSVSET */
988             if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
989                 val |= (1 << 28);
990             }
991         } else {
992             /* PENDSTSET */
993             if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
994                 val |= (1 << 26);
995             }
996             /* PENDSVSET */
997             if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
998                 val |= (1 << 28);
999             }
1000         }
1001         /* NMIPENDSET */
1002         if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
1003             && s->vectors[ARMV7M_EXCP_NMI].pending) {
1004             val |= (1 << 31);
1005         }
1006         /* ISRPREEMPT: RES0 when halting debug not implemented */
1007         /* STTNS: RES0 for the Main Extension */
1008         return val;
1009     case 0xd08: /* Vector Table Offset.  */
1010         return cpu->env.v7m.vecbase[attrs.secure];
1011     case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1012         val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
1013         if (attrs.secure) {
1014             /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
1015             val |= cpu->env.v7m.aircr;
1016         } else {
1017             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1018                 /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
1019                  * security isn't supported then BFHFNMINS is RAO (and
1020                  * the bit in env.v7m.aircr is always set).
1021                  */
1022                 val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
1023             }
1024         }
1025         return val;
1026     case 0xd10: /* System Control.  */
1027         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1028             goto bad_offset;
1029         }
1030         return cpu->env.v7m.scr[attrs.secure];
1031     case 0xd14: /* Configuration Control.  */
1032         /* The BFHFNMIGN bit is the only non-banked bit; we
1033          * keep it in the non-secure copy of the register.
1034          */
1035         val = cpu->env.v7m.ccr[attrs.secure];
1036         val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
1037         return val;
1038     case 0xd24: /* System Handler Control and State (SHCSR) */
1039         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1040             goto bad_offset;
1041         }
1042         val = 0;
1043         if (attrs.secure) {
1044             if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
1045                 val |= (1 << 0);
1046             }
1047             if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
1048                 val |= (1 << 2);
1049             }
1050             if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
1051                 val |= (1 << 3);
1052             }
1053             if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
1054                 val |= (1 << 7);
1055             }
1056             if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
1057                 val |= (1 << 10);
1058             }
1059             if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
1060                 val |= (1 << 11);
1061             }
1062             if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
1063                 val |= (1 << 12);
1064             }
1065             if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
1066                 val |= (1 << 13);
1067             }
1068             if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
1069                 val |= (1 << 15);
1070             }
1071             if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
1072                 val |= (1 << 16);
1073             }
1074             if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
1075                 val |= (1 << 18);
1076             }
1077             if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
1078                 val |= (1 << 21);
1079             }
1080             /* SecureFault is not banked but is always RAZ/WI to NS */
1081             if (s->vectors[ARMV7M_EXCP_SECURE].active) {
1082                 val |= (1 << 4);
1083             }
1084             if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
1085                 val |= (1 << 19);
1086             }
1087             if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
1088                 val |= (1 << 20);
1089             }
1090         } else {
1091             if (s->vectors[ARMV7M_EXCP_MEM].active) {
1092                 val |= (1 << 0);
1093             }
1094             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1095                 /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
1096                 if (s->vectors[ARMV7M_EXCP_HARD].active) {
1097                     val |= (1 << 2);
1098                 }
1099                 if (s->vectors[ARMV7M_EXCP_HARD].pending) {
1100                     val |= (1 << 21);
1101                 }
1102             }
1103             if (s->vectors[ARMV7M_EXCP_USAGE].active) {
1104                 val |= (1 << 3);
1105             }
1106             if (s->vectors[ARMV7M_EXCP_SVC].active) {
1107                 val |= (1 << 7);
1108             }
1109             if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
1110                 val |= (1 << 10);
1111             }
1112             if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
1113                 val |= (1 << 11);
1114             }
1115             if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
1116                 val |= (1 << 12);
1117             }
1118             if (s->vectors[ARMV7M_EXCP_MEM].pending) {
1119                 val |= (1 << 13);
1120             }
1121             if (s->vectors[ARMV7M_EXCP_SVC].pending) {
1122                 val |= (1 << 15);
1123             }
1124             if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
1125                 val |= (1 << 16);
1126             }
1127             if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
1128                 val |= (1 << 18);
1129             }
1130         }
1131         if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1132             if (s->vectors[ARMV7M_EXCP_BUS].active) {
1133                 val |= (1 << 1);
1134             }
1135             if (s->vectors[ARMV7M_EXCP_BUS].pending) {
1136                 val |= (1 << 14);
1137             }
1138             if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
1139                 val |= (1 << 17);
1140             }
1141             if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
1142                 s->vectors[ARMV7M_EXCP_NMI].active) {
1143                 /* NMIACT is not present in v7M */
1144                 val |= (1 << 5);
1145             }
1146         }
1147 
1148         /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1149         if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
1150             val |= (1 << 8);
1151         }
1152         return val;
1153     case 0xd2c: /* Hard Fault Status.  */
1154         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1155             goto bad_offset;
1156         }
1157         return cpu->env.v7m.hfsr;
1158     case 0xd30: /* Debug Fault Status.  */
1159         return cpu->env.v7m.dfsr;
1160     case 0xd34: /* MMFAR MemManage Fault Address */
1161         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1162             goto bad_offset;
1163         }
1164         return cpu->env.v7m.mmfar[attrs.secure];
1165     case 0xd38: /* Bus Fault Address.  */
1166         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1167             goto bad_offset;
1168         }
1169         if (!attrs.secure &&
1170             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1171             return 0;
1172         }
1173         return cpu->env.v7m.bfar;
1174     case 0xd3c: /* Aux Fault Status.  */
1175         /* TODO: Implement fault status registers.  */
1176         qemu_log_mask(LOG_UNIMP,
1177                       "Aux Fault status registers unimplemented\n");
1178         return 0;
1179     case 0xd40: /* PFR0.  */
1180         return cpu->id_pfr0;
1181     case 0xd44: /* PFR1.  */
1182         return cpu->id_pfr1;
1183     case 0xd48: /* DFR0.  */
1184         return cpu->id_dfr0;
1185     case 0xd4c: /* AFR0.  */
1186         return cpu->id_afr0;
1187     case 0xd50: /* MMFR0.  */
1188         return cpu->id_mmfr0;
1189     case 0xd54: /* MMFR1.  */
1190         return cpu->id_mmfr1;
1191     case 0xd58: /* MMFR2.  */
1192         return cpu->id_mmfr2;
1193     case 0xd5c: /* MMFR3.  */
1194         return cpu->id_mmfr3;
1195     case 0xd60: /* ISAR0.  */
1196         return cpu->isar.id_isar0;
1197     case 0xd64: /* ISAR1.  */
1198         return cpu->isar.id_isar1;
1199     case 0xd68: /* ISAR2.  */
1200         return cpu->isar.id_isar2;
1201     case 0xd6c: /* ISAR3.  */
1202         return cpu->isar.id_isar3;
1203     case 0xd70: /* ISAR4.  */
1204         return cpu->isar.id_isar4;
1205     case 0xd74: /* ISAR5.  */
1206         return cpu->isar.id_isar5;
1207     case 0xd78: /* CLIDR */
1208         return cpu->clidr;
1209     case 0xd7c: /* CTR */
1210         return cpu->ctr;
1211     case 0xd80: /* CSSIDR */
1212     {
1213         int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
1214         return cpu->ccsidr[idx];
1215     }
1216     case 0xd84: /* CSSELR */
1217         return cpu->env.v7m.csselr[attrs.secure];
1218     case 0xd88: /* CPACR */
1219         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1220             return 0;
1221         }
1222         return cpu->env.v7m.cpacr[attrs.secure];
1223     case 0xd8c: /* NSACR */
1224         if (!attrs.secure || !arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1225             return 0;
1226         }
1227         return cpu->env.v7m.nsacr;
1228     /* TODO: Implement debug registers.  */
1229     case 0xd90: /* MPU_TYPE */
1230         /* Unified MPU; if the MPU is not present this value is zero */
1231         return cpu->pmsav7_dregion << 8;
1232         break;
1233     case 0xd94: /* MPU_CTRL */
1234         return cpu->env.v7m.mpu_ctrl[attrs.secure];
1235     case 0xd98: /* MPU_RNR */
1236         return cpu->env.pmsav7.rnr[attrs.secure];
1237     case 0xd9c: /* MPU_RBAR */
1238     case 0xda4: /* MPU_RBAR_A1 */
1239     case 0xdac: /* MPU_RBAR_A2 */
1240     case 0xdb4: /* MPU_RBAR_A3 */
1241     {
1242         int region = cpu->env.pmsav7.rnr[attrs.secure];
1243 
1244         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1245             /* PMSAv8M handling of the aliases is different from v7M:
1246              * aliases A1, A2, A3 override the low two bits of the region
1247              * number in MPU_RNR, and there is no 'region' field in the
1248              * RBAR register.
1249              */
1250             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1251             if (aliasno) {
1252                 region = deposit32(region, 0, 2, aliasno);
1253             }
1254             if (region >= cpu->pmsav7_dregion) {
1255                 return 0;
1256             }
1257             return cpu->env.pmsav8.rbar[attrs.secure][region];
1258         }
1259 
1260         if (region >= cpu->pmsav7_dregion) {
1261             return 0;
1262         }
1263         return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
1264     }
1265     case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1266     case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1267     case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1268     case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1269     {
1270         int region = cpu->env.pmsav7.rnr[attrs.secure];
1271 
1272         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1273             /* PMSAv8M handling of the aliases is different from v7M:
1274              * aliases A1, A2, A3 override the low two bits of the region
1275              * number in MPU_RNR.
1276              */
1277             int aliasno = (offset - 0xda0) / 8; /* 0..3 */
1278             if (aliasno) {
1279                 region = deposit32(region, 0, 2, aliasno);
1280             }
1281             if (region >= cpu->pmsav7_dregion) {
1282                 return 0;
1283             }
1284             return cpu->env.pmsav8.rlar[attrs.secure][region];
1285         }
1286 
1287         if (region >= cpu->pmsav7_dregion) {
1288             return 0;
1289         }
1290         return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
1291             (cpu->env.pmsav7.drsr[region] & 0xffff);
1292     }
1293     case 0xdc0: /* MPU_MAIR0 */
1294         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1295             goto bad_offset;
1296         }
1297         return cpu->env.pmsav8.mair0[attrs.secure];
1298     case 0xdc4: /* MPU_MAIR1 */
1299         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1300             goto bad_offset;
1301         }
1302         return cpu->env.pmsav8.mair1[attrs.secure];
1303     case 0xdd0: /* SAU_CTRL */
1304         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1305             goto bad_offset;
1306         }
1307         if (!attrs.secure) {
1308             return 0;
1309         }
1310         return cpu->env.sau.ctrl;
1311     case 0xdd4: /* SAU_TYPE */
1312         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1313             goto bad_offset;
1314         }
1315         if (!attrs.secure) {
1316             return 0;
1317         }
1318         return cpu->sau_sregion;
1319     case 0xdd8: /* SAU_RNR */
1320         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1321             goto bad_offset;
1322         }
1323         if (!attrs.secure) {
1324             return 0;
1325         }
1326         return cpu->env.sau.rnr;
1327     case 0xddc: /* SAU_RBAR */
1328     {
1329         int region = cpu->env.sau.rnr;
1330 
1331         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1332             goto bad_offset;
1333         }
1334         if (!attrs.secure) {
1335             return 0;
1336         }
1337         if (region >= cpu->sau_sregion) {
1338             return 0;
1339         }
1340         return cpu->env.sau.rbar[region];
1341     }
1342     case 0xde0: /* SAU_RLAR */
1343     {
1344         int region = cpu->env.sau.rnr;
1345 
1346         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1347             goto bad_offset;
1348         }
1349         if (!attrs.secure) {
1350             return 0;
1351         }
1352         if (region >= cpu->sau_sregion) {
1353             return 0;
1354         }
1355         return cpu->env.sau.rlar[region];
1356     }
1357     case 0xde4: /* SFSR */
1358         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1359             goto bad_offset;
1360         }
1361         if (!attrs.secure) {
1362             return 0;
1363         }
1364         return cpu->env.v7m.sfsr;
1365     case 0xde8: /* SFAR */
1366         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1367             goto bad_offset;
1368         }
1369         if (!attrs.secure) {
1370             return 0;
1371         }
1372         return cpu->env.v7m.sfar;
1373     case 0xf34: /* FPCCR */
1374         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1375             return 0;
1376         }
1377         if (attrs.secure) {
1378             return cpu->env.v7m.fpccr[M_REG_S];
1379         } else {
1380             /*
1381              * NS can read LSPEN, CLRONRET and MONRDY. It can read
1382              * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0;
1383              * other non-banked bits RAZ.
1384              * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set.
1385              */
1386             uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
1387             uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
1388                 R_V7M_FPCCR_CLRONRET_MASK |
1389                 R_V7M_FPCCR_MONRDY_MASK;
1390 
1391             if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1392                 mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
1393             }
1394 
1395             value &= mask;
1396 
1397             value |= cpu->env.v7m.fpccr[M_REG_NS];
1398             return value;
1399         }
1400     case 0xf38: /* FPCAR */
1401         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1402             return 0;
1403         }
1404         return cpu->env.v7m.fpcar[attrs.secure];
1405     case 0xf3c: /* FPDSCR */
1406         if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1407             return 0;
1408         }
1409         return cpu->env.v7m.fpdscr[attrs.secure];
1410     case 0xf40: /* MVFR0 */
1411         return cpu->isar.mvfr0;
1412     case 0xf44: /* MVFR1 */
1413         return cpu->isar.mvfr1;
1414     case 0xf48: /* MVFR2 */
1415         return cpu->isar.mvfr2;
1416     default:
1417     bad_offset:
1418         qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
1419         return 0;
1420     }
1421 }
1422 
1423 static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
1424                         MemTxAttrs attrs)
1425 {
1426     ARMCPU *cpu = s->cpu;
1427 
1428     switch (offset) {
1429     case 0xc: /* CPPWR */
1430         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1431             goto bad_offset;
1432         }
1433         /* Make the IMPDEF choice to RAZ/WI this. */
1434         break;
1435     case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
1436     {
1437         int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1438         int i;
1439 
1440         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1441             goto bad_offset;
1442         }
1443         if (!attrs.secure) {
1444             break;
1445         }
1446         for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1447             s->itns[startvec + i] = (value >> i) & 1;
1448         }
1449         nvic_irq_update(s);
1450         break;
1451     }
1452     case 0xd04: /* Interrupt Control State (ICSR) */
1453         if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1454             if (value & (1 << 31)) {
1455                 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
1456             } else if (value & (1 << 30) &&
1457                        arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1458                 /* PENDNMICLR didn't exist in v7M */
1459                 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
1460             }
1461         }
1462         if (value & (1 << 28)) {
1463             armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1464         } else if (value & (1 << 27)) {
1465             armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1466         }
1467         if (value & (1 << 26)) {
1468             armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1469         } else if (value & (1 << 25)) {
1470             armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1471         }
1472         break;
1473     case 0xd08: /* Vector Table Offset.  */
1474         cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
1475         break;
1476     case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1477         if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
1478             if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
1479                 if (attrs.secure ||
1480                     !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
1481                     qemu_irq_pulse(s->sysresetreq);
1482                 }
1483             }
1484             if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
1485                 qemu_log_mask(LOG_GUEST_ERROR,
1486                               "Setting VECTCLRACTIVE when not in DEBUG mode "
1487                               "is UNPREDICTABLE\n");
1488             }
1489             if (value & R_V7M_AIRCR_VECTRESET_MASK) {
1490                 /* NB: this bit is RES0 in v8M */
1491                 qemu_log_mask(LOG_GUEST_ERROR,
1492                               "Setting VECTRESET when not in DEBUG mode "
1493                               "is UNPREDICTABLE\n");
1494             }
1495             if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1496                 s->prigroup[attrs.secure] =
1497                     extract32(value,
1498                               R_V7M_AIRCR_PRIGROUP_SHIFT,
1499                               R_V7M_AIRCR_PRIGROUP_LENGTH);
1500             }
1501             if (attrs.secure) {
1502                 /* These bits are only writable by secure */
1503                 cpu->env.v7m.aircr = value &
1504                     (R_V7M_AIRCR_SYSRESETREQS_MASK |
1505                      R_V7M_AIRCR_BFHFNMINS_MASK |
1506                      R_V7M_AIRCR_PRIS_MASK);
1507                 /* BFHFNMINS changes the priority of Secure HardFault, and
1508                  * allows a pending Non-secure HardFault to preempt (which
1509                  * we implement by marking it enabled).
1510                  */
1511                 if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1512                     s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
1513                     s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
1514                 } else {
1515                     s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
1516                     s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1517                 }
1518             }
1519             nvic_irq_update(s);
1520         }
1521         break;
1522     case 0xd10: /* System Control.  */
1523         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1524             goto bad_offset;
1525         }
1526         /* We don't implement deep-sleep so these bits are RAZ/WI.
1527          * The other bits in the register are banked.
1528          * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
1529          * is architecturally permitted.
1530          */
1531         value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
1532         cpu->env.v7m.scr[attrs.secure] = value;
1533         break;
1534     case 0xd14: /* Configuration Control.  */
1535         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1536             goto bad_offset;
1537         }
1538 
1539         /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
1540         value &= (R_V7M_CCR_STKALIGN_MASK |
1541                   R_V7M_CCR_BFHFNMIGN_MASK |
1542                   R_V7M_CCR_DIV_0_TRP_MASK |
1543                   R_V7M_CCR_UNALIGN_TRP_MASK |
1544                   R_V7M_CCR_USERSETMPEND_MASK |
1545                   R_V7M_CCR_NONBASETHRDENA_MASK);
1546 
1547         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1548             /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
1549             value |= R_V7M_CCR_NONBASETHRDENA_MASK
1550                 | R_V7M_CCR_STKALIGN_MASK;
1551         }
1552         if (attrs.secure) {
1553             /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
1554             cpu->env.v7m.ccr[M_REG_NS] =
1555                 (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
1556                 | (value & R_V7M_CCR_BFHFNMIGN_MASK);
1557             value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1558         }
1559 
1560         cpu->env.v7m.ccr[attrs.secure] = value;
1561         break;
1562     case 0xd24: /* System Handler Control and State (SHCSR) */
1563         if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1564             goto bad_offset;
1565         }
1566         if (attrs.secure) {
1567             s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1568             /* Secure HardFault active bit cannot be written */
1569             s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1570             s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1571             s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
1572                 (value & (1 << 10)) != 0;
1573             s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
1574                 (value & (1 << 11)) != 0;
1575             s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
1576                 (value & (1 << 12)) != 0;
1577             s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1578             s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1579             s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1580             s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1581             s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
1582                 (value & (1 << 18)) != 0;
1583             s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1584             /* SecureFault not banked, but RAZ/WI to NS */
1585             s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
1586             s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
1587             s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
1588         } else {
1589             s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1590             if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1591                 /* HARDFAULTPENDED is not present in v7M */
1592                 s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1593             }
1594             s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1595             s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1596             s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
1597             s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
1598             s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
1599             s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1600             s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1601             s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1602             s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
1603         }
1604         if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1605             s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
1606             s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
1607             s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1608         }
1609         /* NMIACT can only be written if the write is of a zero, with
1610          * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
1611          */
1612         if (!attrs.secure && cpu->env.v7m.secure &&
1613             (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1614             (value & (1 << 5)) == 0) {
1615             s->vectors[ARMV7M_EXCP_NMI].active = 0;
1616         }
1617         /* HARDFAULTACT can only be written if the write is of a zero
1618          * to the non-secure HardFault state by the CPU in secure state.
1619          * The only case where we can be targeting the non-secure HF state
1620          * when in secure state is if this is a write via the NS alias
1621          * and BFHFNMINS is 1.
1622          */
1623         if (!attrs.secure && cpu->env.v7m.secure &&
1624             (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1625             (value & (1 << 2)) == 0) {
1626             s->vectors[ARMV7M_EXCP_HARD].active = 0;
1627         }
1628 
1629         /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1630         s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
1631         nvic_irq_update(s);
1632         break;
1633     case 0xd2c: /* Hard Fault Status.  */
1634         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1635             goto bad_offset;
1636         }
1637         cpu->env.v7m.hfsr &= ~value; /* W1C */
1638         break;
1639     case 0xd30: /* Debug Fault Status.  */
1640         cpu->env.v7m.dfsr &= ~value; /* W1C */
1641         break;
1642     case 0xd34: /* Mem Manage Address.  */
1643         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1644             goto bad_offset;
1645         }
1646         cpu->env.v7m.mmfar[attrs.secure] = value;
1647         return;
1648     case 0xd38: /* Bus Fault Address.  */
1649         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1650             goto bad_offset;
1651         }
1652         if (!attrs.secure &&
1653             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1654             return;
1655         }
1656         cpu->env.v7m.bfar = value;
1657         return;
1658     case 0xd3c: /* Aux Fault Status.  */
1659         qemu_log_mask(LOG_UNIMP,
1660                       "NVIC: Aux fault status registers unimplemented\n");
1661         break;
1662     case 0xd84: /* CSSELR */
1663         if (!arm_v7m_csselr_razwi(cpu)) {
1664             cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
1665         }
1666         break;
1667     case 0xd88: /* CPACR */
1668         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1669             /* We implement only the Floating Point extension's CP10/CP11 */
1670             cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
1671         }
1672         break;
1673     case 0xd8c: /* NSACR */
1674         if (attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1675             /* We implement only the Floating Point extension's CP10/CP11 */
1676             cpu->env.v7m.nsacr = value & (3 << 10);
1677         }
1678         break;
1679     case 0xd90: /* MPU_TYPE */
1680         return; /* RO */
1681     case 0xd94: /* MPU_CTRL */
1682         if ((value &
1683              (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
1684             == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
1685             qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
1686                           "UNPREDICTABLE\n");
1687         }
1688         cpu->env.v7m.mpu_ctrl[attrs.secure]
1689             = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
1690                        R_V7M_MPU_CTRL_HFNMIENA_MASK |
1691                        R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
1692         tlb_flush(CPU(cpu));
1693         break;
1694     case 0xd98: /* MPU_RNR */
1695         if (value >= cpu->pmsav7_dregion) {
1696             qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
1697                           PRIu32 "/%" PRIu32 "\n",
1698                           value, cpu->pmsav7_dregion);
1699         } else {
1700             cpu->env.pmsav7.rnr[attrs.secure] = value;
1701         }
1702         break;
1703     case 0xd9c: /* MPU_RBAR */
1704     case 0xda4: /* MPU_RBAR_A1 */
1705     case 0xdac: /* MPU_RBAR_A2 */
1706     case 0xdb4: /* MPU_RBAR_A3 */
1707     {
1708         int region;
1709 
1710         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1711             /* PMSAv8M handling of the aliases is different from v7M:
1712              * aliases A1, A2, A3 override the low two bits of the region
1713              * number in MPU_RNR, and there is no 'region' field in the
1714              * RBAR register.
1715              */
1716             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1717 
1718             region = cpu->env.pmsav7.rnr[attrs.secure];
1719             if (aliasno) {
1720                 region = deposit32(region, 0, 2, aliasno);
1721             }
1722             if (region >= cpu->pmsav7_dregion) {
1723                 return;
1724             }
1725             cpu->env.pmsav8.rbar[attrs.secure][region] = value;
1726             tlb_flush(CPU(cpu));
1727             return;
1728         }
1729 
1730         if (value & (1 << 4)) {
1731             /* VALID bit means use the region number specified in this
1732              * value and also update MPU_RNR.REGION with that value.
1733              */
1734             region = extract32(value, 0, 4);
1735             if (region >= cpu->pmsav7_dregion) {
1736                 qemu_log_mask(LOG_GUEST_ERROR,
1737                               "MPU region out of range %u/%" PRIu32 "\n",
1738                               region, cpu->pmsav7_dregion);
1739                 return;
1740             }
1741             cpu->env.pmsav7.rnr[attrs.secure] = region;
1742         } else {
1743             region = cpu->env.pmsav7.rnr[attrs.secure];
1744         }
1745 
1746         if (region >= cpu->pmsav7_dregion) {
1747             return;
1748         }
1749 
1750         cpu->env.pmsav7.drbar[region] = value & ~0x1f;
1751         tlb_flush(CPU(cpu));
1752         break;
1753     }
1754     case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1755     case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1756     case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1757     case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1758     {
1759         int region = cpu->env.pmsav7.rnr[attrs.secure];
1760 
1761         if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1762             /* PMSAv8M handling of the aliases is different from v7M:
1763              * aliases A1, A2, A3 override the low two bits of the region
1764              * number in MPU_RNR.
1765              */
1766             int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1767 
1768             region = cpu->env.pmsav7.rnr[attrs.secure];
1769             if (aliasno) {
1770                 region = deposit32(region, 0, 2, aliasno);
1771             }
1772             if (region >= cpu->pmsav7_dregion) {
1773                 return;
1774             }
1775             cpu->env.pmsav8.rlar[attrs.secure][region] = value;
1776             tlb_flush(CPU(cpu));
1777             return;
1778         }
1779 
1780         if (region >= cpu->pmsav7_dregion) {
1781             return;
1782         }
1783 
1784         cpu->env.pmsav7.drsr[region] = value & 0xff3f;
1785         cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
1786         tlb_flush(CPU(cpu));
1787         break;
1788     }
1789     case 0xdc0: /* MPU_MAIR0 */
1790         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1791             goto bad_offset;
1792         }
1793         if (cpu->pmsav7_dregion) {
1794             /* Register is RES0 if no MPU regions are implemented */
1795             cpu->env.pmsav8.mair0[attrs.secure] = value;
1796         }
1797         /* We don't need to do anything else because memory attributes
1798          * only affect cacheability, and we don't implement caching.
1799          */
1800         break;
1801     case 0xdc4: /* MPU_MAIR1 */
1802         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1803             goto bad_offset;
1804         }
1805         if (cpu->pmsav7_dregion) {
1806             /* Register is RES0 if no MPU regions are implemented */
1807             cpu->env.pmsav8.mair1[attrs.secure] = value;
1808         }
1809         /* We don't need to do anything else because memory attributes
1810          * only affect cacheability, and we don't implement caching.
1811          */
1812         break;
1813     case 0xdd0: /* SAU_CTRL */
1814         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1815             goto bad_offset;
1816         }
1817         if (!attrs.secure) {
1818             return;
1819         }
1820         cpu->env.sau.ctrl = value & 3;
1821         break;
1822     case 0xdd4: /* SAU_TYPE */
1823         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1824             goto bad_offset;
1825         }
1826         break;
1827     case 0xdd8: /* SAU_RNR */
1828         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1829             goto bad_offset;
1830         }
1831         if (!attrs.secure) {
1832             return;
1833         }
1834         if (value >= cpu->sau_sregion) {
1835             qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
1836                           PRIu32 "/%" PRIu32 "\n",
1837                           value, cpu->sau_sregion);
1838         } else {
1839             cpu->env.sau.rnr = value;
1840         }
1841         break;
1842     case 0xddc: /* SAU_RBAR */
1843     {
1844         int region = cpu->env.sau.rnr;
1845 
1846         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1847             goto bad_offset;
1848         }
1849         if (!attrs.secure) {
1850             return;
1851         }
1852         if (region >= cpu->sau_sregion) {
1853             return;
1854         }
1855         cpu->env.sau.rbar[region] = value & ~0x1f;
1856         tlb_flush(CPU(cpu));
1857         break;
1858     }
1859     case 0xde0: /* SAU_RLAR */
1860     {
1861         int region = cpu->env.sau.rnr;
1862 
1863         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1864             goto bad_offset;
1865         }
1866         if (!attrs.secure) {
1867             return;
1868         }
1869         if (region >= cpu->sau_sregion) {
1870             return;
1871         }
1872         cpu->env.sau.rlar[region] = value & ~0x1c;
1873         tlb_flush(CPU(cpu));
1874         break;
1875     }
1876     case 0xde4: /* SFSR */
1877         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1878             goto bad_offset;
1879         }
1880         if (!attrs.secure) {
1881             return;
1882         }
1883         cpu->env.v7m.sfsr &= ~value; /* W1C */
1884         break;
1885     case 0xde8: /* SFAR */
1886         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1887             goto bad_offset;
1888         }
1889         if (!attrs.secure) {
1890             return;
1891         }
1892         cpu->env.v7m.sfsr = value;
1893         break;
1894     case 0xf00: /* Software Triggered Interrupt Register */
1895     {
1896         int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
1897 
1898         if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1899             goto bad_offset;
1900         }
1901 
1902         if (excnum < s->num_irq) {
1903             armv7m_nvic_set_pending(s, excnum, false);
1904         }
1905         break;
1906     }
1907     case 0xf34: /* FPCCR */
1908         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1909             /* Not all bits here are banked. */
1910             uint32_t fpccr_s;
1911 
1912             if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1913                 /* Don't allow setting of bits not present in v7M */
1914                 value &= (R_V7M_FPCCR_LSPACT_MASK |
1915                           R_V7M_FPCCR_USER_MASK |
1916                           R_V7M_FPCCR_THREAD_MASK |
1917                           R_V7M_FPCCR_HFRDY_MASK |
1918                           R_V7M_FPCCR_MMRDY_MASK |
1919                           R_V7M_FPCCR_BFRDY_MASK |
1920                           R_V7M_FPCCR_MONRDY_MASK |
1921                           R_V7M_FPCCR_LSPEN_MASK |
1922                           R_V7M_FPCCR_ASPEN_MASK);
1923             }
1924             value &= ~R_V7M_FPCCR_RES0_MASK;
1925 
1926             if (!attrs.secure) {
1927                 /* Some non-banked bits are configurably writable by NS */
1928                 fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
1929                 if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
1930                     uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
1931                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
1932                 }
1933                 if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
1934                     uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
1935                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
1936                 }
1937                 if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1938                     uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
1939                     uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
1940                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
1941                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
1942                 }
1943                 /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */
1944                 {
1945                     uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
1946                     fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
1947                 }
1948 
1949                 /*
1950                  * All other non-banked bits are RAZ/WI from NS; write
1951                  * just the banked bits to fpccr[M_REG_NS].
1952                  */
1953                 value &= R_V7M_FPCCR_BANKED_MASK;
1954                 cpu->env.v7m.fpccr[M_REG_NS] = value;
1955             } else {
1956                 fpccr_s = value;
1957             }
1958             cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
1959         }
1960         break;
1961     case 0xf38: /* FPCAR */
1962         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1963             value &= ~7;
1964             cpu->env.v7m.fpcar[attrs.secure] = value;
1965         }
1966         break;
1967     case 0xf3c: /* FPDSCR */
1968         if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
1969             value &= 0x07c00000;
1970             cpu->env.v7m.fpdscr[attrs.secure] = value;
1971         }
1972         break;
1973     case 0xf50: /* ICIALLU */
1974     case 0xf58: /* ICIMVAU */
1975     case 0xf5c: /* DCIMVAC */
1976     case 0xf60: /* DCISW */
1977     case 0xf64: /* DCCMVAU */
1978     case 0xf68: /* DCCMVAC */
1979     case 0xf6c: /* DCCSW */
1980     case 0xf70: /* DCCIMVAC */
1981     case 0xf74: /* DCCISW */
1982     case 0xf78: /* BPIALL */
1983         /* Cache and branch predictor maintenance: for QEMU these always NOP */
1984         break;
1985     default:
1986     bad_offset:
1987         qemu_log_mask(LOG_GUEST_ERROR,
1988                       "NVIC: Bad write offset 0x%x\n", offset);
1989     }
1990 }
1991 
1992 static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
1993 {
1994     /* Return true if unprivileged access to this register is permitted. */
1995     switch (offset) {
1996     case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
1997         /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
1998          * controls access even though the CPU is in Secure state (I_QDKX).
1999          */
2000         return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
2001     default:
2002         /* All other user accesses cause a BusFault unconditionally */
2003         return false;
2004     }
2005 }
2006 
2007 static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
2008 {
2009     /* Behaviour for the SHPR register field for this exception:
2010      * return M_REG_NS to use the nonsecure vector (including for
2011      * non-banked exceptions), M_REG_S for the secure version of
2012      * a banked exception, and -1 if this field should RAZ/WI.
2013      */
2014     switch (exc) {
2015     case ARMV7M_EXCP_MEM:
2016     case ARMV7M_EXCP_USAGE:
2017     case ARMV7M_EXCP_SVC:
2018     case ARMV7M_EXCP_PENDSV:
2019     case ARMV7M_EXCP_SYSTICK:
2020         /* Banked exceptions */
2021         return attrs.secure;
2022     case ARMV7M_EXCP_BUS:
2023         /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
2024         if (!attrs.secure &&
2025             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2026             return -1;
2027         }
2028         return M_REG_NS;
2029     case ARMV7M_EXCP_SECURE:
2030         /* Not banked, RAZ/WI from nonsecure */
2031         if (!attrs.secure) {
2032             return -1;
2033         }
2034         return M_REG_NS;
2035     case ARMV7M_EXCP_DEBUG:
2036         /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
2037         return M_REG_NS;
2038     case 8 ... 10:
2039     case 13:
2040         /* RES0 */
2041         return -1;
2042     default:
2043         /* Not reachable due to decode of SHPR register addresses */
2044         g_assert_not_reached();
2045     }
2046 }
2047 
/*
 * MMIO read dispatcher for the NVIC/System Control Space region.
 * Registers with byte/halfword access support (the per-IRQ enable,
 * pend, active and priority banks, the system handler priorities,
 * CFSR and the ID space) are decoded here; everything else is
 * word-access-only and is deferred to nvic_readl().
 * Unprivileged accesses BusFault unless nvic_user_access_ok() permits.
 */
static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
                                    uint64_t *data, unsigned size,
                                    MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    uint32_t val;

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    /* reads of set and clear both return the status */
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        val = 0;
        /* One bit per IRQ, 8 IRQs per register byte */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            /* From NS only interrupts flagged in ITNS are visible */
            if (s->vectors[startvec + i].enabled &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        offset += 0x80;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        val = 0;
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].pending &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x300 ... 0x33f: /* NVIC Active */
        val = 0;

        /* RAZ unless the CPU implements ARMv7 or later */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
            break;
        }

        startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].active &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x400 ... 0x5ef: /* NVIC Priority */
        val = 0;
        /* One priority byte per IRQ */
        startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                val |= s->vectors[startvec + i].prio << (8 * i);
            }
        }
        break;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        /* SHPR1 reads as zero without the Main Extension */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            val = 0;
            break;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        val = 0;
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            /* shpr_bank() returns -1 for fields that are RAZ here */
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                continue;
            }
            val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
        }
        break;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            val = 0;
            break;
        };
        /*
         * The BFSR bits [15:8] are shared between security states
         * and we store them in the NS copy. They are RAZ/WI for
         * NS code if AIRCR.BFHFNMINS is 0.
         */
        val = s->cpu->env.v7m.cfsr[attrs.secure];
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            val &= ~R_V7M_CFSR_BFSR_MASK;
        } else {
            val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
        }
        /* Narrow the 32-bit value to the bytes actually accessed */
        val = extract32(val, (offset - 0xd28) * 8, size * 8);
        break;
    case 0xfe0 ... 0xfff: /* ID.  */
        /* Only word-aligned reads return the ID register bytes */
        if (offset & 3) {
            val = 0;
        } else {
            val = nvic_id[(offset - 0xfe0) >> 2];
        }
        break;
    default:
        /* Word-only registers: handled by nvic_readl() */
        if (size == 4) {
            val = nvic_readl(s, offset, attrs);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "NVIC: Bad read of size %d at offset 0x%x\n",
                          size, offset);
            val = 0;
        }
    }

    trace_nvic_sysreg_read(addr, val, size);
    *data = val;
    return MEMTX_OK;
}
2176 
/*
 * MMIO write dispatcher for the NVIC/System Control Space region.
 * Mirrors nvic_sysreg_read(): the byte/halfword-accessible banks are
 * decoded here and everything else falls through to nvic_writel(),
 * which only accepts word-sized writes.
 * Unprivileged accesses BusFault unless nvic_user_access_ok() permits.
 */
static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    unsigned setval = 0;

    trace_nvic_sysreg_write(addr, value, size);

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            /* From NS only interrupts flagged in ITNS are writable */
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x300 ... 0x33f: /* NVIC Active */
        return MEMTX_OK; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
        /* One priority byte per IRQ */
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        /* SHPR1 is WI without the Main Extension */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            return MEMTX_OK;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int newprio = extract32(value, i * 8, 8);
            /* shpr_bank() returns -1 for fields that are WI here */
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                continue;
            }
            set_prio(s, hdlidx, sbank, newprio);
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            return MEMTX_OK;
        }
        /* All bits are W1C, so construct 32 bit value with 0s in
         * the parts not written by the access size
         */
        value <<= ((offset - 0xd28) * 8);

        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            /* BFSR bits are RAZ/WI for NS if AIRCR.BFHFNMINS is 0 */
            value &= ~R_V7M_CFSR_BFSR_MASK;
        }

        s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
        if (attrs.secure) {
            /* The BFSR bits [15:8] are shared between security states
             * and we store them in the NS copy.
             */
            s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
        }
        return MEMTX_OK;
    }
    if (size == 4) {
        nvic_writel(s, offset, value, attrs);
        return MEMTX_OK;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; treat as RAZ/WI */
    return MEMTX_OK;
}
2290 
/* Region ops for the main NVIC/System Control Space MMIO window */
static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2296 
2297 static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
2298                                         uint64_t value, unsigned size,
2299                                         MemTxAttrs attrs)
2300 {
2301     MemoryRegion *mr = opaque;
2302 
2303     if (attrs.secure) {
2304         /* S accesses to the alias act like NS accesses to the real region */
2305         attrs.secure = 0;
2306         return memory_region_dispatch_write(mr, addr, value, size, attrs);
2307     } else {
2308         /* NS attrs are RAZ/WI for privileged, and BusFault for user */
2309         if (attrs.user) {
2310             return MEMTX_ERROR;
2311         }
2312         return MEMTX_OK;
2313     }
2314 }
2315 
2316 static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
2317                                        uint64_t *data, unsigned size,
2318                                        MemTxAttrs attrs)
2319 {
2320     MemoryRegion *mr = opaque;
2321 
2322     if (attrs.secure) {
2323         /* S accesses to the alias act like NS accesses to the real region */
2324         attrs.secure = 0;
2325         return memory_region_dispatch_read(mr, addr, data, size, attrs);
2326     } else {
2327         /* NS attrs are RAZ/WI for privileged, and BusFault for user */
2328         if (attrs.user) {
2329             return MEMTX_ERROR;
2330         }
2331         *data = 0;
2332         return MEMTX_OK;
2333     }
2334 }
2335 
/* Region ops for the NS alias of the System Control Space */
static const MemoryRegionOps nvic_sysreg_ns_ops = {
    .read_with_attrs = nvic_sysreg_ns_read,
    .write_with_attrs = nvic_sysreg_ns_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2341 
2342 static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
2343                                       uint64_t value, unsigned size,
2344                                       MemTxAttrs attrs)
2345 {
2346     NVICState *s = opaque;
2347     MemoryRegion *mr;
2348 
2349     /* Direct the access to the correct systick */
2350     mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2351     return memory_region_dispatch_write(mr, addr, value, size, attrs);
2352 }
2353 
2354 static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
2355                                      uint64_t *data, unsigned size,
2356                                      MemTxAttrs attrs)
2357 {
2358     NVICState *s = opaque;
2359     MemoryRegion *mr;
2360 
2361     /* Direct the access to the correct systick */
2362     mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2363     return memory_region_dispatch_read(mr, addr, data, size, attrs);
2364 }
2365 
/* Region ops that forward SysTick accesses to the banked timer */
static const MemoryRegionOps nvic_systick_ops = {
    .read_with_attrs = nvic_systick_read,
    .write_with_attrs = nvic_systick_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2371 
2372 static int nvic_post_load(void *opaque, int version_id)
2373 {
2374     NVICState *s = opaque;
2375     unsigned i;
2376     int resetprio;
2377 
2378     /* Check for out of range priority settings */
2379     resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2380 
2381     if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
2382         s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
2383         s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
2384         return 1;
2385     }
2386     for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
2387         if (s->vectors[i].prio & ~0xff) {
2388             return 1;
2389         }
2390     }
2391 
2392     nvic_recompute_state(s);
2393 
2394     return 0;
2395 }
2396 
/* Migration state for a single exception vector's bookkeeping */
static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};
2410 
2411 static bool nvic_security_needed(void *opaque)
2412 {
2413     NVICState *s = opaque;
2414 
2415     return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
2416 }
2417 
2418 static int nvic_security_post_load(void *opaque, int version_id)
2419 {
2420     NVICState *s = opaque;
2421     int i;
2422 
2423     /* Check for out of range priority settings */
2424     if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
2425         && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
2426         /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
2427          * if the CPU state has been migrated yet; a mismatch won't
2428          * cause the emulation to blow up, though.
2429          */
2430         return 1;
2431     }
2432     for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
2433         if (s->sec_vectors[i].prio & ~0xff) {
2434             return 1;
2435         }
2436     }
2437     return 0;
2438 }
2439 
/*
 * Optional migration subsection carrying the TrustZone-M state:
 * the secure-bank internal vectors, the secure PRIGROUP, and the
 * per-exception ITNS flags. Sent only when nvic_security_needed()
 * reports the CPU has the Security Extension.
 */
static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};
2454 
/*
 * Main NVIC migration state: the full vector array plus the
 * non-secure PRIGROUP. Security-related state travels in the
 * optional "m-security" subsection.
 */
static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_nvic_security,
        NULL
    }
};
2471 
static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    /* Boards that don't set the property get the default of 64 lines */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};
2477 
2478 static void armv7m_nvic_reset(DeviceState *dev)
2479 {
2480     int resetprio;
2481     NVICState *s = NVIC(dev);
2482 
2483     memset(s->vectors, 0, sizeof(s->vectors));
2484     memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
2485     s->prigroup[M_REG_NS] = 0;
2486     s->prigroup[M_REG_S] = 0;
2487 
2488     s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
2489     /* MEM, BUS, and USAGE are enabled through
2490      * the System Handler Control register
2491      */
2492     s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
2493     s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
2494     s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
2495 
2496     /* DebugMonitor is enabled via DEMCR.MON_EN */
2497     s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0;
2498 
2499     resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2500     s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
2501     s->vectors[ARMV7M_EXCP_NMI].prio = -2;
2502     s->vectors[ARMV7M_EXCP_HARD].prio = -1;
2503 
2504     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
2505         s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
2506         s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
2507         s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
2508         s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
2509 
2510         /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
2511         s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
2512         /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
2513         s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
2514     } else {
2515         s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
2516     }
2517 
2518     /* Strictly speaking the reset handler should be enabled.
2519      * However, we don't simulate soft resets through the NVIC,
2520      * and the reset vector should never be pended.
2521      * So we leave it disabled to catch logic errors.
2522      */
2523 
2524     s->exception_prio = NVIC_NOEXC_PRIO;
2525     s->vectpending = 0;
2526     s->vectpending_is_s_banked = false;
2527     s->vectpending_prio = NVIC_NOEXC_PRIO;
2528 
2529     if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
2530         memset(s->itns, 0, sizeof(s->itns));
2531     } else {
2532         /* This state is constant and not guest accessible in a non-security
2533          * NVIC; we set the bits to true to avoid having to do a feature
2534          * bit check in the NVIC enable/pend/etc register accessors.
2535          */
2536         int i;
2537 
2538         for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
2539             s->itns[i] = true;
2540         }
2541     }
2542 }
2543 
2544 static void nvic_systick_trigger(void *opaque, int n, int level)
2545 {
2546     NVICState *s = opaque;
2547 
2548     if (level) {
2549         /* SysTick just asked us to pend its exception.
2550          * (This is different from an external interrupt line's
2551          * behaviour.)
2552          * n == 0 : NonSecure systick
2553          * n == 1 : Secure systick
2554          */
2555         armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
2556     }
2557 }
2558 
/*
 * realize method: validate properties, create and wire up the
 * SysTick child devices, and build the System Control Space memory
 * region hierarchy that we expose via our single sysbus MMIO region.
 * On failure an error is reported through errp and the device is
 * left unrealized.
 */
static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);
    Error *err = NULL;
    int regionlen;

    /* The armv7m container object will have set our CPU pointer */
    if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
        error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
        return;
    }

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    /* One inbound GPIO line per external interrupt */
    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    /* v7M and later have 8 priority bits; earlier M profiles have 2 */
    s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;

    /* Realize the always-present NonSecure SysTick (created in
     * instance_init) and wire its IRQ output to our systick-trigger
     * input line M_REG_NS.
     */
    object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true,
                             "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
                       qdev_get_gpio_in_named(dev, "systick-trigger",
                                              M_REG_NS));

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        /* We couldn't init the secure systick device in instance_init
         * as we didn't know then if the CPU had the security extensions;
         * so we have to do it here.
         */
        sysbus_init_child_obj(OBJECT(dev), "systick-reg-s",
                              &s->systick[M_REG_S],
                              sizeof(s->systick[M_REG_S]), TYPE_SYSTICK);

        object_property_set_bool(OBJECT(&s->systick[M_REG_S]), true,
                                 "realized", &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
        sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
                           qdev_get_gpio_in_named(dev, "systick-trigger",
                                                  M_REG_S));
    }

    /* The NVIC and System Control Space (SCS) starts at 0xe000e000
     * and looks like this:
     *  0x004 - ICTR
     *  0x010 - 0xff - systick
     *  0x100..0x7ec - NVIC
     *  0x7f0..0xcff - Reserved
     *  0xd00..0xd3c - SCS registers
     *  0xd40..0xeff - Reserved or Not implemented
     *  0xf00 - STIR
     *
     * Some registers within this space are banked between security states.
     * In v8M there is a second range 0xe002e000..0xe002efff which is the
     * NonSecure alias SCS; secure accesses to this behave like NS accesses
     * to the main SCS range, and non-secure accesses (including when
     * the security extension is not implemented) are RAZ/WI.
     * Note that both the main SCS range and the alias range are defined
     * to be exempt from memory attribution (R_BLJT) and so the memory
     * transaction attribute always matches the current CPU security
     * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops
     * wrappers we change attrs.secure to indicate the NS access; so
     * generally code determining which banked register to use should
     * use attrs.secure; code determining actual behaviour of the system
     * should use env->v7m.secure.
     */
    /* v8M needs room for the 0x20000-offset NS alias region as well */
    regionlen = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? 0x21000 : 0x1000;
    memory_region_init(&s->container, OBJECT(s), "nvic", regionlen);
    /* The system register region goes at the bottom of the priority
     * stack as it covers the whole page.
     */
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    memory_region_add_subregion(&s->container, 0, &s->sysregmem);

    /* The systick register window overlaps the sysregs at higher
     * priority, so it wins for accesses in 0x10..0xef.
     */
    memory_region_init_io(&s->systickmem, OBJECT(s),
                          &nvic_systick_ops, s,
                          "nvic_systick", 0xe0);

    memory_region_add_subregion_overlap(&s->container, 0x10,
                                        &s->systickmem, 1);

    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        /* NS alias regions: thin wrappers (nvic_sysreg_ns_ops) around
         * the main regions, placed 0x20000 above them.
         */
        memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->sysregmem,
                              "nvic_sysregs_ns", 0x1000);
        memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
        memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->systickmem,
                              "nvic_systick_ns", 0xe0);
        memory_region_add_subregion_overlap(&s->container, 0x20010,
                                            &s->systick_ns_mem, 1);
    }

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
}
2667 
static void armv7m_nvic_instance_init(Object *obj)
{
    /* instance_init: create the NonSecure SysTick child object and
     * set up our IRQ and GPIO interface.
     * NOTE(review): the previous comment here claimed this function
     * adjusts the num-irq default "in the GICState struct"; that was
     * stale copy-paste — the default is set in props_nvic and nothing
     * here touches num_irq.
     */
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    sysbus_init_child_obj(obj, "systick-reg-ns", &nvic->systick[M_REG_NS],
                          sizeof(nvic->systick[M_REG_NS]), TYPE_SYSTICK);
    /* We can't initialize the secure systick here, as we don't know
     * yet if we need it.
     */

    /* Exception output to the CPU, the SYSRESETREQ output, and the
     * inbound systick-trigger (one per security bank) and NMI lines.
     */
    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
                            M_REG_NUM_BANKS);
    qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
}
2692 
2693 static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
2694 {
2695     DeviceClass *dc = DEVICE_CLASS(klass);
2696 
2697     dc->vmsd  = &vmstate_nvic;
2698     dc->props = props_nvic;
2699     dc->reset = armv7m_nvic_reset;
2700     dc->realize = armv7m_nvic_realize;
2701 }
2702 
/* QOM type description for the NVIC sysbus device */
static const TypeInfo armv7m_nvic_info = {
    .name          = TYPE_NVIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init    = armv7m_nvic_class_init,
    .class_size    = sizeof(SysBusDeviceClass),
};
2711 
/* Register the NVIC type with QOM at module load time */
static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)
2718