1 /* 2 * ARM Nested Vectored Interrupt Controller 3 * 4 * Copyright (c) 2006-2007 CodeSourcery. 5 * Written by Paul Brook 6 * 7 * This code is licensed under the GPL. 8 * 9 * The ARMv7M System controller is fairly tightly tied in with the 10 * NVIC. Much of that is also implemented here. 11 */ 12 13 #include "qemu/osdep.h" 14 #include "qapi/error.h" 15 #include "hw/sysbus.h" 16 #include "migration/vmstate.h" 17 #include "qemu/timer.h" 18 #include "hw/intc/armv7m_nvic.h" 19 #include "hw/irq.h" 20 #include "hw/qdev-properties.h" 21 #include "sysemu/runstate.h" 22 #include "target/arm/cpu.h" 23 #include "exec/exec-all.h" 24 #include "exec/memop.h" 25 #include "qemu/log.h" 26 #include "qemu/module.h" 27 #include "trace.h" 28 29 /* IRQ number counting: 30 * 31 * the num-irq property counts the number of external IRQ lines 32 * 33 * NVICState::num_irq counts the total number of exceptions 34 * (external IRQs, the 15 internal exceptions including reset, 35 * and one for the unused exception number 0). 36 * 37 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines. 38 * 39 * NVIC_MAX_VECTORS is the highest permitted number of exceptions. 40 * 41 * Iterating through all exceptions should typically be done with 42 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0. 43 * 44 * The external qemu_irq lines are the NVIC's external IRQ lines, 45 * so line 0 is exception 16. 46 * 47 * In the terminology of the architecture manual, "interrupts" are 48 * a subcategory of exception referring to the external interrupts 49 * (which are exception numbers NVIC_FIRST_IRQ and upward). 50 * For historical reasons QEMU tends to use "interrupt" and 51 * "exception" more or less interchangeably. 52 */ 53 #define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS 54 #define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ) 55 56 /* Effective running priority of the CPU when no exception is active 57 * (higher than the highest possible priority value) 58 */ 59 #define NVIC_NOEXC_PRIO 0x100 60 /* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */ 61 #define NVIC_NS_PRIO_LIMIT 0x80 62 63 static const uint8_t nvic_id[] = { 64 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 65 }; 66 67 static void signal_sysresetreq(NVICState *s) 68 { 69 if (qemu_irq_is_connected(s->sysresetreq)) { 70 qemu_irq_pulse(s->sysresetreq); 71 } else { 72 /* 73 * Default behaviour if the SoC doesn't need to wire up 74 * SYSRESETREQ (eg to a system reset controller of some kind): 75 * perform a system reset via the usual QEMU API. 76 */ 77 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); 78 } 79 } 80 81 static int nvic_pending_prio(NVICState *s) 82 { 83 /* return the group priority of the current pending interrupt, 84 * or NVIC_NOEXC_PRIO if no interrupt is pending 85 */ 86 return s->vectpending_prio; 87 } 88 89 /* Return the value of the ISCR RETTOBASE bit: 90 * 1 if there is exactly one active exception 91 * 0 if there is more than one active exception 92 * UNKNOWN if there are no active exceptions (we choose 1, 93 * which matches the choice Cortex-M3 is documented as making). 94 * 95 * NB: some versions of the documentation talk about this 96 * counting "active exceptions other than the one shown by IPSR"; 97 * this is only different in the obscure corner case where guest 98 * code has manually deactivated an exception and is about 99 * to fail an exception-return integrity check. The definition 100 * above is the one from the v8M ARM ARM and is also in line 101 * with the behaviour documented for the Cortex-M3. 102 */ 103 static bool nvic_rettobase(NVICState *s) 104 { 105 int irq, nhand = 0; 106 bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY); 107 108 for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) { 109 if (s->vectors[irq].active || 110 (check_sec && irq < NVIC_INTERNAL_VECTORS && 111 s->sec_vectors[irq].active)) { 112 nhand++; 113 if (nhand == 2) { 114 return 0; 115 } 116 } 117 } 118 119 return 1; 120 } 121 122 /* Return the value of the ISCR ISRPENDING bit: 123 * 1 if an external interrupt is pending 124 * 0 if no external interrupt is pending 125 */ 126 static bool nvic_isrpending(NVICState *s) 127 { 128 int irq; 129 130 /* We can shortcut if the highest priority pending interrupt 131 * happens to be external or if there is nothing pending. 132 */ 133 if (s->vectpending > NVIC_FIRST_IRQ) { 134 return true; 135 } 136 if (s->vectpending == 0) { 137 return false; 138 } 139 140 for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) { 141 if (s->vectors[irq].pending) { 142 return true; 143 } 144 } 145 return false; 146 } 147 148 static bool exc_is_banked(int exc) 149 { 150 /* Return true if this is one of the limited set of exceptions which 151 * are banked (and thus have state in sec_vectors[]) 152 */ 153 return exc == ARMV7M_EXCP_HARD || 154 exc == ARMV7M_EXCP_MEM || 155 exc == ARMV7M_EXCP_USAGE || 156 exc == ARMV7M_EXCP_SVC || 157 exc == ARMV7M_EXCP_PENDSV || 158 exc == ARMV7M_EXCP_SYSTICK; 159 } 160 161 /* Return a mask word which clears the subpriority bits from 162 * a priority value for an M-profile exception, leaving only 163 * the group priority. 164 */ 165 static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure) 166 { 167 return ~0U << (s->prigroup[secure] + 1); 168 } 169 170 static bool exc_targets_secure(NVICState *s, int exc) 171 { 172 /* Return true if this non-banked exception targets Secure state. */ 173 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { 174 return false; 175 } 176 177 if (exc >= NVIC_FIRST_IRQ) { 178 return !s->itns[exc]; 179 } 180 181 /* Function shouldn't be called for banked exceptions. */ 182 assert(!exc_is_banked(exc)); 183 184 switch (exc) { 185 case ARMV7M_EXCP_NMI: 186 case ARMV7M_EXCP_BUS: 187 return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); 188 case ARMV7M_EXCP_SECURE: 189 return true; 190 case ARMV7M_EXCP_DEBUG: 191 /* TODO: controlled by DEMCR.SDME, which we don't yet implement */ 192 return false; 193 default: 194 /* reset, and reserved (unused) low exception numbers. 195 * We'll get called by code that loops through all the exception 196 * numbers, but it doesn't matter what we return here as these 197 * non-existent exceptions will never be pended or active. 198 */ 199 return true; 200 } 201 } 202 203 static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure) 204 { 205 /* Return the group priority for this exception, given its raw 206 * (group-and-subgroup) priority value and whether it is targeting 207 * secure state or not. 208 */ 209 if (rawprio < 0) { 210 return rawprio; 211 } 212 rawprio &= nvic_gprio_mask(s, targets_secure); 213 /* AIRCR.PRIS causes us to squash all NS priorities into the 214 * lower half of the total range 215 */ 216 if (!targets_secure && 217 (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) { 218 rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT; 219 } 220 return rawprio; 221 } 222 223 /* Recompute vectpending and exception_prio for a CPU which implements 224 * the Security extension 225 */ 226 static void nvic_recompute_state_secure(NVICState *s) 227 { 228 int i, bank; 229 int pend_prio = NVIC_NOEXC_PRIO; 230 int active_prio = NVIC_NOEXC_PRIO; 231 int pend_irq = 0; 232 bool pending_is_s_banked = false; 233 int pend_subprio = 0; 234 235 /* R_CQRV: precedence is by: 236 * - lowest group priority; if both the same then 237 * - lowest subpriority; if both the same then 238 * - lowest exception number; if both the same (ie banked) then 239 * - secure exception takes precedence 240 * Compare pseudocode RawExecutionPriority. 241 * Annoyingly, now we have two prigroup values (for S and NS) 242 * we can't do the loop comparison on raw priority values. 243 */ 244 for (i = 1; i < s->num_irq; i++) { 245 for (bank = M_REG_S; bank >= M_REG_NS; bank--) { 246 VecInfo *vec; 247 int prio, subprio; 248 bool targets_secure; 249 250 if (bank == M_REG_S) { 251 if (!exc_is_banked(i)) { 252 continue; 253 } 254 vec = &s->sec_vectors[i]; 255 targets_secure = true; 256 } else { 257 vec = &s->vectors[i]; 258 targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i); 259 } 260 261 prio = exc_group_prio(s, vec->prio, targets_secure); 262 subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure); 263 if (vec->enabled && vec->pending && 264 ((prio < pend_prio) || 265 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) { 266 pend_prio = prio; 267 pend_subprio = subprio; 268 pend_irq = i; 269 pending_is_s_banked = (bank == M_REG_S); 270 } 271 if (vec->active && prio < active_prio) { 272 active_prio = prio; 273 } 274 } 275 } 276 277 s->vectpending_is_s_banked = pending_is_s_banked; 278 s->vectpending = pend_irq; 279 s->vectpending_prio = pend_prio; 280 s->exception_prio = active_prio; 281 282 trace_nvic_recompute_state_secure(s->vectpending, 283 s->vectpending_is_s_banked, 284 s->vectpending_prio, 285 s->exception_prio); 286 } 287 288 /* Recompute vectpending and exception_prio */ 289 static void nvic_recompute_state(NVICState *s) 290 { 291 int i; 292 int pend_prio = NVIC_NOEXC_PRIO; 293 int active_prio = NVIC_NOEXC_PRIO; 294 int pend_irq = 0; 295 296 /* In theory we could write one function that handled both 297 * the "security extension present" and "not present"; however 298 * the security related changes significantly complicate the 299 * recomputation just by themselves and mixing both cases together 300 * would be even worse, so we retain a separate non-secure-only 301 * version for CPUs which don't implement the security extension. 302 */ 303 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { 304 nvic_recompute_state_secure(s); 305 return; 306 } 307 308 for (i = 1; i < s->num_irq; i++) { 309 VecInfo *vec = &s->vectors[i]; 310 311 if (vec->enabled && vec->pending && vec->prio < pend_prio) { 312 pend_prio = vec->prio; 313 pend_irq = i; 314 } 315 if (vec->active && vec->prio < active_prio) { 316 active_prio = vec->prio; 317 } 318 } 319 320 if (active_prio > 0) { 321 active_prio &= nvic_gprio_mask(s, false); 322 } 323 324 if (pend_prio > 0) { 325 pend_prio &= nvic_gprio_mask(s, false); 326 } 327 328 s->vectpending = pend_irq; 329 s->vectpending_prio = pend_prio; 330 s->exception_prio = active_prio; 331 332 trace_nvic_recompute_state(s->vectpending, 333 s->vectpending_prio, 334 s->exception_prio); 335 } 336 337 /* Return the current execution priority of the CPU 338 * (equivalent to the pseudocode ExecutionPriority function). 339 * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO. 340 */ 341 static inline int nvic_exec_prio(NVICState *s) 342 { 343 CPUARMState *env = &s->cpu->env; 344 int running = NVIC_NOEXC_PRIO; 345 346 if (env->v7m.basepri[M_REG_NS] > 0) { 347 running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS); 348 } 349 350 if (env->v7m.basepri[M_REG_S] > 0) { 351 int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S); 352 if (running > basepri) { 353 running = basepri; 354 } 355 } 356 357 if (env->v7m.primask[M_REG_NS]) { 358 if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) { 359 if (running > NVIC_NS_PRIO_LIMIT) { 360 running = NVIC_NS_PRIO_LIMIT; 361 } 362 } else { 363 running = 0; 364 } 365 } 366 367 if (env->v7m.primask[M_REG_S]) { 368 running = 0; 369 } 370 371 if (env->v7m.faultmask[M_REG_NS]) { 372 if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { 373 running = -1; 374 } else { 375 if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) { 376 if (running > NVIC_NS_PRIO_LIMIT) { 377 running = NVIC_NS_PRIO_LIMIT; 378 } 379 } else { 380 running = 0; 381 } 382 } 383 } 384 385 if (env->v7m.faultmask[M_REG_S]) { 386 running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1; 387 } 388 389 /* consider priority of active handler */ 390 return MIN(running, s->exception_prio); 391 } 392 393 bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure) 394 { 395 /* Return true if the requested execution priority is negative 396 * for the specified security state, ie that security state 397 * has an active NMI or HardFault or has set its FAULTMASK. 398 * Note that this is not the same as whether the execution 399 * priority is actually negative (for instance AIRCR.PRIS may 400 * mean we don't allow FAULTMASK_NS to actually make the execution 401 * priority negative). Compare pseudocode IsReqExcPriNeg(). 402 */ 403 NVICState *s = opaque; 404 405 if (s->cpu->env.v7m.faultmask[secure]) { 406 return true; 407 } 408 409 if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active : 410 s->vectors[ARMV7M_EXCP_HARD].active) { 411 return true; 412 } 413 414 if (s->vectors[ARMV7M_EXCP_NMI].active && 415 exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) { 416 return true; 417 } 418 419 return false; 420 } 421 422 bool armv7m_nvic_can_take_pending_exception(void *opaque) 423 { 424 NVICState *s = opaque; 425 426 return nvic_exec_prio(s) > nvic_pending_prio(s); 427 } 428 429 int armv7m_nvic_raw_execution_priority(void *opaque) 430 { 431 NVICState *s = opaque; 432 433 return s->exception_prio; 434 } 435 436 /* caller must call nvic_irq_update() after this. 437 * secure indicates the bank to use for banked exceptions (we assert if 438 * we are passed secure=true for a non-banked exception). 439 */ 440 static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio) 441 { 442 assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */ 443 assert(irq < s->num_irq); 444 445 prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits); 446 447 if (secure) { 448 assert(exc_is_banked(irq)); 449 s->sec_vectors[irq].prio = prio; 450 } else { 451 s->vectors[irq].prio = prio; 452 } 453 454 trace_nvic_set_prio(irq, secure, prio); 455 } 456 457 /* Return the current raw priority register value. 458 * secure indicates the bank to use for banked exceptions (we assert if 459 * we are passed secure=true for a non-banked exception). 460 */ 461 static int get_prio(NVICState *s, unsigned irq, bool secure) 462 { 463 assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */ 464 assert(irq < s->num_irq); 465 466 if (secure) { 467 assert(exc_is_banked(irq)); 468 return s->sec_vectors[irq].prio; 469 } else { 470 return s->vectors[irq].prio; 471 } 472 } 473 474 /* Recompute state and assert irq line accordingly. 475 * Must be called after changes to: 476 * vec->active, vec->enabled, vec->pending or vec->prio for any vector 477 * prigroup 478 */ 479 static void nvic_irq_update(NVICState *s) 480 { 481 int lvl; 482 int pend_prio; 483 484 nvic_recompute_state(s); 485 pend_prio = nvic_pending_prio(s); 486 487 /* Raise NVIC output if this IRQ would be taken, except that we 488 * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which 489 * will be checked for in arm_v7m_cpu_exec_interrupt()); changes 490 * to those CPU registers don't cause us to recalculate the NVIC 491 * pending info. 492 */ 493 lvl = (pend_prio < s->exception_prio); 494 trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl); 495 qemu_set_irq(s->excpout, lvl); 496 } 497 498 /** 499 * armv7m_nvic_clear_pending: mark the specified exception as not pending 500 * @opaque: the NVIC 501 * @irq: the exception number to mark as not pending 502 * @secure: false for non-banked exceptions or for the nonsecure 503 * version of a banked exception, true for the secure version of a banked 504 * exception. 505 * 506 * Marks the specified exception as not pending. Note that we will assert() 507 * if @secure is true and @irq does not specify one of the fixed set 508 * of architecturally banked exceptions. 509 */ 510 static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure) 511 { 512 NVICState *s = (NVICState *)opaque; 513 VecInfo *vec; 514 515 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); 516 517 if (secure) { 518 assert(exc_is_banked(irq)); 519 vec = &s->sec_vectors[irq]; 520 } else { 521 vec = &s->vectors[irq]; 522 } 523 trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio); 524 if (vec->pending) { 525 vec->pending = 0; 526 nvic_irq_update(s); 527 } 528 } 529 530 static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure, 531 bool derived) 532 { 533 /* Pend an exception, including possibly escalating it to HardFault. 534 * 535 * This function handles both "normal" pending of interrupts and 536 * exceptions, and also derived exceptions (ones which occur as 537 * a result of trying to take some other exception). 538 * 539 * If derived == true, the caller guarantees that we are part way through 540 * trying to take an exception (but have not yet called 541 * armv7m_nvic_acknowledge_irq() to make it active), and so: 542 * - s->vectpending is the "original exception" we were trying to take 543 * - irq is the "derived exception" 544 * - nvic_exec_prio(s) gives the priority before exception entry 545 * Here we handle the prioritization logic which the pseudocode puts 546 * in the DerivedLateArrival() function. 547 */ 548 549 NVICState *s = (NVICState *)opaque; 550 bool banked = exc_is_banked(irq); 551 VecInfo *vec; 552 bool targets_secure; 553 554 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); 555 assert(!secure || banked); 556 557 vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq]; 558 559 targets_secure = banked ? secure : exc_targets_secure(s, irq); 560 561 trace_nvic_set_pending(irq, secure, targets_secure, 562 derived, vec->enabled, vec->prio); 563 564 if (derived) { 565 /* Derived exceptions are always synchronous. */ 566 assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV); 567 568 if (irq == ARMV7M_EXCP_DEBUG && 569 exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) { 570 /* DebugMonitorFault, but its priority is lower than the 571 * preempted exception priority: just ignore it. 572 */ 573 return; 574 } 575 576 if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) { 577 /* If this is a terminal exception (one which means we cannot 578 * take the original exception, like a failure to read its 579 * vector table entry), then we must take the derived exception. 580 * If the derived exception can't take priority over the 581 * original exception, then we go into Lockup. 582 * 583 * For QEMU, we rely on the fact that a derived exception is 584 * terminal if and only if it's reported to us as HardFault, 585 * which saves having to have an extra argument is_terminal 586 * that we'd only use in one place. 587 */ 588 cpu_abort(&s->cpu->parent_obj, 589 "Lockup: can't take terminal derived exception " 590 "(original exception priority %d)\n", 591 s->vectpending_prio); 592 } 593 /* We now continue with the same code as for a normal pending 594 * exception, which will cause us to pend the derived exception. 595 * We'll then take either the original or the derived exception 596 * based on which is higher priority by the usual mechanism 597 * for selecting the highest priority pending interrupt. 598 */ 599 } 600 601 if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) { 602 /* If a synchronous exception is pending then it may be 603 * escalated to HardFault if: 604 * * it is equal or lower priority to current execution 605 * * it is disabled 606 * (ie we need to take it immediately but we can't do so). 607 * Asynchronous exceptions (and interrupts) simply remain pending. 608 * 609 * For QEMU, we don't have any imprecise (asynchronous) faults, 610 * so we can assume that PREFETCH_ABORT and DATA_ABORT are always 611 * synchronous. 612 * Debug exceptions are awkward because only Debug exceptions 613 * resulting from the BKPT instruction should be escalated, 614 * but we don't currently implement any Debug exceptions other 615 * than those that result from BKPT, so we treat all debug exceptions 616 * as needing escalation. 617 * 618 * This all means we can identify whether to escalate based only on 619 * the exception number and don't (yet) need the caller to explicitly 620 * tell us whether this exception is synchronous or not. 621 */ 622 int running = nvic_exec_prio(s); 623 bool escalate = false; 624 625 if (exc_group_prio(s, vec->prio, secure) >= running) { 626 trace_nvic_escalate_prio(irq, vec->prio, running); 627 escalate = true; 628 } else if (!vec->enabled) { 629 trace_nvic_escalate_disabled(irq); 630 escalate = true; 631 } 632 633 if (escalate) { 634 635 /* We need to escalate this exception to a synchronous HardFault. 636 * If BFHFNMINS is set then we escalate to the banked HF for 637 * the target security state of the original exception; otherwise 638 * we take a Secure HardFault. 639 */ 640 irq = ARMV7M_EXCP_HARD; 641 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) && 642 (targets_secure || 643 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) { 644 vec = &s->sec_vectors[irq]; 645 } else { 646 vec = &s->vectors[irq]; 647 } 648 if (running <= vec->prio) { 649 /* We want to escalate to HardFault but we can't take the 650 * synchronous HardFault at this point either. This is a 651 * Lockup condition due to a guest bug. We don't model 652 * Lockup, so report via cpu_abort() instead. 653 */ 654 cpu_abort(&s->cpu->parent_obj, 655 "Lockup: can't escalate %d to HardFault " 656 "(current priority %d)\n", irq, running); 657 } 658 659 /* HF may be banked but there is only one shared HFSR */ 660 s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK; 661 } 662 } 663 664 if (!vec->pending) { 665 vec->pending = 1; 666 nvic_irq_update(s); 667 } 668 } 669 670 void armv7m_nvic_set_pending(void *opaque, int irq, bool secure) 671 { 672 do_armv7m_nvic_set_pending(opaque, irq, secure, false); 673 } 674 675 void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure) 676 { 677 do_armv7m_nvic_set_pending(opaque, irq, secure, true); 678 } 679 680 void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure) 681 { 682 /* 683 * Pend an exception during lazy FP stacking. This differs 684 * from the usual exception pending because the logic for 685 * whether we should escalate depends on the saved context 686 * in the FPCCR register, not on the current state of the CPU/NVIC. 687 */ 688 NVICState *s = (NVICState *)opaque; 689 bool banked = exc_is_banked(irq); 690 VecInfo *vec; 691 bool targets_secure; 692 bool escalate = false; 693 /* 694 * We will only look at bits in fpccr if this is a banked exception 695 * (in which case 'secure' tells us whether it is the S or NS version). 696 * All the bits for the non-banked exceptions are in fpccr_s. 697 */ 698 uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S]; 699 uint32_t fpccr = s->cpu->env.v7m.fpccr[secure]; 700 701 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); 702 assert(!secure || banked); 703 704 vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq]; 705 706 targets_secure = banked ? secure : exc_targets_secure(s, irq); 707 708 switch (irq) { 709 case ARMV7M_EXCP_DEBUG: 710 if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) { 711 /* Ignore DebugMonitor exception */ 712 return; 713 } 714 break; 715 case ARMV7M_EXCP_MEM: 716 escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK); 717 break; 718 case ARMV7M_EXCP_USAGE: 719 escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK); 720 break; 721 case ARMV7M_EXCP_BUS: 722 escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK); 723 break; 724 case ARMV7M_EXCP_SECURE: 725 escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK); 726 break; 727 default: 728 g_assert_not_reached(); 729 } 730 731 if (escalate) { 732 /* 733 * Escalate to HardFault: faults that initially targeted Secure 734 * continue to do so, even if HF normally targets NonSecure. 735 */ 736 irq = ARMV7M_EXCP_HARD; 737 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) && 738 (targets_secure || 739 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) { 740 vec = &s->sec_vectors[irq]; 741 } else { 742 vec = &s->vectors[irq]; 743 } 744 } 745 746 if (!vec->enabled || 747 nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) { 748 if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) { 749 /* 750 * We want to escalate to HardFault but the context the 751 * FP state belongs to prevents the exception pre-empting. 752 */ 753 cpu_abort(&s->cpu->parent_obj, 754 "Lockup: can't escalate to HardFault during " 755 "lazy FP register stacking\n"); 756 } 757 } 758 759 if (escalate) { 760 s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK; 761 } 762 if (!vec->pending) { 763 vec->pending = 1; 764 /* 765 * We do not call nvic_irq_update(), because we know our caller 766 * is going to handle causing us to take the exception by 767 * raising EXCP_LAZYFP, so raising the IRQ line would be 768 * pointless extra work. We just need to recompute the 769 * priorities so that armv7m_nvic_can_take_pending_exception() 770 * returns the right answer. 771 */ 772 nvic_recompute_state(s); 773 } 774 } 775 776 /* Make pending IRQ active. */ 777 void armv7m_nvic_acknowledge_irq(void *opaque) 778 { 779 NVICState *s = (NVICState *)opaque; 780 CPUARMState *env = &s->cpu->env; 781 const int pending = s->vectpending; 782 const int running = nvic_exec_prio(s); 783 VecInfo *vec; 784 785 assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq); 786 787 if (s->vectpending_is_s_banked) { 788 vec = &s->sec_vectors[pending]; 789 } else { 790 vec = &s->vectors[pending]; 791 } 792 793 assert(vec->enabled); 794 assert(vec->pending); 795 796 assert(s->vectpending_prio < running); 797 798 trace_nvic_acknowledge_irq(pending, s->vectpending_prio); 799 800 vec->active = 1; 801 vec->pending = 0; 802 803 write_v7m_exception(env, s->vectpending); 804 805 nvic_irq_update(s); 806 } 807 808 void armv7m_nvic_get_pending_irq_info(void *opaque, 809 int *pirq, bool *ptargets_secure) 810 { 811 NVICState *s = (NVICState *)opaque; 812 const int pending = s->vectpending; 813 bool targets_secure; 814 815 assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq); 816 817 if (s->vectpending_is_s_banked) { 818 targets_secure = true; 819 } else { 820 targets_secure = !exc_is_banked(pending) && 821 exc_targets_secure(s, pending); 822 } 823 824 trace_nvic_get_pending_irq_info(pending, targets_secure); 825 826 *ptargets_secure = targets_secure; 827 *pirq = pending; 828 } 829 830 int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure) 831 { 832 NVICState *s = (NVICState *)opaque; 833 VecInfo *vec = NULL; 834 int ret = 0; 835 836 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); 837 838 trace_nvic_complete_irq(irq, secure); 839 840 if (secure && exc_is_banked(irq)) { 841 vec = &s->sec_vectors[irq]; 842 } else { 843 vec = &s->vectors[irq]; 844 } 845 846 /* 847 * Identify illegal exception return cases. We can't immediately 848 * return at this point because we still need to deactivate 849 * (either this exception or NMI/HardFault) first. 850 */ 851 if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) { 852 /* 853 * Return from a configurable exception targeting the opposite 854 * security state from the one we're trying to complete it for. 855 * Clear vec because it's not really the VecInfo for this 856 * (irq, secstate) so we mustn't deactivate it. 857 */ 858 ret = -1; 859 vec = NULL; 860 } else if (!vec->active) { 861 /* Return from an inactive interrupt */ 862 ret = -1; 863 } else { 864 /* Legal return, we will return the RETTOBASE bit value to the caller */ 865 ret = nvic_rettobase(s); 866 } 867 868 /* 869 * For negative priorities, v8M will forcibly deactivate the appropriate 870 * NMI or HardFault regardless of what interrupt we're being asked to 871 * deactivate (compare the DeActivate() pseudocode). This is a guard 872 * against software returning from NMI or HardFault with a corrupted 873 * IPSR and leaving the CPU in a negative-priority state. 874 * v7M does not do this, but simply deactivates the requested interrupt. 875 */ 876 if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { 877 switch (armv7m_nvic_raw_execution_priority(s)) { 878 case -1: 879 if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { 880 vec = &s->vectors[ARMV7M_EXCP_HARD]; 881 } else { 882 vec = &s->sec_vectors[ARMV7M_EXCP_HARD]; 883 } 884 break; 885 case -2: 886 vec = &s->vectors[ARMV7M_EXCP_NMI]; 887 break; 888 case -3: 889 vec = &s->sec_vectors[ARMV7M_EXCP_HARD]; 890 break; 891 default: 892 break; 893 } 894 } 895 896 if (!vec) { 897 return ret; 898 } 899 900 vec->active = 0; 901 if (vec->level) { 902 /* Re-pend the exception if it's still held high; only 903 * happens for extenal IRQs 904 */ 905 assert(irq >= NVIC_FIRST_IRQ); 906 vec->pending = 1; 907 } 908 909 nvic_irq_update(s); 910 911 return ret; 912 } 913 914 bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure) 915 { 916 /* 917 * Return whether an exception is "ready", i.e. it is enabled and is 918 * configured at a priority which would allow it to interrupt the 919 * current execution priority. 920 * 921 * irq and secure have the same semantics as for armv7m_nvic_set_pending(): 922 * for non-banked exceptions secure is always false; for banked exceptions 923 * it indicates which of the exceptions is required. 924 */ 925 NVICState *s = (NVICState *)opaque; 926 bool banked = exc_is_banked(irq); 927 VecInfo *vec; 928 int running = nvic_exec_prio(s); 929 930 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); 931 assert(!secure || banked); 932 933 /* 934 * HardFault is an odd special case: we always check against -1, 935 * even if we're secure and HardFault has priority -3; we never 936 * need to check for enabled state. 937 */ 938 if (irq == ARMV7M_EXCP_HARD) { 939 return running > -1; 940 } 941 942 vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq]; 943 944 return vec->enabled && 945 exc_group_prio(s, vec->prio, secure) < running; 946 } 947 948 /* callback when external interrupt line is changed */ 949 static void set_irq_level(void *opaque, int n, int level) 950 { 951 NVICState *s = opaque; 952 VecInfo *vec; 953 954 n += NVIC_FIRST_IRQ; 955 956 assert(n >= NVIC_FIRST_IRQ && n < s->num_irq); 957 958 trace_nvic_set_irq_level(n, level); 959 960 /* The pending status of an external interrupt is 961 * latched on rising edge and exception handler return. 962 * 963 * Pulsing the IRQ will always run the handler 964 * once, and the handler will re-run until the 965 * level is low when the handler completes. 966 */ 967 vec = &s->vectors[n]; 968 if (level != vec->level) { 969 vec->level = level; 970 if (level) { 971 armv7m_nvic_set_pending(s, n, false); 972 } 973 } 974 } 975 976 /* callback when external NMI line is changed */ 977 static void nvic_nmi_trigger(void *opaque, int n, int level) 978 { 979 NVICState *s = opaque; 980 981 trace_nvic_set_nmi_level(level); 982 983 /* 984 * The architecture doesn't specify whether NMI should share 985 * the normal-interrupt behaviour of being resampled on 986 * exception handler return. We choose not to, so just 987 * set NMI pending here and don't track the current level. 988 */ 989 if (level) { 990 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false); 991 } 992 } 993 994 static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) 995 { 996 ARMCPU *cpu = s->cpu; 997 uint32_t val; 998 999 switch (offset) { 1000 case 4: /* Interrupt Control Type. */ 1001 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1002 goto bad_offset; 1003 } 1004 return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1; 1005 case 0xc: /* CPPWR */ 1006 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1007 goto bad_offset; 1008 } 1009 /* We make the IMPDEF choice that nothing can ever go into a 1010 * non-retentive power state, which allows us to RAZ/WI this. 1011 */ 1012 return 0; 1013 case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */ 1014 { 1015 int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ; 1016 int i; 1017 1018 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1019 goto bad_offset; 1020 } 1021 if (!attrs.secure) { 1022 return 0; 1023 } 1024 val = 0; 1025 for (i = 0; i < 32 && startvec + i < s->num_irq; i++) { 1026 if (s->itns[startvec + i]) { 1027 val |= (1 << i); 1028 } 1029 } 1030 return val; 1031 } 1032 case 0xcfc: 1033 if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) { 1034 goto bad_offset; 1035 } 1036 return cpu->revidr; 1037 case 0xd00: /* CPUID Base. */ 1038 return cpu->midr; 1039 case 0xd04: /* Interrupt Control State (ICSR) */ 1040 /* VECTACTIVE */ 1041 val = cpu->env.v7m.exception; 1042 /* VECTPENDING */ 1043 val |= (s->vectpending & 0xff) << 12; 1044 /* ISRPENDING - set if any external IRQ is pending */ 1045 if (nvic_isrpending(s)) { 1046 val |= (1 << 22); 1047 } 1048 /* RETTOBASE - set if only one handler is active */ 1049 if (nvic_rettobase(s)) { 1050 val |= (1 << 11); 1051 } 1052 if (attrs.secure) { 1053 /* PENDSTSET */ 1054 if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) { 1055 val |= (1 << 26); 1056 } 1057 /* PENDSVSET */ 1058 if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) { 1059 val |= (1 << 28); 1060 } 1061 } else { 1062 /* PENDSTSET */ 1063 if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) { 1064 val |= (1 << 26); 1065 } 1066 /* PENDSVSET */ 1067 if (s->vectors[ARMV7M_EXCP_PENDSV].pending) { 1068 val |= (1 << 28); 1069 } 1070 } 1071 /* NMIPENDSET */ 1072 if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) 1073 && s->vectors[ARMV7M_EXCP_NMI].pending) { 1074 val |= (1 << 31); 1075 } 1076 /* ISRPREEMPT: RES0 when halting debug not implemented */ 1077 /* STTNS: RES0 for the Main Extension */ 1078 return val; 1079 case 0xd08: /* Vector Table Offset. */ 1080 return cpu->env.v7m.vecbase[attrs.secure]; 1081 case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */ 1082 val = 0xfa050000 | (s->prigroup[attrs.secure] << 8); 1083 if (attrs.secure) { 1084 /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */ 1085 val |= cpu->env.v7m.aircr; 1086 } else { 1087 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1088 /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If 1089 * security isn't supported then BFHFNMINS is RAO (and 1090 * the bit in env.v7m.aircr is always set). 1091 */ 1092 val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK; 1093 } 1094 } 1095 return val; 1096 case 0xd10: /* System Control. */ 1097 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1098 goto bad_offset; 1099 } 1100 return cpu->env.v7m.scr[attrs.secure]; 1101 case 0xd14: /* Configuration Control. */ 1102 /* 1103 * Non-banked bits: BFHFNMIGN (stored in the NS copy of the register) 1104 * and TRD (stored in the S copy of the register) 1105 */ 1106 val = cpu->env.v7m.ccr[attrs.secure]; 1107 val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK; 1108 /* BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0 */ 1109 if (!attrs.secure) { 1110 if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1111 val &= ~R_V7M_CCR_BFHFNMIGN_MASK; 1112 } 1113 } 1114 return val; 1115 case 0xd24: /* System Handler Control and State (SHCSR) */ 1116 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1117 goto bad_offset; 1118 } 1119 val = 0; 1120 if (attrs.secure) { 1121 if (s->sec_vectors[ARMV7M_EXCP_MEM].active) { 1122 val |= (1 << 0); 1123 } 1124 if (s->sec_vectors[ARMV7M_EXCP_HARD].active) { 1125 val |= (1 << 2); 1126 } 1127 if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) { 1128 val |= (1 << 3); 1129 } 1130 if (s->sec_vectors[ARMV7M_EXCP_SVC].active) { 1131 val |= (1 << 7); 1132 } 1133 if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) { 1134 val |= (1 << 10); 1135 } 1136 if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) { 1137 val |= (1 << 11); 1138 } 1139 if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) { 1140 val |= (1 << 12); 1141 } 1142 if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) { 1143 val |= (1 << 13); 1144 } 1145 if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) { 1146 val |= (1 << 15); 1147 } 1148 if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) { 1149 val |= (1 << 16); 1150 } 1151 if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) { 1152 val |= (1 << 18); 1153 } 1154 if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) { 1155 val |= (1 << 21); 1156 } 1157 /* SecureFault is not banked but is always RAZ/WI to NS */ 1158 if (s->vectors[ARMV7M_EXCP_SECURE].active) { 1159 val |= (1 << 4); 1160 } 1161 if (s->vectors[ARMV7M_EXCP_SECURE].enabled) { 1162 val |= (1 << 19); 1163 } 1164 if (s->vectors[ARMV7M_EXCP_SECURE].pending) { 1165 val |= (1 << 20); 1166 } 1167 } else { 1168 if (s->vectors[ARMV7M_EXCP_MEM].active) { 1169 val |= (1 << 0); 1170 } 1171 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1172 /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */ 1173 if (s->vectors[ARMV7M_EXCP_HARD].active) { 1174 val |= (1 << 2); 1175 } 1176 if (s->vectors[ARMV7M_EXCP_HARD].pending) { 1177 val |= (1 << 21); 1178 } 1179 } 1180 if (s->vectors[ARMV7M_EXCP_USAGE].active) { 1181 val |= (1 << 3); 1182 } 1183 if (s->vectors[ARMV7M_EXCP_SVC].active) { 1184 val |= (1 << 7); 1185 } 1186 if (s->vectors[ARMV7M_EXCP_PENDSV].active) { 1187 val |= (1 << 10); 1188 } 1189 if (s->vectors[ARMV7M_EXCP_SYSTICK].active) { 1190 val |= (1 << 11); 1191 } 1192 if (s->vectors[ARMV7M_EXCP_USAGE].pending) { 1193 val |= (1 << 12); 1194 } 1195 if (s->vectors[ARMV7M_EXCP_MEM].pending) { 1196 val |= (1 << 13); 1197 } 1198 if (s->vectors[ARMV7M_EXCP_SVC].pending) { 1199 val |= (1 << 15); 1200 } 1201 if (s->vectors[ARMV7M_EXCP_MEM].enabled) { 1202 val |= (1 << 16); 1203 } 1204 if (s->vectors[ARMV7M_EXCP_USAGE].enabled) { 1205 val |= (1 << 18); 1206 } 1207 } 1208 if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1209 if (s->vectors[ARMV7M_EXCP_BUS].active) { 1210 val |= (1 << 1); 1211 } 1212 if (s->vectors[ARMV7M_EXCP_BUS].pending) { 1213 val |= (1 << 14); 1214 } 1215 if (s->vectors[ARMV7M_EXCP_BUS].enabled) { 1216 val |= (1 << 17); 1217 } 1218 if (arm_feature(&cpu->env, ARM_FEATURE_V8) && 1219 s->vectors[ARMV7M_EXCP_NMI].active) { 1220 /* NMIACT is not present in v7M */ 1221 val |= (1 << 5); 1222 } 1223 } 1224 1225 /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */ 1226 if (s->vectors[ARMV7M_EXCP_DEBUG].active) { 1227 val |= (1 << 8); 1228 } 1229 return val; 1230 case 0xd2c: /* Hard Fault Status. */ 1231 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1232 goto bad_offset; 1233 } 1234 return cpu->env.v7m.hfsr; 1235 case 0xd30: /* Debug Fault Status. */ 1236 return cpu->env.v7m.dfsr; 1237 case 0xd34: /* MMFAR MemManage Fault Address */ 1238 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1239 goto bad_offset; 1240 } 1241 return cpu->env.v7m.mmfar[attrs.secure]; 1242 case 0xd38: /* Bus Fault Address. */ 1243 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1244 goto bad_offset; 1245 } 1246 if (!attrs.secure && 1247 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1248 return 0; 1249 } 1250 return cpu->env.v7m.bfar; 1251 case 0xd3c: /* Aux Fault Status. */ 1252 /* TODO: Implement fault status registers. */ 1253 qemu_log_mask(LOG_UNIMP, 1254 "Aux Fault status registers unimplemented\n"); 1255 return 0; 1256 case 0xd40: /* PFR0. */ 1257 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1258 goto bad_offset; 1259 } 1260 return cpu->isar.id_pfr0; 1261 case 0xd44: /* PFR1. */ 1262 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1263 goto bad_offset; 1264 } 1265 return cpu->isar.id_pfr1; 1266 case 0xd48: /* DFR0. */ 1267 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1268 goto bad_offset; 1269 } 1270 return cpu->isar.id_dfr0; 1271 case 0xd4c: /* AFR0. */ 1272 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1273 goto bad_offset; 1274 } 1275 return cpu->id_afr0; 1276 case 0xd50: /* MMFR0. */ 1277 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1278 goto bad_offset; 1279 } 1280 return cpu->isar.id_mmfr0; 1281 case 0xd54: /* MMFR1. */ 1282 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1283 goto bad_offset; 1284 } 1285 return cpu->isar.id_mmfr1; 1286 case 0xd58: /* MMFR2. */ 1287 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1288 goto bad_offset; 1289 } 1290 return cpu->isar.id_mmfr2; 1291 case 0xd5c: /* MMFR3. */ 1292 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1293 goto bad_offset; 1294 } 1295 return cpu->isar.id_mmfr3; 1296 case 0xd60: /* ISAR0. */ 1297 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1298 goto bad_offset; 1299 } 1300 return cpu->isar.id_isar0; 1301 case 0xd64: /* ISAR1. */ 1302 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1303 goto bad_offset; 1304 } 1305 return cpu->isar.id_isar1; 1306 case 0xd68: /* ISAR2. */ 1307 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1308 goto bad_offset; 1309 } 1310 return cpu->isar.id_isar2; 1311 case 0xd6c: /* ISAR3. */ 1312 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1313 goto bad_offset; 1314 } 1315 return cpu->isar.id_isar3; 1316 case 0xd70: /* ISAR4. */ 1317 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1318 goto bad_offset; 1319 } 1320 return cpu->isar.id_isar4; 1321 case 0xd74: /* ISAR5. */ 1322 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1323 goto bad_offset; 1324 } 1325 return cpu->isar.id_isar5; 1326 case 0xd78: /* CLIDR */ 1327 return cpu->clidr; 1328 case 0xd7c: /* CTR */ 1329 return cpu->ctr; 1330 case 0xd80: /* CSSIDR */ 1331 { 1332 int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK; 1333 return cpu->ccsidr[idx]; 1334 } 1335 case 0xd84: /* CSSELR */ 1336 return cpu->env.v7m.csselr[attrs.secure]; 1337 case 0xd88: /* CPACR */ 1338 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { 1339 return 0; 1340 } 1341 return cpu->env.v7m.cpacr[attrs.secure]; 1342 case 0xd8c: /* NSACR */ 1343 if (!attrs.secure || !cpu_isar_feature(aa32_vfp_simd, cpu)) { 1344 return 0; 1345 } 1346 return cpu->env.v7m.nsacr; 1347 /* TODO: Implement debug registers. */ 1348 case 0xd90: /* MPU_TYPE */ 1349 /* Unified MPU; if the MPU is not present this value is zero */ 1350 return cpu->pmsav7_dregion << 8; 1351 case 0xd94: /* MPU_CTRL */ 1352 return cpu->env.v7m.mpu_ctrl[attrs.secure]; 1353 case 0xd98: /* MPU_RNR */ 1354 return cpu->env.pmsav7.rnr[attrs.secure]; 1355 case 0xd9c: /* MPU_RBAR */ 1356 case 0xda4: /* MPU_RBAR_A1 */ 1357 case 0xdac: /* MPU_RBAR_A2 */ 1358 case 0xdb4: /* MPU_RBAR_A3 */ 1359 { 1360 int region = cpu->env.pmsav7.rnr[attrs.secure]; 1361 1362 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1363 /* PMSAv8M handling of the aliases is different from v7M: 1364 * aliases A1, A2, A3 override the low two bits of the region 1365 * number in MPU_RNR, and there is no 'region' field in the 1366 * RBAR register. 1367 */ 1368 int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ 1369 if (aliasno) { 1370 region = deposit32(region, 0, 2, aliasno); 1371 } 1372 if (region >= cpu->pmsav7_dregion) { 1373 return 0; 1374 } 1375 return cpu->env.pmsav8.rbar[attrs.secure][region]; 1376 } 1377 1378 if (region >= cpu->pmsav7_dregion) { 1379 return 0; 1380 } 1381 return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf); 1382 } 1383 case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */ 1384 case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */ 1385 case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */ 1386 case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */ 1387 { 1388 int region = cpu->env.pmsav7.rnr[attrs.secure]; 1389 1390 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1391 /* PMSAv8M handling of the aliases is different from v7M: 1392 * aliases A1, A2, A3 override the low two bits of the region 1393 * number in MPU_RNR. 1394 */ 1395 int aliasno = (offset - 0xda0) / 8; /* 0..3 */ 1396 if (aliasno) { 1397 region = deposit32(region, 0, 2, aliasno); 1398 } 1399 if (region >= cpu->pmsav7_dregion) { 1400 return 0; 1401 } 1402 return cpu->env.pmsav8.rlar[attrs.secure][region]; 1403 } 1404 1405 if (region >= cpu->pmsav7_dregion) { 1406 return 0; 1407 } 1408 return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) | 1409 (cpu->env.pmsav7.drsr[region] & 0xffff); 1410 } 1411 case 0xdc0: /* MPU_MAIR0 */ 1412 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1413 goto bad_offset; 1414 } 1415 return cpu->env.pmsav8.mair0[attrs.secure]; 1416 case 0xdc4: /* MPU_MAIR1 */ 1417 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1418 goto bad_offset; 1419 } 1420 return cpu->env.pmsav8.mair1[attrs.secure]; 1421 case 0xdd0: /* SAU_CTRL */ 1422 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1423 goto bad_offset; 1424 } 1425 if (!attrs.secure) { 1426 return 0; 1427 } 1428 return cpu->env.sau.ctrl; 1429 case 0xdd4: /* SAU_TYPE */ 1430 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1431 goto bad_offset; 1432 } 1433 if (!attrs.secure) { 1434 return 0; 1435 } 1436 return cpu->sau_sregion; 1437 case 0xdd8: /* SAU_RNR */ 1438 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1439 goto bad_offset; 1440 } 1441 if (!attrs.secure) { 1442 return 0; 1443 } 1444 return cpu->env.sau.rnr; 1445 case 0xddc: /* SAU_RBAR */ 1446 { 1447 int region = cpu->env.sau.rnr; 1448 1449 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1450 goto bad_offset; 1451 } 1452 if (!attrs.secure) { 1453 return 0; 1454 } 1455 if (region >= cpu->sau_sregion) { 1456 return 0; 1457 } 1458 return cpu->env.sau.rbar[region]; 1459 } 1460 case 0xde0: /* SAU_RLAR */ 1461 { 1462 int region = cpu->env.sau.rnr; 1463 1464 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1465 goto bad_offset; 1466 } 1467 if (!attrs.secure) { 1468 return 0; 1469 } 1470 if (region >= cpu->sau_sregion) { 1471 return 0; 1472 } 1473 return cpu->env.sau.rlar[region]; 1474 } 1475 case 0xde4: /* SFSR */ 1476 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1477 goto bad_offset; 1478 } 1479 if (!attrs.secure) { 1480 return 0; 1481 } 1482 return cpu->env.v7m.sfsr; 1483 case 0xde8: /* SFAR */ 1484 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1485 goto bad_offset; 1486 } 1487 if (!attrs.secure) { 1488 return 0; 1489 } 1490 return cpu->env.v7m.sfar; 1491 case 0xf04: /* RFSR */ 1492 if (!cpu_isar_feature(aa32_ras, cpu)) { 1493 goto bad_offset; 1494 } 1495 /* We provide minimal-RAS only: RFSR is RAZ/WI */ 1496 return 0; 1497 case 0xf34: /* FPCCR */ 1498 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { 1499 return 0; 1500 } 1501 if (attrs.secure) { 1502 return cpu->env.v7m.fpccr[M_REG_S]; 1503 } else { 1504 /* 1505 * NS can read LSPEN, CLRONRET and MONRDY. It can read 1506 * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0; 1507 * other non-banked bits RAZ. 1508 * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set. 1509 */ 1510 uint32_t value = cpu->env.v7m.fpccr[M_REG_S]; 1511 uint32_t mask = R_V7M_FPCCR_LSPEN_MASK | 1512 R_V7M_FPCCR_CLRONRET_MASK | 1513 R_V7M_FPCCR_MONRDY_MASK; 1514 1515 if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { 1516 mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK; 1517 } 1518 1519 value &= mask; 1520 1521 value |= cpu->env.v7m.fpccr[M_REG_NS]; 1522 return value; 1523 } 1524 case 0xf38: /* FPCAR */ 1525 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { 1526 return 0; 1527 } 1528 return cpu->env.v7m.fpcar[attrs.secure]; 1529 case 0xf3c: /* FPDSCR */ 1530 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { 1531 return 0; 1532 } 1533 return cpu->env.v7m.fpdscr[attrs.secure]; 1534 case 0xf40: /* MVFR0 */ 1535 return cpu->isar.mvfr0; 1536 case 0xf44: /* MVFR1 */ 1537 return cpu->isar.mvfr1; 1538 case 0xf48: /* MVFR2 */ 1539 return cpu->isar.mvfr2; 1540 default: 1541 bad_offset: 1542 qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); 1543 return 0; 1544 } 1545 } 1546 1547 static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, 1548 MemTxAttrs attrs) 1549 { 1550 ARMCPU *cpu = s->cpu; 1551 1552 switch (offset) { 1553 case 0xc: /* CPPWR */ 1554 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1555 goto bad_offset; 1556 } 1557 /* Make the IMPDEF choice to RAZ/WI this. */ 1558 break; 1559 case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */ 1560 { 1561 int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ; 1562 int i; 1563 1564 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1565 goto bad_offset; 1566 } 1567 if (!attrs.secure) { 1568 break; 1569 } 1570 for (i = 0; i < 32 && startvec + i < s->num_irq; i++) { 1571 s->itns[startvec + i] = (value >> i) & 1; 1572 } 1573 nvic_irq_update(s); 1574 break; 1575 } 1576 case 0xd04: /* Interrupt Control State (ICSR) */ 1577 if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { 1578 if (value & (1 << 31)) { 1579 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false); 1580 } else if (value & (1 << 30) && 1581 arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1582 /* PENDNMICLR didn't exist in v7M */ 1583 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false); 1584 } 1585 } 1586 if (value & (1 << 28)) { 1587 armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure); 1588 } else if (value & (1 << 27)) { 1589 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure); 1590 } 1591 if (value & (1 << 26)) { 1592 armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure); 1593 } else if (value & (1 << 25)) { 1594 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure); 1595 } 1596 break; 1597 case 0xd08: /* Vector Table Offset. */ 1598 cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80; 1599 break; 1600 case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */ 1601 if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) { 1602 if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) { 1603 if (attrs.secure || 1604 !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) { 1605 signal_sysresetreq(s); 1606 } 1607 } 1608 if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) { 1609 qemu_log_mask(LOG_GUEST_ERROR, 1610 "Setting VECTCLRACTIVE when not in DEBUG mode " 1611 "is UNPREDICTABLE\n"); 1612 } 1613 if (value & R_V7M_AIRCR_VECTRESET_MASK) { 1614 /* NB: this bit is RES0 in v8M */ 1615 qemu_log_mask(LOG_GUEST_ERROR, 1616 "Setting VECTRESET when not in DEBUG mode " 1617 "is UNPREDICTABLE\n"); 1618 } 1619 if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1620 s->prigroup[attrs.secure] = 1621 extract32(value, 1622 R_V7M_AIRCR_PRIGROUP_SHIFT, 1623 R_V7M_AIRCR_PRIGROUP_LENGTH); 1624 } 1625 /* AIRCR.IESB is RAZ/WI because we implement only minimal RAS */ 1626 if (attrs.secure) { 1627 /* These bits are only writable by secure */ 1628 cpu->env.v7m.aircr = value & 1629 (R_V7M_AIRCR_SYSRESETREQS_MASK | 1630 R_V7M_AIRCR_BFHFNMINS_MASK | 1631 R_V7M_AIRCR_PRIS_MASK); 1632 /* BFHFNMINS changes the priority of Secure HardFault, and 1633 * allows a pending Non-secure HardFault to preempt (which 1634 * we implement by marking it enabled). 1635 */ 1636 if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { 1637 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3; 1638 s->vectors[ARMV7M_EXCP_HARD].enabled = 1; 1639 } else { 1640 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1; 1641 s->vectors[ARMV7M_EXCP_HARD].enabled = 0; 1642 } 1643 } 1644 nvic_irq_update(s); 1645 } 1646 break; 1647 case 0xd10: /* System Control. */ 1648 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1649 goto bad_offset; 1650 } 1651 /* We don't implement deep-sleep so these bits are RAZ/WI. 1652 * The other bits in the register are banked. 1653 * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which 1654 * is architecturally permitted. 1655 */ 1656 value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK); 1657 cpu->env.v7m.scr[attrs.secure] = value; 1658 break; 1659 case 0xd14: /* Configuration Control. */ 1660 { 1661 uint32_t mask; 1662 1663 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1664 goto bad_offset; 1665 } 1666 1667 /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */ 1668 mask = R_V7M_CCR_STKALIGN_MASK | 1669 R_V7M_CCR_BFHFNMIGN_MASK | 1670 R_V7M_CCR_DIV_0_TRP_MASK | 1671 R_V7M_CCR_UNALIGN_TRP_MASK | 1672 R_V7M_CCR_USERSETMPEND_MASK | 1673 R_V7M_CCR_NONBASETHRDENA_MASK; 1674 if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) { 1675 /* TRD is always RAZ/WI from NS */ 1676 mask |= R_V7M_CCR_TRD_MASK; 1677 } 1678 value &= mask; 1679 1680 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1681 /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */ 1682 value |= R_V7M_CCR_NONBASETHRDENA_MASK 1683 | R_V7M_CCR_STKALIGN_MASK; 1684 } 1685 if (attrs.secure) { 1686 /* the BFHFNMIGN bit is not banked; keep that in the NS copy */ 1687 cpu->env.v7m.ccr[M_REG_NS] = 1688 (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK) 1689 | (value & R_V7M_CCR_BFHFNMIGN_MASK); 1690 value &= ~R_V7M_CCR_BFHFNMIGN_MASK; 1691 } else { 1692 /* 1693 * BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0, so 1694 * preserve the state currently in the NS element of the array 1695 */ 1696 if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1697 value &= ~R_V7M_CCR_BFHFNMIGN_MASK; 1698 value |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK; 1699 } 1700 } 1701 1702 cpu->env.v7m.ccr[attrs.secure] = value; 1703 break; 1704 } 1705 case 0xd24: /* System Handler Control and State (SHCSR) */ 1706 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1707 goto bad_offset; 1708 } 1709 if (attrs.secure) { 1710 s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0; 1711 /* Secure HardFault active bit cannot be written */ 1712 s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0; 1713 s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0; 1714 s->sec_vectors[ARMV7M_EXCP_PENDSV].active = 1715 (value & (1 << 10)) != 0; 1716 s->sec_vectors[ARMV7M_EXCP_SYSTICK].active = 1717 (value & (1 << 11)) != 0; 1718 s->sec_vectors[ARMV7M_EXCP_USAGE].pending = 1719 (value & (1 << 12)) != 0; 1720 s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0; 1721 s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0; 1722 s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; 1723 s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; 1724 s->sec_vectors[ARMV7M_EXCP_USAGE].enabled = 1725 (value & (1 << 18)) != 0; 1726 s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0; 1727 /* SecureFault not banked, but RAZ/WI to NS */ 1728 s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0; 1729 s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0; 1730 s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0; 1731 } else { 1732 s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0; 1733 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1734 /* HARDFAULTPENDED is not present in v7M */ 1735 s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0; 1736 } 1737 s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0; 1738 s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0; 1739 s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0; 1740 s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0; 1741 s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0; 1742 s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0; 1743 s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0; 1744 s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; 1745 s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0; 1746 } 1747 if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1748 s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0; 1749 s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0; 1750 s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; 1751 } 1752 /* NMIACT can only be written if the write is of a zero, with 1753 * BFHFNMINS 1, and by the CPU in secure state via the NS alias. 1754 */ 1755 if (!attrs.secure && cpu->env.v7m.secure && 1756 (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) && 1757 (value & (1 << 5)) == 0) { 1758 s->vectors[ARMV7M_EXCP_NMI].active = 0; 1759 } 1760 /* HARDFAULTACT can only be written if the write is of a zero 1761 * to the non-secure HardFault state by the CPU in secure state. 1762 * The only case where we can be targeting the non-secure HF state 1763 * when in secure state is if this is a write via the NS alias 1764 * and BFHFNMINS is 1. 1765 */ 1766 if (!attrs.secure && cpu->env.v7m.secure && 1767 (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) && 1768 (value & (1 << 2)) == 0) { 1769 s->vectors[ARMV7M_EXCP_HARD].active = 0; 1770 } 1771 1772 /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */ 1773 s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0; 1774 nvic_irq_update(s); 1775 break; 1776 case 0xd2c: /* Hard Fault Status. */ 1777 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1778 goto bad_offset; 1779 } 1780 cpu->env.v7m.hfsr &= ~value; /* W1C */ 1781 break; 1782 case 0xd30: /* Debug Fault Status. */ 1783 cpu->env.v7m.dfsr &= ~value; /* W1C */ 1784 break; 1785 case 0xd34: /* Mem Manage Address. */ 1786 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1787 goto bad_offset; 1788 } 1789 cpu->env.v7m.mmfar[attrs.secure] = value; 1790 return; 1791 case 0xd38: /* Bus Fault Address. */ 1792 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1793 goto bad_offset; 1794 } 1795 if (!attrs.secure && 1796 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1797 return; 1798 } 1799 cpu->env.v7m.bfar = value; 1800 return; 1801 case 0xd3c: /* Aux Fault Status. */ 1802 qemu_log_mask(LOG_UNIMP, 1803 "NVIC: Aux fault status registers unimplemented\n"); 1804 break; 1805 case 0xd84: /* CSSELR */ 1806 if (!arm_v7m_csselr_razwi(cpu)) { 1807 cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK; 1808 } 1809 break; 1810 case 0xd88: /* CPACR */ 1811 if (cpu_isar_feature(aa32_vfp_simd, cpu)) { 1812 /* We implement only the Floating Point extension's CP10/CP11 */ 1813 cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20); 1814 } 1815 break; 1816 case 0xd8c: /* NSACR */ 1817 if (attrs.secure && cpu_isar_feature(aa32_vfp_simd, cpu)) { 1818 /* We implement only the Floating Point extension's CP10/CP11 */ 1819 cpu->env.v7m.nsacr = value & (3 << 10); 1820 } 1821 break; 1822 case 0xd90: /* MPU_TYPE */ 1823 return; /* RO */ 1824 case 0xd94: /* MPU_CTRL */ 1825 if ((value & 1826 (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK)) 1827 == R_V7M_MPU_CTRL_HFNMIENA_MASK) { 1828 qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is " 1829 "UNPREDICTABLE\n"); 1830 } 1831 cpu->env.v7m.mpu_ctrl[attrs.secure] 1832 = value & (R_V7M_MPU_CTRL_ENABLE_MASK | 1833 R_V7M_MPU_CTRL_HFNMIENA_MASK | 1834 R_V7M_MPU_CTRL_PRIVDEFENA_MASK); 1835 tlb_flush(CPU(cpu)); 1836 break; 1837 case 0xd98: /* MPU_RNR */ 1838 if (value >= cpu->pmsav7_dregion) { 1839 qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %" 1840 PRIu32 "/%" PRIu32 "\n", 1841 value, cpu->pmsav7_dregion); 1842 } else { 1843 cpu->env.pmsav7.rnr[attrs.secure] = value; 1844 } 1845 break; 1846 case 0xd9c: /* MPU_RBAR */ 1847 case 0xda4: /* MPU_RBAR_A1 */ 1848 case 0xdac: /* MPU_RBAR_A2 */ 1849 case 0xdb4: /* MPU_RBAR_A3 */ 1850 { 1851 int region; 1852 1853 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1854 /* PMSAv8M handling of the aliases is different from v7M: 1855 * aliases A1, A2, A3 override the low two bits of the region 1856 * number in MPU_RNR, and there is no 'region' field in the 1857 * RBAR register. 1858 */ 1859 int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ 1860 1861 region = cpu->env.pmsav7.rnr[attrs.secure]; 1862 if (aliasno) { 1863 region = deposit32(region, 0, 2, aliasno); 1864 } 1865 if (region >= cpu->pmsav7_dregion) { 1866 return; 1867 } 1868 cpu->env.pmsav8.rbar[attrs.secure][region] = value; 1869 tlb_flush(CPU(cpu)); 1870 return; 1871 } 1872 1873 if (value & (1 << 4)) { 1874 /* VALID bit means use the region number specified in this 1875 * value and also update MPU_RNR.REGION with that value. 1876 */ 1877 region = extract32(value, 0, 4); 1878 if (region >= cpu->pmsav7_dregion) { 1879 qemu_log_mask(LOG_GUEST_ERROR, 1880 "MPU region out of range %u/%" PRIu32 "\n", 1881 region, cpu->pmsav7_dregion); 1882 return; 1883 } 1884 cpu->env.pmsav7.rnr[attrs.secure] = region; 1885 } else { 1886 region = cpu->env.pmsav7.rnr[attrs.secure]; 1887 } 1888 1889 if (region >= cpu->pmsav7_dregion) { 1890 return; 1891 } 1892 1893 cpu->env.pmsav7.drbar[region] = value & ~0x1f; 1894 tlb_flush(CPU(cpu)); 1895 break; 1896 } 1897 case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */ 1898 case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */ 1899 case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */ 1900 case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */ 1901 { 1902 int region = cpu->env.pmsav7.rnr[attrs.secure]; 1903 1904 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1905 /* PMSAv8M handling of the aliases is different from v7M: 1906 * aliases A1, A2, A3 override the low two bits of the region 1907 * number in MPU_RNR. 1908 */ 1909 int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ 1910 1911 region = cpu->env.pmsav7.rnr[attrs.secure]; 1912 if (aliasno) { 1913 region = deposit32(region, 0, 2, aliasno); 1914 } 1915 if (region >= cpu->pmsav7_dregion) { 1916 return; 1917 } 1918 cpu->env.pmsav8.rlar[attrs.secure][region] = value; 1919 tlb_flush(CPU(cpu)); 1920 return; 1921 } 1922 1923 if (region >= cpu->pmsav7_dregion) { 1924 return; 1925 } 1926 1927 cpu->env.pmsav7.drsr[region] = value & 0xff3f; 1928 cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f; 1929 tlb_flush(CPU(cpu)); 1930 break; 1931 } 1932 case 0xdc0: /* MPU_MAIR0 */ 1933 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1934 goto bad_offset; 1935 } 1936 if (cpu->pmsav7_dregion) { 1937 /* Register is RES0 if no MPU regions are implemented */ 1938 cpu->env.pmsav8.mair0[attrs.secure] = value; 1939 } 1940 /* We don't need to do anything else because memory attributes 1941 * only affect cacheability, and we don't implement caching. 1942 */ 1943 break; 1944 case 0xdc4: /* MPU_MAIR1 */ 1945 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1946 goto bad_offset; 1947 } 1948 if (cpu->pmsav7_dregion) { 1949 /* Register is RES0 if no MPU regions are implemented */ 1950 cpu->env.pmsav8.mair1[attrs.secure] = value; 1951 } 1952 /* We don't need to do anything else because memory attributes 1953 * only affect cacheability, and we don't implement caching. 1954 */ 1955 break; 1956 case 0xdd0: /* SAU_CTRL */ 1957 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1958 goto bad_offset; 1959 } 1960 if (!attrs.secure) { 1961 return; 1962 } 1963 cpu->env.sau.ctrl = value & 3; 1964 break; 1965 case 0xdd4: /* SAU_TYPE */ 1966 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1967 goto bad_offset; 1968 } 1969 break; 1970 case 0xdd8: /* SAU_RNR */ 1971 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1972 goto bad_offset; 1973 } 1974 if (!attrs.secure) { 1975 return; 1976 } 1977 if (value >= cpu->sau_sregion) { 1978 qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %" 1979 PRIu32 "/%" PRIu32 "\n", 1980 value, cpu->sau_sregion); 1981 } else { 1982 cpu->env.sau.rnr = value; 1983 } 1984 break; 1985 case 0xddc: /* SAU_RBAR */ 1986 { 1987 int region = cpu->env.sau.rnr; 1988 1989 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1990 goto bad_offset; 1991 } 1992 if (!attrs.secure) { 1993 return; 1994 } 1995 if (region >= cpu->sau_sregion) { 1996 return; 1997 } 1998 cpu->env.sau.rbar[region] = value & ~0x1f; 1999 tlb_flush(CPU(cpu)); 2000 break; 2001 } 2002 case 0xde0: /* SAU_RLAR */ 2003 { 2004 int region = cpu->env.sau.rnr; 2005 2006 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 2007 goto bad_offset; 2008 } 2009 if (!attrs.secure) { 2010 return; 2011 } 2012 if (region >= cpu->sau_sregion) { 2013 return; 2014 } 2015 cpu->env.sau.rlar[region] = value & ~0x1c; 2016 tlb_flush(CPU(cpu)); 2017 break; 2018 } 2019 case 0xde4: /* SFSR */ 2020 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 2021 goto bad_offset; 2022 } 2023 if (!attrs.secure) { 2024 return; 2025 } 2026 cpu->env.v7m.sfsr &= ~value; /* W1C */ 2027 break; 2028 case 0xde8: /* SFAR */ 2029 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 2030 goto bad_offset; 2031 } 2032 if (!attrs.secure) { 2033 return; 2034 } 2035 cpu->env.v7m.sfsr = value; 2036 break; 2037 case 0xf00: /* Software Triggered Interrupt Register */ 2038 { 2039 int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ; 2040 2041 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 2042 goto bad_offset; 2043 } 2044 2045 if (excnum < s->num_irq) { 2046 armv7m_nvic_set_pending(s, excnum, false); 2047 } 2048 break; 2049 } 2050 case 0xf04: /* RFSR */ 2051 if (!cpu_isar_feature(aa32_ras, cpu)) { 2052 goto bad_offset; 2053 } 2054 /* We provide minimal-RAS only: RFSR is RAZ/WI */ 2055 break; 2056 case 0xf34: /* FPCCR */ 2057 if (cpu_isar_feature(aa32_vfp_simd, cpu)) { 2058 /* Not all bits here are banked. */ 2059 uint32_t fpccr_s; 2060 2061 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 2062 /* Don't allow setting of bits not present in v7M */ 2063 value &= (R_V7M_FPCCR_LSPACT_MASK | 2064 R_V7M_FPCCR_USER_MASK | 2065 R_V7M_FPCCR_THREAD_MASK | 2066 R_V7M_FPCCR_HFRDY_MASK | 2067 R_V7M_FPCCR_MMRDY_MASK | 2068 R_V7M_FPCCR_BFRDY_MASK | 2069 R_V7M_FPCCR_MONRDY_MASK | 2070 R_V7M_FPCCR_LSPEN_MASK | 2071 R_V7M_FPCCR_ASPEN_MASK); 2072 } 2073 value &= ~R_V7M_FPCCR_RES0_MASK; 2074 2075 if (!attrs.secure) { 2076 /* Some non-banked bits are configurably writable by NS */ 2077 fpccr_s = cpu->env.v7m.fpccr[M_REG_S]; 2078 if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) { 2079 uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN); 2080 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen); 2081 } 2082 if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) { 2083 uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET); 2084 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor); 2085 } 2086 if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 2087 uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY); 2088 uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY); 2089 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy); 2090 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy); 2091 } 2092 /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */ 2093 { 2094 uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY); 2095 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy); 2096 } 2097 2098 /* 2099 * All other non-banked bits are RAZ/WI from NS; write 2100 * just the banked bits to fpccr[M_REG_NS]. 2101 */ 2102 value &= R_V7M_FPCCR_BANKED_MASK; 2103 cpu->env.v7m.fpccr[M_REG_NS] = value; 2104 } else { 2105 fpccr_s = value; 2106 } 2107 cpu->env.v7m.fpccr[M_REG_S] = fpccr_s; 2108 } 2109 break; 2110 case 0xf38: /* FPCAR */ 2111 if (cpu_isar_feature(aa32_vfp_simd, cpu)) { 2112 value &= ~7; 2113 cpu->env.v7m.fpcar[attrs.secure] = value; 2114 } 2115 break; 2116 case 0xf3c: /* FPDSCR */ 2117 if (cpu_isar_feature(aa32_vfp_simd, cpu)) { 2118 uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK; 2119 if (cpu_isar_feature(any_fp16, cpu)) { 2120 mask |= FPCR_FZ16; 2121 } 2122 value &= mask; 2123 if (cpu_isar_feature(aa32_lob, cpu)) { 2124 value |= 4 << FPCR_LTPSIZE_SHIFT; 2125 } 2126 cpu->env.v7m.fpdscr[attrs.secure] = value; 2127 } 2128 break; 2129 case 0xf50: /* ICIALLU */ 2130 case 0xf58: /* ICIMVAU */ 2131 case 0xf5c: /* DCIMVAC */ 2132 case 0xf60: /* DCISW */ 2133 case 0xf64: /* DCCMVAU */ 2134 case 0xf68: /* DCCMVAC */ 2135 case 0xf6c: /* DCCSW */ 2136 case 0xf70: /* DCCIMVAC */ 2137 case 0xf74: /* DCCISW */ 2138 case 0xf78: /* BPIALL */ 2139 /* Cache and branch predictor maintenance: for QEMU these always NOP */ 2140 break; 2141 default: 2142 bad_offset: 2143 qemu_log_mask(LOG_GUEST_ERROR, 2144 "NVIC: Bad write offset 0x%x\n", offset); 2145 } 2146 } 2147 2148 static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs) 2149 { 2150 /* Return true if unprivileged access to this register is permitted. */ 2151 switch (offset) { 2152 case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */ 2153 /* For access via STIR_NS it is the NS CCR.USERSETMPEND that 2154 * controls access even though the CPU is in Secure state (I_QDKX). 2155 */ 2156 return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK; 2157 default: 2158 /* All other user accesses cause a BusFault unconditionally */ 2159 return false; 2160 } 2161 } 2162 2163 static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs) 2164 { 2165 /* Behaviour for the SHPR register field for this exception: 2166 * return M_REG_NS to use the nonsecure vector (including for 2167 * non-banked exceptions), M_REG_S for the secure version of 2168 * a banked exception, and -1 if this field should RAZ/WI. 2169 */ 2170 switch (exc) { 2171 case ARMV7M_EXCP_MEM: 2172 case ARMV7M_EXCP_USAGE: 2173 case ARMV7M_EXCP_SVC: 2174 case ARMV7M_EXCP_PENDSV: 2175 case ARMV7M_EXCP_SYSTICK: 2176 /* Banked exceptions */ 2177 return attrs.secure; 2178 case ARMV7M_EXCP_BUS: 2179 /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */ 2180 if (!attrs.secure && 2181 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 2182 return -1; 2183 } 2184 return M_REG_NS; 2185 case ARMV7M_EXCP_SECURE: 2186 /* Not banked, RAZ/WI from nonsecure */ 2187 if (!attrs.secure) { 2188 return -1; 2189 } 2190 return M_REG_NS; 2191 case ARMV7M_EXCP_DEBUG: 2192 /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */ 2193 return M_REG_NS; 2194 case 8 ... 10: 2195 case 13: 2196 /* RES0 */ 2197 return -1; 2198 default: 2199 /* Not reachable due to decode of SHPR register addresses */ 2200 g_assert_not_reached(); 2201 } 2202 } 2203 2204 static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr, 2205 uint64_t *data, unsigned size, 2206 MemTxAttrs attrs) 2207 { 2208 NVICState *s = (NVICState *)opaque; 2209 uint32_t offset = addr; 2210 unsigned i, startvec, end; 2211 uint32_t val; 2212 2213 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) { 2214 /* Generate BusFault for unprivileged accesses */ 2215 return MEMTX_ERROR; 2216 } 2217 2218 switch (offset) { 2219 /* reads of set and clear both return the status */ 2220 case 0x100 ... 0x13f: /* NVIC Set enable */ 2221 offset += 0x80; 2222 /* fall through */ 2223 case 0x180 ... 0x1bf: /* NVIC Clear enable */ 2224 val = 0; 2225 startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */ 2226 2227 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { 2228 if (s->vectors[startvec + i].enabled && 2229 (attrs.secure || s->itns[startvec + i])) { 2230 val |= (1 << i); 2231 } 2232 } 2233 break; 2234 case 0x200 ... 0x23f: /* NVIC Set pend */ 2235 offset += 0x80; 2236 /* fall through */ 2237 case 0x280 ... 0x2bf: /* NVIC Clear pend */ 2238 val = 0; 2239 startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */ 2240 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { 2241 if (s->vectors[startvec + i].pending && 2242 (attrs.secure || s->itns[startvec + i])) { 2243 val |= (1 << i); 2244 } 2245 } 2246 break; 2247 case 0x300 ... 0x33f: /* NVIC Active */ 2248 val = 0; 2249 2250 if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) { 2251 break; 2252 } 2253 2254 startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */ 2255 2256 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { 2257 if (s->vectors[startvec + i].active && 2258 (attrs.secure || s->itns[startvec + i])) { 2259 val |= (1 << i); 2260 } 2261 } 2262 break; 2263 case 0x400 ... 0x5ef: /* NVIC Priority */ 2264 val = 0; 2265 startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */ 2266 2267 for (i = 0; i < size && startvec + i < s->num_irq; i++) { 2268 if (attrs.secure || s->itns[startvec + i]) { 2269 val |= s->vectors[startvec + i].prio << (8 * i); 2270 } 2271 } 2272 break; 2273 case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */ 2274 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { 2275 val = 0; 2276 break; 2277 } 2278 /* fall through */ 2279 case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */ 2280 val = 0; 2281 for (i = 0; i < size; i++) { 2282 unsigned hdlidx = (offset - 0xd14) + i; 2283 int sbank = shpr_bank(s, hdlidx, attrs); 2284 2285 if (sbank < 0) { 2286 continue; 2287 } 2288 val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank)); 2289 } 2290 break; 2291 case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */ 2292 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { 2293 val = 0; 2294 break; 2295 }; 2296 /* 2297 * The BFSR bits [15:8] are shared between security states 2298 * and we store them in the NS copy. They are RAZ/WI for 2299 * NS code if AIRCR.BFHFNMINS is 0. 2300 */ 2301 val = s->cpu->env.v7m.cfsr[attrs.secure]; 2302 if (!attrs.secure && 2303 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 2304 val &= ~R_V7M_CFSR_BFSR_MASK; 2305 } else { 2306 val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK; 2307 } 2308 val = extract32(val, (offset - 0xd28) * 8, size * 8); 2309 break; 2310 case 0xfe0 ... 0xfff: /* ID. */ 2311 if (offset & 3) { 2312 val = 0; 2313 } else { 2314 val = nvic_id[(offset - 0xfe0) >> 2]; 2315 } 2316 break; 2317 default: 2318 if (size == 4) { 2319 val = nvic_readl(s, offset, attrs); 2320 } else { 2321 qemu_log_mask(LOG_GUEST_ERROR, 2322 "NVIC: Bad read of size %d at offset 0x%x\n", 2323 size, offset); 2324 val = 0; 2325 } 2326 } 2327 2328 trace_nvic_sysreg_read(addr, val, size); 2329 *data = val; 2330 return MEMTX_OK; 2331 } 2332 2333 static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr, 2334 uint64_t value, unsigned size, 2335 MemTxAttrs attrs) 2336 { 2337 NVICState *s = (NVICState *)opaque; 2338 uint32_t offset = addr; 2339 unsigned i, startvec, end; 2340 unsigned setval = 0; 2341 2342 trace_nvic_sysreg_write(addr, value, size); 2343 2344 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) { 2345 /* Generate BusFault for unprivileged accesses */ 2346 return MEMTX_ERROR; 2347 } 2348 2349 switch (offset) { 2350 case 0x100 ... 0x13f: /* NVIC Set enable */ 2351 offset += 0x80; 2352 setval = 1; 2353 /* fall through */ 2354 case 0x180 ... 0x1bf: /* NVIC Clear enable */ 2355 startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; 2356 2357 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { 2358 if (value & (1 << i) && 2359 (attrs.secure || s->itns[startvec + i])) { 2360 s->vectors[startvec + i].enabled = setval; 2361 } 2362 } 2363 nvic_irq_update(s); 2364 goto exit_ok; 2365 case 0x200 ... 0x23f: /* NVIC Set pend */ 2366 /* the special logic in armv7m_nvic_set_pending() 2367 * is not needed since IRQs are never escalated 2368 */ 2369 offset += 0x80; 2370 setval = 1; 2371 /* fall through */ 2372 case 0x280 ... 0x2bf: /* NVIC Clear pend */ 2373 startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */ 2374 2375 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { 2376 if (value & (1 << i) && 2377 (attrs.secure || s->itns[startvec + i])) { 2378 s->vectors[startvec + i].pending = setval; 2379 } 2380 } 2381 nvic_irq_update(s); 2382 goto exit_ok; 2383 case 0x300 ... 0x33f: /* NVIC Active */ 2384 goto exit_ok; /* R/O */ 2385 case 0x400 ... 0x5ef: /* NVIC Priority */ 2386 startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */ 2387 2388 for (i = 0; i < size && startvec + i < s->num_irq; i++) { 2389 if (attrs.secure || s->itns[startvec + i]) { 2390 set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff); 2391 } 2392 } 2393 nvic_irq_update(s); 2394 goto exit_ok; 2395 case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */ 2396 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { 2397 goto exit_ok; 2398 } 2399 /* fall through */ 2400 case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */ 2401 for (i = 0; i < size; i++) { 2402 unsigned hdlidx = (offset - 0xd14) + i; 2403 int newprio = extract32(value, i * 8, 8); 2404 int sbank = shpr_bank(s, hdlidx, attrs); 2405 2406 if (sbank < 0) { 2407 continue; 2408 } 2409 set_prio(s, hdlidx, sbank, newprio); 2410 } 2411 nvic_irq_update(s); 2412 goto exit_ok; 2413 case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */ 2414 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { 2415 goto exit_ok; 2416 } 2417 /* All bits are W1C, so construct 32 bit value with 0s in 2418 * the parts not written by the access size 2419 */ 2420 value <<= ((offset - 0xd28) * 8); 2421 2422 if (!attrs.secure && 2423 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 2424 /* BFSR bits are RAZ/WI for NS if BFHFNMINS is set */ 2425 value &= ~R_V7M_CFSR_BFSR_MASK; 2426 } 2427 2428 s->cpu->env.v7m.cfsr[attrs.secure] &= ~value; 2429 if (attrs.secure) { 2430 /* The BFSR bits [15:8] are shared between security states 2431 * and we store them in the NS copy. 2432 */ 2433 s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK); 2434 } 2435 goto exit_ok; 2436 } 2437 if (size == 4) { 2438 nvic_writel(s, offset, value, attrs); 2439 goto exit_ok; 2440 } 2441 qemu_log_mask(LOG_GUEST_ERROR, 2442 "NVIC: Bad write of size %d at offset 0x%x\n", size, offset); 2443 /* This is UNPREDICTABLE; treat as RAZ/WI */ 2444 2445 exit_ok: 2446 /* Ensure any changes made are reflected in the cached hflags. */ 2447 arm_rebuild_hflags(&s->cpu->env); 2448 return MEMTX_OK; 2449 } 2450 2451 static const MemoryRegionOps nvic_sysreg_ops = { 2452 .read_with_attrs = nvic_sysreg_read, 2453 .write_with_attrs = nvic_sysreg_write, 2454 .endianness = DEVICE_NATIVE_ENDIAN, 2455 }; 2456 2457 static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, 2458 uint64_t value, unsigned size, 2459 MemTxAttrs attrs) 2460 { 2461 MemoryRegion *mr = opaque; 2462 2463 if (attrs.secure) { 2464 /* S accesses to the alias act like NS accesses to the real region */ 2465 attrs.secure = 0; 2466 return memory_region_dispatch_write(mr, addr, value, 2467 size_memop(size) | MO_TE, attrs); 2468 } else { 2469 /* NS attrs are RAZ/WI for privileged, and BusFault for user */ 2470 if (attrs.user) { 2471 return MEMTX_ERROR; 2472 } 2473 return MEMTX_OK; 2474 } 2475 } 2476 2477 static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, 2478 uint64_t *data, unsigned size, 2479 MemTxAttrs attrs) 2480 { 2481 MemoryRegion *mr = opaque; 2482 2483 if (attrs.secure) { 2484 /* S accesses to the alias act like NS accesses to the real region */ 2485 attrs.secure = 0; 2486 return memory_region_dispatch_read(mr, addr, data, 2487 size_memop(size) | MO_TE, attrs); 2488 } else { 2489 /* NS attrs are RAZ/WI for privileged, and BusFault for user */ 2490 if (attrs.user) { 2491 return MEMTX_ERROR; 2492 } 2493 *data = 0; 2494 return MEMTX_OK; 2495 } 2496 } 2497 2498 static const MemoryRegionOps nvic_sysreg_ns_ops = { 2499 .read_with_attrs = nvic_sysreg_ns_read, 2500 .write_with_attrs = nvic_sysreg_ns_write, 2501 .endianness = DEVICE_NATIVE_ENDIAN, 2502 }; 2503 2504 static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, 2505 uint64_t value, unsigned size, 2506 MemTxAttrs attrs) 2507 { 2508 NVICState *s = opaque; 2509 MemoryRegion *mr; 2510 2511 /* Direct the access to the correct systick */ 2512 mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); 2513 return memory_region_dispatch_write(mr, addr, value, 2514 size_memop(size) | MO_TE, attrs); 2515 } 2516 2517 static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, 2518 uint64_t *data, unsigned size, 2519 MemTxAttrs attrs) 2520 { 2521 NVICState *s = opaque; 2522 MemoryRegion *mr; 2523 2524 /* Direct the access to the correct systick */ 2525 mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); 2526 return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE, 2527 attrs); 2528 } 2529 2530 static const MemoryRegionOps nvic_systick_ops = { 2531 .read_with_attrs = nvic_systick_read, 2532 .write_with_attrs = nvic_systick_write, 2533 .endianness = DEVICE_NATIVE_ENDIAN, 2534 }; 2535 2536 2537 static MemTxResult ras_read(void *opaque, hwaddr addr, 2538 uint64_t *data, unsigned size, 2539 MemTxAttrs attrs) 2540 { 2541 if (attrs.user) { 2542 return MEMTX_ERROR; 2543 } 2544 2545 switch (addr) { 2546 case 0xe10: /* ERRIIDR */ 2547 /* architect field = Arm; product/variant/revision 0 */ 2548 *data = 0x43b; 2549 break; 2550 case 0xfc8: /* ERRDEVID */ 2551 /* Minimal RAS: we implement 0 error record indexes */ 2552 *data = 0; 2553 break; 2554 default: 2555 qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n", 2556 (uint32_t)addr); 2557 *data = 0; 2558 break; 2559 } 2560 return MEMTX_OK; 2561 } 2562 2563 static MemTxResult ras_write(void *opaque, hwaddr addr, 2564 uint64_t value, unsigned size, 2565 MemTxAttrs attrs) 2566 { 2567 if (attrs.user) { 2568 return MEMTX_ERROR; 2569 } 2570 2571 switch (addr) { 2572 default: 2573 qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n", 2574 (uint32_t)addr); 2575 break; 2576 } 2577 return MEMTX_OK; 2578 } 2579 2580 static const MemoryRegionOps ras_ops = { 2581 .read_with_attrs = ras_read, 2582 .write_with_attrs = ras_write, 2583 .endianness = DEVICE_NATIVE_ENDIAN, 2584 }; 2585 2586 /* 2587 * Unassigned portions of the PPB space are RAZ/WI for privileged 2588 * accesses, and fault for non-privileged accesses. 2589 */ 2590 static MemTxResult ppb_default_read(void *opaque, hwaddr addr, 2591 uint64_t *data, unsigned size, 2592 MemTxAttrs attrs) 2593 { 2594 qemu_log_mask(LOG_UNIMP, "Read of unassigned area of PPB: offset 0x%x\n", 2595 (uint32_t)addr); 2596 if (attrs.user) { 2597 return MEMTX_ERROR; 2598 } 2599 *data = 0; 2600 return MEMTX_OK; 2601 } 2602 2603 static MemTxResult ppb_default_write(void *opaque, hwaddr addr, 2604 uint64_t value, unsigned size, 2605 MemTxAttrs attrs) 2606 { 2607 qemu_log_mask(LOG_UNIMP, "Write of unassigned area of PPB: offset 0x%x\n", 2608 (uint32_t)addr); 2609 if (attrs.user) { 2610 return MEMTX_ERROR; 2611 } 2612 return MEMTX_OK; 2613 } 2614 2615 static const MemoryRegionOps ppb_default_ops = { 2616 .read_with_attrs = ppb_default_read, 2617 .write_with_attrs = ppb_default_write, 2618 .endianness = DEVICE_NATIVE_ENDIAN, 2619 .valid.min_access_size = 1, 2620 .valid.max_access_size = 8, 2621 }; 2622 2623 static int nvic_post_load(void *opaque, int version_id) 2624 { 2625 NVICState *s = opaque; 2626 unsigned i; 2627 int resetprio; 2628 2629 /* Check for out of range priority settings */ 2630 resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3; 2631 2632 if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio || 2633 s->vectors[ARMV7M_EXCP_NMI].prio != -2 || 2634 s->vectors[ARMV7M_EXCP_HARD].prio != -1) { 2635 return 1; 2636 } 2637 for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) { 2638 if (s->vectors[i].prio & ~0xff) { 2639 return 1; 2640 } 2641 } 2642 2643 nvic_recompute_state(s); 2644 2645 return 0; 2646 } 2647 2648 static const VMStateDescription vmstate_VecInfo = { 2649 .name = "armv7m_nvic_info", 2650 .version_id = 1, 2651 .minimum_version_id = 1, 2652 .fields = (VMStateField[]) { 2653 VMSTATE_INT16(prio, VecInfo), 2654 VMSTATE_UINT8(enabled, VecInfo), 2655 VMSTATE_UINT8(pending, VecInfo), 2656 VMSTATE_UINT8(active, VecInfo), 2657 VMSTATE_UINT8(level, VecInfo), 2658 VMSTATE_END_OF_LIST() 2659 } 2660 }; 2661 2662 static bool nvic_security_needed(void *opaque) 2663 { 2664 NVICState *s = opaque; 2665 2666 return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY); 2667 } 2668 2669 static int nvic_security_post_load(void *opaque, int version_id) 2670 { 2671 NVICState *s = opaque; 2672 int i; 2673 2674 /* Check for out of range priority settings */ 2675 if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1 2676 && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) { 2677 /* We can't cross-check against AIRCR.BFHFNMINS as we don't know 2678 * if the CPU state has been migrated yet; a mismatch won't 2679 * cause the emulation to blow up, though. 2680 */ 2681 return 1; 2682 } 2683 for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) { 2684 if (s->sec_vectors[i].prio & ~0xff) { 2685 return 1; 2686 } 2687 } 2688 return 0; 2689 } 2690 2691 static const VMStateDescription vmstate_nvic_security = { 2692 .name = "armv7m_nvic/m-security", 2693 .version_id = 1, 2694 .minimum_version_id = 1, 2695 .needed = nvic_security_needed, 2696 .post_load = &nvic_security_post_load, 2697 .fields = (VMStateField[]) { 2698 VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1, 2699 vmstate_VecInfo, VecInfo), 2700 VMSTATE_UINT32(prigroup[M_REG_S], NVICState), 2701 VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS), 2702 VMSTATE_END_OF_LIST() 2703 } 2704 }; 2705 2706 static const VMStateDescription vmstate_nvic = { 2707 .name = "armv7m_nvic", 2708 .version_id = 4, 2709 .minimum_version_id = 4, 2710 .post_load = &nvic_post_load, 2711 .fields = (VMStateField[]) { 2712 VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1, 2713 vmstate_VecInfo, VecInfo), 2714 VMSTATE_UINT32(prigroup[M_REG_NS], NVICState), 2715 VMSTATE_END_OF_LIST() 2716 }, 2717 .subsections = (const VMStateDescription*[]) { 2718 &vmstate_nvic_security, 2719 NULL 2720 } 2721 }; 2722 2723 static Property props_nvic[] = { 2724 /* Number of external IRQ lines (so excluding the 16 internal exceptions) */ 2725 DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64), 2726 DEFINE_PROP_END_OF_LIST() 2727 }; 2728 2729 static void armv7m_nvic_reset(DeviceState *dev) 2730 { 2731 int resetprio; 2732 NVICState *s = NVIC(dev); 2733 2734 memset(s->vectors, 0, sizeof(s->vectors)); 2735 memset(s->sec_vectors, 0, sizeof(s->sec_vectors)); 2736 s->prigroup[M_REG_NS] = 0; 2737 s->prigroup[M_REG_S] = 0; 2738 2739 s->vectors[ARMV7M_EXCP_NMI].enabled = 1; 2740 /* MEM, BUS, and USAGE are enabled through 2741 * the System Handler Control register 2742 */ 2743 s->vectors[ARMV7M_EXCP_SVC].enabled = 1; 2744 s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1; 2745 s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1; 2746 2747 /* DebugMonitor is enabled via DEMCR.MON_EN */ 2748 s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0; 2749 2750 resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3; 2751 s->vectors[ARMV7M_EXCP_RESET].prio = resetprio; 2752 s->vectors[ARMV7M_EXCP_NMI].prio = -2; 2753 s->vectors[ARMV7M_EXCP_HARD].prio = -1; 2754 2755 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { 2756 s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1; 2757 s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1; 2758 s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1; 2759 s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1; 2760 2761 /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */ 2762 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1; 2763 /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */ 2764 s->vectors[ARMV7M_EXCP_HARD].enabled = 0; 2765 } else { 2766 s->vectors[ARMV7M_EXCP_HARD].enabled = 1; 2767 } 2768 2769 /* Strictly speaking the reset handler should be enabled. 2770 * However, we don't simulate soft resets through the NVIC, 2771 * and the reset vector should never be pended. 2772 * So we leave it disabled to catch logic errors. 2773 */ 2774 2775 s->exception_prio = NVIC_NOEXC_PRIO; 2776 s->vectpending = 0; 2777 s->vectpending_is_s_banked = false; 2778 s->vectpending_prio = NVIC_NOEXC_PRIO; 2779 2780 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { 2781 memset(s->itns, 0, sizeof(s->itns)); 2782 } else { 2783 /* This state is constant and not guest accessible in a non-security 2784 * NVIC; we set the bits to true to avoid having to do a feature 2785 * bit check in the NVIC enable/pend/etc register accessors. 2786 */ 2787 int i; 2788 2789 for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) { 2790 s->itns[i] = true; 2791 } 2792 } 2793 2794 /* 2795 * We updated state that affects the CPU's MMUidx and thus its hflags; 2796 * and we can't guarantee that we run before the CPU reset function. 2797 */ 2798 arm_rebuild_hflags(&s->cpu->env); 2799 } 2800 2801 static void nvic_systick_trigger(void *opaque, int n, int level) 2802 { 2803 NVICState *s = opaque; 2804 2805 if (level) { 2806 /* SysTick just asked us to pend its exception. 2807 * (This is different from an external interrupt line's 2808 * behaviour.) 2809 * n == 0 : NonSecure systick 2810 * n == 1 : Secure systick 2811 */ 2812 armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n); 2813 } 2814 } 2815 2816 static void armv7m_nvic_realize(DeviceState *dev, Error **errp) 2817 { 2818 NVICState *s = NVIC(dev); 2819 2820 /* The armv7m container object will have set our CPU pointer */ 2821 if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) { 2822 error_setg(errp, "The NVIC can only be used with a Cortex-M CPU"); 2823 return; 2824 } 2825 2826 if (s->num_irq > NVIC_MAX_IRQ) { 2827 error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq); 2828 return; 2829 } 2830 2831 qdev_init_gpio_in(dev, set_irq_level, s->num_irq); 2832 2833 /* include space for internal exception vectors */ 2834 s->num_irq += NVIC_FIRST_IRQ; 2835 2836 s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2; 2837 2838 if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), errp)) { 2839 return; 2840 } 2841 sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0, 2842 qdev_get_gpio_in_named(dev, "systick-trigger", 2843 M_REG_NS)); 2844 2845 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { 2846 /* We couldn't init the secure systick device in instance_init 2847 * as we didn't know then if the CPU had the security extensions; 2848 * so we have to do it here. 2849 */ 2850 object_initialize_child(OBJECT(dev), "systick-reg-s", 2851 &s->systick[M_REG_S], TYPE_SYSTICK); 2852 2853 if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_S]), errp)) { 2854 return; 2855 } 2856 sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0, 2857 qdev_get_gpio_in_named(dev, "systick-trigger", 2858 M_REG_S)); 2859 } 2860 2861 /* 2862 * This device provides a single sysbus memory region which 2863 * represents the whole of the "System PPB" space. This is the 2864 * range from 0xe0000000 to 0xe00fffff and includes the NVIC, 2865 * the System Control Space (system registers), the systick timer, 2866 * and for CPUs with the Security extension an NS banked version 2867 * of all of these. 2868 * 2869 * The default behaviour for unimplemented registers/ranges 2870 * (for instance the Data Watchpoint and Trace unit at 0xe0001000) 2871 * is to RAZ/WI for privileged access and BusFault for non-privileged 2872 * access. 2873 * 2874 * The NVIC and System Control Space (SCS) starts at 0xe000e000 2875 * and looks like this: 2876 * 0x004 - ICTR 2877 * 0x010 - 0xff - systick 2878 * 0x100..0x7ec - NVIC 2879 * 0x7f0..0xcff - Reserved 2880 * 0xd00..0xd3c - SCS registers 2881 * 0xd40..0xeff - Reserved or Not implemented 2882 * 0xf00 - STIR 2883 * 2884 * Some registers within this space are banked between security states. 2885 * In v8M there is a second range 0xe002e000..0xe002efff which is the 2886 * NonSecure alias SCS; secure accesses to this behave like NS accesses 2887 * to the main SCS range, and non-secure accesses (including when 2888 * the security extension is not implemented) are RAZ/WI. 2889 * Note that both the main SCS range and the alias range are defined 2890 * to be exempt from memory attribution (R_BLJT) and so the memory 2891 * transaction attribute always matches the current CPU security 2892 * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops 2893 * wrappers we change attrs.secure to indicate the NS access; so 2894 * generally code determining which banked register to use should 2895 * use attrs.secure; code determining actual behaviour of the system 2896 * should use env->v7m.secure. 2897 * 2898 * The container covers the whole PPB space. Within it the priority 2899 * of overlapping regions is: 2900 * - default region (for RAZ/WI and BusFault) : -1 2901 * - system register regions : 0 2902 * - systick : 1 2903 * This is because the systick device is a small block of registers 2904 * in the middle of the other system control registers. 2905 */ 2906 memory_region_init(&s->container, OBJECT(s), "nvic", 0x100000); 2907 memory_region_init_io(&s->defaultmem, OBJECT(s), &ppb_default_ops, s, 2908 "nvic-default", 0x100000); 2909 memory_region_add_subregion_overlap(&s->container, 0, &s->defaultmem, -1); 2910 memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s, 2911 "nvic_sysregs", 0x1000); 2912 memory_region_add_subregion(&s->container, 0xe000, &s->sysregmem); 2913 2914 memory_region_init_io(&s->systickmem, OBJECT(s), 2915 &nvic_systick_ops, s, 2916 "nvic_systick", 0xe0); 2917 2918 memory_region_add_subregion_overlap(&s->container, 0xe010, 2919 &s->systickmem, 1); 2920 2921 if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { 2922 memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s), 2923 &nvic_sysreg_ns_ops, &s->sysregmem, 2924 "nvic_sysregs_ns", 0x1000); 2925 memory_region_add_subregion(&s->container, 0x2e000, &s->sysreg_ns_mem); 2926 memory_region_init_io(&s->systick_ns_mem, OBJECT(s), 2927 &nvic_sysreg_ns_ops, &s->systickmem, 2928 "nvic_systick_ns", 0xe0); 2929 memory_region_add_subregion_overlap(&s->container, 0x2e010, 2930 &s->systick_ns_mem, 1); 2931 } 2932 2933 if (cpu_isar_feature(aa32_ras, s->cpu)) { 2934 memory_region_init_io(&s->ras_mem, OBJECT(s), 2935 &ras_ops, s, "nvic_ras", 0x1000); 2936 memory_region_add_subregion(&s->container, 0x5000, &s->ras_mem); 2937 } 2938 2939 sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container); 2940 } 2941 2942 static void armv7m_nvic_instance_init(Object *obj) 2943 { 2944 /* We have a different default value for the num-irq property 2945 * than our superclass. This function runs after qdev init 2946 * has set the defaults from the Property array and before 2947 * any user-specified property setting, so just modify the 2948 * value in the GICState struct. 2949 */ 2950 DeviceState *dev = DEVICE(obj); 2951 NVICState *nvic = NVIC(obj); 2952 SysBusDevice *sbd = SYS_BUS_DEVICE(obj); 2953 2954 object_initialize_child(obj, "systick-reg-ns", &nvic->systick[M_REG_NS], 2955 TYPE_SYSTICK); 2956 /* We can't initialize the secure systick here, as we don't know 2957 * yet if we need it. 2958 */ 2959 2960 sysbus_init_irq(sbd, &nvic->excpout); 2961 qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1); 2962 qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", 2963 M_REG_NUM_BANKS); 2964 qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1); 2965 } 2966 2967 static void armv7m_nvic_class_init(ObjectClass *klass, void *data) 2968 { 2969 DeviceClass *dc = DEVICE_CLASS(klass); 2970 2971 dc->vmsd = &vmstate_nvic; 2972 device_class_set_props(dc, props_nvic); 2973 dc->reset = armv7m_nvic_reset; 2974 dc->realize = armv7m_nvic_realize; 2975 } 2976 2977 static const TypeInfo armv7m_nvic_info = { 2978 .name = TYPE_NVIC, 2979 .parent = TYPE_SYS_BUS_DEVICE, 2980 .instance_init = armv7m_nvic_instance_init, 2981 .instance_size = sizeof(NVICState), 2982 .class_init = armv7m_nvic_class_init, 2983 .class_size = sizeof(SysBusDeviceClass), 2984 }; 2985 2986 static void armv7m_nvic_register_types(void) 2987 { 2988 type_register_static(&armv7m_nvic_info); 2989 } 2990 2991 type_init(armv7m_nvic_register_types) 2992