1 /* 2 * ARM Nested Vectored Interrupt Controller 3 * 4 * Copyright (c) 2006-2007 CodeSourcery. 5 * Written by Paul Brook 6 * 7 * This code is licensed under the GPL. 8 * 9 * The ARMv7M System controller is fairly tightly tied in with the 10 * NVIC. Much of that is also implemented here. 11 */ 12 13 #include "qemu/osdep.h" 14 #include "qapi/error.h" 15 #include "cpu.h" 16 #include "hw/sysbus.h" 17 #include "qemu/timer.h" 18 #include "hw/intc/armv7m_nvic.h" 19 #include "target/arm/cpu.h" 20 #include "exec/exec-all.h" 21 #include "qemu/log.h" 22 #include "qemu/module.h" 23 #include "trace.h" 24 25 /* IRQ number counting: 26 * 27 * the num-irq property counts the number of external IRQ lines 28 * 29 * NVICState::num_irq counts the total number of exceptions 30 * (external IRQs, the 15 internal exceptions including reset, 31 * and one for the unused exception number 0). 32 * 33 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines. 34 * 35 * NVIC_MAX_VECTORS is the highest permitted number of exceptions. 36 * 37 * Iterating through all exceptions should typically be done with 38 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0. 39 * 40 * The external qemu_irq lines are the NVIC's external IRQ lines, 41 * so line 0 is exception 16. 42 * 43 * In the terminology of the architecture manual, "interrupts" are 44 * a subcategory of exception referring to the external interrupts 45 * (which are exception numbers NVIC_FIRST_IRQ and upward). 46 * For historical reasons QEMU tends to use "interrupt" and 47 * "exception" more or less interchangeably. 
 */
#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)

/* Effective running priority of the CPU when no exception is active
 * (higher than the highest possible priority value)
 */
#define NVIC_NOEXC_PRIO 0x100
/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
#define NVIC_NS_PRIO_LIMIT 0x80

/* NOTE(review): presumably the peripheral/component ID bytes exposed at the
 * top of the NVIC register window -- confirm against the MMIO read handling
 * (not visible in this chunk).
 */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};

static int nvic_pending_prio(NVICState *s)
{
    /* return the group priority of the current pending interrupt,
     * or NVIC_NOEXC_PRIO if no interrupt is pending
     */
    return s->vectpending_prio;
}

/* Return the value of the ICSR RETTOBASE bit:
 * 1 if there is exactly one active exception
 * 0 if there is more than one active exception
 * UNKNOWN if there are no active exceptions (we choose 1,
 * which matches the choice Cortex-M3 is documented as making).
 *
 * NB: some versions of the documentation talk about this
 * counting "active exceptions other than the one shown by IPSR";
 * this is only different in the obscure corner case where guest
 * code has manually deactivated an exception and is about
 * to fail an exception-return integrity check. The definition
 * above is the one from the v8M ARM ARM and is also in line
 * with the behaviour documented for the Cortex-M3.
84 */ 85 static bool nvic_rettobase(NVICState *s) 86 { 87 int irq, nhand = 0; 88 bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY); 89 90 for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) { 91 if (s->vectors[irq].active || 92 (check_sec && irq < NVIC_INTERNAL_VECTORS && 93 s->sec_vectors[irq].active)) { 94 nhand++; 95 if (nhand == 2) { 96 return 0; 97 } 98 } 99 } 100 101 return 1; 102 } 103 104 /* Return the value of the ISCR ISRPENDING bit: 105 * 1 if an external interrupt is pending 106 * 0 if no external interrupt is pending 107 */ 108 static bool nvic_isrpending(NVICState *s) 109 { 110 int irq; 111 112 /* We can shortcut if the highest priority pending interrupt 113 * happens to be external or if there is nothing pending. 114 */ 115 if (s->vectpending > NVIC_FIRST_IRQ) { 116 return true; 117 } 118 if (s->vectpending == 0) { 119 return false; 120 } 121 122 for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) { 123 if (s->vectors[irq].pending) { 124 return true; 125 } 126 } 127 return false; 128 } 129 130 static bool exc_is_banked(int exc) 131 { 132 /* Return true if this is one of the limited set of exceptions which 133 * are banked (and thus have state in sec_vectors[]) 134 */ 135 return exc == ARMV7M_EXCP_HARD || 136 exc == ARMV7M_EXCP_MEM || 137 exc == ARMV7M_EXCP_USAGE || 138 exc == ARMV7M_EXCP_SVC || 139 exc == ARMV7M_EXCP_PENDSV || 140 exc == ARMV7M_EXCP_SYSTICK; 141 } 142 143 /* Return a mask word which clears the subpriority bits from 144 * a priority value for an M-profile exception, leaving only 145 * the group priority. 146 */ 147 static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure) 148 { 149 return ~0U << (s->prigroup[secure] + 1); 150 } 151 152 static bool exc_targets_secure(NVICState *s, int exc) 153 { 154 /* Return true if this non-banked exception targets Secure state. 
     */
    if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        return false;
    }

    if (exc >= NVIC_FIRST_IRQ) {
        /* External interrupts: the NVIC_ITNS bit being set means the
         * IRQ targets NonSecure, so it targets Secure when clear.
         */
        return !s->itns[exc];
    }

    /* Function shouldn't be called for banked exceptions. */
    assert(!exc_is_banked(exc));

    switch (exc) {
    case ARMV7M_EXCP_NMI:
    case ARMV7M_EXCP_BUS:
        return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    case ARMV7M_EXCP_SECURE:
        return true;
    case ARMV7M_EXCP_DEBUG:
        /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
        return false;
    default:
        /* reset, and reserved (unused) low exception numbers.
         * We'll get called by code that loops through all the exception
         * numbers, but it doesn't matter what we return here as these
         * non-existent exceptions will never be pended or active.
         */
        return true;
    }
}

static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
{
    /* Return the group priority for this exception, given its raw
     * (group-and-subgroup) priority value and whether it is targeting
     * secure state or not.
     */
    if (rawprio < 0) {
        /* Negative (fixed) priorities are never adjusted */
        return rawprio;
    }
    rawprio &= nvic_gprio_mask(s, targets_secure);
    /* AIRCR.PRIS causes us to squash all NS priorities into the
     * lower half of the total range
     */
    if (!targets_secure &&
        (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
        rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
    }
    return rawprio;
}

/* Recompute vectpending and exception_prio for a CPU which implements
 * the Security extension
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;
    int pend_subprio = 0;

    /* R_CQRV: precedence is by:
     *  - lowest group priority; if both the same then
     *  - lowest subpriority; if both the same then
     *  - lowest exception number; if both the same (ie banked) then
     *  - secure exception takes precedence
     * Compare pseudocode RawExecutionPriority.
     * Annoyingly, now we have two prigroup values (for S and NS)
     * we can't do the loop comparison on raw priority values.
     */
    for (i = 1; i < s->num_irq; i++) {
        /* Visit the S bank before the NS bank so that, at equal
         * priority and exception number, the Secure exception wins.
         */
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio, subprio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            prio = exc_group_prio(s, vec->prio, targets_secure);
            subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
            if (vec->enabled && vec->pending &&
                ((prio < pend_prio) ||
                 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
                pend_prio = prio;
                pend_subprio = subprio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}

/* Recompute vectpending and exception_prio */
static void nvic_recompute_state(NVICState *s)
{
    int i;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;

    /* In theory we could write one function that handled both
     * the "security extension present" and "not present"; however
     * the security related changes significantly complicate the
     * recomputation just by themselves and mixing both cases together
     * would be even worse, so we retain a separate non-secure-only
     * version for CPUs which don't implement the security extension.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        nvic_recompute_state_secure(s);
        return;
    }

    for (i = 1; i < s->num_irq; i++) {
        VecInfo *vec = &s->vectors[i];

        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
            pend_prio = vec->prio;
            pend_irq = i;
        }
        if (vec->active && vec->prio < active_prio) {
            active_prio = vec->prio;
        }
    }

    /* Only mask out the subpriority bits for configurable (>= 0)
     * priorities; negative fixed priorities are left untouched.
     */
    if (active_prio > 0) {
        active_prio &= nvic_gprio_mask(s, false);
    }

    if (pend_prio > 0) {
        pend_prio &= nvic_gprio_mask(s, false);
    }

    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state(s->vectpending,
                               s->vectpending_prio,
                               s->exception_prio);
}

/* Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This is a value between -3 (Secure HardFault priority when
 * AIRCR.BFHFNMINS is set) and NVIC_NOEXC_PRIO.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            /* PRIS: NS PRIMASK can only raise priority to the NS limit */
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    if (env->v7m.faultmask[M_REG_S]) {
        /* Secure FAULTMASK: -3 if Secure HardFault is boosted by
         * BFHFNMINS, otherwise -1.
         */
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}

bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
{
    /* Return true if the requested execution priority is negative
     * for the specified security state, ie that security state
     * has an active NMI or HardFault or has set its FAULTMASK.
     * Note that this is not the same as whether the execution
     * priority is actually negative (for instance AIRCR.PRIS may
     * mean we don't allow FAULTMASK_NS to actually make the execution
     * priority negative). Compare pseudocode IsReqExcPriNeg().
     */
    NVICState *s = opaque;

    if (s->cpu->env.v7m.faultmask[secure]) {
        return true;
    }

    if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
        s->vectors[ARMV7M_EXCP_HARD].active) {
        return true;
    }

    if (s->vectors[ARMV7M_EXCP_NMI].active &&
        exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
        return true;
    }

    return false;
}

bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    NVICState *s = opaque;

    /* The pending exception is taken only if it is strictly higher
     * priority (numerically lower) than the current execution priority.
     */
    return nvic_exec_prio(s) > nvic_pending_prio(s);
}

int armv7m_nvic_raw_execution_priority(void *opaque)
{
    NVICState *s = opaque;

    return s->exception_prio;
}

/* caller must call nvic_irq_update() after this.
 * secure indicates the bank to use for banked exceptions (we assert if
 * we are passed secure=true for a non-banked exception).
 */
static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    /* Only the top num_prio_bits bits of the priority byte are writable */
    prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);

    if (secure) {
        assert(exc_is_banked(irq));
        s->sec_vectors[irq].prio = prio;
    } else {
        s->vectors[irq].prio = prio;
    }

    trace_nvic_set_prio(irq, secure, prio);
}

/* Return the current raw priority register value.
 * secure indicates the bank to use for banked exceptions (we assert if
 * we are passed secure=true for a non-banked exception).
 */
static int get_prio(NVICState *s, unsigned irq, bool secure)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        return s->sec_vectors[irq].prio;
    } else {
        return s->vectors[irq].prio;
    }
}

/* Recompute state and assert irq line accordingly.
 * Must be called after changes to:
 *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
 *  prigroup
 */
static void nvic_irq_update(NVICState *s)
{
    int lvl;
    int pend_prio;

    nvic_recompute_state(s);
    pend_prio = nvic_pending_prio(s);

    /* Raise NVIC output if this IRQ would be taken, except that we
     * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
     * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
     * to those CPU registers don't cause us to recalculate the NVIC
     * pending info.
     */
    lvl = (pend_prio < s->exception_prio);
    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
    qemu_set_irq(s->excpout, lvl);
}

/**
 * armv7m_nvic_clear_pending: mark the specified exception as not pending
 * @opaque: the NVIC
 * @irq: the exception number to mark as not pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Marks the specified exception as not pending. Note that we will assert()
 * if @secure is true and @irq does not specify one of the fixed set
 * of architecturally banked exceptions.
 */
static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }
    trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
    if (vec->pending) {
        vec->pending = 0;
        /* Only recompute if we actually changed something */
        nvic_irq_update(s);
    }
}

static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    /* Pend an exception, including possibly escalating it to HardFault.
     *
     * This function handles both "normal" pending of interrupts and
     * exceptions, and also derived exceptions (ones which occur as
     * a result of trying to take some other exception).
     *
     * If derived == true, the caller guarantees that we are part way through
     * trying to take an exception (but have not yet called
     * armv7m_nvic_acknowledge_irq() to make it active), and so:
     *  - s->vectpending is the "original exception" we were trying to take
     *  - irq is the "derived exception"
     *  - nvic_exec_prio(s) gives the priority before exception entry
     * Here we handle the prioritization logic which the pseudocode puts
     * in the DerivedLateArrival() function.
     */

    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /* DebugMonitorFault, but its priority is lower than the
             * preempted exception priority: just ignore it.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /* If this is a terminal exception (one which means we cannot
             * take the original exception, like a failure to read its
             * vector table entry), then we must take the derived exception.
             * If the derived exception can't take priority over the
             * original exception, then we go into Lockup.
             *
             * For QEMU, we rely on the fact that a derived exception is
             * terminal if and only if it's reported to us as HardFault,
             * which saves having to have an extra argument is_terminal
             * that we'd only use in one place.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }
        /* We now continue with the same code as for a normal pending
         * exception, which will cause us to pend the derived exception.
         * We'll then take either the original or the derived exception
         * based on which is higher priority by the usual mechanism
         * for selecting the highest priority pending interrupt.
         */
    }

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {

            /* We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HF for
             * the target security state of the original exception; otherwise
             * we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /* We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}

void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}

void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}

void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
{
    /*
     * Pend an exception during lazy FP stacking. This differs
     * from the usual exception pending because the logic for
     * whether we should escalate depends on the saved context
     * in the FPCCR register, not on the current state of the CPU/NVIC.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;
    bool escalate = false;
    /*
     * We will only look at bits in fpccr if this is a banked exception
     * (in which case 'secure' tells us whether it is the S or NS version).
     * All the bits for the non-banked exceptions are in fpccr_s.
     */
    uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
    uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    /* Decide from the saved FPCCR *RDY bit whether the fault's original
     * context allows it to be pended, or whether it must escalate.
     */
    switch (irq) {
    case ARMV7M_EXCP_DEBUG:
        if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
            /* Ignore DebugMonitor exception */
            return;
        }
        break;
    case ARMV7M_EXCP_MEM:
        escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
        break;
    case ARMV7M_EXCP_USAGE:
        escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
        break;
    case ARMV7M_EXCP_BUS:
        escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
        break;
    case ARMV7M_EXCP_SECURE:
        escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
        break;
    default:
        g_assert_not_reached();
    }

    if (escalate) {
        /*
         * Escalate to HardFault: faults that initially targeted Secure
         * continue to do so, even if HF normally targets NonSecure.
         */
        irq = ARMV7M_EXCP_HARD;
        if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
            (targets_secure ||
             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
            vec = &s->sec_vectors[irq];
        } else {
            vec = &s->vectors[irq];
        }
    }

    if (!vec->enabled ||
        nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
        if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
            /*
             * We want to escalate to HardFault but the context the
             * FP state belongs to prevents the exception pre-empting.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't escalate to HardFault during "
                      "lazy FP register stacking\n");
        }
    }

    if (escalate) {
        s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    if (!vec->pending) {
        vec->pending = 1;
        /*
         * We do not call nvic_irq_update(), because we know our caller
         * is going to handle causing us to take the exception by
         * raising EXCP_LAZYFP, so raising the IRQ line would be
         * pointless extra work. We just need to recompute the
         * priorities so that armv7m_nvic_can_take_pending_exception()
         * returns the right answer.
         */
        nvic_recompute_state(s);
    }
}

/* Make pending IRQ active. */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    if (s->vectpending_is_s_banked) {
        vec = &s->sec_vectors[pending];
    } else {
        vec = &s->vectors[pending];
    }

    assert(vec->enabled);
    assert(vec->pending);

    /* The caller is responsible for only acknowledging when the
     * pending exception can actually preempt.
     */
    assert(s->vectpending_prio < running);

    trace_nvic_acknowledge_irq(pending, s->vectpending_prio);

    vec->active = 1;
    vec->pending = 0;

    write_v7m_exception(env, s->vectpending);

    nvic_irq_update(s);
}

void armv7m_nvic_get_pending_irq_info(void *opaque,
                                      int *pirq, bool *ptargets_secure)
{
    NVICState *s = (NVICState *)opaque;
    const int pending = s->vectpending;
    bool targets_secure;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    if (s->vectpending_is_s_banked) {
        targets_secure = true;
    } else {
        targets_secure = !exc_is_banked(pending) &&
            exc_targets_secure(s, pending);
    }

    trace_nvic_get_pending_irq_info(pending, targets_secure);

    *ptargets_secure = targets_secure;
    *pirq = pending;
}

int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec = NULL;
    int ret;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    /*
     * For negative priorities, v8M will forcibly deactivate the appropriate
     * NMI or HardFault regardless of what interrupt we're being asked to
     * deactivate (compare the DeActivate() pseudocode). This is a guard
     * against software returning from NMI or HardFault with a corrupted
     * IPSR and leaving the CPU in a negative-priority state.
     * v7M does not do this, but simply deactivates the requested interrupt.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        switch (armv7m_nvic_raw_execution_priority(s)) {
        case -1:
            /* -1 is NS HardFault when BFHFNMINS, otherwise S HardFault */
            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                vec = &s->vectors[ARMV7M_EXCP_HARD];
            } else {
                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
            }
            break;
        case -2:
            vec = &s->vectors[ARMV7M_EXCP_NMI];
            break;
        case -3:
            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
            break;
        default:
            break;
        }
    }

    if (!vec) {
        if (secure && exc_is_banked(irq)) {
            vec = &s->sec_vectors[irq];
        } else {
            vec = &s->vectors[irq];
        }
    }

    trace_nvic_complete_irq(irq, secure);

    if (!vec->active) {
        /* Tell the caller this was an illegal exception return */
        return -1;
    }

    /*
     * If this is a configurable exception and it is currently
     * targeting the opposite security state from the one we're trying
     * to complete it for, this counts as an illegal exception return.
     * We still need to deactivate whatever vector the logic above has
     * selected, though, as it might not be the same as the one for the
     * requested exception number.
     */
    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
        ret = -1;
    } else {
        ret = nvic_rettobase(s);
    }

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}

bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
{
    /*
     * Return whether an exception is "ready", i.e. it is enabled and is
     * configured at a priority which would allow it to interrupt the
     * current execution priority.
     *
     * irq and secure have the same semantics as for armv7m_nvic_set_pending():
     * for non-banked exceptions secure is always false; for banked exceptions
     * it indicates which of the exceptions is required.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    int running = nvic_exec_prio(s);

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    /*
     * HardFault is an odd special case: we always check against -1,
     * even if we're secure and HardFault has priority -3; we never
     * need to check for enabled state.
     */
    if (irq == ARMV7M_EXCP_HARD) {
        return running > -1;
    }

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    return vec->enabled &&
        exc_group_prio(s, vec->prio, secure) < running;
}

/* callback when external interrupt line is changed */
static void set_irq_level(void *opaque, int n, int level)
{
    NVICState *s = opaque;
    VecInfo *vec;

    /* qemu_irq line 0 is exception number NVIC_FIRST_IRQ (16) */
    n += NVIC_FIRST_IRQ;

    assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);

    trace_nvic_set_irq_level(n, level);

    /* The pending status of an external interrupt is
     * latched on rising edge and exception handler return.
     *
     * Pulsing the IRQ will always run the handler
     * once, and the handler will re-run until the
     * level is low when the handler completes.
     */
    vec = &s->vectors[n];
    if (level != vec->level) {
        vec->level = level;
        if (level) {
            /* Rising edge: latch the interrupt as pending */
            armv7m_nvic_set_pending(s, n, false);
        }
    }
}

/* callback when external NMI line is changed */
static void nvic_nmi_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    trace_nvic_set_nmi_level(level);

    /*
     * The architecture doesn't specify whether NMI should share
     * the normal-interrupt behaviour of being resampled on
     * exception handler return. We choose not to, so just
     * set NMI pending here and don't track the current level.
     */
    if (level) {
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
    }
}

static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
{
    ARMCPU *cpu = s->cpu;
    uint32_t val;

    switch (offset) {
    case 4: /* Interrupt Control Type. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        /* ICTR.INTLINESNUM: number of 32-IRQ groups, minus one */
        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
    case 0xc: /* CPPWR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        /* We make the IMPDEF choice that nothing can ever go into a
         * non-retentive power state, which allows us to RAZ/WI this.
         */
        return 0;
    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
    {
        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            /* ITNS registers are RAZ/WI to NonSecure accesses */
            return 0;
        }
        val = 0;
        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
            if (s->itns[startvec + i]) {
                val |= (1 << i);
            }
        }
        return val;
    }
    case 0xd00: /* CPUID Base.
*/ 1010 return cpu->midr; 1011 case 0xd04: /* Interrupt Control State (ICSR) */ 1012 /* VECTACTIVE */ 1013 val = cpu->env.v7m.exception; 1014 /* VECTPENDING */ 1015 val |= (s->vectpending & 0xff) << 12; 1016 /* ISRPENDING - set if any external IRQ is pending */ 1017 if (nvic_isrpending(s)) { 1018 val |= (1 << 22); 1019 } 1020 /* RETTOBASE - set if only one handler is active */ 1021 if (nvic_rettobase(s)) { 1022 val |= (1 << 11); 1023 } 1024 if (attrs.secure) { 1025 /* PENDSTSET */ 1026 if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) { 1027 val |= (1 << 26); 1028 } 1029 /* PENDSVSET */ 1030 if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) { 1031 val |= (1 << 28); 1032 } 1033 } else { 1034 /* PENDSTSET */ 1035 if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) { 1036 val |= (1 << 26); 1037 } 1038 /* PENDSVSET */ 1039 if (s->vectors[ARMV7M_EXCP_PENDSV].pending) { 1040 val |= (1 << 28); 1041 } 1042 } 1043 /* NMIPENDSET */ 1044 if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) 1045 && s->vectors[ARMV7M_EXCP_NMI].pending) { 1046 val |= (1 << 31); 1047 } 1048 /* ISRPREEMPT: RES0 when halting debug not implemented */ 1049 /* STTNS: RES0 for the Main Extension */ 1050 return val; 1051 case 0xd08: /* Vector Table Offset. */ 1052 return cpu->env.v7m.vecbase[attrs.secure]; 1053 case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */ 1054 val = 0xfa050000 | (s->prigroup[attrs.secure] << 8); 1055 if (attrs.secure) { 1056 /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */ 1057 val |= cpu->env.v7m.aircr; 1058 } else { 1059 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1060 /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If 1061 * security isn't supported then BFHFNMINS is RAO (and 1062 * the bit in env.v7m.aircr is always set). 1063 */ 1064 val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK; 1065 } 1066 } 1067 return val; 1068 case 0xd10: /* System Control. 
*/ 1069 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1070 goto bad_offset; 1071 } 1072 return cpu->env.v7m.scr[attrs.secure]; 1073 case 0xd14: /* Configuration Control. */ 1074 /* The BFHFNMIGN bit is the only non-banked bit; we 1075 * keep it in the non-secure copy of the register. 1076 */ 1077 val = cpu->env.v7m.ccr[attrs.secure]; 1078 val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK; 1079 return val; 1080 case 0xd24: /* System Handler Control and State (SHCSR) */ 1081 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1082 goto bad_offset; 1083 } 1084 val = 0; 1085 if (attrs.secure) { 1086 if (s->sec_vectors[ARMV7M_EXCP_MEM].active) { 1087 val |= (1 << 0); 1088 } 1089 if (s->sec_vectors[ARMV7M_EXCP_HARD].active) { 1090 val |= (1 << 2); 1091 } 1092 if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) { 1093 val |= (1 << 3); 1094 } 1095 if (s->sec_vectors[ARMV7M_EXCP_SVC].active) { 1096 val |= (1 << 7); 1097 } 1098 if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) { 1099 val |= (1 << 10); 1100 } 1101 if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) { 1102 val |= (1 << 11); 1103 } 1104 if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) { 1105 val |= (1 << 12); 1106 } 1107 if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) { 1108 val |= (1 << 13); 1109 } 1110 if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) { 1111 val |= (1 << 15); 1112 } 1113 if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) { 1114 val |= (1 << 16); 1115 } 1116 if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) { 1117 val |= (1 << 18); 1118 } 1119 if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) { 1120 val |= (1 << 21); 1121 } 1122 /* SecureFault is not banked but is always RAZ/WI to NS */ 1123 if (s->vectors[ARMV7M_EXCP_SECURE].active) { 1124 val |= (1 << 4); 1125 } 1126 if (s->vectors[ARMV7M_EXCP_SECURE].enabled) { 1127 val |= (1 << 19); 1128 } 1129 if (s->vectors[ARMV7M_EXCP_SECURE].pending) { 1130 val |= (1 << 20); 1131 } 1132 } else { 1133 if (s->vectors[ARMV7M_EXCP_MEM].active) { 1134 val |= (1 << 0); 1135 } 1136 if 
(arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1137 /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */ 1138 if (s->vectors[ARMV7M_EXCP_HARD].active) { 1139 val |= (1 << 2); 1140 } 1141 if (s->vectors[ARMV7M_EXCP_HARD].pending) { 1142 val |= (1 << 21); 1143 } 1144 } 1145 if (s->vectors[ARMV7M_EXCP_USAGE].active) { 1146 val |= (1 << 3); 1147 } 1148 if (s->vectors[ARMV7M_EXCP_SVC].active) { 1149 val |= (1 << 7); 1150 } 1151 if (s->vectors[ARMV7M_EXCP_PENDSV].active) { 1152 val |= (1 << 10); 1153 } 1154 if (s->vectors[ARMV7M_EXCP_SYSTICK].active) { 1155 val |= (1 << 11); 1156 } 1157 if (s->vectors[ARMV7M_EXCP_USAGE].pending) { 1158 val |= (1 << 12); 1159 } 1160 if (s->vectors[ARMV7M_EXCP_MEM].pending) { 1161 val |= (1 << 13); 1162 } 1163 if (s->vectors[ARMV7M_EXCP_SVC].pending) { 1164 val |= (1 << 15); 1165 } 1166 if (s->vectors[ARMV7M_EXCP_MEM].enabled) { 1167 val |= (1 << 16); 1168 } 1169 if (s->vectors[ARMV7M_EXCP_USAGE].enabled) { 1170 val |= (1 << 18); 1171 } 1172 } 1173 if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1174 if (s->vectors[ARMV7M_EXCP_BUS].active) { 1175 val |= (1 << 1); 1176 } 1177 if (s->vectors[ARMV7M_EXCP_BUS].pending) { 1178 val |= (1 << 14); 1179 } 1180 if (s->vectors[ARMV7M_EXCP_BUS].enabled) { 1181 val |= (1 << 17); 1182 } 1183 if (arm_feature(&cpu->env, ARM_FEATURE_V8) && 1184 s->vectors[ARMV7M_EXCP_NMI].active) { 1185 /* NMIACT is not present in v7M */ 1186 val |= (1 << 5); 1187 } 1188 } 1189 1190 /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */ 1191 if (s->vectors[ARMV7M_EXCP_DEBUG].active) { 1192 val |= (1 << 8); 1193 } 1194 return val; 1195 case 0xd2c: /* Hard Fault Status. */ 1196 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1197 goto bad_offset; 1198 } 1199 return cpu->env.v7m.hfsr; 1200 case 0xd30: /* Debug Fault Status. 
*/ 1201 return cpu->env.v7m.dfsr; 1202 case 0xd34: /* MMFAR MemManage Fault Address */ 1203 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1204 goto bad_offset; 1205 } 1206 return cpu->env.v7m.mmfar[attrs.secure]; 1207 case 0xd38: /* Bus Fault Address. */ 1208 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1209 goto bad_offset; 1210 } 1211 if (!attrs.secure && 1212 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1213 return 0; 1214 } 1215 return cpu->env.v7m.bfar; 1216 case 0xd3c: /* Aux Fault Status. */ 1217 /* TODO: Implement fault status registers. */ 1218 qemu_log_mask(LOG_UNIMP, 1219 "Aux Fault status registers unimplemented\n"); 1220 return 0; 1221 case 0xd40: /* PFR0. */ 1222 return cpu->id_pfr0; 1223 case 0xd44: /* PFR1. */ 1224 return cpu->id_pfr1; 1225 case 0xd48: /* DFR0. */ 1226 return cpu->id_dfr0; 1227 case 0xd4c: /* AFR0. */ 1228 return cpu->id_afr0; 1229 case 0xd50: /* MMFR0. */ 1230 return cpu->id_mmfr0; 1231 case 0xd54: /* MMFR1. */ 1232 return cpu->id_mmfr1; 1233 case 0xd58: /* MMFR2. */ 1234 return cpu->id_mmfr2; 1235 case 0xd5c: /* MMFR3. */ 1236 return cpu->id_mmfr3; 1237 case 0xd60: /* ISAR0. */ 1238 return cpu->isar.id_isar0; 1239 case 0xd64: /* ISAR1. */ 1240 return cpu->isar.id_isar1; 1241 case 0xd68: /* ISAR2. */ 1242 return cpu->isar.id_isar2; 1243 case 0xd6c: /* ISAR3. */ 1244 return cpu->isar.id_isar3; 1245 case 0xd70: /* ISAR4. */ 1246 return cpu->isar.id_isar4; 1247 case 0xd74: /* ISAR5. 
*/ 1248 return cpu->isar.id_isar5; 1249 case 0xd78: /* CLIDR */ 1250 return cpu->clidr; 1251 case 0xd7c: /* CTR */ 1252 return cpu->ctr; 1253 case 0xd80: /* CSSIDR */ 1254 { 1255 int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK; 1256 return cpu->ccsidr[idx]; 1257 } 1258 case 0xd84: /* CSSELR */ 1259 return cpu->env.v7m.csselr[attrs.secure]; 1260 case 0xd88: /* CPACR */ 1261 if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 1262 return 0; 1263 } 1264 return cpu->env.v7m.cpacr[attrs.secure]; 1265 case 0xd8c: /* NSACR */ 1266 if (!attrs.secure || !arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 1267 return 0; 1268 } 1269 return cpu->env.v7m.nsacr; 1270 /* TODO: Implement debug registers. */ 1271 case 0xd90: /* MPU_TYPE */ 1272 /* Unified MPU; if the MPU is not present this value is zero */ 1273 return cpu->pmsav7_dregion << 8; 1274 break; 1275 case 0xd94: /* MPU_CTRL */ 1276 return cpu->env.v7m.mpu_ctrl[attrs.secure]; 1277 case 0xd98: /* MPU_RNR */ 1278 return cpu->env.pmsav7.rnr[attrs.secure]; 1279 case 0xd9c: /* MPU_RBAR */ 1280 case 0xda4: /* MPU_RBAR_A1 */ 1281 case 0xdac: /* MPU_RBAR_A2 */ 1282 case 0xdb4: /* MPU_RBAR_A3 */ 1283 { 1284 int region = cpu->env.pmsav7.rnr[attrs.secure]; 1285 1286 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1287 /* PMSAv8M handling of the aliases is different from v7M: 1288 * aliases A1, A2, A3 override the low two bits of the region 1289 * number in MPU_RNR, and there is no 'region' field in the 1290 * RBAR register. 
1291 */ 1292 int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ 1293 if (aliasno) { 1294 region = deposit32(region, 0, 2, aliasno); 1295 } 1296 if (region >= cpu->pmsav7_dregion) { 1297 return 0; 1298 } 1299 return cpu->env.pmsav8.rbar[attrs.secure][region]; 1300 } 1301 1302 if (region >= cpu->pmsav7_dregion) { 1303 return 0; 1304 } 1305 return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf); 1306 } 1307 case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */ 1308 case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */ 1309 case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */ 1310 case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */ 1311 { 1312 int region = cpu->env.pmsav7.rnr[attrs.secure]; 1313 1314 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1315 /* PMSAv8M handling of the aliases is different from v7M: 1316 * aliases A1, A2, A3 override the low two bits of the region 1317 * number in MPU_RNR. 1318 */ 1319 int aliasno = (offset - 0xda0) / 8; /* 0..3 */ 1320 if (aliasno) { 1321 region = deposit32(region, 0, 2, aliasno); 1322 } 1323 if (region >= cpu->pmsav7_dregion) { 1324 return 0; 1325 } 1326 return cpu->env.pmsav8.rlar[attrs.secure][region]; 1327 } 1328 1329 if (region >= cpu->pmsav7_dregion) { 1330 return 0; 1331 } 1332 return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) | 1333 (cpu->env.pmsav7.drsr[region] & 0xffff); 1334 } 1335 case 0xdc0: /* MPU_MAIR0 */ 1336 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1337 goto bad_offset; 1338 } 1339 return cpu->env.pmsav8.mair0[attrs.secure]; 1340 case 0xdc4: /* MPU_MAIR1 */ 1341 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1342 goto bad_offset; 1343 } 1344 return cpu->env.pmsav8.mair1[attrs.secure]; 1345 case 0xdd0: /* SAU_CTRL */ 1346 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1347 goto bad_offset; 1348 } 1349 if (!attrs.secure) { 1350 return 0; 1351 } 1352 return cpu->env.sau.ctrl; 1353 case 0xdd4: /* SAU_TYPE */ 1354 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1355 goto bad_offset; 1356 } 1357 if 
(!attrs.secure) { 1358 return 0; 1359 } 1360 return cpu->sau_sregion; 1361 case 0xdd8: /* SAU_RNR */ 1362 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1363 goto bad_offset; 1364 } 1365 if (!attrs.secure) { 1366 return 0; 1367 } 1368 return cpu->env.sau.rnr; 1369 case 0xddc: /* SAU_RBAR */ 1370 { 1371 int region = cpu->env.sau.rnr; 1372 1373 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1374 goto bad_offset; 1375 } 1376 if (!attrs.secure) { 1377 return 0; 1378 } 1379 if (region >= cpu->sau_sregion) { 1380 return 0; 1381 } 1382 return cpu->env.sau.rbar[region]; 1383 } 1384 case 0xde0: /* SAU_RLAR */ 1385 { 1386 int region = cpu->env.sau.rnr; 1387 1388 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1389 goto bad_offset; 1390 } 1391 if (!attrs.secure) { 1392 return 0; 1393 } 1394 if (region >= cpu->sau_sregion) { 1395 return 0; 1396 } 1397 return cpu->env.sau.rlar[region]; 1398 } 1399 case 0xde4: /* SFSR */ 1400 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1401 goto bad_offset; 1402 } 1403 if (!attrs.secure) { 1404 return 0; 1405 } 1406 return cpu->env.v7m.sfsr; 1407 case 0xde8: /* SFAR */ 1408 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1409 goto bad_offset; 1410 } 1411 if (!attrs.secure) { 1412 return 0; 1413 } 1414 return cpu->env.v7m.sfar; 1415 case 0xf34: /* FPCCR */ 1416 if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 1417 return 0; 1418 } 1419 if (attrs.secure) { 1420 return cpu->env.v7m.fpccr[M_REG_S]; 1421 } else { 1422 /* 1423 * NS can read LSPEN, CLRONRET and MONRDY. It can read 1424 * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0; 1425 * other non-banked bits RAZ. 1426 * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set. 
1427 */ 1428 uint32_t value = cpu->env.v7m.fpccr[M_REG_S]; 1429 uint32_t mask = R_V7M_FPCCR_LSPEN_MASK | 1430 R_V7M_FPCCR_CLRONRET_MASK | 1431 R_V7M_FPCCR_MONRDY_MASK; 1432 1433 if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { 1434 mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK; 1435 } 1436 1437 value &= mask; 1438 1439 value |= cpu->env.v7m.fpccr[M_REG_NS]; 1440 return value; 1441 } 1442 case 0xf38: /* FPCAR */ 1443 if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 1444 return 0; 1445 } 1446 return cpu->env.v7m.fpcar[attrs.secure]; 1447 case 0xf3c: /* FPDSCR */ 1448 if (!arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 1449 return 0; 1450 } 1451 return cpu->env.v7m.fpdscr[attrs.secure]; 1452 case 0xf40: /* MVFR0 */ 1453 return cpu->isar.mvfr0; 1454 case 0xf44: /* MVFR1 */ 1455 return cpu->isar.mvfr1; 1456 case 0xf48: /* MVFR2 */ 1457 return cpu->isar.mvfr2; 1458 default: 1459 bad_offset: 1460 qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); 1461 return 0; 1462 } 1463 } 1464 1465 static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, 1466 MemTxAttrs attrs) 1467 { 1468 ARMCPU *cpu = s->cpu; 1469 1470 switch (offset) { 1471 case 0xc: /* CPPWR */ 1472 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1473 goto bad_offset; 1474 } 1475 /* Make the IMPDEF choice to RAZ/WI this. */ 1476 break; 1477 case 0x380 ... 
0x3bf: /* NVIC_ITNS<n> */ 1478 { 1479 int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ; 1480 int i; 1481 1482 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1483 goto bad_offset; 1484 } 1485 if (!attrs.secure) { 1486 break; 1487 } 1488 for (i = 0; i < 32 && startvec + i < s->num_irq; i++) { 1489 s->itns[startvec + i] = (value >> i) & 1; 1490 } 1491 nvic_irq_update(s); 1492 break; 1493 } 1494 case 0xd04: /* Interrupt Control State (ICSR) */ 1495 if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { 1496 if (value & (1 << 31)) { 1497 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false); 1498 } else if (value & (1 << 30) && 1499 arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1500 /* PENDNMICLR didn't exist in v7M */ 1501 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false); 1502 } 1503 } 1504 if (value & (1 << 28)) { 1505 armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure); 1506 } else if (value & (1 << 27)) { 1507 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure); 1508 } 1509 if (value & (1 << 26)) { 1510 armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure); 1511 } else if (value & (1 << 25)) { 1512 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure); 1513 } 1514 break; 1515 case 0xd08: /* Vector Table Offset. 
*/ 1516 cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80; 1517 break; 1518 case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */ 1519 if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) { 1520 if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) { 1521 if (attrs.secure || 1522 !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) { 1523 qemu_irq_pulse(s->sysresetreq); 1524 } 1525 } 1526 if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) { 1527 qemu_log_mask(LOG_GUEST_ERROR, 1528 "Setting VECTCLRACTIVE when not in DEBUG mode " 1529 "is UNPREDICTABLE\n"); 1530 } 1531 if (value & R_V7M_AIRCR_VECTRESET_MASK) { 1532 /* NB: this bit is RES0 in v8M */ 1533 qemu_log_mask(LOG_GUEST_ERROR, 1534 "Setting VECTRESET when not in DEBUG mode " 1535 "is UNPREDICTABLE\n"); 1536 } 1537 if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1538 s->prigroup[attrs.secure] = 1539 extract32(value, 1540 R_V7M_AIRCR_PRIGROUP_SHIFT, 1541 R_V7M_AIRCR_PRIGROUP_LENGTH); 1542 } 1543 if (attrs.secure) { 1544 /* These bits are only writable by secure */ 1545 cpu->env.v7m.aircr = value & 1546 (R_V7M_AIRCR_SYSRESETREQS_MASK | 1547 R_V7M_AIRCR_BFHFNMINS_MASK | 1548 R_V7M_AIRCR_PRIS_MASK); 1549 /* BFHFNMINS changes the priority of Secure HardFault, and 1550 * allows a pending Non-secure HardFault to preempt (which 1551 * we implement by marking it enabled). 1552 */ 1553 if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { 1554 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3; 1555 s->vectors[ARMV7M_EXCP_HARD].enabled = 1; 1556 } else { 1557 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1; 1558 s->vectors[ARMV7M_EXCP_HARD].enabled = 0; 1559 } 1560 } 1561 nvic_irq_update(s); 1562 } 1563 break; 1564 case 0xd10: /* System Control. */ 1565 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1566 goto bad_offset; 1567 } 1568 /* We don't implement deep-sleep so these bits are RAZ/WI. 1569 * The other bits in the register are banked. 
1570 * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which 1571 * is architecturally permitted. 1572 */ 1573 value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK); 1574 cpu->env.v7m.scr[attrs.secure] = value; 1575 break; 1576 case 0xd14: /* Configuration Control. */ 1577 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1578 goto bad_offset; 1579 } 1580 1581 /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */ 1582 value &= (R_V7M_CCR_STKALIGN_MASK | 1583 R_V7M_CCR_BFHFNMIGN_MASK | 1584 R_V7M_CCR_DIV_0_TRP_MASK | 1585 R_V7M_CCR_UNALIGN_TRP_MASK | 1586 R_V7M_CCR_USERSETMPEND_MASK | 1587 R_V7M_CCR_NONBASETHRDENA_MASK); 1588 1589 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1590 /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */ 1591 value |= R_V7M_CCR_NONBASETHRDENA_MASK 1592 | R_V7M_CCR_STKALIGN_MASK; 1593 } 1594 if (attrs.secure) { 1595 /* the BFHFNMIGN bit is not banked; keep that in the NS copy */ 1596 cpu->env.v7m.ccr[M_REG_NS] = 1597 (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK) 1598 | (value & R_V7M_CCR_BFHFNMIGN_MASK); 1599 value &= ~R_V7M_CCR_BFHFNMIGN_MASK; 1600 } 1601 1602 cpu->env.v7m.ccr[attrs.secure] = value; 1603 break; 1604 case 0xd24: /* System Handler Control and State (SHCSR) */ 1605 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { 1606 goto bad_offset; 1607 } 1608 if (attrs.secure) { 1609 s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0; 1610 /* Secure HardFault active bit cannot be written */ 1611 s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0; 1612 s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0; 1613 s->sec_vectors[ARMV7M_EXCP_PENDSV].active = 1614 (value & (1 << 10)) != 0; 1615 s->sec_vectors[ARMV7M_EXCP_SYSTICK].active = 1616 (value & (1 << 11)) != 0; 1617 s->sec_vectors[ARMV7M_EXCP_USAGE].pending = 1618 (value & (1 << 12)) != 0; 1619 s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0; 1620 s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & 
(1 << 15)) != 0; 1621 s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; 1622 s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; 1623 s->sec_vectors[ARMV7M_EXCP_USAGE].enabled = 1624 (value & (1 << 18)) != 0; 1625 s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0; 1626 /* SecureFault not banked, but RAZ/WI to NS */ 1627 s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0; 1628 s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0; 1629 s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0; 1630 } else { 1631 s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0; 1632 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1633 /* HARDFAULTPENDED is not present in v7M */ 1634 s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0; 1635 } 1636 s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0; 1637 s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0; 1638 s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0; 1639 s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0; 1640 s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0; 1641 s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0; 1642 s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0; 1643 s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; 1644 s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0; 1645 } 1646 if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1647 s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0; 1648 s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0; 1649 s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; 1650 } 1651 /* NMIACT can only be written if the write is of a zero, with 1652 * BFHFNMINS 1, and by the CPU in secure state via the NS alias. 
1653 */ 1654 if (!attrs.secure && cpu->env.v7m.secure && 1655 (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) && 1656 (value & (1 << 5)) == 0) { 1657 s->vectors[ARMV7M_EXCP_NMI].active = 0; 1658 } 1659 /* HARDFAULTACT can only be written if the write is of a zero 1660 * to the non-secure HardFault state by the CPU in secure state. 1661 * The only case where we can be targeting the non-secure HF state 1662 * when in secure state is if this is a write via the NS alias 1663 * and BFHFNMINS is 1. 1664 */ 1665 if (!attrs.secure && cpu->env.v7m.secure && 1666 (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) && 1667 (value & (1 << 2)) == 0) { 1668 s->vectors[ARMV7M_EXCP_HARD].active = 0; 1669 } 1670 1671 /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */ 1672 s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0; 1673 nvic_irq_update(s); 1674 break; 1675 case 0xd2c: /* Hard Fault Status. */ 1676 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1677 goto bad_offset; 1678 } 1679 cpu->env.v7m.hfsr &= ~value; /* W1C */ 1680 break; 1681 case 0xd30: /* Debug Fault Status. */ 1682 cpu->env.v7m.dfsr &= ~value; /* W1C */ 1683 break; 1684 case 0xd34: /* Mem Manage Address. */ 1685 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1686 goto bad_offset; 1687 } 1688 cpu->env.v7m.mmfar[attrs.secure] = value; 1689 return; 1690 case 0xd38: /* Bus Fault Address. */ 1691 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1692 goto bad_offset; 1693 } 1694 if (!attrs.secure && 1695 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1696 return; 1697 } 1698 cpu->env.v7m.bfar = value; 1699 return; 1700 case 0xd3c: /* Aux Fault Status. 
*/ 1701 qemu_log_mask(LOG_UNIMP, 1702 "NVIC: Aux fault status registers unimplemented\n"); 1703 break; 1704 case 0xd84: /* CSSELR */ 1705 if (!arm_v7m_csselr_razwi(cpu)) { 1706 cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK; 1707 } 1708 break; 1709 case 0xd88: /* CPACR */ 1710 if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 1711 /* We implement only the Floating Point extension's CP10/CP11 */ 1712 cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20); 1713 } 1714 break; 1715 case 0xd8c: /* NSACR */ 1716 if (attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 1717 /* We implement only the Floating Point extension's CP10/CP11 */ 1718 cpu->env.v7m.nsacr = value & (3 << 10); 1719 } 1720 break; 1721 case 0xd90: /* MPU_TYPE */ 1722 return; /* RO */ 1723 case 0xd94: /* MPU_CTRL */ 1724 if ((value & 1725 (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK)) 1726 == R_V7M_MPU_CTRL_HFNMIENA_MASK) { 1727 qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is " 1728 "UNPREDICTABLE\n"); 1729 } 1730 cpu->env.v7m.mpu_ctrl[attrs.secure] 1731 = value & (R_V7M_MPU_CTRL_ENABLE_MASK | 1732 R_V7M_MPU_CTRL_HFNMIENA_MASK | 1733 R_V7M_MPU_CTRL_PRIVDEFENA_MASK); 1734 tlb_flush(CPU(cpu)); 1735 break; 1736 case 0xd98: /* MPU_RNR */ 1737 if (value >= cpu->pmsav7_dregion) { 1738 qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %" 1739 PRIu32 "/%" PRIu32 "\n", 1740 value, cpu->pmsav7_dregion); 1741 } else { 1742 cpu->env.pmsav7.rnr[attrs.secure] = value; 1743 } 1744 break; 1745 case 0xd9c: /* MPU_RBAR */ 1746 case 0xda4: /* MPU_RBAR_A1 */ 1747 case 0xdac: /* MPU_RBAR_A2 */ 1748 case 0xdb4: /* MPU_RBAR_A3 */ 1749 { 1750 int region; 1751 1752 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1753 /* PMSAv8M handling of the aliases is different from v7M: 1754 * aliases A1, A2, A3 override the low two bits of the region 1755 * number in MPU_RNR, and there is no 'region' field in the 1756 * RBAR register. 
1757 */ 1758 int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ 1759 1760 region = cpu->env.pmsav7.rnr[attrs.secure]; 1761 if (aliasno) { 1762 region = deposit32(region, 0, 2, aliasno); 1763 } 1764 if (region >= cpu->pmsav7_dregion) { 1765 return; 1766 } 1767 cpu->env.pmsav8.rbar[attrs.secure][region] = value; 1768 tlb_flush(CPU(cpu)); 1769 return; 1770 } 1771 1772 if (value & (1 << 4)) { 1773 /* VALID bit means use the region number specified in this 1774 * value and also update MPU_RNR.REGION with that value. 1775 */ 1776 region = extract32(value, 0, 4); 1777 if (region >= cpu->pmsav7_dregion) { 1778 qemu_log_mask(LOG_GUEST_ERROR, 1779 "MPU region out of range %u/%" PRIu32 "\n", 1780 region, cpu->pmsav7_dregion); 1781 return; 1782 } 1783 cpu->env.pmsav7.rnr[attrs.secure] = region; 1784 } else { 1785 region = cpu->env.pmsav7.rnr[attrs.secure]; 1786 } 1787 1788 if (region >= cpu->pmsav7_dregion) { 1789 return; 1790 } 1791 1792 cpu->env.pmsav7.drbar[region] = value & ~0x1f; 1793 tlb_flush(CPU(cpu)); 1794 break; 1795 } 1796 case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */ 1797 case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */ 1798 case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */ 1799 case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */ 1800 { 1801 int region = cpu->env.pmsav7.rnr[attrs.secure]; 1802 1803 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1804 /* PMSAv8M handling of the aliases is different from v7M: 1805 * aliases A1, A2, A3 override the low two bits of the region 1806 * number in MPU_RNR. 
1807 */ 1808 int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ 1809 1810 region = cpu->env.pmsav7.rnr[attrs.secure]; 1811 if (aliasno) { 1812 region = deposit32(region, 0, 2, aliasno); 1813 } 1814 if (region >= cpu->pmsav7_dregion) { 1815 return; 1816 } 1817 cpu->env.pmsav8.rlar[attrs.secure][region] = value; 1818 tlb_flush(CPU(cpu)); 1819 return; 1820 } 1821 1822 if (region >= cpu->pmsav7_dregion) { 1823 return; 1824 } 1825 1826 cpu->env.pmsav7.drsr[region] = value & 0xff3f; 1827 cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f; 1828 tlb_flush(CPU(cpu)); 1829 break; 1830 } 1831 case 0xdc0: /* MPU_MAIR0 */ 1832 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1833 goto bad_offset; 1834 } 1835 if (cpu->pmsav7_dregion) { 1836 /* Register is RES0 if no MPU regions are implemented */ 1837 cpu->env.pmsav8.mair0[attrs.secure] = value; 1838 } 1839 /* We don't need to do anything else because memory attributes 1840 * only affect cacheability, and we don't implement caching. 1841 */ 1842 break; 1843 case 0xdc4: /* MPU_MAIR1 */ 1844 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1845 goto bad_offset; 1846 } 1847 if (cpu->pmsav7_dregion) { 1848 /* Register is RES0 if no MPU regions are implemented */ 1849 cpu->env.pmsav8.mair1[attrs.secure] = value; 1850 } 1851 /* We don't need to do anything else because memory attributes 1852 * only affect cacheability, and we don't implement caching. 
1853 */ 1854 break; 1855 case 0xdd0: /* SAU_CTRL */ 1856 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1857 goto bad_offset; 1858 } 1859 if (!attrs.secure) { 1860 return; 1861 } 1862 cpu->env.sau.ctrl = value & 3; 1863 break; 1864 case 0xdd4: /* SAU_TYPE */ 1865 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1866 goto bad_offset; 1867 } 1868 break; 1869 case 0xdd8: /* SAU_RNR */ 1870 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1871 goto bad_offset; 1872 } 1873 if (!attrs.secure) { 1874 return; 1875 } 1876 if (value >= cpu->sau_sregion) { 1877 qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %" 1878 PRIu32 "/%" PRIu32 "\n", 1879 value, cpu->sau_sregion); 1880 } else { 1881 cpu->env.sau.rnr = value; 1882 } 1883 break; 1884 case 0xddc: /* SAU_RBAR */ 1885 { 1886 int region = cpu->env.sau.rnr; 1887 1888 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1889 goto bad_offset; 1890 } 1891 if (!attrs.secure) { 1892 return; 1893 } 1894 if (region >= cpu->sau_sregion) { 1895 return; 1896 } 1897 cpu->env.sau.rbar[region] = value & ~0x1f; 1898 tlb_flush(CPU(cpu)); 1899 break; 1900 } 1901 case 0xde0: /* SAU_RLAR */ 1902 { 1903 int region = cpu->env.sau.rnr; 1904 1905 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1906 goto bad_offset; 1907 } 1908 if (!attrs.secure) { 1909 return; 1910 } 1911 if (region >= cpu->sau_sregion) { 1912 return; 1913 } 1914 cpu->env.sau.rlar[region] = value & ~0x1c; 1915 tlb_flush(CPU(cpu)); 1916 break; 1917 } 1918 case 0xde4: /* SFSR */ 1919 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1920 goto bad_offset; 1921 } 1922 if (!attrs.secure) { 1923 return; 1924 } 1925 cpu->env.v7m.sfsr &= ~value; /* W1C */ 1926 break; 1927 case 0xde8: /* SFAR */ 1928 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1929 goto bad_offset; 1930 } 1931 if (!attrs.secure) { 1932 return; 1933 } 1934 cpu->env.v7m.sfsr = value; 1935 break; 1936 case 0xf00: /* Software Triggered Interrupt Register */ 1937 { 1938 int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ; 1939 1940 if 
(!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { 1941 goto bad_offset; 1942 } 1943 1944 if (excnum < s->num_irq) { 1945 armv7m_nvic_set_pending(s, excnum, false); 1946 } 1947 break; 1948 } 1949 case 0xf34: /* FPCCR */ 1950 if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 1951 /* Not all bits here are banked. */ 1952 uint32_t fpccr_s; 1953 1954 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1955 /* Don't allow setting of bits not present in v7M */ 1956 value &= (R_V7M_FPCCR_LSPACT_MASK | 1957 R_V7M_FPCCR_USER_MASK | 1958 R_V7M_FPCCR_THREAD_MASK | 1959 R_V7M_FPCCR_HFRDY_MASK | 1960 R_V7M_FPCCR_MMRDY_MASK | 1961 R_V7M_FPCCR_BFRDY_MASK | 1962 R_V7M_FPCCR_MONRDY_MASK | 1963 R_V7M_FPCCR_LSPEN_MASK | 1964 R_V7M_FPCCR_ASPEN_MASK); 1965 } 1966 value &= ~R_V7M_FPCCR_RES0_MASK; 1967 1968 if (!attrs.secure) { 1969 /* Some non-banked bits are configurably writable by NS */ 1970 fpccr_s = cpu->env.v7m.fpccr[M_REG_S]; 1971 if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) { 1972 uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN); 1973 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen); 1974 } 1975 if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) { 1976 uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET); 1977 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor); 1978 } 1979 if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 1980 uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY); 1981 uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY); 1982 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy); 1983 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy); 1984 } 1985 /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */ 1986 { 1987 uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY); 1988 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy); 1989 } 1990 1991 /* 1992 * All other non-banked bits are RAZ/WI from NS; write 1993 * just the banked bits to fpccr[M_REG_NS]. 
1994 */ 1995 value &= R_V7M_FPCCR_BANKED_MASK; 1996 cpu->env.v7m.fpccr[M_REG_NS] = value; 1997 } else { 1998 fpccr_s = value; 1999 } 2000 cpu->env.v7m.fpccr[M_REG_S] = fpccr_s; 2001 } 2002 break; 2003 case 0xf38: /* FPCAR */ 2004 if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 2005 value &= ~7; 2006 cpu->env.v7m.fpcar[attrs.secure] = value; 2007 } 2008 break; 2009 case 0xf3c: /* FPDSCR */ 2010 if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) { 2011 value &= 0x07c00000; 2012 cpu->env.v7m.fpdscr[attrs.secure] = value; 2013 } 2014 break; 2015 case 0xf50: /* ICIALLU */ 2016 case 0xf58: /* ICIMVAU */ 2017 case 0xf5c: /* DCIMVAC */ 2018 case 0xf60: /* DCISW */ 2019 case 0xf64: /* DCCMVAU */ 2020 case 0xf68: /* DCCMVAC */ 2021 case 0xf6c: /* DCCSW */ 2022 case 0xf70: /* DCCIMVAC */ 2023 case 0xf74: /* DCCISW */ 2024 case 0xf78: /* BPIALL */ 2025 /* Cache and branch predictor maintenance: for QEMU these always NOP */ 2026 break; 2027 default: 2028 bad_offset: 2029 qemu_log_mask(LOG_GUEST_ERROR, 2030 "NVIC: Bad write offset 0x%x\n", offset); 2031 } 2032 } 2033 2034 static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs) 2035 { 2036 /* Return true if unprivileged access to this register is permitted. */ 2037 switch (offset) { 2038 case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */ 2039 /* For access via STIR_NS it is the NS CCR.USERSETMPEND that 2040 * controls access even though the CPU is in Secure state (I_QDKX). 
 */
        return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
    default:
        /* All other user accesses cause a BusFault unconditionally */
        return false;
    }
}

static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
{
    /* Behaviour for the SHPR register field for this exception:
     * return M_REG_NS to use the nonsecure vector (including for
     * non-banked exceptions), M_REG_S for the secure version of
     * a banked exception, and -1 if this field should RAZ/WI.
     */
    switch (exc) {
    case ARMV7M_EXCP_MEM:
    case ARMV7M_EXCP_USAGE:
    case ARMV7M_EXCP_SVC:
    case ARMV7M_EXCP_PENDSV:
    case ARMV7M_EXCP_SYSTICK:
        /* Banked exceptions: use the caller's security state directly
         * (attrs.secure is 1 for Secure, matching M_REG_S)
         */
        return attrs.secure;
    case ARMV7M_EXCP_BUS:
        /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            return -1;
        }
        return M_REG_NS;
    case ARMV7M_EXCP_SECURE:
        /* Not banked, RAZ/WI from nonsecure */
        if (!attrs.secure) {
            return -1;
        }
        return M_REG_NS;
    case ARMV7M_EXCP_DEBUG:
        /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
        return M_REG_NS;
    case 8 ... 10:
    case 13:
        /* RES0 */
        return -1;
    default:
        /* Not reachable due to decode of SHPR register addresses */
        g_assert_not_reached();
    }
}

/*
 * MemoryRegion read handler for the system-register region.
 * Unprivileged accesses to anything except a permitted STIR
 * generate a BusFault (MEMTX_ERROR); everything else returns
 * MEMTX_OK with the value in *data.
 */
static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
                                    uint64_t *data, unsigned size,
                                    MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    uint32_t val;

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    /* reads of set and clear both return the status */
    case 0x100 ...
0x13f: /* NVIC Set enable */ 2107 offset += 0x80; 2108 /* fall through */ 2109 case 0x180 ... 0x1bf: /* NVIC Clear enable */ 2110 val = 0; 2111 startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */ 2112 2113 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { 2114 if (s->vectors[startvec + i].enabled && 2115 (attrs.secure || s->itns[startvec + i])) { 2116 val |= (1 << i); 2117 } 2118 } 2119 break; 2120 case 0x200 ... 0x23f: /* NVIC Set pend */ 2121 offset += 0x80; 2122 /* fall through */ 2123 case 0x280 ... 0x2bf: /* NVIC Clear pend */ 2124 val = 0; 2125 startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */ 2126 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { 2127 if (s->vectors[startvec + i].pending && 2128 (attrs.secure || s->itns[startvec + i])) { 2129 val |= (1 << i); 2130 } 2131 } 2132 break; 2133 case 0x300 ... 0x33f: /* NVIC Active */ 2134 val = 0; 2135 2136 if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) { 2137 break; 2138 } 2139 2140 startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */ 2141 2142 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { 2143 if (s->vectors[startvec + i].active && 2144 (attrs.secure || s->itns[startvec + i])) { 2145 val |= (1 << i); 2146 } 2147 } 2148 break; 2149 case 0x400 ... 0x5ef: /* NVIC Priority */ 2150 val = 0; 2151 startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */ 2152 2153 for (i = 0; i < size && startvec + i < s->num_irq; i++) { 2154 if (attrs.secure || s->itns[startvec + i]) { 2155 val |= s->vectors[startvec + i].prio << (8 * i); 2156 } 2157 } 2158 break; 2159 case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */ 2160 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { 2161 val = 0; 2162 break; 2163 } 2164 /* fall through */ 2165 case 0xd1c ... 
0xd23: /* System Handler Priority (SHPR2, SHPR3) */ 2166 val = 0; 2167 for (i = 0; i < size; i++) { 2168 unsigned hdlidx = (offset - 0xd14) + i; 2169 int sbank = shpr_bank(s, hdlidx, attrs); 2170 2171 if (sbank < 0) { 2172 continue; 2173 } 2174 val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank)); 2175 } 2176 break; 2177 case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */ 2178 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { 2179 val = 0; 2180 break; 2181 }; 2182 /* 2183 * The BFSR bits [15:8] are shared between security states 2184 * and we store them in the NS copy. They are RAZ/WI for 2185 * NS code if AIRCR.BFHFNMINS is 0. 2186 */ 2187 val = s->cpu->env.v7m.cfsr[attrs.secure]; 2188 if (!attrs.secure && 2189 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { 2190 val &= ~R_V7M_CFSR_BFSR_MASK; 2191 } else { 2192 val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK; 2193 } 2194 val = extract32(val, (offset - 0xd28) * 8, size * 8); 2195 break; 2196 case 0xfe0 ... 0xfff: /* ID. */ 2197 if (offset & 3) { 2198 val = 0; 2199 } else { 2200 val = nvic_id[(offset - 0xfe0) >> 2]; 2201 } 2202 break; 2203 default: 2204 if (size == 4) { 2205 val = nvic_readl(s, offset, attrs); 2206 } else { 2207 qemu_log_mask(LOG_GUEST_ERROR, 2208 "NVIC: Bad read of size %d at offset 0x%x\n", 2209 size, offset); 2210 val = 0; 2211 } 2212 } 2213 2214 trace_nvic_sysreg_read(addr, val, size); 2215 *data = val; 2216 return MEMTX_OK; 2217 } 2218 2219 static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr, 2220 uint64_t value, unsigned size, 2221 MemTxAttrs attrs) 2222 { 2223 NVICState *s = (NVICState *)opaque; 2224 uint32_t offset = addr; 2225 unsigned i, startvec, end; 2226 unsigned setval = 0; 2227 2228 trace_nvic_sysreg_write(addr, value, size); 2229 2230 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) { 2231 /* Generate BusFault for unprivileged accesses */ 2232 return MEMTX_ERROR; 2233 } 2234 2235 switch (offset) { 2236 case 0x100 ... 
0x13f: /* NVIC Set enable */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        /* setval is 1 for the Set-enable alias, 0 for Clear-enable */
        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x300 ... 0x33f: /* NVIC Active */
        return MEMTX_OK; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        /* One priority byte per interrupt */
        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        /* SHPR1 is WI on v6M */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            return MEMTX_OK;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        /* The byte at offset 0xd14 + n holds the priority for
         * system exception n; shpr_bank() selects the security bank
         * (or reports the field as WI).
         */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int newprio = extract32(value, i * 8, 8);
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                continue;
            }
            set_prio(s, hdlidx, sbank, newprio);
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            return MEMTX_OK;
        }
        /* All bits are W1C, so construct 32 bit value with 0s in
         * the parts not written by the access size
         */
        value <<= ((offset - 0xd28) * 8);

        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            /* BFSR bits are RAZ/WI for NS if BFHFNMINS is set */
            value &= ~R_V7M_CFSR_BFSR_MASK;
        }

        s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
        if (attrs.secure) {
            /* The BFSR bits [15:8] are shared between security states
             * and we store them in the NS copy.
             */
            s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
        }
        return MEMTX_OK;
    }
    /* Anything not handled above must be a word-sized access */
    if (size == 4) {
        nvic_writel(s, offset, value, attrs);
        return MEMTX_OK;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; treat as RAZ/WI */
    return MEMTX_OK;
}

static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Write handler for the v8M NonSecure alias region (0xe002e000...):
 * the opaque pointer is the MemoryRegion of the real (secure) region.
 */
static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
                                        uint64_t value, unsigned size,
                                        MemTxAttrs attrs)
{
    MemoryRegion *mr = opaque;

    if (attrs.secure) {
        /* S accesses to the alias act like NS accesses to the real region */
        attrs.secure = 0;
        return memory_region_dispatch_write(mr, addr, value, size, attrs);
    } else {
        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
        if (attrs.user) {
            return MEMTX_ERROR;
        }
        return MEMTX_OK;
    }
}

/* Read handler for the v8M NonSecure alias region; see
 * nvic_sysreg_ns_write() above.
 */
static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
                                       uint64_t *data, unsigned size,
                                       MemTxAttrs attrs)
{
    MemoryRegion *mr = opaque;

    if (attrs.secure) {
        /* S accesses to the alias act like NS accesses to the real region */
        attrs.secure = 0;
        return memory_region_dispatch_read(mr, addr, data, size, attrs);
    } else {
        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
        if (attrs.user) {
            return MEMTX_ERROR;
        }
        *data = 0;
        return MEMTX_OK;
    }
}

static const MemoryRegionOps nvic_sysreg_ns_ops = {
    .read_with_attrs = nvic_sysreg_ns_read,
    .write_with_attrs = nvic_sysreg_ns_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
                                      uint64_t value,
                                      unsigned size,
                                      MemTxAttrs attrs)
{
    NVICState *s = opaque;
    MemoryRegion *mr;

    /* Direct the access to the correct systick (NS bank 0, S bank 1) */
    mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
    return memory_region_dispatch_write(mr, addr, value, size, attrs);
}

static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
                                     uint64_t *data, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = opaque;
    MemoryRegion *mr;

    /* Direct the access to the correct systick (NS bank 0, S bank 1) */
    mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
    return memory_region_dispatch_read(mr, addr, data, size, attrs);
}

static const MemoryRegionOps nvic_systick_ops = {
    .read_with_attrs = nvic_systick_read,
    .write_with_attrs = nvic_systick_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Post-load hook for the main vmstate: reject (return 1) incoming
 * state with out-of-range vector priorities, then rebuild the cached
 * pending/active exception state from the migrated vectors.
 */
static int nvic_post_load(void *opaque, int version_id)
{
    NVICState *s = opaque;
    unsigned i;
    int resetprio;

    /* Check for out of range priority settings */
    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;

    if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
        s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
        s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
        return 1;
    }
    for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
        /* Configurable-priority vectors must fit in one byte */
        if (s->vectors[i].prio & ~0xff) {
            return 1;
        }
    }

    nvic_recompute_state(s);

    return 0;
}

/* Migration state for a single exception vector */
static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};

/* The security subsection is only migrated when the CPU implements
 * the Security extension.
 */
static bool nvic_security_needed(void *opaque)
{
    NVICState *s = opaque;

    return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
}

/* Post-load hook for the security subsection: sanity-check the
 * migrated secure-bank priorities, as nvic_post_load() does for the
 * main vector array.
 */
static int nvic_security_post_load(void *opaque, int version_id)
{
    NVICState *s = opaque;
    int i;

    /* Check for out of range priority settings */
    if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
        && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
        /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
         * if the CPU state has been migrated yet; a mismatch won't
         * cause the emulation to blow up, though.
         */
        return 1;
    }
    for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
        if (s->sec_vectors[i].prio & ~0xff) {
            return 1;
        }
    }
    return 0;
}

static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_nvic_security,
        NULL
    }
};

static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};

static void armv7m_nvic_reset(DeviceState *dev)
{
    int resetprio;
    NVICState *s = NVIC(dev);

    memset(s->vectors, 0, sizeof(s->vectors));
    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
    s->prigroup[M_REG_NS] = 0;
    s->prigroup[M_REG_S] = 0;

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    /* MEM, BUS, and USAGE are enabled through
     * the System Handler Control register
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    /* DebugMonitor is enabled via DEMCR.MON_EN */
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0;

    /* Fixed priorities: Reset is -4 on v8M, -3 otherwise */
    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
    s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

        /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
        s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
        /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
        s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
    } else {
        s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    }

    /* Strictly speaking the reset handler should be enabled.
     * However, we don't simulate soft resets through the NVIC,
     * and the reset vector should never be pended.
     * So we leave it disabled to catch logic errors.
     */

    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
    s->vectpending_is_s_banked = false;
    s->vectpending_prio = NVIC_NOEXC_PRIO;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        memset(s->itns, 0, sizeof(s->itns));
    } else {
        /* This state is constant and not guest accessible in a non-security
         * NVIC; we set the bits to true to avoid having to do a feature
         * bit check in the NVIC enable/pend/etc register accessors.
         */
        int i;

        for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
            s->itns[i] = true;
        }
    }
}

/* GPIO input handler for the "systick-trigger" lines */
static void nvic_systick_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    if (level) {
        /* SysTick just asked us to pend its exception.
         * (This is different from an external interrupt line's
         * behaviour.)
         * n == 0 : NonSecure systick
         * n == 1 : Secure systick
         */
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
    }
}

static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);
    Error *err = NULL;
    int regionlen;

    /* The armv7m container object will have set our CPU pointer */
    if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
        error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
        return;
    }

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    /* 8 bits of priority on v7M and later; 2 bits on v6M */
    s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;

    object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true,
                             "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
                       qdev_get_gpio_in_named(dev, "systick-trigger",
                                              M_REG_NS));

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        /* We couldn't init the secure systick device in instance_init
         * as we didn't know then if the CPU had the security extensions;
         * so we have to do it here.
         */
        sysbus_init_child_obj(OBJECT(dev), "systick-reg-s",
                              &s->systick[M_REG_S],
                              sizeof(s->systick[M_REG_S]), TYPE_SYSTICK);

        object_property_set_bool(OBJECT(&s->systick[M_REG_S]), true,
                                 "realized", &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
        sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
                           qdev_get_gpio_in_named(dev, "systick-trigger",
                                                  M_REG_S));
    }

    /* The NVIC and System Control Space (SCS) starts at 0xe000e000
     * and looks like this:
     *  0x004 - ICTR
     *  0x010 - 0xff - systick
     *  0x100..0x7ec - NVIC
     *  0x7f0..0xcff - Reserved
     *  0xd00..0xd3c - SCS registers
     *  0xd40..0xeff - Reserved or Not implemented
     *  0xf00 - STIR
     *
     * Some registers within this space are banked between security states.
     * In v8M there is a second range 0xe002e000..0xe002efff which is the
     * NonSecure alias SCS; secure accesses to this behave like NS accesses
     * to the main SCS range, and non-secure accesses (including when
     * the security extension is not implemented) are RAZ/WI.
     * Note that both the main SCS range and the alias range are defined
     * to be exempt from memory attribution (R_BLJT) and so the memory
     * transaction attribute always matches the current CPU security
     * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops
     * wrappers we change attrs.secure to indicate the NS access; so
     * generally code determining which banked register to use should
     * use attrs.secure; code determining actual behaviour of the system
     * should use env->v7m.secure.
     */
    regionlen = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? 0x21000 : 0x1000;
    memory_region_init(&s->container, OBJECT(s), "nvic", regionlen);
    /* The system register region goes at the bottom of the priority
     * stack as it covers the whole page.
     */
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    memory_region_add_subregion(&s->container, 0, &s->sysregmem);

    /* The systick region overlays the sysreg region at higher priority */
    memory_region_init_io(&s->systickmem, OBJECT(s),
                          &nvic_systick_ops, s,
                          "nvic_systick", 0xe0);

    memory_region_add_subregion_overlap(&s->container, 0x10,
                                        &s->systickmem, 1);

    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        /* v8M: map the NonSecure alias regions at 0x20000 within the
         * container (i.e. 0xe002e000 in the system address space).
         */
        memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->sysregmem,
                              "nvic_sysregs_ns", 0x1000);
        memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
        memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->systickmem,
                              "nvic_systick_ns", 0xe0);
        memory_region_add_subregion_overlap(&s->container, 0x20010,
                                            &s->systick_ns_mem, 1);
    }

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
}

static void armv7m_nvic_instance_init(Object *obj)
{
    /* We have a different default value for the num-irq property
     * than our superclass. This function runs after qdev init
     * has set the defaults from the Property array and before
     * any user-specified property setting, so just modify the
     * value in the NVICState struct.
     */
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    sysbus_init_child_obj(obj, "systick-reg-ns", &nvic->systick[M_REG_NS],
                          sizeof(nvic->systick[M_REG_NS]), TYPE_SYSTICK);
    /* We can't initialize the secure systick here, as we don't know
     * yet if we need it.
     */

    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
                            M_REG_NUM_BANKS);
    qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
}

/* QOM class init: hook up vmstate, properties, reset and realize */
static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_nvic;
    dc->props = props_nvic;
    dc->reset = armv7m_nvic_reset;
    dc->realize = armv7m_nvic_realize;
}

static const TypeInfo armv7m_nvic_info = {
    .name = TYPE_NVIC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init = armv7m_nvic_class_init,
    .class_size = sizeof(SysBusDeviceClass),
};

static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)