1 /* 2 * ARM Generic Interrupt Controller v3 3 * 4 * Copyright (c) 2016 Linaro Limited 5 * Written by Peter Maydell 6 * 7 * This code is licensed under the GPL, version 2 or (at your option) 8 * any later version. 9 */ 10 11 /* This file contains the code for the system register interface 12 * portions of the GICv3. 13 */ 14 15 #include "qemu/osdep.h" 16 #include "qemu/bitops.h" 17 #include "trace.h" 18 #include "gicv3_internal.h" 19 #include "cpu.h" 20 21 static GICv3CPUState *icc_cs_from_env(CPUARMState *env) 22 { 23 /* Given the CPU, find the right GICv3CPUState struct. 24 * Since we registered the CPU interface with the EL change hook as 25 * the opaque pointer, we can just directly get from the CPU to it. 26 */ 27 return arm_get_el_change_hook_opaque(arm_env_get_cpu(env)); 28 } 29 30 static bool gicv3_use_ns_bank(CPUARMState *env) 31 { 32 /* Return true if we should use the NonSecure bank for a banked GIC 33 * CPU interface register. Note that this differs from the 34 * access_secure_reg() function because GICv3 banked registers are 35 * banked even for AArch64, unlike the other CPU system registers. 36 */ 37 return !arm_is_secure_below_el3(env); 38 } 39 40 /* The minimum BPR for the virtual interface is a configurable property */ 41 static inline int icv_min_vbpr(GICv3CPUState *cs) 42 { 43 return 7 - cs->vprebits; 44 } 45 46 /* Simple accessor functions for LR fields */ 47 static uint32_t ich_lr_vintid(uint64_t lr) 48 { 49 return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH); 50 } 51 52 static uint32_t ich_lr_pintid(uint64_t lr) 53 { 54 return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH); 55 } 56 57 static uint32_t ich_lr_prio(uint64_t lr) 58 { 59 return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH); 60 } 61 62 static int ich_lr_state(uint64_t lr) 63 { 64 return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH); 65 } 66 67 static bool icv_access(CPUARMState *env, int hcr_flags) 68 { 69 /* Return true if this ICC_ register access should really be 70 * directed to an ICV_ access. hcr_flags is a mask of 71 * HCR_EL2 bits to check: we treat this as an ICV_ access 72 * if we are in NS EL1 and at least one of the specified 73 * HCR_EL2 bits is set. 74 * 75 * ICV registers fall into four categories: 76 * * access if NS EL1 and HCR_EL2.FMO == 1: 77 * all ICV regs with '0' in their name 78 * * access if NS EL1 and HCR_EL2.IMO == 1: 79 * all ICV regs with '1' in their name 80 * * access if NS EL1 and either IMO or FMO == 1: 81 * CTLR, DIR, PMR, RPR 82 */ 83 return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1 84 && !arm_is_secure_below_el3(env); 85 } 86 87 static int read_vbpr(GICv3CPUState *cs, int grp) 88 { 89 /* Read VBPR value out of the VMCR field (caller must handle 90 * VCBPR effects if required) 91 */ 92 if (grp == GICV3_G0) { 93 return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT, 94 ICH_VMCR_EL2_VBPR0_LENGTH); 95 } else { 96 return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT, 97 ICH_VMCR_EL2_VBPR1_LENGTH); 98 } 99 } 100 101 static void write_vbpr(GICv3CPUState *cs, int grp, int value) 102 { 103 /* Write new VBPR1 value, handling the "writing a value less than 104 * the minimum sets it to the minimum" semantics. 105 */ 106 int min = icv_min_vbpr(cs); 107 108 if (grp != GICV3_G0) { 109 min++; 110 } 111 112 value = MAX(value, min); 113 114 if (grp == GICV3_G0) { 115 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT, 116 ICH_VMCR_EL2_VBPR0_LENGTH, value); 117 } else { 118 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT, 119 ICH_VMCR_EL2_VBPR1_LENGTH, value); 120 } 121 } 122 123 static uint32_t icv_fullprio_mask(GICv3CPUState *cs) 124 { 125 /* Return a mask word which clears the unimplemented priority bits 126 * from a priority value for a virtual interrupt. (Not to be confused 127 * with the group priority, whose mask depends on the value of VBPR 128 * for the interrupt group.) 129 */ 130 return ~0U << (8 - cs->vpribits); 131 } 132 133 static int ich_highest_active_virt_prio(GICv3CPUState *cs) 134 { 135 /* Calculate the current running priority based on the set bits 136 * in the ICH Active Priority Registers. 137 */ 138 int i; 139 int aprmax = 1 << (cs->vprebits - 5); 140 141 assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0])); 142 143 for (i = 0; i < aprmax; i++) { 144 uint32_t apr = cs->ich_apr[GICV3_G0][i] | 145 cs->ich_apr[GICV3_G1NS][i]; 146 147 if (!apr) { 148 continue; 149 } 150 return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1); 151 } 152 /* No current active interrupts: return idle priority */ 153 return 0xff; 154 } 155 156 static int hppvi_index(GICv3CPUState *cs) 157 { 158 /* Return the list register index of the highest priority pending 159 * virtual interrupt, as per the HighestPriorityVirtualInterrupt 160 * pseudocode. If no pending virtual interrupts, return -1. 161 */ 162 int idx = -1; 163 int i; 164 /* Note that a list register entry with a priority of 0xff will 165 * never be reported by this function; this is the architecturally 166 * correct behaviour. 167 */ 168 int prio = 0xff; 169 170 if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) { 171 /* Both groups disabled, definitely nothing to do */ 172 return idx; 173 } 174 175 for (i = 0; i < cs->num_list_regs; i++) { 176 uint64_t lr = cs->ich_lr_el2[i]; 177 int thisprio; 178 179 if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) { 180 /* Not Pending */ 181 continue; 182 } 183 184 /* Ignore interrupts if relevant group enable not set */ 185 if (lr & ICH_LR_EL2_GROUP) { 186 if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { 187 continue; 188 } 189 } else { 190 if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) { 191 continue; 192 } 193 } 194 195 thisprio = ich_lr_prio(lr); 196 197 if (thisprio < prio) { 198 prio = thisprio; 199 idx = i; 200 } 201 } 202 203 return idx; 204 } 205 206 static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group) 207 { 208 /* Return a mask word which clears the subpriority bits from 209 * a priority value for a virtual interrupt in the specified group. 210 * This depends on the VBPR value: 211 * a BPR of 0 means the group priority bits are [7:1]; 212 * a BPR of 1 means they are [7:2], and so on down to 213 * a BPR of 7 meaning no group priority bits at all. 214 * Which BPR to use depends on the group of the interrupt and 215 * the current ICH_VMCR_EL2.VCBPR settings. 216 */ 217 if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) { 218 group = GICV3_G0; 219 } 220 221 return ~0U << (read_vbpr(cs, group) + 1); 222 } 223 224 static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr) 225 { 226 /* Return true if we can signal this virtual interrupt defined by 227 * the given list register value; see the pseudocode functions 228 * CanSignalVirtualInterrupt and CanSignalVirtualInt. 229 * Compare also icc_hppi_can_preempt() which is the non-virtual 230 * equivalent of these checks. 231 */ 232 int grp; 233 uint32_t mask, prio, rprio, vpmr; 234 235 if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) { 236 /* Virtual interface disabled */ 237 return false; 238 } 239 240 /* We don't need to check that this LR is in Pending state because 241 * that has already been done in hppvi_index(). 242 */ 243 244 prio = ich_lr_prio(lr); 245 vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, 246 ICH_VMCR_EL2_VPMR_LENGTH); 247 248 if (prio >= vpmr) { 249 /* Priority mask masks this interrupt */ 250 return false; 251 } 252 253 rprio = ich_highest_active_virt_prio(cs); 254 if (rprio == 0xff) { 255 /* No running interrupt so we can preempt */ 256 return true; 257 } 258 259 grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; 260 261 mask = icv_gprio_mask(cs, grp); 262 263 /* We only preempt a running interrupt if the pending interrupt's 264 * group priority is sufficient (the subpriorities are not considered). 265 */ 266 if ((prio & mask) < (rprio & mask)) { 267 return true; 268 } 269 270 return false; 271 } 272 273 static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs, 274 uint32_t *misr) 275 { 276 /* Return a set of bits indicating the EOI maintenance interrupt status 277 * for each list register. The EOI maintenance interrupt status is 278 * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1 279 * (see the GICv3 spec for the ICH_EISR_EL2 register). 280 * If misr is not NULL then we should also collect the information 281 * about the MISR.EOI, MISR.NP and MISR.U bits. 282 */ 283 uint32_t value = 0; 284 int validcount = 0; 285 bool seenpending = false; 286 int i; 287 288 for (i = 0; i < cs->num_list_regs; i++) { 289 uint64_t lr = cs->ich_lr_el2[i]; 290 291 if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI)) 292 == ICH_LR_EL2_EOI) { 293 value |= (1 << i); 294 } 295 if ((lr & ICH_LR_EL2_STATE_MASK)) { 296 validcount++; 297 } 298 if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) { 299 seenpending = true; 300 } 301 } 302 303 if (misr) { 304 if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) { 305 *misr |= ICH_MISR_EL2_U; 306 } 307 if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) { 308 *misr |= ICH_MISR_EL2_NP; 309 } 310 if (value) { 311 *misr |= ICH_MISR_EL2_EOI; 312 } 313 } 314 return value; 315 } 316 317 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs) 318 { 319 /* Return a set of bits indicating the maintenance interrupt status 320 * (as seen in the ICH_MISR_EL2 register). 321 */ 322 uint32_t value = 0; 323 324 /* Scan list registers and fill in the U, NP and EOI bits */ 325 eoi_maintenance_interrupt_state(cs, &value); 326 327 if (cs->ich_hcr_el2 & (ICH_HCR_EL2_LRENPIE | ICH_HCR_EL2_EOICOUNT_MASK)) { 328 value |= ICH_MISR_EL2_LRENP; 329 } 330 331 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) && 332 (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) { 333 value |= ICH_MISR_EL2_VGRP0E; 334 } 335 336 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) && 337 !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { 338 value |= ICH_MISR_EL2_VGRP0D; 339 } 340 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) && 341 (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { 342 value |= ICH_MISR_EL2_VGRP1E; 343 } 344 345 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) && 346 !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { 347 value |= ICH_MISR_EL2_VGRP1D; 348 } 349 350 return value; 351 } 352 353 static void gicv3_cpuif_virt_update(GICv3CPUState *cs) 354 { 355 } 356 357 static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 358 { 359 GICv3CPUState *cs = icc_cs_from_env(env); 360 int regno = ri->opc2 & 3; 361 int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS; 362 uint64_t value = cs->ich_apr[grp][regno]; 363 364 trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value); 365 return value; 366 } 367 368 static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 369 uint64_t value) 370 { 371 GICv3CPUState *cs = icc_cs_from_env(env); 372 int regno = ri->opc2 & 3; 373 int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS; 374 375 trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); 376 377 cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; 378 379 gicv3_cpuif_virt_update(cs); 380 return; 381 } 382 383 static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri) 384 { 385 GICv3CPUState *cs = icc_cs_from_env(env); 386 int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS; 387 uint64_t bpr; 388 bool satinc = false; 389 390 if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) { 391 /* reads return bpr0 + 1 saturated to 7, writes ignored */ 392 grp = GICV3_G0; 393 satinc = true; 394 } 395 396 bpr = read_vbpr(cs, grp); 397 398 if (satinc) { 399 bpr++; 400 bpr = MIN(bpr, 7); 401 } 402 403 trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr); 404 405 return bpr; 406 } 407 408 static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri, 409 uint64_t value) 410 { 411 GICv3CPUState *cs = icc_cs_from_env(env); 412 int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS; 413 414 trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1, 415 gicv3_redist_affid(cs), value); 416 417 if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) { 418 /* reads return bpr0 + 1 saturated to 7, writes ignored */ 419 return; 420 } 421 422 write_vbpr(cs, grp, value); 423 424 gicv3_cpuif_virt_update(cs); 425 } 426 427 static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri) 428 { 429 GICv3CPUState *cs = icc_cs_from_env(env); 430 uint64_t value; 431 432 value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, 433 ICH_VMCR_EL2_VPMR_LENGTH); 434 435 trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value); 436 return value; 437 } 438 439 static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, 440 uint64_t value) 441 { 442 GICv3CPUState *cs = icc_cs_from_env(env); 443 444 trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value); 445 446 value &= icv_fullprio_mask(cs); 447 448 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, 449 ICH_VMCR_EL2_VPMR_LENGTH, value); 450 451 gicv3_cpuif_virt_update(cs); 452 } 453 454 static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri) 455 { 456 GICv3CPUState *cs = icc_cs_from_env(env); 457 int enbit; 458 uint64_t value; 459 460 enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT; 461 value = extract64(cs->ich_vmcr_el2, enbit, 1); 462 463 trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0, 464 gicv3_redist_affid(cs), value); 465 return value; 466 } 467 468 static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri, 469 uint64_t value) 470 { 471 GICv3CPUState *cs = icc_cs_from_env(env); 472 int enbit; 473 474 trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0, 475 gicv3_redist_affid(cs), value); 476 477 enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT; 478 479 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value); 480 gicv3_cpuif_virt_update(cs); 481 } 482 483 static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) 484 { 485 GICv3CPUState *cs = icc_cs_from_env(env); 486 uint64_t value; 487 488 /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits) 489 * should match the ones reported in ich_vtr_read(). 490 */ 491 value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | 492 (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); 493 494 if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) { 495 value |= ICC_CTLR_EL1_EOIMODE; 496 } 497 498 if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) { 499 value |= ICC_CTLR_EL1_CBPR; 500 } 501 502 trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value); 503 return value; 504 } 505 506 static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 507 uint64_t value) 508 { 509 GICv3CPUState *cs = icc_cs_from_env(env); 510 511 trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value); 512 513 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT, 514 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0); 515 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT, 516 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0); 517 518 gicv3_cpuif_virt_update(cs); 519 } 520 521 static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri) 522 { 523 GICv3CPUState *cs = icc_cs_from_env(env); 524 int prio = ich_highest_active_virt_prio(cs); 525 526 trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio); 527 return prio; 528 } 529 530 static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri) 531 { 532 GICv3CPUState *cs = icc_cs_from_env(env); 533 int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS; 534 int idx = hppvi_index(cs); 535 uint64_t value = INTID_SPURIOUS; 536 537 if (idx >= 0) { 538 uint64_t lr = cs->ich_lr_el2[idx]; 539 int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; 540 541 if (grp == thisgrp) { 542 value = ich_lr_vintid(lr); 543 } 544 } 545 546 trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value); 547 return value; 548 } 549 550 static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp) 551 { 552 /* Activate the interrupt in the specified list register 553 * by moving it from Pending to Active state, and update the 554 * Active Priority Registers. 555 */ 556 uint32_t mask = icv_gprio_mask(cs, grp); 557 int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask; 558 int aprbit = prio >> (8 - cs->vprebits); 559 int regno = aprbit / 32; 560 int regbit = aprbit % 32; 561 562 cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT; 563 cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT; 564 cs->ich_apr[grp][regno] |= (1 << regbit); 565 } 566 567 static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri) 568 { 569 GICv3CPUState *cs = icc_cs_from_env(env); 570 int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS; 571 int idx = hppvi_index(cs); 572 uint64_t intid = INTID_SPURIOUS; 573 574 if (idx >= 0) { 575 uint64_t lr = cs->ich_lr_el2[idx]; 576 int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; 577 578 if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) { 579 intid = ich_lr_vintid(lr); 580 if (intid < INTID_SECURE) { 581 icv_activate_irq(cs, idx, grp); 582 } else { 583 /* Interrupt goes from Pending to Invalid */ 584 cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT; 585 /* We will now return the (bogus) ID from the list register, 586 * as per the pseudocode. 587 */ 588 } 589 } 590 } 591 592 trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1, 593 gicv3_redist_affid(cs), intid); 594 return intid; 595 } 596 597 static int icc_highest_active_prio(GICv3CPUState *cs) 598 { 599 /* Calculate the current running priority based on the set bits 600 * in the Active Priority Registers. 601 */ 602 int i; 603 604 for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) { 605 uint32_t apr = cs->icc_apr[GICV3_G0][i] | 606 cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i]; 607 608 if (!apr) { 609 continue; 610 } 611 return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1); 612 } 613 /* No current active interrupts: return idle priority */ 614 return 0xff; 615 } 616 617 static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group) 618 { 619 /* Return a mask word which clears the subpriority bits from 620 * a priority value for an interrupt in the specified group. 621 * This depends on the BPR value: 622 * a BPR of 0 means the group priority bits are [7:1]; 623 * a BPR of 1 means they are [7:2], and so on down to 624 * a BPR of 7 meaning no group priority bits at all. 625 * Which BPR to use depends on the group of the interrupt and 626 * the current ICC_CTLR.CBPR settings. 627 */ 628 if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) || 629 (group == GICV3_G1NS && 630 cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) { 631 group = GICV3_G0; 632 } 633 634 return ~0U << ((cs->icc_bpr[group] & 7) + 1); 635 } 636 637 static bool icc_no_enabled_hppi(GICv3CPUState *cs) 638 { 639 /* Return true if there is no pending interrupt, or the 640 * highest priority pending interrupt is in a group which has been 641 * disabled at the CPU interface by the ICC_IGRPEN* register enable bits. 642 */ 643 return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0); 644 } 645 646 static bool icc_hppi_can_preempt(GICv3CPUState *cs) 647 { 648 /* Return true if we have a pending interrupt of sufficient 649 * priority to preempt. 650 */ 651 int rprio; 652 uint32_t mask; 653 654 if (icc_no_enabled_hppi(cs)) { 655 return false; 656 } 657 658 if (cs->hppi.prio >= cs->icc_pmr_el1) { 659 /* Priority mask masks this interrupt */ 660 return false; 661 } 662 663 rprio = icc_highest_active_prio(cs); 664 if (rprio == 0xff) { 665 /* No currently running interrupt so we can preempt */ 666 return true; 667 } 668 669 mask = icc_gprio_mask(cs, cs->hppi.grp); 670 671 /* We only preempt a running interrupt if the pending interrupt's 672 * group priority is sufficient (the subpriorities are not considered). 673 */ 674 if ((cs->hppi.prio & mask) < (rprio & mask)) { 675 return true; 676 } 677 678 return false; 679 } 680 681 void gicv3_cpuif_update(GICv3CPUState *cs) 682 { 683 /* Tell the CPU about its highest priority pending interrupt */ 684 int irqlevel = 0; 685 int fiqlevel = 0; 686 ARMCPU *cpu = ARM_CPU(cs->cpu); 687 CPUARMState *env = &cpu->env; 688 689 trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq, 690 cs->hppi.grp, cs->hppi.prio); 691 692 if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) { 693 /* If a Security-enabled GIC sends a G1S interrupt to a 694 * Security-disabled CPU, we must treat it as if it were G0. 695 */ 696 cs->hppi.grp = GICV3_G0; 697 } 698 699 if (icc_hppi_can_preempt(cs)) { 700 /* We have an interrupt: should we signal it as IRQ or FIQ? 701 * This is described in the GICv3 spec section 4.6.2. 702 */ 703 bool isfiq; 704 705 switch (cs->hppi.grp) { 706 case GICV3_G0: 707 isfiq = true; 708 break; 709 case GICV3_G1: 710 isfiq = (!arm_is_secure(env) || 711 (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3))); 712 break; 713 case GICV3_G1NS: 714 isfiq = arm_is_secure(env); 715 break; 716 default: 717 g_assert_not_reached(); 718 } 719 720 if (isfiq) { 721 fiqlevel = 1; 722 } else { 723 irqlevel = 1; 724 } 725 } 726 727 trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel); 728 729 qemu_set_irq(cs->parent_fiq, fiqlevel); 730 qemu_set_irq(cs->parent_irq, irqlevel); 731 } 732 733 static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri) 734 { 735 GICv3CPUState *cs = icc_cs_from_env(env); 736 uint32_t value = cs->icc_pmr_el1; 737 738 if (icv_access(env, HCR_FMO | HCR_IMO)) { 739 return icv_pmr_read(env, ri); 740 } 741 742 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) && 743 (env->cp15.scr_el3 & SCR_FIQ)) { 744 /* NS access and Group 0 is inaccessible to NS: return the 745 * NS view of the current priority 746 */ 747 if (value & 0x80) { 748 /* Secure priorities not visible to NS */ 749 value = 0; 750 } else if (value != 0xff) { 751 value = (value << 1) & 0xff; 752 } 753 } 754 755 trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value); 756 757 return value; 758 } 759 760 static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, 761 uint64_t value) 762 { 763 GICv3CPUState *cs = icc_cs_from_env(env); 764 765 if (icv_access(env, HCR_FMO | HCR_IMO)) { 766 return icv_pmr_write(env, ri, value); 767 } 768 769 trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value); 770 771 value &= 0xff; 772 773 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) && 774 (env->cp15.scr_el3 & SCR_FIQ)) { 775 /* NS access and Group 0 is inaccessible to NS: return the 776 * NS view of the current priority 777 */ 778 if (!(cs->icc_pmr_el1 & 0x80)) { 779 /* Current PMR in the secure range, don't allow NS to change it */ 780 return; 781 } 782 value = (value >> 1) & 0x80; 783 } 784 cs->icc_pmr_el1 = value; 785 gicv3_cpuif_update(cs); 786 } 787 788 static void icc_activate_irq(GICv3CPUState *cs, int irq) 789 { 790 /* Move the interrupt from the Pending state to Active, and update 791 * the Active Priority Registers 792 */ 793 uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp); 794 int prio = cs->hppi.prio & mask; 795 int aprbit = prio >> 1; 796 int regno = aprbit / 32; 797 int regbit = aprbit % 32; 798 799 cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit); 800 801 if (irq < GIC_INTERNAL) { 802 cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1); 803 cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0); 804 gicv3_redist_update(cs); 805 } else { 806 gicv3_gicd_active_set(cs->gic, irq); 807 gicv3_gicd_pending_clear(cs->gic, irq); 808 gicv3_update(cs->gic, irq, 1); 809 } 810 } 811 812 static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env) 813 { 814 /* Return the highest priority pending interrupt register value 815 * for group 0. 816 */ 817 bool irq_is_secure; 818 819 if (cs->hppi.prio == 0xff) { 820 return INTID_SPURIOUS; 821 } 822 823 /* Check whether we can return the interrupt or if we should return 824 * a special identifier, as per the CheckGroup0ForSpecialIdentifiers 825 * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM 826 * is always zero.) 827 */ 828 irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && 829 (cs->hppi.grp != GICV3_G1NS)); 830 831 if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) { 832 return INTID_SPURIOUS; 833 } 834 if (irq_is_secure && !arm_is_secure(env)) { 835 /* Secure interrupts not visible to Nonsecure */ 836 return INTID_SPURIOUS; 837 } 838 839 if (cs->hppi.grp != GICV3_G0) { 840 /* Indicate to EL3 that there's a Group 1 interrupt for the other 841 * state pending. 842 */ 843 return irq_is_secure ? INTID_SECURE : INTID_NONSECURE; 844 } 845 846 return cs->hppi.irq; 847 } 848 849 static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env) 850 { 851 /* Return the highest priority pending interrupt register value 852 * for group 1. 853 */ 854 bool irq_is_secure; 855 856 if (cs->hppi.prio == 0xff) { 857 return INTID_SPURIOUS; 858 } 859 860 /* Check whether we can return the interrupt or if we should return 861 * a special identifier, as per the CheckGroup1ForSpecialIdentifiers 862 * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM 863 * is always zero.) 864 */ 865 irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && 866 (cs->hppi.grp != GICV3_G1NS)); 867 868 if (cs->hppi.grp == GICV3_G0) { 869 /* Group 0 interrupts not visible via HPPIR1 */ 870 return INTID_SPURIOUS; 871 } 872 if (irq_is_secure) { 873 if (!arm_is_secure(env)) { 874 /* Secure interrupts not visible in Non-secure */ 875 return INTID_SPURIOUS; 876 } 877 } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) { 878 /* Group 1 non-secure interrupts not visible in Secure EL1 */ 879 return INTID_SPURIOUS; 880 } 881 882 return cs->hppi.irq; 883 } 884 885 static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri) 886 { 887 GICv3CPUState *cs = icc_cs_from_env(env); 888 uint64_t intid; 889 890 if (icv_access(env, HCR_FMO)) { 891 return icv_iar_read(env, ri); 892 } 893 894 if (!icc_hppi_can_preempt(cs)) { 895 intid = INTID_SPURIOUS; 896 } else { 897 intid = icc_hppir0_value(cs, env); 898 } 899 900 if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) { 901 icc_activate_irq(cs, intid); 902 } 903 904 trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid); 905 return intid; 906 } 907 908 static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri) 909 { 910 GICv3CPUState *cs = icc_cs_from_env(env); 911 uint64_t intid; 912 913 if (icv_access(env, HCR_IMO)) { 914 return icv_iar_read(env, ri); 915 } 916 917 if (!icc_hppi_can_preempt(cs)) { 918 intid = INTID_SPURIOUS; 919 } else { 920 intid = icc_hppir1_value(cs, env); 921 } 922 923 if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) { 924 icc_activate_irq(cs, intid); 925 } 926 927 trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid); 928 return intid; 929 } 930 931 static void icc_drop_prio(GICv3CPUState *cs, int grp) 932 { 933 /* Drop the priority of the currently active interrupt in 934 * the specified group. 935 * 936 * Note that we can guarantee (because of the requirement to nest 937 * ICC_IAR reads [which activate an interrupt and raise priority] 938 * with ICC_EOIR writes [which drop the priority for the interrupt]) 939 * that the interrupt we're being called for is the highest priority 940 * active interrupt, meaning that it has the lowest set bit in the 941 * APR registers. 942 * 943 * If the guest does not honour the ordering constraints then the 944 * behaviour of the GIC is UNPREDICTABLE, which for us means that 945 * the values of the APR registers might become incorrect and the 946 * running priority will be wrong, so interrupts that should preempt 947 * might not do so, and interrupts that should not preempt might do so. 948 */ 949 int i; 950 951 for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) { 952 uint64_t *papr = &cs->icc_apr[grp][i]; 953 954 if (!*papr) { 955 continue; 956 } 957 /* Clear the lowest set bit */ 958 *papr &= *papr - 1; 959 break; 960 } 961 962 /* running priority change means we need an update for this cpu i/f */ 963 gicv3_cpuif_update(cs); 964 } 965 966 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs) 967 { 968 /* Return true if we should split priority drop and interrupt 969 * deactivation, ie whether the relevant EOIMode bit is set. 970 */ 971 if (arm_is_el3_or_mon(env)) { 972 return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3; 973 } 974 if (arm_is_secure_below_el3(env)) { 975 return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE; 976 } else { 977 return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE; 978 } 979 } 980 981 static int icc_highest_active_group(GICv3CPUState *cs) 982 { 983 /* Return the group with the highest priority active interrupt. 984 * We can do this by just comparing the APRs to see which one 985 * has the lowest set bit. 986 * (If more than one group is active at the same priority then 987 * we're in UNPREDICTABLE territory.) 988 */ 989 int i; 990 991 for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) { 992 int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]); 993 int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]); 994 int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]); 995 996 if (g1nsctz < g0ctz && g1nsctz < g1ctz) { 997 return GICV3_G1NS; 998 } 999 if (g1ctz < g0ctz) { 1000 return GICV3_G1; 1001 } 1002 if (g0ctz < 32) { 1003 return GICV3_G0; 1004 } 1005 } 1006 /* No set active bits? UNPREDICTABLE; return -1 so the caller 1007 * ignores the spurious EOI attempt. 1008 */ 1009 return -1; 1010 } 1011 1012 static void icc_deactivate_irq(GICv3CPUState *cs, int irq) 1013 { 1014 if (irq < GIC_INTERNAL) { 1015 cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0); 1016 gicv3_redist_update(cs); 1017 } else { 1018 gicv3_gicd_active_clear(cs->gic, irq); 1019 gicv3_update(cs->gic, irq, 1); 1020 } 1021 } 1022 1023 static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs) 1024 { 1025 /* Return true if we should split priority drop and interrupt 1026 * deactivation, ie whether the virtual EOIMode bit is set. 1027 */ 1028 return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM; 1029 } 1030 1031 static int icv_find_active(GICv3CPUState *cs, int irq) 1032 { 1033 /* Given an interrupt number for an active interrupt, return the index 1034 * of the corresponding list register, or -1 if there is no match. 1035 * Corresponds to FindActiveVirtualInterrupt pseudocode. 1036 */ 1037 int i; 1038 1039 for (i = 0; i < cs->num_list_regs; i++) { 1040 uint64_t lr = cs->ich_lr_el2[i]; 1041 1042 if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) { 1043 return i; 1044 } 1045 } 1046 1047 return -1; 1048 } 1049 1050 static void icv_deactivate_irq(GICv3CPUState *cs, int idx) 1051 { 1052 /* Deactivate the interrupt in the specified list register index */ 1053 uint64_t lr = cs->ich_lr_el2[idx]; 1054 1055 if (lr & ICH_LR_EL2_HW) { 1056 /* Deactivate the associated physical interrupt */ 1057 int pirq = ich_lr_pintid(lr); 1058 1059 if (pirq < INTID_SECURE) { 1060 icc_deactivate_irq(cs, pirq); 1061 } 1062 } 1063 1064 /* Clear the 'active' part of the state, so ActivePending->Pending 1065 * and Active->Invalid. 1066 */ 1067 lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT; 1068 cs->ich_lr_el2[idx] = lr; 1069 } 1070 1071 static void icv_increment_eoicount(GICv3CPUState *cs) 1072 { 1073 /* Increment the EOICOUNT field in ICH_HCR_EL2 */ 1074 int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT, 1075 ICH_HCR_EL2_EOICOUNT_LENGTH); 1076 1077 cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT, 1078 ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1); 1079 } 1080 1081 static int icv_drop_prio(GICv3CPUState *cs) 1082 { 1083 /* Drop the priority of the currently active virtual interrupt 1084 * (favouring group 0 if there is a set active bit at 1085 * the same priority for both group 0 and group 1). 1086 * Return the priority value for the bit we just cleared, 1087 * or 0xff if no bits were set in the AP registers at all. 1088 * Note that though the ich_apr[] are uint64_t only the low 1089 * 32 bits are actually relevant. 1090 */ 1091 int i; 1092 int aprmax = 1 << (cs->vprebits - 5); 1093 1094 assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0])); 1095 1096 for (i = 0; i < aprmax; i++) { 1097 uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i]; 1098 uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i]; 1099 int apr0count, apr1count; 1100 1101 if (!*papr0 && !*papr1) { 1102 continue; 1103 } 1104 1105 /* We can't just use the bit-twiddling hack icc_drop_prio() does 1106 * because we need to return the bit number we cleared so 1107 * it can be compared against the list register's priority field. 1108 */ 1109 apr0count = ctz32(*papr0); 1110 apr1count = ctz32(*papr1); 1111 1112 if (apr0count <= apr1count) { 1113 *papr0 &= *papr0 - 1; 1114 return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1); 1115 } else { 1116 *papr1 &= *papr1 - 1; 1117 return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1); 1118 } 1119 } 1120 return 0xff; 1121 } 1122 1123 static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri, 1124 uint64_t value) 1125 { 1126 /* Deactivate interrupt */ 1127 GICv3CPUState *cs = icc_cs_from_env(env); 1128 int idx; 1129 int irq = value & 0xffffff; 1130 1131 trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value); 1132 1133 if (irq >= cs->gic->num_irq) { 1134 /* Also catches special interrupt numbers and LPIs */ 1135 return; 1136 } 1137 1138 if (!icv_eoi_split(env, cs)) { 1139 return; 1140 } 1141 1142 idx = icv_find_active(cs, irq); 1143 1144 if (idx < 0) { 1145 /* No list register matching this, so increment the EOI count 1146 * (might trigger a maintenance interrupt) 1147 */ 1148 icv_increment_eoicount(cs); 1149 } else { 1150 icv_deactivate_irq(cs, idx); 1151 } 1152 1153 gicv3_cpuif_virt_update(cs); 1154 } 1155 1156 static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, 1157 uint64_t value) 1158 { 1159 /* End of Interrupt */ 1160 GICv3CPUState *cs = icc_cs_from_env(env); 1161 int irq = value & 0xffffff; 1162 int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS; 1163 int idx, dropprio; 1164 1165 trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1, 1166 gicv3_redist_affid(cs), value); 1167 1168 if (irq >= cs->gic->num_irq) { 1169 /* Also catches special interrupt numbers and LPIs */ 1170 return; 1171 } 1172 1173 /* We implement the IMPDEF choice of "drop priority before doing 1174 * error checks" (because that lets us avoid scanning the AP 1175 * registers twice). 1176 */ 1177 dropprio = icv_drop_prio(cs); 1178 if (dropprio == 0xff) { 1179 /* No active interrupt. It is CONSTRAINED UNPREDICTABLE 1180 * whether the list registers are checked in this 1181 * situation; we choose not to. 1182 */ 1183 return; 1184 } 1185 1186 idx = icv_find_active(cs, irq); 1187 1188 if (idx < 0) { 1189 /* No valid list register corresponding to EOI ID */ 1190 icv_increment_eoicount(cs); 1191 } else { 1192 uint64_t lr = cs->ich_lr_el2[idx]; 1193 int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; 1194 int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp); 1195 1196 if (thisgrp == grp && lr_gprio == dropprio) { 1197 if (!icv_eoi_split(env, cs)) { 1198 /* Priority drop and deactivate not split: deactivate irq now */ 1199 icv_deactivate_irq(cs, idx); 1200 } 1201 } 1202 } 1203 1204 gicv3_cpuif_virt_update(cs); 1205 } 1206 1207 static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, 1208 uint64_t value) 1209 { 1210 /* End of Interrupt */ 1211 GICv3CPUState *cs = icc_cs_from_env(env); 1212 int irq = value & 0xffffff; 1213 int grp; 1214 1215 if (icv_access(env, ri->crm == 8 ? HCR_FMO : HCR_IMO)) { 1216 icv_eoir_write(env, ri, value); 1217 return; 1218 } 1219 1220 trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1, 1221 gicv3_redist_affid(cs), value); 1222 1223 if (ri->crm == 8) { 1224 /* EOIR0 */ 1225 grp = GICV3_G0; 1226 } else { 1227 /* EOIR1 */ 1228 if (arm_is_secure(env)) { 1229 grp = GICV3_G1; 1230 } else { 1231 grp = GICV3_G1NS; 1232 } 1233 } 1234 1235 if (irq >= cs->gic->num_irq) { 1236 /* This handles two cases: 1237 * 1. If software writes the ID of a spurious interrupt [ie 1020-1023] 1238 * to the GICC_EOIR, the GIC ignores that write. 1239 * 2. If software writes the number of a non-existent interrupt 1240 * this must be a subcase of "value written does not match the last 1241 * valid interrupt value read from the Interrupt Acknowledge 1242 * register" and so this is UNPREDICTABLE. We choose to ignore it. 1243 */ 1244 return; 1245 } 1246 1247 if (icc_highest_active_group(cs) != grp) { 1248 return; 1249 } 1250 1251 icc_drop_prio(cs, grp); 1252 1253 if (!icc_eoi_split(env, cs)) { 1254 /* Priority drop and deactivate not split: deactivate irq now */ 1255 icc_deactivate_irq(cs, irq); 1256 } 1257 } 1258 1259 static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri) 1260 { 1261 GICv3CPUState *cs = icc_cs_from_env(env); 1262 uint64_t value; 1263 1264 if (icv_access(env, HCR_FMO)) { 1265 return icv_hppir_read(env, ri); 1266 } 1267 1268 value = icc_hppir0_value(cs, env); 1269 trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value); 1270 return value; 1271 } 1272 1273 static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri) 1274 { 1275 GICv3CPUState *cs = icc_cs_from_env(env); 1276 uint64_t value; 1277 1278 if (icv_access(env, HCR_IMO)) { 1279 return icv_hppir_read(env, ri); 1280 } 1281 1282 value = icc_hppir1_value(cs, env); 1283 trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value); 1284 return value; 1285 } 1286 1287 static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1288 { 1289 GICv3CPUState *cs = icc_cs_from_env(env); 1290 int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1; 1291 bool satinc = false; 1292 uint64_t bpr; 1293 1294 if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { 1295 return icv_bpr_read(env, ri); 1296 } 1297 1298 if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { 1299 grp = GICV3_G1NS; 1300 } 1301 1302 if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) && 1303 (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) { 1304 /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses 1305 * modify BPR0 1306 */ 1307 grp = GICV3_G0; 1308 } 1309 1310 if (grp == GICV3_G1NS && arm_current_el(env) < 3 && 1311 (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) { 1312 /* reads return bpr0 + 1 sat to 7, writes ignored */ 1313 grp = GICV3_G0; 1314 satinc = true; 1315 } 1316 1317 bpr = cs->icc_bpr[grp]; 1318 if (satinc) { 1319 bpr++; 1320 bpr = MIN(bpr, 7); 1321 } 1322 1323 trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr); 1324 1325 return bpr; 1326 } 1327 1328 static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1329 uint64_t value) 1330 { 1331 GICv3CPUState *cs = icc_cs_from_env(env); 1332 int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1; 1333 1334 if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { 1335 icv_bpr_write(env, ri, value); 1336 return; 1337 } 1338 1339 trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1, 1340 gicv3_redist_affid(cs), value); 1341 1342 if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { 1343 grp = GICV3_G1NS; 1344 } 1345 1346 if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) && 1347 (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) { 1348 /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses 1349 * modify BPR0 1350 */ 1351 grp = GICV3_G0; 1352 } 1353 1354 if (grp == GICV3_G1NS && arm_current_el(env) < 3 && 1355 (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) { 1356 /* reads return bpr0 + 1 sat to 7, writes ignored */ 1357 return; 1358 } 1359 1360 cs->icc_bpr[grp] = value & 7; 1361 gicv3_cpuif_update(cs); 1362 } 1363 1364 static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 1365 { 1366 GICv3CPUState *cs = icc_cs_from_env(env); 1367 uint64_t value; 1368 1369 int regno = ri->opc2 & 3; 1370 int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1; 1371 1372 if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { 1373 return icv_ap_read(env, ri); 1374 } 1375 1376 if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { 1377 grp = GICV3_G1NS; 1378 } 1379 1380 value = cs->icc_apr[grp][regno]; 1381 1382 trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value); 1383 return value; 1384 } 1385 1386 static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 1387 uint64_t value) 1388 { 1389 GICv3CPUState *cs = icc_cs_from_env(env); 1390 1391 int regno = ri->opc2 & 3; 1392 int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1; 1393 1394 if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { 1395 icv_ap_write(env, ri, value); 1396 return; 1397 } 1398 1399 trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); 1400 1401 if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { 1402 grp = GICV3_G1NS; 1403 } 1404 1405 /* It's not possible to claim that a Non-secure interrupt is active 1406 * at a priority outside the Non-secure range (128..255), since this 1407 * would otherwise allow malicious NS code to block delivery of S interrupts 1408 * by writing a bad value to these registers. 1409 */ 1410 if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) { 1411 return; 1412 } 1413 1414 cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU; 1415 gicv3_cpuif_update(cs); 1416 } 1417 1418 static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri, 1419 uint64_t value) 1420 { 1421 /* Deactivate interrupt */ 1422 GICv3CPUState *cs = icc_cs_from_env(env); 1423 int irq = value & 0xffffff; 1424 bool irq_is_secure, single_sec_state, irq_is_grp0; 1425 bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2; 1426 1427 if (icv_access(env, HCR_FMO | HCR_IMO)) { 1428 icv_dir_write(env, ri, value); 1429 return; 1430 } 1431 1432 trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value); 1433 1434 if (irq >= cs->gic->num_irq) { 1435 /* Also catches special interrupt numbers and LPIs */ 1436 return; 1437 } 1438 1439 if (!icc_eoi_split(env, cs)) { 1440 return; 1441 } 1442 1443 int grp = gicv3_irq_group(cs->gic, cs, irq); 1444 1445 single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS; 1446 irq_is_secure = !single_sec_state && (grp != GICV3_G1NS); 1447 irq_is_grp0 = grp == GICV3_G0; 1448 1449 /* Check whether we're allowed to deactivate this interrupt based 1450 * on its group and the current CPU state. 1451 * These checks are laid out to correspond to the spec's pseudocode. 1452 */ 1453 route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ; 1454 route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ; 1455 /* No need to include !IsSecure in route_*_to_el2 as it's only 1456 * tested in cases where we know !IsSecure is true. 1457 */ 1458 route_fiq_to_el2 = env->cp15.hcr_el2 & HCR_FMO; 1459 route_irq_to_el2 = env->cp15.hcr_el2 & HCR_FMO; 1460 1461 switch (arm_current_el(env)) { 1462 case 3: 1463 break; 1464 case 2: 1465 if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) { 1466 break; 1467 } 1468 if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) { 1469 break; 1470 } 1471 return; 1472 case 1: 1473 if (!arm_is_secure_below_el3(env)) { 1474 if (single_sec_state && irq_is_grp0 && 1475 !route_fiq_to_el3 && !route_fiq_to_el2) { 1476 break; 1477 } 1478 if (!irq_is_secure && !irq_is_grp0 && 1479 !route_irq_to_el3 && !route_irq_to_el2) { 1480 break; 1481 } 1482 } else { 1483 if (irq_is_grp0 && !route_fiq_to_el3) { 1484 break; 1485 } 1486 if (!irq_is_grp0 && 1487 (!irq_is_secure || !single_sec_state) && 1488 !route_irq_to_el3) { 1489 break; 1490 } 1491 } 1492 return; 1493 default: 1494 g_assert_not_reached(); 1495 } 1496 1497 icc_deactivate_irq(cs, irq); 1498 } 1499 1500 static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1501 { 1502 GICv3CPUState *cs = icc_cs_from_env(env); 1503 int prio; 1504 1505 if (icv_access(env, HCR_FMO | HCR_IMO)) { 1506 return icv_rpr_read(env, ri); 1507 } 1508 1509 prio = icc_highest_active_prio(cs); 1510 1511 if (arm_feature(env, ARM_FEATURE_EL3) && 1512 !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) { 1513 /* NS GIC access and Group 0 is inaccessible to NS */ 1514 if (prio & 0x80) { 1515 /* NS mustn't see priorities in the Secure half of the range */ 1516 prio = 0; 1517 } else if (prio != 0xff) { 1518 /* Non-idle priority: show the Non-secure view of it */ 1519 prio = (prio << 1) & 0xff; 1520 } 1521 } 1522 1523 trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio); 1524 return prio; 1525 } 1526 1527 static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs, 1528 uint64_t value, int grp, bool ns) 1529 { 1530 GICv3State *s = cs->gic; 1531 1532 /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */ 1533 uint64_t aff = extract64(value, 48, 8) << 16 | 1534 extract64(value, 32, 8) << 8 | 1535 extract64(value, 16, 8); 1536 uint32_t targetlist = extract64(value, 0, 16); 1537 uint32_t irq = extract64(value, 24, 4); 1538 bool irm = extract64(value, 40, 1); 1539 int i; 1540 1541 if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) { 1542 /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1 1543 * interrupts as Group 0 interrupts and must send Secure Group 0 1544 * interrupts to the target CPUs. 1545 */ 1546 grp = GICV3_G0; 1547 } 1548 1549 trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm, 1550 aff, targetlist); 1551 1552 for (i = 0; i < s->num_cpu; i++) { 1553 GICv3CPUState *ocs = &s->cpu[i]; 1554 1555 if (irm) { 1556 /* IRM == 1 : route to all CPUs except self */ 1557 if (cs == ocs) { 1558 continue; 1559 } 1560 } else { 1561 /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15] 1562 * where the corresponding bit is set in targetlist 1563 */ 1564 int aff0; 1565 1566 if (ocs->gicr_typer >> 40 != aff) { 1567 continue; 1568 } 1569 aff0 = extract64(ocs->gicr_typer, 32, 8); 1570 if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) { 1571 continue; 1572 } 1573 } 1574 1575 /* The redistributor will check against its own GICR_NSACR as needed */ 1576 gicv3_redist_send_sgi(ocs, grp, irq, ns); 1577 } 1578 } 1579 1580 static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri, 1581 uint64_t value) 1582 { 1583 /* Generate Secure Group 0 SGI. */ 1584 GICv3CPUState *cs = icc_cs_from_env(env); 1585 bool ns = !arm_is_secure(env); 1586 1587 icc_generate_sgi(env, cs, value, GICV3_G0, ns); 1588 } 1589 1590 static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri, 1591 uint64_t value) 1592 { 1593 /* Generate Group 1 SGI for the current Security state */ 1594 GICv3CPUState *cs = icc_cs_from_env(env); 1595 int grp; 1596 bool ns = !arm_is_secure(env); 1597 1598 grp = ns ? GICV3_G1NS : GICV3_G1; 1599 icc_generate_sgi(env, cs, value, grp, ns); 1600 } 1601 1602 static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri, 1603 uint64_t value) 1604 { 1605 /* Generate Group 1 SGI for the Security state that is not 1606 * the current state 1607 */ 1608 GICv3CPUState *cs = icc_cs_from_env(env); 1609 int grp; 1610 bool ns = !arm_is_secure(env); 1611 1612 grp = ns ? GICV3_G1 : GICV3_G1NS; 1613 icc_generate_sgi(env, cs, value, grp, ns); 1614 } 1615 1616 static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri) 1617 { 1618 GICv3CPUState *cs = icc_cs_from_env(env); 1619 int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0; 1620 uint64_t value; 1621 1622 if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { 1623 return icv_igrpen_read(env, ri); 1624 } 1625 1626 if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { 1627 grp = GICV3_G1NS; 1628 } 1629 1630 value = cs->icc_igrpen[grp]; 1631 trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0, 1632 gicv3_redist_affid(cs), value); 1633 return value; 1634 } 1635 1636 static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri, 1637 uint64_t value) 1638 { 1639 GICv3CPUState *cs = icc_cs_from_env(env); 1640 int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0; 1641 1642 if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { 1643 icv_igrpen_write(env, ri, value); 1644 return; 1645 } 1646 1647 trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0, 1648 gicv3_redist_affid(cs), value); 1649 1650 if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { 1651 grp = GICV3_G1NS; 1652 } 1653 1654 cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE; 1655 gicv3_cpuif_update(cs); 1656 } 1657 1658 static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri) 1659 { 1660 GICv3CPUState *cs = icc_cs_from_env(env); 1661 uint64_t value; 1662 1663 /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */ 1664 value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1); 1665 trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value); 1666 return value; 1667 } 1668 1669 static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, 1670 uint64_t value) 1671 { 1672 GICv3CPUState *cs = icc_cs_from_env(env); 1673 1674 trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value); 1675 1676 /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */ 1677 cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1); 1678 cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1); 1679 gicv3_cpuif_update(cs); 1680 } 1681 1682 static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri) 1683 { 1684 GICv3CPUState *cs = icc_cs_from_env(env); 1685 int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S; 1686 uint64_t value; 1687 1688 if (icv_access(env, HCR_FMO | HCR_IMO)) { 1689 return icv_ctlr_read(env, ri); 1690 } 1691 1692 value = cs->icc_ctlr_el1[bank]; 1693 trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value); 1694 return value; 1695 } 1696 1697 static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 1698 uint64_t value) 1699 { 1700 GICv3CPUState *cs = icc_cs_from_env(env); 1701 int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S; 1702 uint64_t mask; 1703 1704 if (icv_access(env, HCR_FMO | HCR_IMO)) { 1705 icv_ctlr_write(env, ri, value); 1706 return; 1707 } 1708 1709 trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value); 1710 1711 /* Only CBPR and EOIMODE can be RW; 1712 * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or 1713 * the asseciated priority-based routing of them); 1714 * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO. 1715 */ 1716 if (arm_feature(env, ARM_FEATURE_EL3) && 1717 ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) { 1718 mask = ICC_CTLR_EL1_EOIMODE; 1719 } else { 1720 mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE; 1721 } 1722 1723 cs->icc_ctlr_el1[bank] &= ~mask; 1724 cs->icc_ctlr_el1[bank] |= (value & mask); 1725 gicv3_cpuif_update(cs); 1726 } 1727 1728 1729 static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri) 1730 { 1731 GICv3CPUState *cs = icc_cs_from_env(env); 1732 uint64_t value; 1733 1734 value = cs->icc_ctlr_el3; 1735 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) { 1736 value |= ICC_CTLR_EL3_EOIMODE_EL1NS; 1737 } 1738 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) { 1739 value |= ICC_CTLR_EL3_CBPR_EL1NS; 1740 } 1741 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) { 1742 value |= ICC_CTLR_EL3_EOIMODE_EL1S; 1743 } 1744 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) { 1745 value |= ICC_CTLR_EL3_CBPR_EL1S; 1746 } 1747 1748 trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value); 1749 return value; 1750 } 1751 1752 static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, 1753 uint64_t value) 1754 { 1755 GICv3CPUState *cs = icc_cs_from_env(env); 1756 uint64_t mask; 1757 1758 trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value); 1759 1760 /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */ 1761 cs->icc_ctlr_el1[GICV3_NS] &= (ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE); 1762 if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) { 1763 cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE; 1764 } 1765 if (value & ICC_CTLR_EL3_CBPR_EL1NS) { 1766 cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR; 1767 } 1768 1769 cs->icc_ctlr_el1[GICV3_S] &= (ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE); 1770 if (value & ICC_CTLR_EL3_EOIMODE_EL1S) { 1771 cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE; 1772 } 1773 if (value & ICC_CTLR_EL3_CBPR_EL1S) { 1774 cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR; 1775 } 1776 1777 /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */ 1778 mask = ICC_CTLR_EL3_EOIMODE_EL3; 1779 1780 cs->icc_ctlr_el3 &= ~mask; 1781 cs->icc_ctlr_el3 |= (value & mask); 1782 gicv3_cpuif_update(cs); 1783 } 1784 1785 static CPAccessResult gicv3_irqfiq_access(CPUARMState *env, 1786 const ARMCPRegInfo *ri, bool isread) 1787 { 1788 CPAccessResult r = CP_ACCESS_OK; 1789 1790 if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) { 1791 switch (arm_current_el(env)) { 1792 case 1: 1793 if (arm_is_secure_below_el3(env) || 1794 ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) { 1795 r = CP_ACCESS_TRAP_EL3; 1796 } 1797 break; 1798 case 2: 1799 r = CP_ACCESS_TRAP_EL3; 1800 break; 1801 case 3: 1802 if (!is_a64(env) && !arm_is_el3_or_mon(env)) { 1803 r = CP_ACCESS_TRAP_EL3; 1804 } 1805 break; 1806 default: 1807 g_assert_not_reached(); 1808 } 1809 } 1810 1811 if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { 1812 r = CP_ACCESS_TRAP; 1813 } 1814 return r; 1815 } 1816 1817 static CPAccessResult gicv3_fiq_access(CPUARMState *env, 1818 const ARMCPRegInfo *ri, bool isread) 1819 { 1820 CPAccessResult r = CP_ACCESS_OK; 1821 1822 if (env->cp15.scr_el3 & SCR_FIQ) { 1823 switch (arm_current_el(env)) { 1824 case 1: 1825 if (arm_is_secure_below_el3(env) || 1826 ((env->cp15.hcr_el2 & HCR_FMO) == 0)) { 1827 r = CP_ACCESS_TRAP_EL3; 1828 } 1829 break; 1830 case 2: 1831 r = CP_ACCESS_TRAP_EL3; 1832 break; 1833 case 3: 1834 if (!is_a64(env) && !arm_is_el3_or_mon(env)) { 1835 r = CP_ACCESS_TRAP_EL3; 1836 } 1837 break; 1838 default: 1839 g_assert_not_reached(); 1840 } 1841 } 1842 1843 if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { 1844 r = CP_ACCESS_TRAP; 1845 } 1846 return r; 1847 } 1848 1849 static CPAccessResult gicv3_irq_access(CPUARMState *env, 1850 const ARMCPRegInfo *ri, bool isread) 1851 { 1852 CPAccessResult r = CP_ACCESS_OK; 1853 1854 if (env->cp15.scr_el3 & SCR_IRQ) { 1855 switch (arm_current_el(env)) { 1856 case 1: 1857 if (arm_is_secure_below_el3(env) || 1858 ((env->cp15.hcr_el2 & HCR_IMO) == 0)) { 1859 r = CP_ACCESS_TRAP_EL3; 1860 } 1861 break; 1862 case 2: 1863 r = CP_ACCESS_TRAP_EL3; 1864 break; 1865 case 3: 1866 if (!is_a64(env) && !arm_is_el3_or_mon(env)) { 1867 r = CP_ACCESS_TRAP_EL3; 1868 } 1869 break; 1870 default: 1871 g_assert_not_reached(); 1872 } 1873 } 1874 1875 if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { 1876 r = CP_ACCESS_TRAP; 1877 } 1878 return r; 1879 } 1880 1881 static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1882 { 1883 GICv3CPUState *cs = icc_cs_from_env(env); 1884 1885 cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V | 1886 (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | 1887 (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); 1888 cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V | 1889 (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | 1890 (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); 1891 cs->icc_pmr_el1 = 0; 1892 cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR; 1893 cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR; 1894 if (arm_feature(env, ARM_FEATURE_EL3)) { 1895 cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS; 1896 } else { 1897 cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR; 1898 } 1899 memset(cs->icc_apr, 0, sizeof(cs->icc_apr)); 1900 memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen)); 1901 cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V | 1902 (1 << ICC_CTLR_EL3_IDBITS_SHIFT) | 1903 (7 << ICC_CTLR_EL3_PRIBITS_SHIFT); 1904 1905 memset(cs->ich_apr, 0, sizeof(cs->ich_apr)); 1906 cs->ich_hcr_el2 = 0; 1907 memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2)); 1908 cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN | 1909 (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR1_SHIFT) | 1910 (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT); 1911 } 1912 1913 static const ARMCPRegInfo gicv3_cpuif_reginfo[] = { 1914 { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH, 1915 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0, 1916 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1917 .access = PL1_RW, .accessfn = gicv3_irqfiq_access, 1918 .readfn = icc_pmr_read, 1919 .writefn = icc_pmr_write, 1920 /* We hang the whole cpu interface reset routine off here 1921 * rather than parcelling it out into one little function 1922 * per register 1923 */ 1924 .resetfn = icc_reset, 1925 }, 1926 { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH, 1927 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0, 1928 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1929 .access = PL1_R, .accessfn = gicv3_fiq_access, 1930 .readfn = icc_iar0_read, 1931 }, 1932 { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH, 1933 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1, 1934 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1935 .access = PL1_W, .accessfn = gicv3_fiq_access, 1936 .writefn = icc_eoir_write, 1937 }, 1938 { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH, 1939 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2, 1940 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1941 .access = PL1_R, .accessfn = gicv3_fiq_access, 1942 .readfn = icc_hppir0_read, 1943 }, 1944 { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH, 1945 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3, 1946 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1947 .access = PL1_RW, .accessfn = gicv3_fiq_access, 1948 .readfn = icc_bpr_read, 1949 .writefn = icc_bpr_write, 1950 }, 1951 { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH, 1952 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4, 1953 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1954 .access = PL1_RW, .accessfn = gicv3_fiq_access, 1955 .readfn = icc_ap_read, 1956 .writefn = icc_ap_write, 1957 }, 1958 { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH, 1959 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5, 1960 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1961 .access = PL1_RW, .accessfn = gicv3_fiq_access, 1962 .readfn = icc_ap_read, 1963 .writefn = icc_ap_write, 1964 }, 1965 { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH, 1966 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6, 1967 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1968 .access = PL1_RW, .accessfn = gicv3_fiq_access, 1969 .readfn = icc_ap_read, 1970 .writefn = icc_ap_write, 1971 }, 1972 { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH, 1973 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7, 1974 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1975 .access = PL1_RW, .accessfn = gicv3_fiq_access, 1976 .readfn = icc_ap_read, 1977 .writefn = icc_ap_write, 1978 }, 1979 /* All the ICC_AP1R*_EL1 registers are banked */ 1980 { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH, 1981 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0, 1982 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1983 .access = PL1_RW, .accessfn = gicv3_irq_access, 1984 .readfn = icc_ap_read, 1985 .writefn = icc_ap_write, 1986 }, 1987 { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH, 1988 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1, 1989 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1990 .access = PL1_RW, .accessfn = gicv3_irq_access, 1991 .readfn = icc_ap_read, 1992 .writefn = icc_ap_write, 1993 }, 1994 { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH, 1995 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2, 1996 .type = ARM_CP_IO | ARM_CP_NO_RAW, 1997 .access = PL1_RW, .accessfn = gicv3_irq_access, 1998 .readfn = icc_ap_read, 1999 .writefn = icc_ap_write, 2000 }, 2001 { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH, 2002 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3, 2003 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2004 .access = PL1_RW, .accessfn = gicv3_irq_access, 2005 .readfn = icc_ap_read, 2006 .writefn = icc_ap_write, 2007 }, 2008 { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH, 2009 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1, 2010 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2011 .access = PL1_W, .accessfn = gicv3_irqfiq_access, 2012 .writefn = icc_dir_write, 2013 }, 2014 { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH, 2015 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3, 2016 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2017 .access = PL1_R, .accessfn = gicv3_irqfiq_access, 2018 .readfn = icc_rpr_read, 2019 }, 2020 { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64, 2021 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5, 2022 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2023 .access = PL1_W, .accessfn = gicv3_irqfiq_access, 2024 .writefn = icc_sgi1r_write, 2025 }, 2026 { .name = "ICC_SGI1R", 2027 .cp = 15, .opc1 = 0, .crm = 12, 2028 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, 2029 .access = PL1_W, .accessfn = gicv3_irqfiq_access, 2030 .writefn = icc_sgi1r_write, 2031 }, 2032 { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64, 2033 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6, 2034 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2035 .access = PL1_W, .accessfn = gicv3_irqfiq_access, 2036 .writefn = icc_asgi1r_write, 2037 }, 2038 { .name = "ICC_ASGI1R", 2039 .cp = 15, .opc1 = 1, .crm = 12, 2040 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, 2041 .access = PL1_W, .accessfn = gicv3_irqfiq_access, 2042 .writefn = icc_asgi1r_write, 2043 }, 2044 { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64, 2045 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7, 2046 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2047 .access = PL1_W, .accessfn = gicv3_irqfiq_access, 2048 .writefn = icc_sgi0r_write, 2049 }, 2050 { .name = "ICC_SGI0R", 2051 .cp = 15, .opc1 = 2, .crm = 12, 2052 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, 2053 .access = PL1_W, .accessfn = gicv3_irqfiq_access, 2054 .writefn = icc_sgi0r_write, 2055 }, 2056 { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH, 2057 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0, 2058 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2059 .access = PL1_R, .accessfn = gicv3_irq_access, 2060 .readfn = icc_iar1_read, 2061 }, 2062 { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH, 2063 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1, 2064 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2065 .access = PL1_W, .accessfn = gicv3_irq_access, 2066 .writefn = icc_eoir_write, 2067 }, 2068 { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH, 2069 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2, 2070 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2071 .access = PL1_R, .accessfn = gicv3_irq_access, 2072 .readfn = icc_hppir1_read, 2073 }, 2074 /* This register is banked */ 2075 { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH, 2076 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3, 2077 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2078 .access = PL1_RW, .accessfn = gicv3_irq_access, 2079 .readfn = icc_bpr_read, 2080 .writefn = icc_bpr_write, 2081 }, 2082 /* This register is banked */ 2083 { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH, 2084 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4, 2085 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2086 .access = PL1_RW, .accessfn = gicv3_irqfiq_access, 2087 .readfn = icc_ctlr_el1_read, 2088 .writefn = icc_ctlr_el1_write, 2089 }, 2090 { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH, 2091 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5, 2092 .type = ARM_CP_NO_RAW | ARM_CP_CONST, 2093 .access = PL1_RW, 2094 /* We don't support IRQ/FIQ bypass and system registers are 2095 * always enabled, so all our bits are RAZ/WI or RAO/WI. 2096 * This register is banked but since it's constant we don't 2097 * need to do anything special. 2098 */ 2099 .resetvalue = 0x7, 2100 }, 2101 { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH, 2102 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6, 2103 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2104 .access = PL1_RW, .accessfn = gicv3_fiq_access, 2105 .readfn = icc_igrpen_read, 2106 .writefn = icc_igrpen_write, 2107 }, 2108 /* This register is banked */ 2109 { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH, 2110 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7, 2111 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2112 .access = PL1_RW, .accessfn = gicv3_irq_access, 2113 .readfn = icc_igrpen_read, 2114 .writefn = icc_igrpen_write, 2115 }, 2116 { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH, 2117 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5, 2118 .type = ARM_CP_NO_RAW | ARM_CP_CONST, 2119 .access = PL2_RW, 2120 /* We don't support IRQ/FIQ bypass and system registers are 2121 * always enabled, so all our bits are RAZ/WI or RAO/WI. 2122 */ 2123 .resetvalue = 0xf, 2124 }, 2125 { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH, 2126 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4, 2127 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2128 .access = PL3_RW, 2129 .readfn = icc_ctlr_el3_read, 2130 .writefn = icc_ctlr_el3_write, 2131 }, 2132 { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH, 2133 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5, 2134 .type = ARM_CP_NO_RAW | ARM_CP_CONST, 2135 .access = PL3_RW, 2136 /* We don't support IRQ/FIQ bypass and system registers are 2137 * always enabled, so all our bits are RAZ/WI or RAO/WI. 2138 */ 2139 .resetvalue = 0xf, 2140 }, 2141 { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH, 2142 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7, 2143 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2144 .access = PL3_RW, 2145 .readfn = icc_igrpen1_el3_read, 2146 .writefn = icc_igrpen1_el3_write, 2147 }, 2148 REGINFO_SENTINEL 2149 }; 2150 2151 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2152 { 2153 GICv3CPUState *cs = icc_cs_from_env(env); 2154 int regno = ri->opc2 & 3; 2155 int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS; 2156 uint64_t value; 2157 2158 value = cs->ich_apr[grp][regno]; 2159 trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value); 2160 return value; 2161 } 2162 2163 static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2164 uint64_t value) 2165 { 2166 GICv3CPUState *cs = icc_cs_from_env(env); 2167 int regno = ri->opc2 & 3; 2168 int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS; 2169 2170 trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); 2171 2172 cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; 2173 gicv3_cpuif_virt_update(cs); 2174 } 2175 2176 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2177 { 2178 GICv3CPUState *cs = icc_cs_from_env(env); 2179 uint64_t value = cs->ich_hcr_el2; 2180 2181 trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value); 2182 return value; 2183 } 2184 2185 static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2186 uint64_t value) 2187 { 2188 GICv3CPUState *cs = icc_cs_from_env(env); 2189 2190 trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value); 2191 2192 value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE | 2193 ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE | 2194 ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC | 2195 ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI | 2196 ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK; 2197 2198 cs->ich_hcr_el2 = value; 2199 gicv3_cpuif_virt_update(cs); 2200 } 2201 2202 static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2203 { 2204 GICv3CPUState *cs = icc_cs_from_env(env); 2205 uint64_t value = cs->ich_vmcr_el2; 2206 2207 trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value); 2208 return value; 2209 } 2210 2211 static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2212 uint64_t value) 2213 { 2214 GICv3CPUState *cs = icc_cs_from_env(env); 2215 2216 trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value); 2217 2218 value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR | 2219 ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK | 2220 ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK; 2221 value |= ICH_VMCR_EL2_VFIQEN; 2222 2223 cs->ich_vmcr_el2 = value; 2224 /* Enforce "writing BPRs to less than minimum sets them to the minimum" 2225 * by reading and writing back the fields. 2226 */ 2227 write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G0)); 2228 write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1)); 2229 2230 gicv3_cpuif_virt_update(cs); 2231 } 2232 2233 static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2234 { 2235 GICv3CPUState *cs = icc_cs_from_env(env); 2236 int regno = ri->opc2 | ((ri->crm & 1) << 3); 2237 uint64_t value; 2238 2239 /* This read function handles all of: 2240 * 64-bit reads of the whole LR 2241 * 32-bit reads of the low half of the LR 2242 * 32-bit reads of the high half of the LR 2243 */ 2244 if (ri->state == ARM_CP_STATE_AA32) { 2245 if (ri->crm >= 14) { 2246 value = extract64(cs->ich_lr_el2[regno], 32, 32); 2247 trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value); 2248 } else { 2249 value = extract64(cs->ich_lr_el2[regno], 0, 32); 2250 trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value); 2251 } 2252 } else { 2253 value = cs->ich_lr_el2[regno]; 2254 trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value); 2255 } 2256 2257 return value; 2258 } 2259 2260 static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2261 uint64_t value) 2262 { 2263 GICv3CPUState *cs = icc_cs_from_env(env); 2264 int regno = ri->opc2 | ((ri->crm & 1) << 3); 2265 2266 /* This write function handles all of: 2267 * 64-bit writes to the whole LR 2268 * 32-bit writes to the low half of the LR 2269 * 32-bit writes to the high half of the LR 2270 */ 2271 if (ri->state == ARM_CP_STATE_AA32) { 2272 if (ri->crm >= 14) { 2273 trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value); 2274 value = deposit64(cs->ich_lr_el2[regno], 32, 32, value); 2275 } else { 2276 trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value); 2277 value = deposit64(cs->ich_lr_el2[regno], 0, 32, value); 2278 } 2279 } else { 2280 trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value); 2281 } 2282 2283 /* Enforce RES0 bits in priority field */ 2284 if (cs->vpribits < 8) { 2285 value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT, 2286 8 - cs->vpribits, 0); 2287 } 2288 2289 cs->ich_lr_el2[regno] = value; 2290 gicv3_cpuif_virt_update(cs); 2291 } 2292 2293 static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2294 { 2295 GICv3CPUState *cs = icc_cs_from_env(env); 2296 uint64_t value; 2297 2298 value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT) 2299 | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V 2300 | (1 << ICH_VTR_EL2_IDBITS_SHIFT) 2301 | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT) 2302 | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT); 2303 2304 trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value); 2305 return value; 2306 } 2307 2308 static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2309 { 2310 GICv3CPUState *cs = icc_cs_from_env(env); 2311 uint64_t value = maintenance_interrupt_state(cs); 2312 2313 trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value); 2314 return value; 2315 } 2316 2317 static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2318 { 2319 GICv3CPUState *cs = icc_cs_from_env(env); 2320 uint64_t value = eoi_maintenance_interrupt_state(cs, NULL); 2321 2322 trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value); 2323 return value; 2324 } 2325 2326 static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2327 { 2328 GICv3CPUState *cs = icc_cs_from_env(env); 2329 uint64_t value = 0; 2330 int i; 2331 2332 for (i = 0; i < cs->num_list_regs; i++) { 2333 uint64_t lr = cs->ich_lr_el2[i]; 2334 2335 if ((lr & ICH_LR_EL2_STATE_MASK) == 0 && 2336 ((lr & ICH_LR_EL2_HW) == 1 || (lr & ICH_LR_EL2_EOI) == 0)) { 2337 value |= (1 << i); 2338 } 2339 } 2340 2341 trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value); 2342 return value; 2343 } 2344 2345 static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { 2346 { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH, 2347 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0, 2348 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2349 .access = PL2_RW, 2350 .readfn = ich_ap_read, 2351 .writefn = ich_ap_write, 2352 }, 2353 { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH, 2354 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0, 2355 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2356 .access = PL2_RW, 2357 .readfn = ich_ap_read, 2358 .writefn = ich_ap_write, 2359 }, 2360 { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH, 2361 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0, 2362 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2363 .access = PL2_RW, 2364 .readfn = ich_hcr_read, 2365 .writefn = ich_hcr_write, 2366 }, 2367 { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH, 2368 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1, 2369 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2370 .access = PL2_R, 2371 .readfn = ich_vtr_read, 2372 }, 2373 { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH, 2374 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2, 2375 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2376 .access = PL2_R, 2377 .readfn = ich_misr_read, 2378 }, 2379 { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH, 2380 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3, 2381 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2382 .access = PL2_R, 2383 .readfn = ich_eisr_read, 2384 }, 2385 { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH, 2386 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5, 2387 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2388 .access = PL2_R, 2389 .readfn = ich_elrsr_read, 2390 }, 2391 { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH, 2392 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7, 2393 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2394 .access = PL2_RW, 2395 .readfn = ich_vmcr_read, 2396 .writefn = ich_vmcr_write, 2397 }, 2398 REGINFO_SENTINEL 2399 }; 2400 2401 static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { 2402 { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH, 2403 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1, 2404 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2405 .access = PL2_RW, 2406 .readfn = ich_ap_read, 2407 .writefn = ich_ap_write, 2408 }, 2409 { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH, 2410 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1, 2411 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2412 .access = PL2_RW, 2413 .readfn = ich_ap_read, 2414 .writefn = ich_ap_write, 2415 }, 2416 REGINFO_SENTINEL 2417 }; 2418 2419 static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { 2420 { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH, 2421 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2, 2422 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2423 .access = PL2_RW, 2424 .readfn = ich_ap_read, 2425 .writefn = ich_ap_write, 2426 }, 2427 { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH, 2428 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3, 2429 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2430 .access = PL2_RW, 2431 .readfn = ich_ap_read, 2432 .writefn = ich_ap_write, 2433 }, 2434 { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH, 2435 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2, 2436 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2437 .access = PL2_RW, 2438 .readfn = ich_ap_read, 2439 .writefn = ich_ap_write, 2440 }, 2441 { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH, 2442 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3, 2443 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2444 .access = PL2_RW, 2445 .readfn = ich_ap_read, 2446 .writefn = ich_ap_write, 2447 }, 2448 REGINFO_SENTINEL 2449 }; 2450 2451 static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque) 2452 { 2453 GICv3CPUState *cs = opaque; 2454 2455 gicv3_cpuif_update(cs); 2456 } 2457 2458 void gicv3_init_cpuif(GICv3State *s) 2459 { 2460 /* Called from the GICv3 realize function; register our system 2461 * registers with the CPU 2462 */ 2463 int i; 2464 2465 for (i = 0; i < s->num_cpu; i++) { 2466 ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); 2467 GICv3CPUState *cs = &s->cpu[i]; 2468 2469 /* Note that we can't just use the GICv3CPUState as an opaque pointer 2470 * in define_arm_cp_regs_with_opaque(), because when we're called back 2471 * it might be with code translated by CPU 0 but run by CPU 1, in 2472 * which case we'd get the wrong value. 2473 * So instead we define the regs with no ri->opaque info, and 2474 * get back to the GICv3CPUState from the ARMCPU by reading back 2475 * the opaque pointer from the el_change_hook, which we're going 2476 * to need to register anyway. 2477 */ 2478 define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); 2479 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) 2480 && cpu->gic_num_lrs) { 2481 int j; 2482 2483 cs->num_list_regs = cpu->gic_num_lrs; 2484 cs->vpribits = cpu->gic_vpribits; 2485 cs->vprebits = cpu->gic_vprebits; 2486 2487 /* Check against architectural constraints: getting these 2488 * wrong would be a bug in the CPU code defining these, 2489 * and the implementation relies on them holding. 2490 */ 2491 g_assert(cs->vprebits <= cs->vpribits); 2492 g_assert(cs->vprebits >= 5 && cs->vprebits <= 7); 2493 g_assert(cs->vpribits >= 5 && cs->vpribits <= 8); 2494 2495 define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo); 2496 2497 for (j = 0; j < cs->num_list_regs; j++) { 2498 /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs 2499 * are split into two cp15 regs, LR (the low part, with the 2500 * same encoding as the AArch64 LR) and LRC (the high part). 2501 */ 2502 ARMCPRegInfo lr_regset[] = { 2503 { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH, 2504 .opc0 = 3, .opc1 = 4, .crn = 12, 2505 .crm = 12 + (j >> 3), .opc2 = j & 7, 2506 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2507 .access = PL2_RW, 2508 .readfn = ich_lr_read, 2509 .writefn = ich_lr_write, 2510 }, 2511 { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32, 2512 .cp = 15, .opc1 = 4, .crn = 12, 2513 .crm = 14 + (j >> 3), .opc2 = j & 7, 2514 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2515 .access = PL2_RW, 2516 .readfn = ich_lr_read, 2517 .writefn = ich_lr_write, 2518 }, 2519 REGINFO_SENTINEL 2520 }; 2521 define_arm_cp_regs(cpu, lr_regset); 2522 } 2523 if (cs->vprebits >= 6) { 2524 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo); 2525 } 2526 if (cs->vprebits == 7) { 2527 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo); 2528 } 2529 } 2530 arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs); 2531 } 2532 } 2533