/*
 * ARM GICv3 emulation: Distributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

/* The GICD_NSACR registers contain a two bit field for each interrupt which
 * allows the guest to give NonSecure code access to registers controlling
 * Secure interrupts:
 *  0b00: no access (NS accesses to bits for Secure interrupts will RAZ/WI)
 *  0b01: NS r/w accesses permitted to ISPENDR, SETSPI_NSR, SGIR
 *  0b10: as 0b01, and also r/w to ICPENDR, r/o to ISACTIVER/ICACTIVER,
 *        and w/o to CLRSPI_NSR
 *  0b11: as 0b10, and also r/w to IROUTER and ITARGETSR
 *
 * Given a (multiple-of-32) interrupt number, these mask functions return
 * a mask word where each bit is 1 if the NSACR settings permit access
 * to the interrupt. The mask returned can then be ORed with the GICD_GROUP
 * word for this set of interrupts to give an overall mask.
 */

typedef uint32_t maskfn(GICv3State *s, int irq);

static uint32_t mask_nsacr_ge1(GICv3State *s, int irq)
{
    /* Return a mask where each bit is set if the NSACR field is >= 1 */
    uint64_t raw_nsacr = s->gicd_nsacr[irq / 16 + 1];

    raw_nsacr = raw_nsacr << 32 | s->gicd_nsacr[irq / 16];
    raw_nsacr = (raw_nsacr >> 1) | raw_nsacr;
    return half_unshuffle64(raw_nsacr);
}

static uint32_t mask_nsacr_ge2(GICv3State *s, int irq)
{
    /* Return a mask where each bit is set if the NSACR field is >= 2 */
    uint64_t raw_nsacr = s->gicd_nsacr[irq / 16 + 1];

    raw_nsacr = raw_nsacr << 32 | s->gicd_nsacr[irq / 16];
    raw_nsacr = raw_nsacr >> 1;
    return half_unshuffle64(raw_nsacr);
}

/* We don't need a mask_nsacr_ge3() because IROUTER<n> isn't a bitmap register,
 * but it would be implemented using:
 *  raw_nsacr = (raw_nsacr >> 1) & raw_nsacr;
 */

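/* How the mask functions above work: the two GICD_NSACR<n> registers covering
 * a 32-interrupt block are combined into one 64-bit word, so the 2-bit field
 * for interrupt (irq + n) sits at bits [2n+1:2n]. ORing the word with itself
 * shifted right by one (for ">= 1"), or just shifting it right by one (for
 * ">= 2"), leaves the answer for each field in bit 2n; half_unshuffle64()
 * then gathers those even-numbered bits into bit n of a 32-bit result.
 * For example, if the NSACR field for interrupt (irq + 5) is 0b01, then
 * mask_nsacr_ge1() returns a mask with bit 5 set and mask_nsacr_ge2()
 * returns one with bit 5 clear.
 */
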
static uint32_t mask_group_and_nsacr(GICv3State *s, MemTxAttrs attrs,
                                     maskfn *maskfn, int irq)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure, GICD_GROUPR and GICD_NSACR.
     */
    uint32_t mask;

    if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI
         * unless the NSACR bits permit access.
         */
        mask = *gic_bmp_ptr32(s->group, irq);
        if (maskfn) {
            mask |= maskfn(s, irq);
        }
        return mask;
    }
    return 0xFFFFFFFFU;
}

static int gicd_ns_access(GICv3State *s, int irq)
{
    /* Return the 2 bit NS_access<x> field from GICD_NSACR<n> for the
     * specified interrupt.
     */
    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
        return 0;
    }
    return extract32(s->gicd_nsacr[irq / 16], (irq % 16) * 2, 2);
}

static void gicd_write_set_bitmap_reg(GICv3State *s, MemTxAttrs attrs,
                                      uint32_t *bmp,
                                      maskfn *maskfn,
                                      int offset, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register
     * (GICD_ISENABLER, GICD_ISPENDR, etc).
     * Semantics implemented here:
     * RAZ/WI for SGIs, PPIs, unimplemented IRQs
     * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI.
     * Writing 1 means "set bit in bitmap"; writing 0 is ignored.
     * offset should be the offset in bytes of the register from the start
     * of its group.
     */
    int irq = offset * 8;

    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
        return;
    }
    val &= mask_group_and_nsacr(s, attrs, maskfn, irq);
    *gic_bmp_ptr32(bmp, irq) |= val;
    gicv3_update(s, irq, 32);
}

static void gicd_write_clear_bitmap_reg(GICv3State *s, MemTxAttrs attrs,
                                        uint32_t *bmp,
                                        maskfn *maskfn,
                                        int offset, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register
     * (GICD_ICENABLER, GICD_ICPENDR, etc).
     * Semantics implemented here:
     * RAZ/WI for SGIs, PPIs, unimplemented IRQs
     * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI.
     * Writing 1 means "clear bit in bitmap"; writing 0 is ignored.
     * offset should be the offset in bytes of the register from the start
     * of its group.
     */
    int irq = offset * 8;

    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
        return;
    }
    val &= mask_group_and_nsacr(s, attrs, maskfn, irq);
    *gic_bmp_ptr32(bmp, irq) &= ~val;
    gicv3_update(s, irq, 32);
}

static uint32_t gicd_read_bitmap_reg(GICv3State *s, MemTxAttrs attrs,
                                     uint32_t *bmp,
                                     maskfn *maskfn,
                                     int offset)
{
    /* Helper routine to implement reading a "set/clear-bitmap" register
     * (GICD_ICENABLER, GICD_ISENABLER, GICD_ICPENDR, etc).
     * Semantics implemented here:
     * RAZ/WI for SGIs, PPIs, unimplemented IRQs
     * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI.
     * offset should be the offset in bytes of the register from the start
     * of its group.
     */
    int irq = offset * 8;
    uint32_t val;

    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
        return 0;
    }
    val = *gic_bmp_ptr32(bmp, irq);
    if (bmp == s->pending) {
        /* The PENDING register is a special case -- for level triggered
         * interrupts, the PENDING state is the logical OR of the state of
         * the PENDING latch with the input line level.
         */
        uint32_t edge = *gic_bmp_ptr32(s->edge_trigger, irq);
        uint32_t level = *gic_bmp_ptr32(s->level, irq);
        val |= (~edge & level);
    }
    val &= mask_group_and_nsacr(s, attrs, maskfn, irq);
    return val;
}

static uint8_t gicd_read_ipriorityr(GICv3State *s, MemTxAttrs attrs, int irq)
{
    /* Read the value of GICD_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
        return 0;
    }

    prio = s->gicd_ipriority[irq];

    if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
        if (!gicv3_gicd_group_test(s, irq)) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

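/* The NS priority view used above and in gicd_write_ipriorityr() below maps
 * the Non-secure half of the priority range onto a full 8-bit value: an NS
 * write of 0x40 is stored as 0x80 | (0x40 >> 1) == 0xa0, and that stored
 * value reads back as (0xa0 << 1) & 0xff == 0x40 from the NS side. This
 * confines Non-secure software to the lower-priority half of the range
 * while still presenting it with the full 0x00..0xff scale.
 */
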
static void gicd_write_ipriorityr(GICv3State *s, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICD_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
        return;
    }

    if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
        if (!gicv3_gicd_group_test(s, irq)) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    s->gicd_ipriority[irq] = value;
}

static uint64_t gicd_read_irouter(GICv3State *s, MemTxAttrs attrs, int irq)
{
    /* Read the value of GICD_IROUTER<n> for the specified interrupt,
     * honouring security state.
     */
    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
        return 0;
    }

    if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
        /* RAZ/WI for NS accesses to secure interrupts */
        if (!gicv3_gicd_group_test(s, irq)) {
            if (gicd_ns_access(s, irq) != 3) {
                return 0;
            }
        }
    }

    return s->gicd_irouter[irq];
}

static void gicd_write_irouter(GICv3State *s, MemTxAttrs attrs, int irq,
                               uint64_t val)
{
    /* Write the value of GICD_IROUTER<n> for the specified interrupt,
     * honouring security state.
     */
    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
        return;
    }

    if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
        /* RAZ/WI for NS accesses to secure interrupts */
        if (!gicv3_gicd_group_test(s, irq)) {
            if (gicd_ns_access(s, irq) != 3) {
                return;
            }
        }
    }

    s->gicd_irouter[irq] = val;
    gicv3_cache_target_cpustate(s, irq);
    gicv3_update(s, irq, 1);
}

static MemTxResult gicd_readb(GICv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    /* Most GICv3 distributor registers do not support byte accesses. */
    switch (offset) {
    case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf:
    case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
    case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff:
        /* This GIC implementation always has affinity routing enabled,
         * so these registers are all RAZ/WI.
         */
        return MEMTX_OK;
    case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff:
        *data = gicd_read_ipriorityr(s, attrs, offset - GICD_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicd_writeb(GICv3State *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    /* Most GICv3 distributor registers do not support byte accesses. */
    switch (offset) {
    case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf:
    case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
    case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff:
        /* This GIC implementation always has affinity routing enabled,
         * so these registers are all RAZ/WI.
         */
        return MEMTX_OK;
    case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff:
    {
        int irq = offset - GICD_IPRIORITYR;

        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            return MEMTX_OK;
        }
        gicd_write_ipriorityr(s, attrs, irq, value);
        gicv3_update(s, irq, 1);
        return MEMTX_OK;
    }
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicd_readw(GICv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    /* Only GICD_SETSPI_NSR, GICD_CLRSPI_NSR, GICD_SETSPI_SR and GICD_CLRSPI_SR
     * support 16 bit accesses, and those registers are all part of the
     * optional message-based SPI feature which this GIC does not currently
     * implement (ie for us GICD_TYPER.MBIS == 0), so for us they are
     * reserved.
     */
    return MEMTX_ERROR;
}

static MemTxResult gicd_writew(GICv3State *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    /* Only GICD_SETSPI_NSR, GICD_CLRSPI_NSR, GICD_SETSPI_SR and GICD_CLRSPI_SR
     * support 16 bit accesses, and those registers are all part of the
     * optional message-based SPI feature which this GIC does not currently
     * implement (ie for us GICD_TYPER.MBIS == 0), so for us they are
     * reserved.
     */
    return MEMTX_ERROR;
}

static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    /* Almost all GICv3 distributor registers are 32-bit.
     * Note that WO registers must return an UNKNOWN value on reads,
     * not an abort.
     */

    switch (offset) {
    case GICD_CTLR:
        if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
            /* The NS view of the GICD_CTLR sees only certain bits:
             * + bit [31] (RWP) is an alias of the Secure bit [31]
             * + bit [4] (ARE_NS) is an alias of Secure bit [5]
             * + bit [1] (EnableGrp1A) is an alias of Secure bit [1] if
             *   NS affinity routing is enabled, otherwise RES0
             * + bit [0] (EnableGrp1) is an alias of Secure bit [1] if
             *   NS affinity routing is not enabled, otherwise RES0
             * Since for QEMU affinity routing is always enabled
             * for both S and NS this means that bits [4] and [5] are
             * both always 1, and we can simply make the NS view
             * be bits 31, 4 and 1 of the S view.
             */
            *data = s->gicd_ctlr & (GICD_CTLR_ARE_S |
                                    GICD_CTLR_EN_GRP1NS |
                                    GICD_CTLR_RWP);
        } else {
            *data = s->gicd_ctlr;
        }
        return MEMTX_OK;
    case GICD_TYPER:
    {
        /* For this implementation:
         * No1N == 1 (1-of-N SPI interrupts not supported)
         * A3V == 1 (non-zero values of Affinity level 3 supported)
         * IDbits == 0xf (we support 16-bit interrupt identifiers)
         * DVIS == 0 (Direct virtual LPI injection not supported)
         * LPIS == 0 (LPIs not supported)
         * MBIS == 0 (message-based SPIs not supported)
         * SecurityExtn == 1 if security extns supported
         * CPUNumber == 0 since for us ARE is always 1
         * ITLinesNumber == (num external irqs / 32) - 1
         */
        int itlinesnumber = ((s->num_irq - GIC_INTERNAL) / 32) - 1;

        *data = (1 << 25) | (1 << 24) | (s->security_extn << 10) |
            (0xf << 19) | itlinesnumber;
        return MEMTX_OK;
    }
    case GICD_IIDR:
        /* We claim to be an ARM r0p0 with a zero ProductID.
         * This is the same as an r0p0 GIC-500.
         */
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICD_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICD_IGROUPR ... GICD_IGROUPR + 0x7f:
    {
        int irq;

        if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        /* RAZ/WI for SGIs, PPIs, unimplemented irqs */
        irq = (offset - GICD_IGROUPR) * 8;
        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = *gic_bmp_ptr32(s->group, irq);
        return MEMTX_OK;
    }
    case GICD_ISENABLER ... GICD_ISENABLER + 0x7f:
        *data = gicd_read_bitmap_reg(s, attrs, s->enabled, NULL,
                                     offset - GICD_ISENABLER);
        return MEMTX_OK;
    case GICD_ICENABLER ... GICD_ICENABLER + 0x7f:
        *data = gicd_read_bitmap_reg(s, attrs, s->enabled, NULL,
                                     offset - GICD_ICENABLER);
        return MEMTX_OK;
    case GICD_ISPENDR ... GICD_ISPENDR + 0x7f:
        *data = gicd_read_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge1,
                                     offset - GICD_ISPENDR);
        return MEMTX_OK;
    case GICD_ICPENDR ... GICD_ICPENDR + 0x7f:
        *data = gicd_read_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge2,
                                     offset - GICD_ICPENDR);
        return MEMTX_OK;
    case GICD_ISACTIVER ... GICD_ISACTIVER + 0x7f:
        *data = gicd_read_bitmap_reg(s, attrs, s->active, mask_nsacr_ge2,
                                     offset - GICD_ISACTIVER);
        return MEMTX_OK;
    case GICD_ICACTIVER ... GICD_ICACTIVER + 0x7f:
        *data = gicd_read_bitmap_reg(s, attrs, s->active, mask_nsacr_ge2,
                                     offset - GICD_ICACTIVER);
        return MEMTX_OK;
    case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff:
    {
        int i, irq = offset - GICD_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicd_read_ipriorityr(s, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff:
        /* RAZ/WI since affinity routing is always enabled */
        *data = 0;
        return MEMTX_OK;
    case GICD_ICFGR ... GICD_ICFGR + 0xff:
    {
        /* Here only the odd bits are used; even bits are RES0 */
        int irq = (offset - GICD_ICFGR) * 4;
        uint32_t value = 0;

        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            *data = 0;
            return MEMTX_OK;
        }

        /* Since our edge_trigger bitmap is one bit per irq, we only need
         * half of the 32-bit word, which we can then spread out
         * into the odd bits.
         */
        value = *gic_bmp_ptr32(s->edge_trigger, irq & ~0x1f);
        value &= mask_group_and_nsacr(s, attrs, NULL, irq & ~0x1f);
        value = extract32(value, (irq & 0x1f) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICD_IGRPMODR ... GICD_IGRPMODR + 0xff:
    {
        int irq;

        if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        /* RAZ/WI for SGIs, PPIs, unimplemented irqs */
        irq = (offset - GICD_IGRPMODR) * 8;
        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = *gic_bmp_ptr32(s->grpmod, irq);
        return MEMTX_OK;
    }
    case GICD_NSACR ... GICD_NSACR + 0xff:
    {
        /* Two bits per interrupt */
        int irq = (offset - GICD_NSACR) * 4;

        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            *data = 0;
            return MEMTX_OK;
        }

        if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }

        *data = s->gicd_nsacr[irq / 16];
        return MEMTX_OK;
    }
    case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf:
    case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
        /* RAZ/WI since affinity routing is always enabled */
        *data = 0;
        return MEMTX_OK;
    case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
    {
        uint64_t r;
        int irq = (offset - GICD_IROUTER) / 8;

        r = gicd_read_irouter(s, attrs, irq);
        if (offset & 7) {
            *data = r >> 32;
        } else {
            *data = (uint32_t)r;
        }
        return MEMTX_OK;
    }
    case GICD_IDREGS ... GICD_IDREGS + 0x1f:
        /* ID registers */
        *data = gicv3_idreg(offset - GICD_IDREGS);
        return MEMTX_OK;
    case GICD_SGIR:
        /* WO registers, return unknown value */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read from WO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        *data = 0;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

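/* A note on the GICD_ICFGR handling in gicd_readl() above and gicd_writel()
 * below: each GICD_ICFGR<n> register holds a 2-bit field for 16 interrupts,
 * of which only the odd-numbered bit (0 = level-sensitive, 1 = edge-triggered)
 * is meaningful. QEMU stores just one bit per interrupt in s->edge_trigger,
 * so a 32-bit bitmap word covers two ICFGR registers: (irq & 0x1f) selects
 * which 16-bit half of the word a given register maps to, and
 * half_shuffle32()/half_unshuffle32() convert between the packed register
 * layout and the bitmap.
 */
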
static MemTxResult gicd_writel(GICv3State *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    /* Almost all GICv3 distributor registers are 32-bit. Note that
     * RO registers must ignore writes, not abort.
     */

    switch (offset) {
    case GICD_CTLR:
    {
        uint32_t mask;
        /* GICv3 5.3.20 */
        if (s->gicd_ctlr & GICD_CTLR_DS) {
            /* With only one security state, E1NWF is RAZ/WI, DS is RAO/WI,
             * ARE is RAO/WI (affinity routing always on), and only
             * bits 0 and 1 (group enables) are writable.
             */
            mask = GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1NS;
        } else {
            if (attrs.secure) {
                /* for secure access:
                 * ARE_NS and ARE_S are RAO/WI (affinity routing always on)
                 * E1NWF is RAZ/WI (we don't support enable-1-of-n-wakeup)
                 *
                 * We can only modify bits[2:0] (the group enables).
                 */
                mask = GICD_CTLR_DS | GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1_ALL;
            } else {
                /* For non secure access ARE_NS is RAO/WI and EnableGrp1
                 * is RES0. The only writable bit is [1] (EnableGrp1A), which
                 * is an alias of the Secure bit [1].
                 */
                mask = GICD_CTLR_EN_GRP1NS;
            }
        }
        s->gicd_ctlr = (s->gicd_ctlr & ~mask) | (value & mask);
        if (value & mask & GICD_CTLR_DS) {
            /* We just set DS, so the ARE_NS and EnG1S bits are now RES0.
             * Note that this is a one-way transition because if DS is set
             * then it's not writeable, so it can only go back to 0 with a
             * hardware reset.
             */
            s->gicd_ctlr &= ~(GICD_CTLR_EN_GRP1S | GICD_CTLR_ARE_NS);
        }
        gicv3_full_update(s);
        return MEMTX_OK;
    }
    case GICD_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICD_IGROUPR ... GICD_IGROUPR + 0x7f:
    {
        int irq;

        if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        /* RAZ/WI for SGIs, PPIs, unimplemented irqs */
        irq = (offset - GICD_IGROUPR) * 8;
        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            return MEMTX_OK;
        }
        *gic_bmp_ptr32(s->group, irq) = value;
        gicv3_update(s, irq, 32);
        return MEMTX_OK;
    }
    case GICD_ISENABLER ... GICD_ISENABLER + 0x7f:
        gicd_write_set_bitmap_reg(s, attrs, s->enabled, NULL,
                                  offset - GICD_ISENABLER, value);
        return MEMTX_OK;
    case GICD_ICENABLER ... GICD_ICENABLER + 0x7f:
        gicd_write_clear_bitmap_reg(s, attrs, s->enabled, NULL,
                                    offset - GICD_ICENABLER, value);
        return MEMTX_OK;
    case GICD_ISPENDR ... GICD_ISPENDR + 0x7f:
        gicd_write_set_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge1,
                                  offset - GICD_ISPENDR, value);
        return MEMTX_OK;
    case GICD_ICPENDR ... GICD_ICPENDR + 0x7f:
        gicd_write_clear_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge2,
                                    offset - GICD_ICPENDR, value);
        return MEMTX_OK;
    case GICD_ISACTIVER ... GICD_ISACTIVER + 0x7f:
        gicd_write_set_bitmap_reg(s, attrs, s->active, NULL,
                                  offset - GICD_ISACTIVER, value);
        return MEMTX_OK;
    case GICD_ICACTIVER ... GICD_ICACTIVER + 0x7f:
        gicd_write_clear_bitmap_reg(s, attrs, s->active, NULL,
                                    offset - GICD_ICACTIVER, value);
        return MEMTX_OK;
    case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff:
    {
        int i, irq = offset - GICD_IPRIORITYR;

        if (irq < GIC_INTERNAL || irq + 3 >= s->num_irq) {
            return MEMTX_OK;
        }

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicd_write_ipriorityr(s, attrs, i, value);
        }
        gicv3_update(s, irq, 4);
        return MEMTX_OK;
    }
    case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff:
        /* RAZ/WI since affinity routing is always enabled */
        return MEMTX_OK;
    case GICD_ICFGR ... GICD_ICFGR + 0xff:
    {
        /* Here only the odd bits are used; even bits are RES0 */
        int irq = (offset - GICD_ICFGR) * 4;
        uint32_t mask, oldval;

        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            return MEMTX_OK;
        }

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1);
        mask = mask_group_and_nsacr(s, attrs, NULL, irq & ~0x1f);
        if (irq & 0x1f) {
            value <<= 16;
            mask &= 0xffff0000U;
        } else {
            mask &= 0xffff;
        }
        oldval = *gic_bmp_ptr32(s->edge_trigger, (irq & ~0x1f));
        value = (oldval & ~mask) | (value & mask);
        *gic_bmp_ptr32(s->edge_trigger, irq & ~0x1f) = value;
        return MEMTX_OK;
    }
    case GICD_IGRPMODR ... GICD_IGRPMODR + 0xff:
    {
        int irq;

        if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        /* RAZ/WI for SGIs, PPIs, unimplemented irqs */
        irq = (offset - GICD_IGRPMODR) * 8;
        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            return MEMTX_OK;
        }
        *gic_bmp_ptr32(s->grpmod, irq) = value;
        gicv3_update(s, irq, 32);
        return MEMTX_OK;
    }
    case GICD_NSACR ... GICD_NSACR + 0xff:
    {
        /* Two bits per interrupt */
        int irq = (offset - GICD_NSACR) * 4;

        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            return MEMTX_OK;
        }

        if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }

        s->gicd_nsacr[irq / 16] = value;
        /* No update required as this only affects access permission checks */
        return MEMTX_OK;
    }
    case GICD_SGIR:
        /* RES0 if affinity routing is enabled */
        return MEMTX_OK;
    case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf:
    case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
        /* RAZ/WI since affinity routing is always enabled */
        return MEMTX_OK;
    case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
    {
        uint64_t r;
        int irq = (offset - GICD_IROUTER) / 8;

        if (irq < GIC_INTERNAL || irq >= s->num_irq) {
            return MEMTX_OK;
        }

        /* Write half of the 64-bit register */
        r = gicd_read_irouter(s, attrs, irq);
        r = deposit64(r, (offset & 7) ? 32 : 0, 32, value);
        gicd_write_irouter(s, attrs, irq, r);
        return MEMTX_OK;
    }
    case GICD_IDREGS ... GICD_IDREGS + 0x1f:
    case GICD_TYPER:
    case GICD_IIDR:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicd_writell(GICv3State *s, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    /* Our only 64-bit registers are GICD_IROUTER<n> */
    int irq;

    switch (offset) {
    case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
        irq = (offset - GICD_IROUTER) / 8;
        gicd_write_irouter(s, attrs, irq, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicd_readll(GICv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    /* Our only 64-bit registers are GICD_IROUTER<n> */
    int irq;

    switch (offset) {
    case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
        irq = (offset - GICD_IROUTER) / 8;
        *data = gicd_read_irouter(s, attrs, irq);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                            unsigned size, MemTxAttrs attrs)
{
    GICv3State *s = (GICv3State *)opaque;
    MemTxResult r;

    switch (size) {
    case 1:
        r = gicd_readb(s, offset, data, attrs);
        break;
    case 2:
        r = gicd_readw(s, offset, data, attrs);
        break;
    case 4:
        r = gicd_readl(s, offset, data, attrs);
        break;
    case 8:
        r = gicd_readll(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_dist_badread(offset, size, attrs.secure);
    } else {
        trace_gicv3_dist_read(offset, *data, size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_dist_write(void *opaque, hwaddr offset, uint64_t data,
                             unsigned size, MemTxAttrs attrs)
{
    GICv3State *s = (GICv3State *)opaque;
    MemTxResult r;

    switch (size) {
    case 1:
        r = gicd_writeb(s, offset, data, attrs);
        break;
    case 2:
        r = gicd_writew(s, offset, data, attrs);
        break;
    case 4:
        r = gicd_writel(s, offset, data, attrs);
        break;
    case 8:
        r = gicd_writell(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_dist_badwrite(offset, data, size, attrs.secure);
    } else {
        trace_gicv3_dist_write(offset, data, size, attrs.secure);
    }
    return r;
}

void gicv3_dist_set_irq(GICv3State *s, int irq, int level)
{
    /* Update distributor state for a change in an external SPI input line */
    if (level == gicv3_gicd_level_test(s, irq)) {
        return;
    }

    trace_gicv3_dist_set_irq(irq, level);

    gicv3_gicd_level_replace(s, irq, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (gicv3_gicd_edge_trigger_test(s, irq)) {
            gicv3_gicd_pending_set(s, irq);
        }
    }

    gicv3_update(s, irq, 1);
}