/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}

static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}
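
/*
 * Illustrative example of the masking the helpers above perform (the
 * register values here are hypothetical): with security enabled
 * (GICD_CTLR.DS == 0) and gicr_igroupr0 == 0x0000ff00, a Non-secure
 * write of 0xffffffff to GICR_ISENABLER0 is masked down to 0x0000ff00,
 * so only NS Group 1 interrupts 8..15 are enabled and the bits for
 * Group 0 and Secure Group 1 interrupts are left untouched. The same
 * write made with attrs.secure set would use the full 0xffffffff mask.
 */
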
/**
 * update_for_one_lpi: Update pending information if this LPI is better
 *
 * @cs: GICv3CPUState
 * @irq: interrupt to look up in the LPI Configuration table
 * @ctbase: physical address of the LPI Configuration table to use
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to update
 *
 * Look up @irq in the Configuration table specified by @ctbase
 * to see if it is enabled and what its priority is. If it is an
 * enabled interrupt with a higher priority than that currently
 * recorded in @hpp, update @hpp.
 */
static void update_for_one_lpi(GICv3CPUState *cs, int irq,
                               uint64_t ctbase, bool ds, PendingIrq *hpp)
{
    uint8_t lpite;
    uint8_t prio;

    address_space_read(&cs->gic->dma_as,
                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

    if (ds) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < hpp->prio) ||
        ((prio == hpp->prio) && (irq <= hpp->irq))) {
        hpp->irq = irq;
        hpp->prio = prio;
        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
        hpp->grp = GICV3_G1NS;
    }
}

static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}
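
/*
 * A worked round-trip of the Non-secure priority view above (the
 * priority value is an arbitrary example): a Non-secure write of 0x46
 * is stored as 0x80 | (0x46 >> 1) == 0xa3, and a Non-secure read then
 * returns (0xa3 << 1) & 0xff == 0x46. Secure software reading the same
 * register sees the stored 0xa3, so Non-secure software is confined to
 * the numerically-larger (lower priority) half of the priority space.
 */
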
static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
    /*
     * VLPI frame registers. We don't need a version check for
     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
     * prevent pre-v4 GIC from passing us offsets this high.
     */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
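
/*
 * Worked example for the GICR_ICFGR read case above, assuming (as its
 * use there implies) that half_shuffle32() spreads the low 16 bits of
 * its argument into the even bit positions: if PPIs 16 and 17 are
 * edge-triggered, the relevant half of edge_trigger is 0x0003,
 * half_shuffle32(0x0003) == 0x00000005, and shifting left by one gives
 * 0x0000000a, i.e. bit 2n+1 set for each edge-triggered interrupt n,
 * which is the architectural encoding of "edge-triggered" in ICFGR.
 */
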
static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in the pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting the ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    /*
     * VLPI frame registers. We don't need a version check for
     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
     * prevent pre-v4 GIC from passing us offsets this high.
     */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        cs->gicr_vpendbaser = deposit64(cs->gicr_vpendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        cs->gicr_vpendbaser = deposit64(cs->gicr_vpendbaser, 32, 32, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
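
/*
 * The GICR_ICFGR1 write case above applies the inverse transformation,
 * assuming half_unshuffle32() gathers the even bits of its argument
 * back into the low 16 bits: a guest write of 0x0000000a has the
 * "edge-triggered" bits 1 and 3 set, so (0xa >> 1) == 0x5,
 * half_unshuffle32(0x5) == 0x3, and the shift left by 16 yields
 * 0x00030000, marking interrupts 16 and 17 as edge-triggered in
 * edge_trigger (subject to the group mask).
 */
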
static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
    /*
     * VLPI frame registers. We don't need a version check for
     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
     * prevent pre-v4 GIC from passing us offsets this high.
     */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    /*
     * VLPI frame registers. We don't need a version check for
     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
     * prevent pre-v4 GIC from passing us offsets this high.
     */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        cs->gicr_vpendbaser = value;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
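
/*
 * The 32-bit and 64-bit accessors are consistent with each other: the
 * 64-bit registers may be written as one 64-bit access or as two
 * 32-bit halves. For example, a 32-bit write of 0x0000f000 to
 * GICR_PROPBASER followed by 0x00000001 to GICR_PROPBASER + 4 leaves
 * gicr_propbaser == 0x000000010000f000, the same state that a single
 * 64-bit write of that value produces.
 */
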
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}
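
/*
 * Worked example of the region decode in the two functions above: for
 * a GICv3 each CPU has two 64K redistributor pages, so
 * gicv3_redist_size() is 0x20000. An access at offset 0x45000 into a
 * region whose region->cpuidx is 8 therefore targets CPU
 * 8 + 0x45000 / 0x20000 == 10, at offset 0x5000 within that CPU's
 * redistributor frame.
 */
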
static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_one_lpi(cs, irq, lpict_baddr,
                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
                       &cs->hpplpi);
}

void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI pending table and, for each pending
     * LPI, reads the corresponding entry from the LPI configuration
     * table to find whether it is enabled and what its priority is.
     * If a pending LPI has a lower priority value (i.e. is higher
     * priority) than the best candidate found so far, it is recorded
     * as the new highest priority pending LPI.
     */
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t lpipt_baddr;
    uint32_t pendt_size = 0;
    uint8_t pend;
    int i, bit;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    cs->hpplpi.prio = 0xff;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    /* Determine the highest priority pending interrupt among LPIs */
    pendt_size = (1ULL << (idbits + 1));

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend,
                           sizeof(pend));

        while (pend) {
            bit = ctz32(pend);
            gicv3_redist_check_lpi_priority(cs, i * 8 + bit);
            pend &= ~(1 << bit);
        }
    }
}

void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}
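
/*
 * Worked example of the pending-table scan in
 * gicv3_redist_update_lpi_only(): with GICR_PROPBASER.IDBITS == 15 the
 * table covers 1 << (15 + 1) == 65536 interrupt IDs at one pending bit
 * each, i.e. 8192 bytes. The scan starts at byte
 * GICV3_LPI_INTID_START / 8 == 1024, skipping the bits that would
 * correspond to the non-LPI INTIDs 0..8191, and the ctz32() inner loop
 * visits only the set bits of each nonzero byte, so an LPI triggers a
 * configuration-table lookup only if it is actually pending.
 */
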
void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in the LPI pending table
     * for the irq being activated or deactivated.
     */
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t lpipt_baddr;
    bool ispend = false;
    uint8_t pend;

    /*
     * get the bit value corresponding to this irq in the
     * LPI pending table
     */
    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
                       MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));

    ispend = extract32(pend, irq % 8, 1);

    /* no change in the value of the pending bit, return */
    if (ispend == level) {
        return;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);

    address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
                        MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));

    /*
     * check if this LPI is better than the current hpplpi, if yes
     * just set hpplpi.prio and .irq without doing a full rescan
     */
    if (level) {
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}

void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);
}

void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
{
    /*
     * The only cached information for LPIs we have is the HPPLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_update_lpi(cs);
}

void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
{
    /*
     * Move the specified LPI's pending state from the source redistributor
     * to the destination.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr;
    uint8_t src_pend;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    if ((irq / 8) >= pendt_size) {
        return;
    }

    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    address_space_read(as, src_baddr + (irq / 8),
                       MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
    if (!extract32(src_pend, irq % 8, 1)) {
        /* Not pending on source, nothing to do */
        return;
    }
    src_pend &= ~(1 << (irq % 8));
    address_space_write(as, src_baddr + (irq / 8),
                        MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
    if (irq == src->hpplpi.irq) {
        /*
         * We just made this LPI not-pending so only need to update
         * if it was previously the highest priority pending LPI
         */
        gicv3_redist_update_lpi(src);
    }
    /* Mark it pending on the destination */
    gicv3_redist_lpi_pending(dest, irq, 1);
}

void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
{
    /*
     * We must move all pending LPIs from the source redistributor
     * to the destination. That is, for every pending LPI X on
     * src, we must set it not-pending on src and pending on dest.
     * LPIs that are already pending on dest are not cleared.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr, dest_baddr;
    int i;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        uint8_t src_pend, dest_pend;

        address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &src_pend, sizeof(src_pend));
        if (!src_pend) {
            continue;
        }
        address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &dest_pend, sizeof(dest_pend));
        dest_pend |= src_pend;
        src_pend = 0;
        address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &src_pend, sizeof(src_pend));
        address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &dest_pend, sizeof(dest_pend));
    }

    gicv3_redist_update_lpi(src);
    gicv3_redist_update_lpi(dest);
}

void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * The redistributor handling for changing the pending state
     * of a vLPI will be added in a subsequent commit.
     */
}

void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level)
{
    /*
     * The redistributor handling for being handed a VLPI by the ITS
     * will be added in a subsequent commit.
     */
}

void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell)
{
    /*
     * The redistributor handling for moving a VLPI will be added
     * in a subsequent commit.
     */
}

void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
{
    /* The redistributor handling will be added in a subsequent commit */
}

void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
{
    /*
     * The redistributor handling for invalidating cached information
     * about a VLPI will be added in a subsequent commit.
     */
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}
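
/*
 * Note on the GICR_NSACR checks in gicv3_redist_send_sgi() below:
 * gicr_ns_access() returns the 2-bit NS_access field for the SGI.
 * When security is enabled, a Non-secure originator may generate an
 * SGI configured as Secure Group 0 only if NS_access >= 1, and one
 * configured as Secure Group 1 only if NS_access >= 2; otherwise the
 * SGI is silently dropped.
 */
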
void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}