1 /* 2 * QEMU PowerPC XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2018, IBM Corporation. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/log.h" 11 #include "qemu/module.h" 12 #include "qapi/error.h" 13 #include "target/ppc/cpu.h" 14 #include "system/cpus.h" 15 #include "system/dma.h" 16 #include "system/reset.h" 17 #include "hw/qdev-properties.h" 18 #include "migration/vmstate.h" 19 #include "hw/irq.h" 20 #include "hw/ppc/xive.h" 21 #include "hw/ppc/xive2.h" 22 #include "hw/ppc/xive_regs.h" 23 #include "trace.h" 24 25 /* 26 * XIVE Thread Interrupt Management context 27 */ 28 bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr) 29 { 30 switch (ring) { 31 case TM_QW1_OS: 32 return !!(nsr & TM_QW1_NSR_EO); 33 case TM_QW2_HV_POOL: 34 case TM_QW3_HV_PHYS: 35 return !!(nsr & TM_QW3_NSR_HE); 36 default: 37 g_assert_not_reached(); 38 } 39 } 40 41 bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr) 42 { 43 if ((nsr & TM_NSR_GRP_LVL) > 0) { 44 g_assert(xive_nsr_indicates_exception(ring, nsr)); 45 return true; 46 } 47 return false; 48 } 49 50 uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr) 51 { 52 /* NSR determines if pool/phys ring is for phys or pool interrupt */ 53 if ((ring == TM_QW3_HV_PHYS) || (ring == TM_QW2_HV_POOL)) { 54 uint8_t he = (nsr & TM_QW3_NSR_HE) >> 6; 55 56 if (he == TM_QW3_NSR_HE_PHYS) { 57 return TM_QW3_HV_PHYS; 58 } else if (he == TM_QW3_NSR_HE_POOL) { 59 return TM_QW2_HV_POOL; 60 } else { 61 /* Don't support LSI mode */ 62 g_assert_not_reached(); 63 } 64 } 65 return ring; 66 } 67 68 static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) 69 { 70 switch (ring) { 71 case TM_QW0_USER: 72 return 0; /* Not supported */ 73 case TM_QW1_OS: 74 return tctx->os_output; 75 case TM_QW2_HV_POOL: 76 case TM_QW3_HV_PHYS: 77 return tctx->hv_output; 78 default: 79 return 0; 80 } 81 } 82 83 uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) 84 { 85 uint8_t *regs = &tctx->regs[ring]; 86 uint8_t nsr = regs[TM_NSR]; 87 88 qemu_irq_lower(xive_tctx_output(tctx, ring)); 89 90 if (xive_nsr_indicates_exception(ring, nsr)) { 91 uint8_t cppr = regs[TM_PIPR]; 92 uint8_t alt_ring; 93 uint8_t *alt_regs; 94 95 alt_ring = xive_nsr_exception_ring(ring, nsr); 96 alt_regs = &tctx->regs[alt_ring]; 97 98 regs[TM_CPPR] = cppr; 99 100 /* 101 * If the interrupt was for a specific VP, reset the pending 102 * buffer bit, otherwise clear the logical server indicator 103 */ 104 if (!xive_nsr_indicates_group_exception(ring, nsr)) { 105 alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); 106 } 107 108 /* Clear the exception from NSR */ 109 regs[TM_NSR] = 0; 110 111 trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring, 112 alt_regs[TM_IPB], regs[TM_PIPR], 113 regs[TM_CPPR], regs[TM_NSR]); 114 } 115 116 return ((uint64_t)nsr << 8) | regs[TM_CPPR]; 117 } 118 119 void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level) 120 { 121 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ 122 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? 
TM_QW3_HV_PHYS : ring; 123 uint8_t *alt_regs = &tctx->regs[alt_ring]; 124 uint8_t *regs = &tctx->regs[ring]; 125 126 if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) { 127 switch (ring) { 128 case TM_QW1_OS: 129 regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); 130 break; 131 case TM_QW2_HV_POOL: 132 alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); 133 break; 134 case TM_QW3_HV_PHYS: 135 regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); 136 break; 137 default: 138 g_assert_not_reached(); 139 } 140 trace_xive_tctx_notify(tctx->cs->cpu_index, ring, 141 regs[TM_IPB], alt_regs[TM_PIPR], 142 alt_regs[TM_CPPR], alt_regs[TM_NSR]); 143 qemu_irq_raise(xive_tctx_output(tctx, ring)); 144 } else { 145 alt_regs[TM_NSR] = 0; 146 qemu_irq_lower(xive_tctx_output(tctx, ring)); 147 } 148 } 149 150 void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring) 151 { 152 /* 153 * Lower the External interrupt. Used when pulling a context. It is 154 * necessary to avoid catching it in the higher privilege context. It 155 * should be raised again when re-pushing the lower privilege context. 156 */ 157 qemu_irq_lower(xive_tctx_output(tctx, ring)); 158 } 159 160 static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) 161 { 162 uint8_t *regs = &tctx->regs[ring]; 163 uint8_t pipr_min; 164 uint8_t ring_min; 165 166 trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, 167 regs[TM_IPB], regs[TM_PIPR], 168 cppr, regs[TM_NSR]); 169 170 if (cppr > XIVE_PRIORITY_MAX) { 171 cppr = 0xff; 172 } 173 174 tctx->regs[ring + TM_CPPR] = cppr; 175 176 /* 177 * Recompute the PIPR based on local pending interrupts. The PHYS 178 * ring must take the minimum of both the PHYS and POOL PIPR values. 179 */ 180 pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); 181 ring_min = ring; 182 183 /* PHYS updates also depend on POOL values */ 184 if (ring == TM_QW3_HV_PHYS) { 185 uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL]; 186 187 /* POOL values only matter if POOL ctx is valid */ 188 if (pool_regs[TM_WORD2] & 0x80) { 189 190 uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); 191 192 /* 193 * Determine highest priority interrupt and 194 * remember which ring has it. 195 */ 196 if (pool_pipr < pipr_min) { 197 pipr_min = pool_pipr; 198 ring_min = TM_QW2_HV_POOL; 199 } 200 } 201 } 202 203 regs[TM_PIPR] = pipr_min; 204 205 /* CPPR has changed, check if we need to raise a pending exception */ 206 xive_tctx_notify(tctx, ring_min, 0); 207 } 208 209 void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, 210 uint8_t group_level) 211 { 212 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ 213 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; 214 uint8_t *alt_regs = &tctx->regs[alt_ring]; 215 uint8_t *regs = &tctx->regs[ring]; 216 217 if (group_level == 0) { 218 /* VP-specific */ 219 regs[TM_IPB] |= xive_priority_to_ipb(priority); 220 alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]); 221 } else { 222 /* VP-group */ 223 alt_regs[TM_PIPR] = xive_priority_to_pipr(priority); 224 } 225 xive_tctx_notify(tctx, ring, group_level); 226 } 227 228 void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority, 229 uint8_t group_level) 230 { 231 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ 232 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? 
TM_QW3_HV_PHYS : ring; 233 uint8_t *aregs = &tctx->regs[alt_ring]; 234 uint8_t *regs = &tctx->regs[ring]; 235 uint8_t pipr = xive_priority_to_pipr(priority); 236 237 if (group_level == 0) { 238 regs[TM_IPB] |= xive_priority_to_ipb(priority); 239 if (pipr >= aregs[TM_PIPR]) { 240 /* VP interrupts can come here with lower priority than PIPR */ 241 return; 242 } 243 } 244 g_assert(pipr <= xive_ipb_to_pipr(regs[TM_IPB])); 245 g_assert(pipr < aregs[TM_PIPR]); 246 aregs[TM_PIPR] = pipr; 247 xive_tctx_notify(tctx, ring, group_level); 248 } 249 250 /* 251 * XIVE Thread Interrupt Management Area (TIMA) 252 */ 253 254 static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, 255 hwaddr offset, uint64_t value, unsigned size) 256 { 257 xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); 258 } 259 260 static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx, 261 hwaddr offset, unsigned size) 262 { 263 return xive_tctx_accept(tctx, TM_QW3_HV_PHYS); 264 } 265 266 static void xive_pool_cam_decode(uint32_t cam, uint8_t *nvt_blk, 267 uint32_t *nvt_idx, bool *vp) 268 { 269 if (nvt_blk) { 270 *nvt_blk = xive_nvt_blk(cam); 271 } 272 if (nvt_idx) { 273 *nvt_idx = xive_nvt_idx(cam); 274 } 275 if (vp) { 276 *vp = !!(cam & TM_QW2W2_VP); 277 } 278 } 279 280 static uint32_t xive_tctx_get_pool_cam(XiveTCTX *tctx, uint8_t *nvt_blk, 281 uint32_t *nvt_idx, bool *vp) 282 { 283 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); 284 uint32_t cam = be32_to_cpu(qw2w2); 285 286 xive_pool_cam_decode(cam, nvt_blk, nvt_idx, vp); 287 return qw2w2; 288 } 289 290 static void xive_tctx_set_pool_cam(XiveTCTX *tctx, uint32_t qw2w2) 291 { 292 memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); 293 } 294 295 static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, 296 hwaddr offset, unsigned size) 297 { 298 uint32_t qw2w2; 299 uint32_t qw2w2_new; 300 uint8_t nvt_blk; 301 uint32_t nvt_idx; 302 bool vp; 303 304 qw2w2 = xive_tctx_get_pool_cam(tctx, &nvt_blk, &nvt_idx, &vp); 305 306 if (!vp) { 307 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid POOL NVT %x/%x !?\n", 308 nvt_blk, nvt_idx); 309 } 310 311 /* Invalidate CAM line */ 312 qw2w2_new = xive_set_field32(TM_QW2W2_VP, qw2w2, 0); 313 xive_tctx_set_pool_cam(tctx, qw2w2_new); 314 315 xive_tctx_reset_signal(tctx, TM_QW1_OS); 316 xive_tctx_reset_signal(tctx, TM_QW2_HV_POOL); 317 return qw2w2; 318 } 319 320 static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, 321 hwaddr offset, unsigned size) 322 { 323 uint8_t qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; 324 uint8_t qw3b8_new; 325 326 qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; 327 if (!(qw3b8 & TM_QW3B8_VT)) { 328 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid PHYS thread!?\n"); 329 } 330 qw3b8_new = qw3b8 & ~TM_QW3B8_VT; 331 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8_new; 332 333 xive_tctx_reset_signal(tctx, TM_QW1_OS); 334 xive_tctx_reset_signal(tctx, TM_QW3_HV_PHYS); 335 return qw3b8; 336 } 337 338 static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, 339 uint64_t value, unsigned size) 340 { 341 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff; 342 } 343 344 static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx, 345 hwaddr offset, unsigned size) 346 { 347 return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff; 348 } 349 350 /* 351 * Define an access map for each page of the TIMA that we will use in 352 * the memory region ops to filter values when doing loads and stores 353 * of raw registers 
values
 *
 * Registers accessibility bits :
 *
 *   0x0 - no access
 *   0x1 - write only
 *   0x2 - read only
 *   0x3 - read/write
 */

static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */
    0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */
    0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & TM_REG_OFFSET;
    uint8_t reg_mask = write ?
0x1 : 0x2; 410 uint64_t mask = 0x0; 411 int i; 412 413 for (i = 0; i < size; i++) { 414 if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) { 415 mask |= (uint64_t) 0xff << (8 * (size - i - 1)); 416 } 417 } 418 419 return mask; 420 } 421 422 static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, 423 unsigned size) 424 { 425 uint8_t ring_offset = offset & TM_RING_OFFSET; 426 uint8_t reg_offset = offset & TM_REG_OFFSET; 427 uint64_t mask = xive_tm_mask(offset, size, true); 428 int i; 429 430 /* 431 * Only 4 or 8 bytes stores are allowed and the User ring is 432 * excluded 433 */ 434 if (size < 4 || !mask || ring_offset == TM_QW0_USER) { 435 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" 436 HWADDR_PRIx" size %d\n", offset, size); 437 return; 438 } 439 440 /* 441 * Use the register offset for the raw values and filter out 442 * reserved values 443 */ 444 for (i = 0; i < size; i++) { 445 uint8_t byte_mask = (mask >> (8 * (size - i - 1))); 446 if (byte_mask) { 447 tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) & 448 byte_mask; 449 } 450 } 451 } 452 453 static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) 454 { 455 uint8_t ring_offset = offset & TM_RING_OFFSET; 456 uint8_t reg_offset = offset & TM_REG_OFFSET; 457 uint64_t mask = xive_tm_mask(offset, size, false); 458 uint64_t ret; 459 int i; 460 461 /* 462 * Only 4 or 8 bytes loads are allowed and the User ring is 463 * excluded 464 */ 465 if (size < 4 || !mask || ring_offset == TM_QW0_USER) { 466 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" 467 HWADDR_PRIx" size %d\n", offset, size); 468 return -1; 469 } 470 471 /* Use the register offset for the raw values */ 472 ret = 0; 473 for (i = 0; i < size; i++) { 474 ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1)); 475 } 476 477 /* filter out reserved values */ 478 return ret & mask; 479 } 480 481 /* 482 * The TM context is mapped twice within each page. Stores and loads 483 * to the first mapping below 2K write and read the specified values 484 * without modification. The second mapping above 2K performs specific 485 * state changes (side effects) in addition to setting/returning the 486 * interrupt management area context of the processor thread. 487 */ 488 static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx, 489 hwaddr offset, unsigned size) 490 { 491 return xive_tctx_accept(tctx, TM_QW1_OS); 492 } 493 494 static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, 495 hwaddr offset, uint64_t value, unsigned size) 496 { 497 xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); 498 } 499 500 static void xive_tctx_set_lgs(XiveTCTX *tctx, uint8_t ring, uint8_t lgs) 501 { 502 uint8_t *regs = &tctx->regs[ring]; 503 504 regs[TM_LGS] = lgs; 505 } 506 507 static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, 508 hwaddr offset, uint64_t value, unsigned size) 509 { 510 xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff); 511 } 512 513 /* 514 * Adjust the PIPR to allow a CPU to process event queues of other 515 * priorities during one physical interrupt cycle. 
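 * The value stored at TM_SPC_SET_OS_PENDING is taken as a priority: it is
 * converted to an IPB bit, ORed into the OS ring IPB, and the PIPR is
 * recomputed, which may raise the OS interrupt line again (see
 * xive_tctx_pipr_update()).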
516 */ 517 static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, 518 hwaddr offset, uint64_t value, unsigned size) 519 { 520 xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0); 521 } 522 523 static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, 524 uint32_t *nvt_idx, bool *vo) 525 { 526 if (nvt_blk) { 527 *nvt_blk = xive_nvt_blk(cam); 528 } 529 if (nvt_idx) { 530 *nvt_idx = xive_nvt_idx(cam); 531 } 532 if (vo) { 533 *vo = !!(cam & TM_QW1W2_VO); 534 } 535 } 536 537 static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk, 538 uint32_t *nvt_idx, bool *vo) 539 { 540 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); 541 uint32_t cam = be32_to_cpu(qw1w2); 542 543 xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo); 544 return qw1w2; 545 } 546 547 static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2) 548 { 549 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); 550 } 551 552 static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, 553 hwaddr offset, unsigned size) 554 { 555 uint32_t qw1w2; 556 uint32_t qw1w2_new; 557 uint8_t nvt_blk; 558 uint32_t nvt_idx; 559 bool vo; 560 561 qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo); 562 563 if (!vo) { 564 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid OS NVT %x/%x !?\n", 565 nvt_blk, nvt_idx); 566 } 567 568 /* Invalidate CAM line */ 569 qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0); 570 xive_tctx_set_os_cam(tctx, qw1w2_new); 571 572 xive_tctx_reset_signal(tctx, TM_QW1_OS); 573 return qw1w2; 574 } 575 576 static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, 577 uint8_t nvt_blk, uint32_t nvt_idx) 578 { 579 XiveNVT nvt; 580 uint8_t ipb; 581 582 /* 583 * Grab the associated NVT to pull the pending bits, and merge 584 * them with the IPB of the thread interrupt context registers 585 */ 586 if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { 587 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n", 588 nvt_blk, nvt_idx); 589 return; 590 } 591 592 ipb = xive_get_field32(NVT_W4_IPB, nvt.w4); 593 594 if (ipb) { 595 /* Reset the NVT value */ 596 nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0); 597 xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); 598 599 uint8_t *regs = &tctx->regs[TM_QW1_OS]; 600 regs[TM_IPB] |= ipb; 601 } 602 603 /* 604 * Always call xive_tctx_pipr_update(). Even if there were no 605 * escalation triggered, there could be a pending interrupt which 606 * was saved when the context was pulled and that we need to take 607 * into account by recalculating the PIPR (which is not 608 * saved/restored). 609 * It will also raise the External interrupt signal if needed. 
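 * (Priority 0xFF is above XIVE_PRIORITY_MAX, so xive_priority_to_ipb()
 * should not contribute any extra IPB bit here; the call below is mainly
 * there to recompute the PIPR from the merged IPB and to raise the signal
 * if needed.)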
610 */ 611 xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */ 612 } 613 614 /* 615 * Updating the OS CAM line can trigger a resend of interrupt 616 */ 617 static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, 618 hwaddr offset, uint64_t value, unsigned size) 619 { 620 uint32_t cam = value; 621 uint32_t qw1w2 = cpu_to_be32(cam); 622 uint8_t nvt_blk; 623 uint32_t nvt_idx; 624 bool vo; 625 626 xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo); 627 628 /* First update the registers */ 629 xive_tctx_set_os_cam(tctx, qw1w2); 630 631 /* Check the interrupt pending bits */ 632 if (vo) { 633 xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); 634 } 635 } 636 637 static uint32_t xive_presenter_get_config(XivePresenter *xptr) 638 { 639 XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); 640 641 return xpc->get_config(xptr); 642 } 643 644 /* 645 * Define a mapping of "special" operations depending on the TIMA page 646 * offset and the size of the operation. 647 */ 648 typedef struct XiveTmOp { 649 uint8_t page_offset; 650 uint32_t op_offset; 651 unsigned size; 652 void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx, 653 hwaddr offset, 654 uint64_t value, unsigned size); 655 uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, 656 unsigned size); 657 } XiveTmOp; 658 659 static const XiveTmOp xive_tm_operations[] = { 660 /* 661 * MMIOs below 2K : raw values and special operations without side 662 * effects 663 */ 664 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, 665 NULL }, 666 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, 667 NULL }, 668 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, 669 NULL }, 670 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, 671 NULL }, 672 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, 673 xive_tm_vt_poll }, 674 675 /* MMIOs above 2K : special operations with side effects */ 676 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, 677 xive_tm_ack_os_reg }, 678 { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, 679 NULL }, 680 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, 681 xive_tm_pull_os_ctx }, 682 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, 683 xive_tm_pull_os_ctx }, 684 { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, 685 xive_tm_ack_hv_reg }, 686 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, 687 xive_tm_pull_pool_ctx }, 688 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, 689 xive_tm_pull_pool_ctx }, 690 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, 691 xive_tm_pull_phys_ctx }, 692 }; 693 694 static const XiveTmOp xive2_tm_operations[] = { 695 /* 696 * MMIOs below 2K : raw values and special operations without side 697 * effects 698 */ 699 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr, 700 NULL }, 701 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, 702 NULL }, 703 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx, 704 NULL }, 705 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs, 706 NULL }, 707 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr, 708 NULL }, 709 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, 710 NULL }, 711 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, 712 xive_tm_vt_poll }, 713 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target, 714 NULL }, 715 716 /* MMIOs above 2K : special operations with side effects */ 717 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, 
NULL,
      xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
      NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL,
      xive2_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
      xive2_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
      xive2_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
      xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL,
      xive2_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
      xive2_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
      xive2_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol,
      NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL,
      xive2_tm_pull_phys_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
      xive2_tm_pull_phys_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol,
      NULL },
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_EL, 1, xive2_tm_ack_os_el,
      NULL },
};

static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
                                       unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & TM_ADDRESS_MASK;
    const XiveTmOp *tm_ops;
    int i, tm_ops_count;
    uint32_t cfg;

    cfg = xive_presenter_get_config(xptr);
    if (cfg & XIVE_PRESENTER_GEN1_TIMA_OS) {
        tm_ops = xive_tm_operations;
        tm_ops_count = ARRAY_SIZE(xive_tm_operations);
    } else {
        tm_ops = xive2_tm_operations;
        tm_ops_count = ARRAY_SIZE(xive2_tm_operations);
    }

    for (i = 0; i < tm_ops_count; i++) {
        const XiveTmOp *xto = &tm_ops[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }
    return NULL;
}

/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                        uint64_t value, unsigned size)
{
    const XiveTmOp *xto;

    trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value);

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the upper 2K region
     */
    if (offset & TM_SPECIAL_OP) {
        xto = xive_tm_find_op(tctx->xptr, offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx" size %d\n", offset, size);
        } else {
            xto->write_handler(xptr, tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K.
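     * (e.g. a CPPR update), before falling back to a raw register store.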
     */
    xto = xive_tm_find_op(tctx->xptr, offset, size, true);
    if (xto) {
        xto->write_handler(xptr, tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                           unsigned size)
{
    const XiveTmOp *xto;
    uint64_t ret;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the upper 2K region
     */
    if (offset & TM_SPECIAL_OP) {
        xto = xive_tm_find_op(tctx->xptr, offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA "
                          "@%"HWADDR_PRIx" size %d\n", offset, size);
            return -1;
        }
        ret = xto->read_handler(xptr, tctx, offset, size);
        goto out;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(tctx->xptr, offset, size, false);
    if (xto) {
        ret = xto->read_handler(xptr, tctx, offset, size);
        goto out;
    }

    /*
     * Finish with raw access to the register values
     */
    ret = xive_tm_raw_read(tctx, offset, size);
out:
    trace_xive_tctx_tm_read(tctx->cs->cpu_index, offset, size, ret);
    return ret;
}

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB],
                           ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR],
                           be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

/*
 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
 */
#define xive_in_kernel(xptr)                                            \
    (kvm_irqchip_in_kernel() &&                                         \
     ({                                                                 \
         XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);      \
         xpc->in_kernel ? xpc->in_kernel(xptr) : false;                 \
     }))

void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf)
{
    int cpu_index;
    int i;

    /*
     * Skip partially initialized vCPUs. This can happen on sPAPR when
     * vCPUs are hot plugged or unplugged.
     */
    if (!tctx) {
        return;
    }

    cpu_index = tctx->cs ?
tctx->cs->cpu_index : -1; 903 904 if (xive_in_kernel(tctx->xptr)) { 905 Error *local_err = NULL; 906 907 kvmppc_xive_cpu_synchronize_state(tctx, &local_err); 908 if (local_err) { 909 error_report_err(local_err); 910 return; 911 } 912 } 913 914 if (xive_presenter_get_config(tctx->xptr) & XIVE_PRESENTER_GEN1_TIMA_OS) { 915 g_string_append_printf(buf, "CPU[%04x]: " 916 "QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR" 917 " W2\n", cpu_index); 918 } else { 919 g_string_append_printf(buf, "CPU[%04x]: " 920 "QW NSR CPPR IPB LSMFB - LGS T PIPR" 921 " W2\n", cpu_index); 922 } 923 924 for (i = 0; i < XIVE_TM_RING_COUNT; i++) { 925 char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]); 926 g_string_append_printf(buf, "CPU[%04x]: %4s %s\n", 927 cpu_index, xive_tctx_ring_names[i], s); 928 g_free(s); 929 } 930 } 931 932 void xive_tctx_reset(XiveTCTX *tctx) 933 { 934 memset(tctx->regs, 0, sizeof(tctx->regs)); 935 936 /* Set some defaults */ 937 tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; 938 tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; 939 tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; 940 if (!(xive_presenter_get_config(tctx->xptr) & 941 XIVE_PRESENTER_GEN1_TIMA_OS)) { 942 tctx->regs[TM_QW1_OS + TM_OGEN] = 2; 943 } 944 945 /* 946 * Initialize PIPR to 0xFF to avoid phantom interrupts when the 947 * CPPR is first set. 948 */ 949 tctx->regs[TM_QW1_OS + TM_PIPR] = 950 xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); 951 tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = 952 xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); 953 } 954 955 static void xive_tctx_realize(DeviceState *dev, Error **errp) 956 { 957 XiveTCTX *tctx = XIVE_TCTX(dev); 958 PowerPCCPU *cpu; 959 CPUPPCState *env; 960 961 assert(tctx->cs); 962 assert(tctx->xptr); 963 964 cpu = POWERPC_CPU(tctx->cs); 965 env = &cpu->env; 966 switch (PPC_INPUT(env)) { 967 case PPC_FLAGS_INPUT_POWER9: 968 tctx->hv_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_HINT); 969 tctx->os_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT); 970 break; 971 972 default: 973 error_setg(errp, "XIVE interrupt controller does not support " 974 "this CPU bus model"); 975 return; 976 } 977 978 /* Connect the presenter to the VCPU (required for CPU hotplug) */ 979 if (xive_in_kernel(tctx->xptr)) { 980 if (kvmppc_xive_cpu_connect(tctx, errp) < 0) { 981 return; 982 } 983 } 984 } 985 986 static int vmstate_xive_tctx_pre_save(void *opaque) 987 { 988 XiveTCTX *tctx = XIVE_TCTX(opaque); 989 Error *local_err = NULL; 990 int ret; 991 992 if (xive_in_kernel(tctx->xptr)) { 993 ret = kvmppc_xive_cpu_get_state(tctx, &local_err); 994 if (ret < 0) { 995 error_report_err(local_err); 996 return ret; 997 } 998 } 999 1000 return 0; 1001 } 1002 1003 static int vmstate_xive_tctx_post_load(void *opaque, int version_id) 1004 { 1005 XiveTCTX *tctx = XIVE_TCTX(opaque); 1006 Error *local_err = NULL; 1007 int ret; 1008 1009 if (xive_in_kernel(tctx->xptr)) { 1010 /* 1011 * Required for hotplugged CPU, for which the state comes 1012 * after all states of the machine. 
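 * At that point the KVM XIVE device has already been connected for this
 * vCPU (see xive_tctx_realize()), so kvmppc_xive_cpu_set_state() can push
 * the migrated TIMA state back to KVM.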
1013 */ 1014 ret = kvmppc_xive_cpu_set_state(tctx, &local_err); 1015 if (ret < 0) { 1016 error_report_err(local_err); 1017 return ret; 1018 } 1019 } 1020 1021 return 0; 1022 } 1023 1024 static const VMStateDescription vmstate_xive_tctx = { 1025 .name = TYPE_XIVE_TCTX, 1026 .version_id = 1, 1027 .minimum_version_id = 1, 1028 .pre_save = vmstate_xive_tctx_pre_save, 1029 .post_load = vmstate_xive_tctx_post_load, 1030 .fields = (const VMStateField[]) { 1031 VMSTATE_BUFFER(regs, XiveTCTX), 1032 VMSTATE_END_OF_LIST() 1033 }, 1034 }; 1035 1036 static const Property xive_tctx_properties[] = { 1037 DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *), 1038 DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER, 1039 XivePresenter *), 1040 }; 1041 1042 static void xive_tctx_class_init(ObjectClass *klass, const void *data) 1043 { 1044 DeviceClass *dc = DEVICE_CLASS(klass); 1045 1046 dc->desc = "XIVE Interrupt Thread Context"; 1047 dc->realize = xive_tctx_realize; 1048 dc->vmsd = &vmstate_xive_tctx; 1049 device_class_set_props(dc, xive_tctx_properties); 1050 /* 1051 * Reason: part of XIVE interrupt controller, needs to be wired up 1052 * by xive_tctx_create(). 1053 */ 1054 dc->user_creatable = false; 1055 } 1056 1057 static const TypeInfo xive_tctx_info = { 1058 .name = TYPE_XIVE_TCTX, 1059 .parent = TYPE_DEVICE, 1060 .instance_size = sizeof(XiveTCTX), 1061 .class_init = xive_tctx_class_init, 1062 }; 1063 1064 Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp) 1065 { 1066 Object *obj; 1067 1068 obj = object_new(TYPE_XIVE_TCTX); 1069 object_property_add_child(cpu, TYPE_XIVE_TCTX, obj); 1070 object_unref(obj); 1071 object_property_set_link(obj, "cpu", cpu, &error_abort); 1072 object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort); 1073 if (!qdev_realize(DEVICE(obj), NULL, errp)) { 1074 object_unparent(obj); 1075 return NULL; 1076 } 1077 return obj; 1078 } 1079 1080 void xive_tctx_destroy(XiveTCTX *tctx) 1081 { 1082 Object *obj = OBJECT(tctx); 1083 1084 object_unparent(obj); 1085 } 1086 1087 /* 1088 * XIVE ESB helpers 1089 */ 1090 1091 uint8_t xive_esb_set(uint8_t *pq, uint8_t value) 1092 { 1093 uint8_t old_pq = *pq & 0x3; 1094 1095 *pq &= ~0x3; 1096 *pq |= value & 0x3; 1097 1098 return old_pq; 1099 } 1100 1101 bool xive_esb_trigger(uint8_t *pq) 1102 { 1103 uint8_t old_pq = *pq & 0x3; 1104 1105 switch (old_pq) { 1106 case XIVE_ESB_RESET: 1107 xive_esb_set(pq, XIVE_ESB_PENDING); 1108 return true; 1109 case XIVE_ESB_PENDING: 1110 case XIVE_ESB_QUEUED: 1111 xive_esb_set(pq, XIVE_ESB_QUEUED); 1112 return false; 1113 case XIVE_ESB_OFF: 1114 xive_esb_set(pq, XIVE_ESB_OFF); 1115 return false; 1116 default: 1117 g_assert_not_reached(); 1118 } 1119 } 1120 1121 bool xive_esb_eoi(uint8_t *pq) 1122 { 1123 uint8_t old_pq = *pq & 0x3; 1124 1125 switch (old_pq) { 1126 case XIVE_ESB_RESET: 1127 case XIVE_ESB_PENDING: 1128 xive_esb_set(pq, XIVE_ESB_RESET); 1129 return false; 1130 case XIVE_ESB_QUEUED: 1131 xive_esb_set(pq, XIVE_ESB_PENDING); 1132 return true; 1133 case XIVE_ESB_OFF: 1134 xive_esb_set(pq, XIVE_ESB_OFF); 1135 return false; 1136 default: 1137 g_assert_not_reached(); 1138 } 1139 } 1140 1141 /* 1142 * XIVE Interrupt Source (or IVSE) 1143 */ 1144 1145 uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno) 1146 { 1147 assert(srcno < xsrc->nr_irqs); 1148 1149 return xsrc->status[srcno] & 0x3; 1150 } 1151 1152 uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq) 1153 { 1154 assert(srcno < xsrc->nr_irqs); 1155 1156 return 
xive_esb_set(&xsrc->status[srcno], pq); 1157 } 1158 1159 /* 1160 * Returns whether the event notification should be forwarded. 1161 */ 1162 static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) 1163 { 1164 uint8_t old_pq = xive_source_esb_get(xsrc, srcno); 1165 1166 xive_source_set_asserted(xsrc, srcno, true); 1167 1168 switch (old_pq) { 1169 case XIVE_ESB_RESET: 1170 xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING); 1171 return true; 1172 default: 1173 return false; 1174 } 1175 } 1176 1177 /* 1178 * Sources can be configured with PQ offloading in which case the check 1179 * on the PQ state bits of MSIs is disabled 1180 */ 1181 static bool xive_source_esb_disabled(XiveSource *xsrc, uint32_t srcno) 1182 { 1183 return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) && 1184 !xive_source_irq_is_lsi(xsrc, srcno); 1185 } 1186 1187 /* 1188 * Returns whether the event notification should be forwarded. 1189 */ 1190 static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) 1191 { 1192 bool ret; 1193 1194 assert(srcno < xsrc->nr_irqs); 1195 1196 if (xive_source_esb_disabled(xsrc, srcno)) { 1197 return true; 1198 } 1199 1200 ret = xive_esb_trigger(&xsrc->status[srcno]); 1201 1202 if (xive_source_irq_is_lsi(xsrc, srcno) && 1203 xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) { 1204 qemu_log_mask(LOG_GUEST_ERROR, 1205 "XIVE: queued an event on LSI IRQ %d\n", srcno); 1206 } 1207 1208 return ret; 1209 } 1210 1211 /* 1212 * Returns whether the event notification should be forwarded. 1213 */ 1214 static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) 1215 { 1216 bool ret; 1217 1218 assert(srcno < xsrc->nr_irqs); 1219 1220 if (xive_source_esb_disabled(xsrc, srcno)) { 1221 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EOI for IRQ %d\n", srcno); 1222 return false; 1223 } 1224 1225 ret = xive_esb_eoi(&xsrc->status[srcno]); 1226 1227 /* 1228 * LSI sources do not set the Q bit but they can still be 1229 * asserted, in which case we should forward a new event 1230 * notification 1231 */ 1232 if (xive_source_irq_is_lsi(xsrc, srcno) && 1233 xive_source_is_asserted(xsrc, srcno)) { 1234 ret = xive_source_lsi_trigger(xsrc, srcno); 1235 } 1236 1237 return ret; 1238 } 1239 1240 /* 1241 * Forward the source event notification to the Router 1242 */ 1243 static void xive_source_notify(XiveSource *xsrc, int srcno) 1244 { 1245 XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); 1246 bool pq_checked = !xive_source_esb_disabled(xsrc, srcno); 1247 1248 if (xnc->notify) { 1249 xnc->notify(xsrc->xive, srcno, pq_checked); 1250 } 1251 } 1252 1253 /* 1254 * In a two pages ESB MMIO setting, even page is the trigger page, odd 1255 * page is for management 1256 */ 1257 static inline bool addr_is_even(hwaddr addr, uint32_t shift) 1258 { 1259 return !((addr >> shift) & 1); 1260 } 1261 1262 static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr) 1263 { 1264 return xive_source_esb_has_2page(xsrc) && 1265 addr_is_even(addr, xsrc->esb_shift - 1); 1266 } 1267 1268 /* 1269 * ESB MMIO loads 1270 * Trigger page Management/EOI page 1271 * 1272 * ESB MMIO setting 2 pages 1 or 2 pages 1273 * 1274 * 0x000 .. 0x3FF -1 EOI and return 0|1 1275 * 0x400 .. 0x7FF -1 EOI and return 0|1 1276 * 0x800 .. 0xBFF -1 return PQ 1277 * 0xC00 .. 0xCFF -1 return PQ and atomically PQ=00 1278 * 0xD00 .. 0xDFF -1 return PQ and atomically PQ=01 1279 * 0xE00 .. 0xDFF -1 return PQ and atomically PQ=10 1280 * 0xF00 .. 
0xDFF -1 return PQ and atomically PQ=11 1281 */ 1282 static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) 1283 { 1284 XiveSource *xsrc = XIVE_SOURCE(opaque); 1285 uint32_t offset = addr & 0xFFF; 1286 uint32_t srcno = addr >> xsrc->esb_shift; 1287 uint64_t ret = -1; 1288 1289 /* In a two pages ESB MMIO setting, trigger page should not be read */ 1290 if (xive_source_is_trigger_page(xsrc, addr)) { 1291 qemu_log_mask(LOG_GUEST_ERROR, 1292 "XIVE: invalid load on IRQ %d trigger page at " 1293 "0x%"HWADDR_PRIx"\n", srcno, addr); 1294 return -1; 1295 } 1296 1297 switch (offset) { 1298 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: 1299 ret = xive_source_esb_eoi(xsrc, srcno); 1300 1301 /* Forward the source event notification for routing */ 1302 if (ret) { 1303 trace_xive_source_notify(srcno); 1304 xive_source_notify(xsrc, srcno); 1305 } 1306 break; 1307 1308 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: 1309 ret = xive_source_esb_get(xsrc, srcno); 1310 break; 1311 1312 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 1313 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 1314 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 1315 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 1316 ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); 1317 break; 1318 default: 1319 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n", 1320 offset); 1321 } 1322 1323 trace_xive_source_esb_read(addr, srcno, ret); 1324 1325 return ret; 1326 } 1327 1328 /* 1329 * ESB MMIO stores 1330 * Trigger page Management/EOI page 1331 * 1332 * ESB MMIO setting 2 pages 1 or 2 pages 1333 * 1334 * 0x000 .. 0x3FF Trigger Trigger 1335 * 0x400 .. 0x7FF Trigger EOI 1336 * 0x800 .. 0xBFF Trigger undefined 1337 * 0xC00 .. 0xCFF Trigger PQ=00 1338 * 0xD00 .. 0xDFF Trigger PQ=01 1339 * 0xE00 .. 0xDFF Trigger PQ=10 1340 * 0xF00 .. 0xDFF Trigger PQ=11 1341 */ 1342 static void xive_source_esb_write(void *opaque, hwaddr addr, 1343 uint64_t value, unsigned size) 1344 { 1345 XiveSource *xsrc = XIVE_SOURCE(opaque); 1346 uint32_t offset = addr & 0xFFF; 1347 uint32_t srcno = addr >> xsrc->esb_shift; 1348 bool notify = false; 1349 1350 trace_xive_source_esb_write(addr, srcno, value); 1351 1352 /* In a two pages ESB MMIO setting, trigger page only triggers */ 1353 if (xive_source_is_trigger_page(xsrc, addr)) { 1354 notify = xive_source_esb_trigger(xsrc, srcno); 1355 goto out; 1356 } 1357 1358 switch (offset) { 1359 case 0 ... 0x3FF: 1360 notify = xive_source_esb_trigger(xsrc, srcno); 1361 break; 1362 1363 case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF: 1364 if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) { 1365 qemu_log_mask(LOG_GUEST_ERROR, 1366 "XIVE: invalid Store EOI for IRQ %d\n", srcno); 1367 return; 1368 } 1369 1370 notify = xive_source_esb_eoi(xsrc, srcno); 1371 break; 1372 1373 /* 1374 * This is an internal offset used to inject triggers when the PQ 1375 * state bits are not controlled locally. Such as for LSIs when 1376 * under ABT mode. 1377 */ 1378 case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF: 1379 notify = true; 1380 break; 1381 1382 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 1383 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 1384 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 1385 case XIVE_ESB_SET_PQ_11 ... 
XIVE_ESB_SET_PQ_11 + 0x0FF: 1386 xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); 1387 break; 1388 1389 default: 1390 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n", 1391 offset); 1392 return; 1393 } 1394 1395 out: 1396 /* Forward the source event notification for routing */ 1397 if (notify) { 1398 xive_source_notify(xsrc, srcno); 1399 } else { 1400 trace_xive_source_blocked(srcno); 1401 } 1402 } 1403 1404 static const MemoryRegionOps xive_source_esb_ops = { 1405 .read = xive_source_esb_read, 1406 .write = xive_source_esb_write, 1407 .endianness = DEVICE_BIG_ENDIAN, 1408 .valid = { 1409 .min_access_size = 1, 1410 .max_access_size = 8, 1411 }, 1412 .impl = { 1413 .min_access_size = 1, 1414 .max_access_size = 8, 1415 }, 1416 }; 1417 1418 void xive_source_set_irq(void *opaque, int srcno, int val) 1419 { 1420 XiveSource *xsrc = XIVE_SOURCE(opaque); 1421 bool notify = false; 1422 1423 if (xive_source_irq_is_lsi(xsrc, srcno)) { 1424 if (val) { 1425 notify = xive_source_lsi_trigger(xsrc, srcno); 1426 } else { 1427 xive_source_set_asserted(xsrc, srcno, false); 1428 } 1429 } else { 1430 if (val) { 1431 notify = xive_source_esb_trigger(xsrc, srcno); 1432 } 1433 } 1434 1435 /* Forward the source event notification for routing */ 1436 if (notify) { 1437 xive_source_notify(xsrc, srcno); 1438 } 1439 } 1440 1441 void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, GString *buf) 1442 { 1443 for (unsigned i = 0; i < xsrc->nr_irqs; i++) { 1444 uint8_t pq = xive_source_esb_get(xsrc, i); 1445 1446 if (pq == XIVE_ESB_OFF) { 1447 continue; 1448 } 1449 1450 g_string_append_printf(buf, " %08x %s %c%c%c\n", i + offset, 1451 xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", 1452 pq & XIVE_ESB_VAL_P ? 'P' : '-', 1453 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 1454 xive_source_is_asserted(xsrc, i) ? 
'A' : ' '); 1455 } 1456 } 1457 1458 static void xive_source_reset(void *dev) 1459 { 1460 XiveSource *xsrc = XIVE_SOURCE(dev); 1461 1462 /* Do not clear the LSI bitmap */ 1463 1464 memset(xsrc->status, xsrc->reset_pq, xsrc->nr_irqs); 1465 } 1466 1467 static void xive_source_realize(DeviceState *dev, Error **errp) 1468 { 1469 XiveSource *xsrc = XIVE_SOURCE(dev); 1470 uint64_t esb_len = xive_source_esb_len(xsrc); 1471 1472 assert(xsrc->xive); 1473 1474 if (!xsrc->nr_irqs) { 1475 error_setg(errp, "Number of interrupt needs to be greater than 0"); 1476 return; 1477 } 1478 1479 if (xsrc->esb_shift != XIVE_ESB_4K && 1480 xsrc->esb_shift != XIVE_ESB_4K_2PAGE && 1481 xsrc->esb_shift != XIVE_ESB_64K && 1482 xsrc->esb_shift != XIVE_ESB_64K_2PAGE) { 1483 error_setg(errp, "Invalid ESB shift setting"); 1484 return; 1485 } 1486 1487 xsrc->status = g_malloc0(xsrc->nr_irqs); 1488 xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); 1489 1490 memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len); 1491 memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc), 1492 &xive_source_esb_ops, xsrc, "xive.esb-emulated", 1493 esb_len); 1494 memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated); 1495 1496 qemu_register_reset(xive_source_reset, dev); 1497 } 1498 1499 static const VMStateDescription vmstate_xive_source = { 1500 .name = TYPE_XIVE_SOURCE, 1501 .version_id = 1, 1502 .minimum_version_id = 1, 1503 .fields = (const VMStateField[]) { 1504 VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), 1505 VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), 1506 VMSTATE_END_OF_LIST() 1507 }, 1508 }; 1509 1510 /* 1511 * The default XIVE interrupt source setting for the ESB MMIOs is two 1512 * 64k pages without Store EOI, to be in sync with KVM. 1513 */ 1514 static const Property xive_source_properties[] = { 1515 DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0), 1516 DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0), 1517 DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE), 1518 /* 1519 * By default, PQs are initialized to 0b01 (Q=1) which corresponds 1520 * to "ints off" 1521 */ 1522 DEFINE_PROP_UINT8("reset-pq", XiveSource, reset_pq, XIVE_ESB_OFF), 1523 DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER, 1524 XiveNotifier *), 1525 }; 1526 1527 static void xive_source_class_init(ObjectClass *klass, const void *data) 1528 { 1529 DeviceClass *dc = DEVICE_CLASS(klass); 1530 1531 dc->desc = "XIVE Interrupt Source"; 1532 device_class_set_props(dc, xive_source_properties); 1533 dc->realize = xive_source_realize; 1534 dc->vmsd = &vmstate_xive_source; 1535 /* 1536 * Reason: part of XIVE interrupt controller, needs to be wired up, 1537 * e.g. by spapr_xive_instance_init(). 1538 */ 1539 dc->user_creatable = false; 1540 } 1541 1542 static const TypeInfo xive_source_info = { 1543 .name = TYPE_XIVE_SOURCE, 1544 .parent = TYPE_DEVICE, 1545 .instance_size = sizeof(XiveSource), 1546 .class_init = xive_source_class_init, 1547 }; 1548 1549 /* 1550 * XiveEND helpers 1551 */ 1552 1553 void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, GString *buf) 1554 { 1555 uint64_t qaddr_base = xive_end_qaddr(end); 1556 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1557 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1558 uint32_t qentries = 1 << (qsize + 10); 1559 int i; 1560 1561 /* 1562 * print out the [ (qindex - (width - 1)) .. 
(qindex + 1)] window 1563 */ 1564 g_string_append_printf(buf, " [ "); 1565 qindex = (qindex - (width - 1)) & (qentries - 1); 1566 for (i = 0; i < width; i++) { 1567 uint64_t qaddr = qaddr_base + (qindex << 2); 1568 uint32_t qdata = -1; 1569 1570 if (dma_memory_read(&address_space_memory, qaddr, 1571 &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { 1572 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" 1573 HWADDR_PRIx "\n", qaddr); 1574 return; 1575 } 1576 g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "", 1577 be32_to_cpu(qdata)); 1578 qindex = (qindex + 1) & (qentries - 1); 1579 } 1580 g_string_append_c(buf, ']'); 1581 } 1582 1583 void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, GString *buf) 1584 { 1585 uint64_t qaddr_base = xive_end_qaddr(end); 1586 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1587 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); 1588 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1589 uint32_t qentries = 1 << (qsize + 10); 1590 1591 uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6); 1592 uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6); 1593 uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); 1594 uint8_t pq; 1595 1596 if (!xive_end_is_valid(end)) { 1597 return; 1598 } 1599 1600 pq = xive_get_field32(END_W1_ESn, end->w1); 1601 1602 g_string_append_printf(buf, 1603 " %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x", 1604 end_idx, 1605 pq & XIVE_ESB_VAL_P ? 'P' : '-', 1606 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 1607 xive_end_is_valid(end) ? 'v' : '-', 1608 xive_end_is_enqueue(end) ? 'q' : '-', 1609 xive_end_is_notify(end) ? 'n' : '-', 1610 xive_end_is_backlog(end) ? 'b' : '-', 1611 xive_end_is_escalate(end) ? 'e' : '-', 1612 xive_end_is_uncond_escalation(end) ? 'u' : '-', 1613 xive_end_is_silent_escalation(end) ? 's' : '-', 1614 xive_end_is_firmware(end) ? 
'f' : '-', 1615 priority, nvt_blk, nvt_idx); 1616 1617 if (qaddr_base) { 1618 g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d", 1619 qaddr_base, qindex, qentries, qgen); 1620 xive_end_queue_pic_print_info(end, 6, buf); 1621 } 1622 g_string_append_c(buf, '\n'); 1623 } 1624 1625 static void xive_end_enqueue(XiveEND *end, uint32_t data) 1626 { 1627 uint64_t qaddr_base = xive_end_qaddr(end); 1628 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1629 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1630 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); 1631 1632 uint64_t qaddr = qaddr_base + (qindex << 2); 1633 uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); 1634 uint32_t qentries = 1 << (qsize + 10); 1635 1636 if (dma_memory_write(&address_space_memory, qaddr, 1637 &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { 1638 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" 1639 HWADDR_PRIx "\n", qaddr); 1640 return; 1641 } 1642 1643 qindex = (qindex + 1) & (qentries - 1); 1644 if (qindex == 0) { 1645 qgen ^= 1; 1646 end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen); 1647 } 1648 end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); 1649 } 1650 1651 void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, GString *buf) 1652 { 1653 XiveEAS *eas = (XiveEAS *) &end->w4; 1654 uint8_t pq; 1655 1656 if (!xive_end_is_escalate(end)) { 1657 return; 1658 } 1659 1660 pq = xive_get_field32(END_W1_ESe, end->w1); 1661 1662 g_string_append_printf(buf, " %08x %c%c %c%c end:%02x/%04x data:%08x\n", 1663 end_idx, 1664 pq & XIVE_ESB_VAL_P ? 'P' : '-', 1665 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 1666 xive_eas_is_valid(eas) ? 'V' : ' ', 1667 xive_eas_is_masked(eas) ? 'M' : ' ', 1668 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), 1669 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), 1670 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); 1671 } 1672 1673 /* 1674 * XIVE Router (aka. 
Virtualization Controller or IVRE) 1675 */ 1676 1677 int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1678 XiveEAS *eas) 1679 { 1680 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1681 1682 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); 1683 } 1684 1685 static 1686 int xive_router_get_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1687 uint8_t *pq) 1688 { 1689 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1690 1691 return xrc->get_pq(xrtr, eas_blk, eas_idx, pq); 1692 } 1693 1694 static 1695 int xive_router_set_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1696 uint8_t *pq) 1697 { 1698 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1699 1700 return xrc->set_pq(xrtr, eas_blk, eas_idx, pq); 1701 } 1702 1703 int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, 1704 XiveEND *end) 1705 { 1706 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1707 1708 return xrc->get_end(xrtr, end_blk, end_idx, end); 1709 } 1710 1711 int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, 1712 XiveEND *end, uint8_t word_number) 1713 { 1714 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1715 1716 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); 1717 } 1718 1719 int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, 1720 XiveNVT *nvt) 1721 { 1722 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1723 1724 return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt); 1725 } 1726 1727 int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, 1728 XiveNVT *nvt, uint8_t word_number) 1729 { 1730 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1731 1732 return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number); 1733 } 1734 1735 static int xive_router_get_block_id(XiveRouter *xrtr) 1736 { 1737 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1738 1739 return xrc->get_block_id(xrtr); 1740 } 1741 1742 static void xive_router_realize(DeviceState *dev, Error **errp) 1743 { 1744 XiveRouter *xrtr = XIVE_ROUTER(dev); 1745 1746 assert(xrtr->xfb); 1747 } 1748 1749 static void xive_router_end_notify_handler(XiveRouter *xrtr, XiveEAS *eas) 1750 { 1751 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1752 1753 return xrc->end_notify(xrtr, eas); 1754 } 1755 1756 /* 1757 * Encode the HW CAM line in the block group mode format : 1758 * 1759 * chip << 19 | 0000000 0 0001 thread (7Bit) 1760 */ 1761 static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) 1762 { 1763 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 1764 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 1765 uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr)); 1766 1767 return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f)); 1768 } 1769 1770 uint32_t xive_get_vpgroup_size(uint32_t nvp_index) 1771 { 1772 /* 1773 * Group size is a power of 2. The position of the first 0 1774 * (starting with the least significant bits) in the NVP index 1775 * gives the size of the group. 
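 * For example, an NVP index ending in binary ...011 has two trailing ones:
 * cto32() returns 2 and the group size is 1 << (2 + 1) = 8.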
1776 */ 1777 int first_zero = cto32(nvp_index); 1778 if (first_zero >= 31) { 1779 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x", 1780 nvp_index); 1781 return 0; 1782 } 1783 1784 return 1U << (first_zero + 1); 1785 } 1786 1787 uint8_t xive_get_group_level(bool crowd, bool ignore, 1788 uint32_t nvp_blk, uint32_t nvp_index) 1789 { 1790 int first_zero; 1791 uint8_t level; 1792 1793 if (!ignore) { 1794 g_assert(!crowd); 1795 return 0; 1796 } 1797 1798 first_zero = cto32(nvp_index); 1799 if (first_zero >= 31) { 1800 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x", 1801 nvp_index); 1802 return 0; 1803 } 1804 1805 level = (first_zero + 1) & 0b1111; 1806 if (crowd) { 1807 uint32_t blk; 1808 1809 /* crowd level is bit position of first 0 from the right in nvp_blk */ 1810 first_zero = cto32(nvp_blk); 1811 if (first_zero >= 31) { 1812 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd block 0x%08x", 1813 nvp_blk); 1814 return 0; 1815 } 1816 blk = first_zero + 1; 1817 1818 /* 1819 * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported. 1820 * HW will encode level 4 as the value 3. See xive2_pgofnext(). 1821 */ 1822 switch (blk) { 1823 case 1: 1824 case 2: 1825 break; 1826 case 4: 1827 blk = 3; 1828 break; 1829 default: 1830 g_assert_not_reached(); 1831 } 1832 1833 /* Crowd level bits reside in upper 2 bits of the 6 bit group level */ 1834 level |= blk << 4; 1835 } 1836 return level; 1837 } 1838 1839 /* 1840 * The thread context register words are in big-endian format. 1841 */ 1842 int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, 1843 uint8_t format, 1844 uint8_t nvt_blk, uint32_t nvt_idx, 1845 bool cam_ignore, uint32_t logic_serv) 1846 { 1847 uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx); 1848 uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); 1849 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); 1850 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); 1851 uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); 1852 1853 /* 1854 * TODO (PowerNV): ignore mode. The low order bits of the NVT 1855 * identifier are ignored in the "CAM" match. 1856 */ 1857 1858 if (format == 0) { 1859 if (cam_ignore == true) { 1860 /* 1861 * F=0 & i=1: Logical server notification (bits ignored at 1862 * the end of the NVT identifier) 1863 */ 1864 qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", 1865 nvt_blk, nvt_idx); 1866 return -1; 1867 } 1868 1869 /* F=0 & i=0: Specific NVT notification */ 1870 1871 /* PHYS ring */ 1872 if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) && 1873 cam == xive_tctx_hw_cam_line(xptr, tctx)) { 1874 return TM_QW3_HV_PHYS; 1875 } 1876 1877 /* HV POOL ring */ 1878 if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) && 1879 cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) { 1880 return TM_QW2_HV_POOL; 1881 } 1882 1883 /* OS ring */ 1884 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && 1885 cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) { 1886 return TM_QW1_OS; 1887 } 1888 } else { 1889 /* F=1 : User level Event-Based Branch (EBB) notification */ 1890 1891 /* USER ring */ 1892 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && 1893 (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) && 1894 (be32_to_cpu(qw0w2) & TM_QW0W2_VU) && 1895 (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) { 1896 return TM_QW0_USER; 1897 } 1898 } 1899 return -1; 1900 } 1901 1902 /* 1903 * This is our simple Xive Presenter Engine model. It is merged in the 1904 * Router as it does not require an extra object. 
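 * The scan of the thread contexts is delegated to the machine, which
 * implements the XiveFabric interface (see the use of match_nvt() below).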
 */
bool xive_presenter_match(XiveFabric *xfb, uint8_t format,
                          uint8_t nvt_blk, uint32_t nvt_idx,
                          bool crowd, bool cam_ignore, uint8_t priority,
                          uint32_t logic_serv, XiveTCTXMatch *match)
{
    XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);

    memset(match, 0, sizeof(*match));

    /*
     * Ask the machine to scan the interrupt controllers for a match.
     *
     * For VP-specific notification, we expect at most one match and
     * one call to the presenters is all we need (abbreviated notify
     * sequence documented by the architecture).
     *
     * For VP-group notification, match_nvt() is the equivalent of the
     * "histogram" and "poll" commands sent over the power bus to the
     * presenters. 'count' could be more than one, but we always
     * select the first match for now. 'precluded' tells whether (at
     * least) one thread matches but can't take the interrupt now
     * because it's running at a more favored priority. We return the
     * information to the router so that it can take appropriate
     * actions (backlog, escalation, broadcast, etc.)
     *
     * If we were to implement a better way of dispatching the
     * interrupt in case of multiple matches (instead of the first
     * match), we would need a heuristic to elect a thread (for
     * example, the hardware keeps track of an 'age' in the TIMA) and
     * a new command to the presenters (the equivalent of the "assign"
     * power bus command in the documented full notify sequence).
     */
    return xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore,
                          priority, logic_serv, match);
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). This provides further coalescing in
 * the Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
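 * i.e. the END block/index and the event data carried by the EAS.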
/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    XiveTCTXMatch match;

    uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w);

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        trace_xive_router_end_notify(end_blk, end_idx, end_data);
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification:
     *
     * F=0 : single or multiple NVT notification
     * F=1 : User level Event-Based Branch (EBB) notification, no
     *       priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    /* TODO: Auto EOI. */
    /* We don't support VP-group notification on P9, so precluded is unused */
    if (xive_presenter_match(xrtr->xfb, format, nvt_blk, nvt_idx,
                             false /* crowd */,
                             xive_get_field32(END_W7_F0_IGNORE, end.w7),
                             priority,
                             xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7),
                             &match)) {
        trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, 0);
        xive_tctx_pipr_present(match.tctx, match.ring, priority, 0);
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread:
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /*
         * Record the IPB in the associated NVT structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) |
              xive_priority_to_ipb(priority);
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    trace_xive_router_end_escalate(end_blk, end_idx,
           (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
           (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
           (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5));
    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify_handler(xrtr, (XiveEAS *) &end.w4);
}
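
/*
 * Entry point for event triggers coming from the sources: the LISN
 * selects an EAS, the source PQ bits are checked here when the source
 * has not already done so, masked or invalid entries end the
 * notification, and the event is finally turned into an END trigger
 * handed to the router's end_notify handler.
 */
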
void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify_handler(xrtr, &eas);
}

static const Property xive_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
};

static void xive_router_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    device_class_set_props(dc, xive_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive_router_realize;
    xnc->notify = xive_router_notify;

    /* By default, the router handles END triggers locally */
    xrc->end_notify = xive_router_end_notify;
}

static const TypeInfo xive_router_info = {
    .name = TYPE_XIVE_ROUTER,
    .parent = TYPE_SYS_BUS_DEVICE,
    .abstract = true,
    .instance_size = sizeof(XiveRouter),
    .class_size = sizeof(XiveRouterClass),
    .class_init = xive_router_class_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, GString *buf)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, " %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}
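
/*
 * Layout of the END ESB MMIO region modelled below (illustrative numbers,
 * derived from the address decoding in xive_end_source_read()): with 64K
 * ESB pages (esb_shift = 16), each END owns a 128K slice of "xive.end".
 * END #2 thus spans 0x40000-0x5ffff: loads on the even page 0x40000-0x4ffff
 * operate on ESn, loads on the odd page 0x50000-0x5ffff operate on ESe,
 * and the low 12 bits of the address select the ESB command (EOI, GET,
 * SET_PQ_nn).
 */
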
/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    trace_xive_end_source_read(end_blk, end_idx, addr);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}
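
/*
 * Note on the SET_PQ decoding above: the XIVE_ESB_SET_PQ_nn command pages
 * are assumed to sit at consecutive 0x100 offsets, which is why bits 9:8
 * of the page offset can be passed directly to xive_esb_set() as the new
 * PQ value.
 */
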
/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static const Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
                     XiveRouter *),
};

static void xive_end_source_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    device_class_set_props(dc, xive_end_source_properties);
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name = TYPE_XIVE_END_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init = xive_end_source_class_init,
};
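
/*
 * The device is not user creatable, so the machine code is expected to
 * wire it up itself. A minimal sketch of the usual QOM sequence (the
 * parent object and the 'end_source' field names are illustrative; MMIO
 * mapping and error handling are machine specific):
 *
 *   object_initialize_child(OBJECT(xive), "end_source", &xive->end_source,
 *                           TYPE_XIVE_END_SOURCE);
 *   object_property_set_int(OBJECT(&xive->end_source), "nr-ends", nr_ends,
 *                           &error_fatal);
 *   object_property_set_link(OBJECT(&xive->end_source), "xive",
 *                            OBJECT(xive), &error_abort);
 *   qdev_realize(DEVICE(&xive->end_source), NULL, &error_fatal);
 */
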
/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

/*
 * XIVE Presenter
 */
static const TypeInfo xive_presenter_info = {
    .name = TYPE_XIVE_PRESENTER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XivePresenterClass),
};

/*
 * XIVE Fabric
 */
static const TypeInfo xive_fabric_info = {
    .name = TYPE_XIVE_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveFabricClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_fabric_info);
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_presenter_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)