1 /* 2 * QEMU PowerPC XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2018, IBM Corporation. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/log.h" 11 #include "qemu/module.h" 12 #include "qapi/error.h" 13 #include "target/ppc/cpu.h" 14 #include "system/cpus.h" 15 #include "system/dma.h" 16 #include "system/reset.h" 17 #include "hw/qdev-properties.h" 18 #include "migration/vmstate.h" 19 #include "hw/irq.h" 20 #include "hw/ppc/xive.h" 21 #include "hw/ppc/xive2.h" 22 #include "hw/ppc/xive_regs.h" 23 #include "trace.h" 24 25 /* 26 * XIVE Thread Interrupt Management context 27 */ 28 bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr) 29 { 30 switch (ring) { 31 case TM_QW1_OS: 32 return !!(nsr & TM_QW1_NSR_EO); 33 case TM_QW2_HV_POOL: 34 case TM_QW3_HV_PHYS: 35 return !!(nsr & TM_QW3_NSR_HE); 36 default: 37 g_assert_not_reached(); 38 } 39 } 40 41 bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr) 42 { 43 if ((nsr & TM_NSR_GRP_LVL) > 0) { 44 g_assert(xive_nsr_indicates_exception(ring, nsr)); 45 return true; 46 } 47 return false; 48 } 49 50 uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr) 51 { 52 /* NSR determines if pool/phys ring is for phys or pool interrupt */ 53 if ((ring == TM_QW3_HV_PHYS) || (ring == TM_QW2_HV_POOL)) { 54 uint8_t he = (nsr & TM_QW3_NSR_HE) >> 6; 55 56 if (he == TM_QW3_NSR_HE_PHYS) { 57 return TM_QW3_HV_PHYS; 58 } else if (he == TM_QW3_NSR_HE_POOL) { 59 return TM_QW2_HV_POOL; 60 } else { 61 /* Don't support LSI mode */ 62 g_assert_not_reached(); 63 } 64 } 65 return ring; 66 } 67 68 static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) 69 { 70 switch (ring) { 71 case TM_QW0_USER: 72 return 0; /* Not supported */ 73 case TM_QW1_OS: 74 return tctx->os_output; 75 case TM_QW2_HV_POOL: 76 case TM_QW3_HV_PHYS: 77 return tctx->hv_output; 78 default: 79 return 0; 80 } 81 } 82 83 uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) 84 { 85 uint8_t *regs = &tctx->regs[ring]; 86 uint8_t nsr = regs[TM_NSR]; 87 88 qemu_irq_lower(xive_tctx_output(tctx, ring)); 89 90 if (xive_nsr_indicates_exception(ring, nsr)) { 91 uint8_t cppr = regs[TM_PIPR]; 92 uint8_t alt_ring; 93 uint8_t *alt_regs; 94 95 alt_ring = xive_nsr_exception_ring(ring, nsr); 96 alt_regs = &tctx->regs[alt_ring]; 97 98 regs[TM_CPPR] = cppr; 99 100 /* 101 * If the interrupt was for a specific VP, reset the pending 102 * buffer bit, otherwise clear the logical server indicator 103 */ 104 if (!xive_nsr_indicates_group_exception(ring, nsr)) { 105 alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); 106 } 107 108 /* Clear the exception from NSR */ 109 regs[TM_NSR] = 0; 110 111 trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring, 112 alt_regs[TM_IPB], regs[TM_PIPR], 113 regs[TM_CPPR], regs[TM_NSR]); 114 } 115 116 return ((uint64_t)nsr << 8) | regs[TM_CPPR]; 117 } 118 119 void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level) 120 { 121 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ 122 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; 123 uint8_t *alt_regs = &tctx->regs[alt_ring]; 124 uint8_t *regs = &tctx->regs[ring]; 125 126 if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) { 127 switch (ring) { 128 case TM_QW1_OS: 129 regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); 130 break; 131 case TM_QW2_HV_POOL: 132 alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); 133 break; 134 case TM_QW3_HV_PHYS: 135 regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); 136 break; 137 default: 138 g_assert_not_reached(); 139 } 140 trace_xive_tctx_notify(tctx->cs->cpu_index, ring, 141 regs[TM_IPB], alt_regs[TM_PIPR], 142 alt_regs[TM_CPPR], alt_regs[TM_NSR]); 143 qemu_irq_raise(xive_tctx_output(tctx, ring)); 144 } else { 145 alt_regs[TM_NSR] = 0; 146 qemu_irq_lower(xive_tctx_output(tctx, ring)); 147 } 148 } 149 150 void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring) 151 { 152 /* 153 * Lower the External interrupt. Used when pulling a context. It is 154 * necessary to avoid catching it in the higher privilege context. It 155 * should be raised again when re-pushing the lower privilege context. 156 */ 157 qemu_irq_lower(xive_tctx_output(tctx, ring)); 158 } 159 160 static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) 161 { 162 uint8_t *regs = &tctx->regs[ring]; 163 uint8_t pipr_min; 164 uint8_t ring_min; 165 166 trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, 167 regs[TM_IPB], regs[TM_PIPR], 168 cppr, regs[TM_NSR]); 169 170 if (cppr > XIVE_PRIORITY_MAX) { 171 cppr = 0xff; 172 } 173 174 tctx->regs[ring + TM_CPPR] = cppr; 175 176 /* 177 * Recompute the PIPR based on local pending interrupts. The PHYS 178 * ring must take the minimum of both the PHYS and POOL PIPR values. 179 */ 180 pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); 181 ring_min = ring; 182 183 /* PHYS updates also depend on POOL values */ 184 if (ring == TM_QW3_HV_PHYS) { 185 uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL]; 186 187 /* POOL values only matter if POOL ctx is valid */ 188 if (pool_regs[TM_WORD2] & 0x80) { 189 190 uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); 191 192 /* 193 * Determine highest priority interrupt and 194 * remember which ring has it. 195 */ 196 if (pool_pipr < pipr_min) { 197 pipr_min = pool_pipr; 198 ring_min = TM_QW2_HV_POOL; 199 } 200 } 201 } 202 203 regs[TM_PIPR] = pipr_min; 204 205 /* CPPR has changed, check if we need to raise a pending exception */ 206 xive_tctx_notify(tctx, ring_min, 0); 207 } 208 209 void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, 210 uint8_t group_level) 211 { 212 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ 213 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; 214 uint8_t *alt_regs = &tctx->regs[alt_ring]; 215 uint8_t *regs = &tctx->regs[ring]; 216 217 if (group_level == 0) { 218 /* VP-specific */ 219 regs[TM_IPB] |= xive_priority_to_ipb(priority); 220 alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]); 221 } else { 222 /* VP-group */ 223 alt_regs[TM_PIPR] = xive_priority_to_pipr(priority); 224 } 225 xive_tctx_notify(tctx, ring, group_level); 226 } 227 228 /* 229 * XIVE Thread Interrupt Management Area (TIMA) 230 */ 231 232 static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, 233 hwaddr offset, uint64_t value, unsigned size) 234 { 235 xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); 236 } 237 238 static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx, 239 hwaddr offset, unsigned size) 240 { 241 return xive_tctx_accept(tctx, TM_QW3_HV_PHYS); 242 } 243 244 static void xive_pool_cam_decode(uint32_t cam, uint8_t *nvt_blk, 245 uint32_t *nvt_idx, bool *vp) 246 { 247 if (nvt_blk) { 248 *nvt_blk = xive_nvt_blk(cam); 249 } 250 if (nvt_idx) { 251 *nvt_idx = xive_nvt_idx(cam); 252 } 253 if (vp) { 254 *vp = !!(cam & TM_QW2W2_VP); 255 } 256 } 257 258 static uint32_t xive_tctx_get_pool_cam(XiveTCTX *tctx, uint8_t *nvt_blk, 259 uint32_t *nvt_idx, bool *vp) 260 { 261 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); 262 uint32_t cam = be32_to_cpu(qw2w2); 263 264 xive_pool_cam_decode(cam, nvt_blk, nvt_idx, vp); 265 return qw2w2; 266 } 267 268 static void xive_tctx_set_pool_cam(XiveTCTX *tctx, uint32_t qw2w2) 269 { 270 memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); 271 } 272 273 static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, 274 hwaddr offset, unsigned size) 275 { 276 uint32_t qw2w2; 277 uint32_t qw2w2_new; 278 uint8_t nvt_blk; 279 uint32_t nvt_idx; 280 bool vp; 281 282 qw2w2 = xive_tctx_get_pool_cam(tctx, &nvt_blk, &nvt_idx, &vp); 283 284 if (!vp) { 285 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid POOL NVT %x/%x !?\n", 286 nvt_blk, nvt_idx); 287 } 288 289 /* Invalidate CAM line */ 290 qw2w2_new = xive_set_field32(TM_QW2W2_VP, qw2w2, 0); 291 xive_tctx_set_pool_cam(tctx, qw2w2_new); 292 293 xive_tctx_reset_signal(tctx, TM_QW1_OS); 294 xive_tctx_reset_signal(tctx, TM_QW2_HV_POOL); 295 return qw2w2; 296 } 297 298 static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, 299 hwaddr offset, unsigned size) 300 { 301 uint8_t qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; 302 uint8_t qw3b8_new; 303 304 qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; 305 if (!(qw3b8 & TM_QW3B8_VT)) { 306 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid PHYS thread!?\n"); 307 } 308 qw3b8_new = qw3b8 & ~TM_QW3B8_VT; 309 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8_new; 310 311 xive_tctx_reset_signal(tctx, TM_QW1_OS); 312 xive_tctx_reset_signal(tctx, TM_QW3_HV_PHYS); 313 return qw3b8; 314 } 315 316 static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, 317 uint64_t value, unsigned size) 318 { 319 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff; 320 } 321 322 static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx, 323 hwaddr offset, unsigned size) 324 { 325 return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff; 326 } 327 328 /* 329 * Define an access map for each page of the TIMA that we will use in 330 * the memory region ops to filter values when doing loads and stores 331 * of raw registers values 332 * 333 * Registers accessibility bits : 334 * 335 * 0x0 - no access 336 * 0x1 - write only 337 * 0x2 - read only 338 * 0x3 - read/write 339 */ 340 341 static const uint8_t xive_tm_hw_view[] = { 342 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ 343 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */ 344 0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 345 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ 346 }; 347 348 static const uint8_t xive_tm_hv_view[] = { 349 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ 350 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */ 351 0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 352 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ 353 }; 354 355 static const uint8_t xive_tm_os_view[] = { 356 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ 357 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ 358 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ 359 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ 360 }; 361 362 static const uint8_t xive_tm_user_view[] = { 363 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */ 364 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ 365 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ 366 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ 367 }; 368 369 /* 370 * Overall TIMA access map for the thread interrupt management context 371 * registers 372 */ 373 static const uint8_t *xive_tm_views[] = { 374 [XIVE_TM_HW_PAGE] = xive_tm_hw_view, 375 [XIVE_TM_HV_PAGE] = xive_tm_hv_view, 376 [XIVE_TM_OS_PAGE] = xive_tm_os_view, 377 [XIVE_TM_USER_PAGE] = xive_tm_user_view, 378 }; 379 380 /* 381 * Computes a register access mask for a given offset in the TIMA 382 */ 383 static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write) 384 { 385 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; 386 uint8_t reg_offset = offset & TM_REG_OFFSET; 387 uint8_t reg_mask = write ? 0x1 : 0x2; 388 uint64_t mask = 0x0; 389 int i; 390 391 for (i = 0; i < size; i++) { 392 if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) { 393 mask |= (uint64_t) 0xff << (8 * (size - i - 1)); 394 } 395 } 396 397 return mask; 398 } 399 400 static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, 401 unsigned size) 402 { 403 uint8_t ring_offset = offset & TM_RING_OFFSET; 404 uint8_t reg_offset = offset & TM_REG_OFFSET; 405 uint64_t mask = xive_tm_mask(offset, size, true); 406 int i; 407 408 /* 409 * Only 4 or 8 bytes stores are allowed and the User ring is 410 * excluded 411 */ 412 if (size < 4 || !mask || ring_offset == TM_QW0_USER) { 413 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" 414 HWADDR_PRIx" size %d\n", offset, size); 415 return; 416 } 417 418 /* 419 * Use the register offset for the raw values and filter out 420 * reserved values 421 */ 422 for (i = 0; i < size; i++) { 423 uint8_t byte_mask = (mask >> (8 * (size - i - 1))); 424 if (byte_mask) { 425 tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) & 426 byte_mask; 427 } 428 } 429 } 430 431 static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) 432 { 433 uint8_t ring_offset = offset & TM_RING_OFFSET; 434 uint8_t reg_offset = offset & TM_REG_OFFSET; 435 uint64_t mask = xive_tm_mask(offset, size, false); 436 uint64_t ret; 437 int i; 438 439 /* 440 * Only 4 or 8 bytes loads are allowed and the User ring is 441 * excluded 442 */ 443 if (size < 4 || !mask || ring_offset == TM_QW0_USER) { 444 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" 445 HWADDR_PRIx" size %d\n", offset, size); 446 return -1; 447 } 448 449 /* Use the register offset for the raw values */ 450 ret = 0; 451 for (i = 0; i < size; i++) { 452 ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1)); 453 } 454 455 /* filter out reserved values */ 456 return ret & mask; 457 } 458 459 /* 460 * The TM context is mapped twice within each page. Stores and loads 461 * to the first mapping below 2K write and read the specified values 462 * without modification. The second mapping above 2K performs specific 463 * state changes (side effects) in addition to setting/returning the 464 * interrupt management area context of the processor thread. 465 */ 466 static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx, 467 hwaddr offset, unsigned size) 468 { 469 return xive_tctx_accept(tctx, TM_QW1_OS); 470 } 471 472 static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, 473 hwaddr offset, uint64_t value, unsigned size) 474 { 475 xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); 476 } 477 478 static void xive_tctx_set_lgs(XiveTCTX *tctx, uint8_t ring, uint8_t lgs) 479 { 480 uint8_t *regs = &tctx->regs[ring]; 481 482 regs[TM_LGS] = lgs; 483 } 484 485 static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, 486 hwaddr offset, uint64_t value, unsigned size) 487 { 488 xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff); 489 } 490 491 /* 492 * Adjust the PIPR to allow a CPU to process event queues of other 493 * priorities during one physical interrupt cycle. 494 */ 495 static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, 496 hwaddr offset, uint64_t value, unsigned size) 497 { 498 xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0); 499 } 500 501 static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, 502 uint32_t *nvt_idx, bool *vo) 503 { 504 if (nvt_blk) { 505 *nvt_blk = xive_nvt_blk(cam); 506 } 507 if (nvt_idx) { 508 *nvt_idx = xive_nvt_idx(cam); 509 } 510 if (vo) { 511 *vo = !!(cam & TM_QW1W2_VO); 512 } 513 } 514 515 static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk, 516 uint32_t *nvt_idx, bool *vo) 517 { 518 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); 519 uint32_t cam = be32_to_cpu(qw1w2); 520 521 xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo); 522 return qw1w2; 523 } 524 525 static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2) 526 { 527 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); 528 } 529 530 static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, 531 hwaddr offset, unsigned size) 532 { 533 uint32_t qw1w2; 534 uint32_t qw1w2_new; 535 uint8_t nvt_blk; 536 uint32_t nvt_idx; 537 bool vo; 538 539 qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo); 540 541 if (!vo) { 542 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid OS NVT %x/%x !?\n", 543 nvt_blk, nvt_idx); 544 } 545 546 /* Invalidate CAM line */ 547 qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0); 548 xive_tctx_set_os_cam(tctx, qw1w2_new); 549 550 xive_tctx_reset_signal(tctx, TM_QW1_OS); 551 return qw1w2; 552 } 553 554 static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, 555 uint8_t nvt_blk, uint32_t nvt_idx) 556 { 557 XiveNVT nvt; 558 uint8_t ipb; 559 560 /* 561 * Grab the associated NVT to pull the pending bits, and merge 562 * them with the IPB of the thread interrupt context registers 563 */ 564 if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { 565 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n", 566 nvt_blk, nvt_idx); 567 return; 568 } 569 570 ipb = xive_get_field32(NVT_W4_IPB, nvt.w4); 571 572 if (ipb) { 573 /* Reset the NVT value */ 574 nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0); 575 xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); 576 577 uint8_t *regs = &tctx->regs[TM_QW1_OS]; 578 regs[TM_IPB] |= ipb; 579 } 580 581 /* 582 * Always call xive_tctx_pipr_update(). Even if there were no 583 * escalation triggered, there could be a pending interrupt which 584 * was saved when the context was pulled and that we need to take 585 * into account by recalculating the PIPR (which is not 586 * saved/restored). 587 * It will also raise the External interrupt signal if needed. 588 */ 589 xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */ 590 } 591 592 /* 593 * Updating the OS CAM line can trigger a resend of interrupt 594 */ 595 static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, 596 hwaddr offset, uint64_t value, unsigned size) 597 { 598 uint32_t cam = value; 599 uint32_t qw1w2 = cpu_to_be32(cam); 600 uint8_t nvt_blk; 601 uint32_t nvt_idx; 602 bool vo; 603 604 xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo); 605 606 /* First update the registers */ 607 xive_tctx_set_os_cam(tctx, qw1w2); 608 609 /* Check the interrupt pending bits */ 610 if (vo) { 611 xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); 612 } 613 } 614 615 static uint32_t xive_presenter_get_config(XivePresenter *xptr) 616 { 617 XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); 618 619 return xpc->get_config(xptr); 620 } 621 622 /* 623 * Define a mapping of "special" operations depending on the TIMA page 624 * offset and the size of the operation. 625 */ 626 typedef struct XiveTmOp { 627 uint8_t page_offset; 628 uint32_t op_offset; 629 unsigned size; 630 void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx, 631 hwaddr offset, 632 uint64_t value, unsigned size); 633 uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, 634 unsigned size); 635 } XiveTmOp; 636 637 static const XiveTmOp xive_tm_operations[] = { 638 /* 639 * MMIOs below 2K : raw values and special operations without side 640 * effects 641 */ 642 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, 643 NULL }, 644 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, 645 NULL }, 646 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, 647 NULL }, 648 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, 649 NULL }, 650 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, 651 xive_tm_vt_poll }, 652 653 /* MMIOs above 2K : special operations with side effects */ 654 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, 655 xive_tm_ack_os_reg }, 656 { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, 657 NULL }, 658 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, 659 xive_tm_pull_os_ctx }, 660 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, 661 xive_tm_pull_os_ctx }, 662 { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, 663 xive_tm_ack_hv_reg }, 664 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, 665 xive_tm_pull_pool_ctx }, 666 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, 667 xive_tm_pull_pool_ctx }, 668 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, 669 xive_tm_pull_phys_ctx }, 670 }; 671 672 static const XiveTmOp xive2_tm_operations[] = { 673 /* 674 * MMIOs below 2K : raw values and special operations without side 675 * effects 676 */ 677 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr, 678 NULL }, 679 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, 680 NULL }, 681 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx, 682 NULL }, 683 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs, 684 NULL }, 685 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr, 686 NULL }, 687 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, 688 NULL }, 689 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, 690 xive_tm_vt_poll }, 691 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target, 692 NULL }, 693 694 /* MMIOs above 2K : special operations with side effects */ 695 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, 696 xive_tm_ack_os_reg }, 697 { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, 698 NULL }, 699 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL, 700 xive2_tm_pull_os_ctx }, 701 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, 702 xive2_tm_pull_os_ctx }, 703 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, 704 xive2_tm_pull_os_ctx }, 705 { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, 706 xive_tm_ack_hv_reg }, 707 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL, 708 xive2_tm_pull_pool_ctx }, 709 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, 710 xive2_tm_pull_pool_ctx }, 711 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, 712 xive2_tm_pull_pool_ctx }, 713 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol, 714 NULL }, 715 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL, 716 xive2_tm_pull_phys_ctx }, 717 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, 718 xive2_tm_pull_phys_ctx }, 719 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol, 720 NULL }, 721 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_EL, 1, xive2_tm_ack_os_el, 722 NULL }, 723 }; 724 725 static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset, 726 unsigned size, bool write) 727 { 728 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; 729 uint32_t op_offset = offset & TM_ADDRESS_MASK; 730 const XiveTmOp *tm_ops; 731 int i, tm_ops_count; 732 uint32_t cfg; 733 734 cfg = xive_presenter_get_config(xptr); 735 if (cfg & XIVE_PRESENTER_GEN1_TIMA_OS) { 736 tm_ops = xive_tm_operations; 737 tm_ops_count = ARRAY_SIZE(xive_tm_operations); 738 } else { 739 tm_ops = xive2_tm_operations; 740 tm_ops_count = ARRAY_SIZE(xive2_tm_operations); 741 } 742 743 for (i = 0; i < tm_ops_count; i++) { 744 const XiveTmOp *xto = &tm_ops[i]; 745 746 /* Accesses done from a more privileged TIMA page is allowed */ 747 if (xto->page_offset >= page_offset && 748 xto->op_offset == op_offset && 749 xto->size == size && 750 ((write && xto->write_handler) || (!write && xto->read_handler))) { 751 return xto; 752 } 753 } 754 return NULL; 755 } 756 757 /* 758 * TIMA MMIO handlers 759 */ 760 void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, 761 uint64_t value, unsigned size) 762 { 763 const XiveTmOp *xto; 764 765 trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value); 766 767 /* 768 * TODO: check V bit in Q[0-3]W2 769 */ 770 771 /* 772 * First, check for special operations in the 2K region 773 */ 774 if (offset & TM_SPECIAL_OP) { 775 xto = xive_tm_find_op(tctx->xptr, offset, size, true); 776 if (!xto) { 777 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA " 778 "@%"HWADDR_PRIx" size %d\n", offset, size); 779 } else { 780 xto->write_handler(xptr, tctx, offset, value, size); 781 } 782 return; 783 } 784 785 /* 786 * Then, for special operations in the region below 2K. 787 */ 788 xto = xive_tm_find_op(tctx->xptr, offset, size, true); 789 if (xto) { 790 xto->write_handler(xptr, tctx, offset, value, size); 791 return; 792 } 793 794 /* 795 * Finish with raw access to the register values 796 */ 797 xive_tm_raw_write(tctx, offset, value, size); 798 } 799 800 uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, 801 unsigned size) 802 { 803 const XiveTmOp *xto; 804 uint64_t ret; 805 806 /* 807 * TODO: check V bit in Q[0-3]W2 808 */ 809 810 /* 811 * First, check for special operations in the 2K region 812 */ 813 if (offset & TM_SPECIAL_OP) { 814 xto = xive_tm_find_op(tctx->xptr, offset, size, false); 815 if (!xto) { 816 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" 817 "@%"HWADDR_PRIx" size %d\n", offset, size); 818 return -1; 819 } 820 ret = xto->read_handler(xptr, tctx, offset, size); 821 goto out; 822 } 823 824 /* 825 * Then, for special operations in the region below 2K. 826 */ 827 xto = xive_tm_find_op(tctx->xptr, offset, size, false); 828 if (xto) { 829 ret = xto->read_handler(xptr, tctx, offset, size); 830 goto out; 831 } 832 833 /* 834 * Finish with raw access to the register values 835 */ 836 ret = xive_tm_raw_read(tctx, offset, size); 837 out: 838 trace_xive_tctx_tm_read(tctx->cs->cpu_index, offset, size, ret); 839 return ret; 840 } 841 842 static char *xive_tctx_ring_print(uint8_t *ring) 843 { 844 uint32_t w2 = xive_tctx_word2(ring); 845 846 return g_strdup_printf("%02x %02x %02x %02x %02x " 847 "%02x %02x %02x %08x", 848 ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB], 849 ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR], 850 be32_to_cpu(w2)); 851 } 852 853 static const char * const xive_tctx_ring_names[] = { 854 "USER", "OS", "POOL", "PHYS", 855 }; 856 857 /* 858 * kvm_irqchip_in_kernel() will cause the compiler to turn this 859 * info a nop if CONFIG_KVM isn't defined. 860 */ 861 #define xive_in_kernel(xptr) \ 862 (kvm_irqchip_in_kernel() && \ 863 ({ \ 864 XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); \ 865 xpc->in_kernel ? xpc->in_kernel(xptr) : false; \ 866 })) 867 868 void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf) 869 { 870 int cpu_index; 871 int i; 872 873 /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs 874 * are hot plugged or unplugged. 875 */ 876 if (!tctx) { 877 return; 878 } 879 880 cpu_index = tctx->cs ? tctx->cs->cpu_index : -1; 881 882 if (xive_in_kernel(tctx->xptr)) { 883 Error *local_err = NULL; 884 885 kvmppc_xive_cpu_synchronize_state(tctx, &local_err); 886 if (local_err) { 887 error_report_err(local_err); 888 return; 889 } 890 } 891 892 if (xive_presenter_get_config(tctx->xptr) & XIVE_PRESENTER_GEN1_TIMA_OS) { 893 g_string_append_printf(buf, "CPU[%04x]: " 894 "QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR" 895 " W2\n", cpu_index); 896 } else { 897 g_string_append_printf(buf, "CPU[%04x]: " 898 "QW NSR CPPR IPB LSMFB - LGS T PIPR" 899 " W2\n", cpu_index); 900 } 901 902 for (i = 0; i < XIVE_TM_RING_COUNT; i++) { 903 char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]); 904 g_string_append_printf(buf, "CPU[%04x]: %4s %s\n", 905 cpu_index, xive_tctx_ring_names[i], s); 906 g_free(s); 907 } 908 } 909 910 void xive_tctx_reset(XiveTCTX *tctx) 911 { 912 memset(tctx->regs, 0, sizeof(tctx->regs)); 913 914 /* Set some defaults */ 915 tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; 916 tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; 917 tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; 918 if (!(xive_presenter_get_config(tctx->xptr) & 919 XIVE_PRESENTER_GEN1_TIMA_OS)) { 920 tctx->regs[TM_QW1_OS + TM_OGEN] = 2; 921 } 922 923 /* 924 * Initialize PIPR to 0xFF to avoid phantom interrupts when the 925 * CPPR is first set. 926 */ 927 tctx->regs[TM_QW1_OS + TM_PIPR] = 928 xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); 929 tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = 930 xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); 931 } 932 933 static void xive_tctx_realize(DeviceState *dev, Error **errp) 934 { 935 XiveTCTX *tctx = XIVE_TCTX(dev); 936 PowerPCCPU *cpu; 937 CPUPPCState *env; 938 939 assert(tctx->cs); 940 assert(tctx->xptr); 941 942 cpu = POWERPC_CPU(tctx->cs); 943 env = &cpu->env; 944 switch (PPC_INPUT(env)) { 945 case PPC_FLAGS_INPUT_POWER9: 946 tctx->hv_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_HINT); 947 tctx->os_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT); 948 break; 949 950 default: 951 error_setg(errp, "XIVE interrupt controller does not support " 952 "this CPU bus model"); 953 return; 954 } 955 956 /* Connect the presenter to the VCPU (required for CPU hotplug) */ 957 if (xive_in_kernel(tctx->xptr)) { 958 if (kvmppc_xive_cpu_connect(tctx, errp) < 0) { 959 return; 960 } 961 } 962 } 963 964 static int vmstate_xive_tctx_pre_save(void *opaque) 965 { 966 XiveTCTX *tctx = XIVE_TCTX(opaque); 967 Error *local_err = NULL; 968 int ret; 969 970 if (xive_in_kernel(tctx->xptr)) { 971 ret = kvmppc_xive_cpu_get_state(tctx, &local_err); 972 if (ret < 0) { 973 error_report_err(local_err); 974 return ret; 975 } 976 } 977 978 return 0; 979 } 980 981 static int vmstate_xive_tctx_post_load(void *opaque, int version_id) 982 { 983 XiveTCTX *tctx = XIVE_TCTX(opaque); 984 Error *local_err = NULL; 985 int ret; 986 987 if (xive_in_kernel(tctx->xptr)) { 988 /* 989 * Required for hotplugged CPU, for which the state comes 990 * after all states of the machine. 991 */ 992 ret = kvmppc_xive_cpu_set_state(tctx, &local_err); 993 if (ret < 0) { 994 error_report_err(local_err); 995 return ret; 996 } 997 } 998 999 return 0; 1000 } 1001 1002 static const VMStateDescription vmstate_xive_tctx = { 1003 .name = TYPE_XIVE_TCTX, 1004 .version_id = 1, 1005 .minimum_version_id = 1, 1006 .pre_save = vmstate_xive_tctx_pre_save, 1007 .post_load = vmstate_xive_tctx_post_load, 1008 .fields = (const VMStateField[]) { 1009 VMSTATE_BUFFER(regs, XiveTCTX), 1010 VMSTATE_END_OF_LIST() 1011 }, 1012 }; 1013 1014 static const Property xive_tctx_properties[] = { 1015 DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *), 1016 DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER, 1017 XivePresenter *), 1018 }; 1019 1020 static void xive_tctx_class_init(ObjectClass *klass, const void *data) 1021 { 1022 DeviceClass *dc = DEVICE_CLASS(klass); 1023 1024 dc->desc = "XIVE Interrupt Thread Context"; 1025 dc->realize = xive_tctx_realize; 1026 dc->vmsd = &vmstate_xive_tctx; 1027 device_class_set_props(dc, xive_tctx_properties); 1028 /* 1029 * Reason: part of XIVE interrupt controller, needs to be wired up 1030 * by xive_tctx_create(). 1031 */ 1032 dc->user_creatable = false; 1033 } 1034 1035 static const TypeInfo xive_tctx_info = { 1036 .name = TYPE_XIVE_TCTX, 1037 .parent = TYPE_DEVICE, 1038 .instance_size = sizeof(XiveTCTX), 1039 .class_init = xive_tctx_class_init, 1040 }; 1041 1042 Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp) 1043 { 1044 Object *obj; 1045 1046 obj = object_new(TYPE_XIVE_TCTX); 1047 object_property_add_child(cpu, TYPE_XIVE_TCTX, obj); 1048 object_unref(obj); 1049 object_property_set_link(obj, "cpu", cpu, &error_abort); 1050 object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort); 1051 if (!qdev_realize(DEVICE(obj), NULL, errp)) { 1052 object_unparent(obj); 1053 return NULL; 1054 } 1055 return obj; 1056 } 1057 1058 void xive_tctx_destroy(XiveTCTX *tctx) 1059 { 1060 Object *obj = OBJECT(tctx); 1061 1062 object_unparent(obj); 1063 } 1064 1065 /* 1066 * XIVE ESB helpers 1067 */ 1068 1069 uint8_t xive_esb_set(uint8_t *pq, uint8_t value) 1070 { 1071 uint8_t old_pq = *pq & 0x3; 1072 1073 *pq &= ~0x3; 1074 *pq |= value & 0x3; 1075 1076 return old_pq; 1077 } 1078 1079 bool xive_esb_trigger(uint8_t *pq) 1080 { 1081 uint8_t old_pq = *pq & 0x3; 1082 1083 switch (old_pq) { 1084 case XIVE_ESB_RESET: 1085 xive_esb_set(pq, XIVE_ESB_PENDING); 1086 return true; 1087 case XIVE_ESB_PENDING: 1088 case XIVE_ESB_QUEUED: 1089 xive_esb_set(pq, XIVE_ESB_QUEUED); 1090 return false; 1091 case XIVE_ESB_OFF: 1092 xive_esb_set(pq, XIVE_ESB_OFF); 1093 return false; 1094 default: 1095 g_assert_not_reached(); 1096 } 1097 } 1098 1099 bool xive_esb_eoi(uint8_t *pq) 1100 { 1101 uint8_t old_pq = *pq & 0x3; 1102 1103 switch (old_pq) { 1104 case XIVE_ESB_RESET: 1105 case XIVE_ESB_PENDING: 1106 xive_esb_set(pq, XIVE_ESB_RESET); 1107 return false; 1108 case XIVE_ESB_QUEUED: 1109 xive_esb_set(pq, XIVE_ESB_PENDING); 1110 return true; 1111 case XIVE_ESB_OFF: 1112 xive_esb_set(pq, XIVE_ESB_OFF); 1113 return false; 1114 default: 1115 g_assert_not_reached(); 1116 } 1117 } 1118 1119 /* 1120 * XIVE Interrupt Source (or IVSE) 1121 */ 1122 1123 uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno) 1124 { 1125 assert(srcno < xsrc->nr_irqs); 1126 1127 return xsrc->status[srcno] & 0x3; 1128 } 1129 1130 uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq) 1131 { 1132 assert(srcno < xsrc->nr_irqs); 1133 1134 return xive_esb_set(&xsrc->status[srcno], pq); 1135 } 1136 1137 /* 1138 * Returns whether the event notification should be forwarded. 1139 */ 1140 static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) 1141 { 1142 uint8_t old_pq = xive_source_esb_get(xsrc, srcno); 1143 1144 xive_source_set_asserted(xsrc, srcno, true); 1145 1146 switch (old_pq) { 1147 case XIVE_ESB_RESET: 1148 xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING); 1149 return true; 1150 default: 1151 return false; 1152 } 1153 } 1154 1155 /* 1156 * Sources can be configured with PQ offloading in which case the check 1157 * on the PQ state bits of MSIs is disabled 1158 */ 1159 static bool xive_source_esb_disabled(XiveSource *xsrc, uint32_t srcno) 1160 { 1161 return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) && 1162 !xive_source_irq_is_lsi(xsrc, srcno); 1163 } 1164 1165 /* 1166 * Returns whether the event notification should be forwarded. 1167 */ 1168 static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) 1169 { 1170 bool ret; 1171 1172 assert(srcno < xsrc->nr_irqs); 1173 1174 if (xive_source_esb_disabled(xsrc, srcno)) { 1175 return true; 1176 } 1177 1178 ret = xive_esb_trigger(&xsrc->status[srcno]); 1179 1180 if (xive_source_irq_is_lsi(xsrc, srcno) && 1181 xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) { 1182 qemu_log_mask(LOG_GUEST_ERROR, 1183 "XIVE: queued an event on LSI IRQ %d\n", srcno); 1184 } 1185 1186 return ret; 1187 } 1188 1189 /* 1190 * Returns whether the event notification should be forwarded. 1191 */ 1192 static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) 1193 { 1194 bool ret; 1195 1196 assert(srcno < xsrc->nr_irqs); 1197 1198 if (xive_source_esb_disabled(xsrc, srcno)) { 1199 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EOI for IRQ %d\n", srcno); 1200 return false; 1201 } 1202 1203 ret = xive_esb_eoi(&xsrc->status[srcno]); 1204 1205 /* 1206 * LSI sources do not set the Q bit but they can still be 1207 * asserted, in which case we should forward a new event 1208 * notification 1209 */ 1210 if (xive_source_irq_is_lsi(xsrc, srcno) && 1211 xive_source_is_asserted(xsrc, srcno)) { 1212 ret = xive_source_lsi_trigger(xsrc, srcno); 1213 } 1214 1215 return ret; 1216 } 1217 1218 /* 1219 * Forward the source event notification to the Router 1220 */ 1221 static void xive_source_notify(XiveSource *xsrc, int srcno) 1222 { 1223 XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); 1224 bool pq_checked = !xive_source_esb_disabled(xsrc, srcno); 1225 1226 if (xnc->notify) { 1227 xnc->notify(xsrc->xive, srcno, pq_checked); 1228 } 1229 } 1230 1231 /* 1232 * In a two pages ESB MMIO setting, even page is the trigger page, odd 1233 * page is for management 1234 */ 1235 static inline bool addr_is_even(hwaddr addr, uint32_t shift) 1236 { 1237 return !((addr >> shift) & 1); 1238 } 1239 1240 static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr) 1241 { 1242 return xive_source_esb_has_2page(xsrc) && 1243 addr_is_even(addr, xsrc->esb_shift - 1); 1244 } 1245 1246 /* 1247 * ESB MMIO loads 1248 * Trigger page Management/EOI page 1249 * 1250 * ESB MMIO setting 2 pages 1 or 2 pages 1251 * 1252 * 0x000 .. 0x3FF -1 EOI and return 0|1 1253 * 0x400 .. 0x7FF -1 EOI and return 0|1 1254 * 0x800 .. 0xBFF -1 return PQ 1255 * 0xC00 .. 0xCFF -1 return PQ and atomically PQ=00 1256 * 0xD00 .. 0xDFF -1 return PQ and atomically PQ=01 1257 * 0xE00 .. 0xDFF -1 return PQ and atomically PQ=10 1258 * 0xF00 .. 0xDFF -1 return PQ and atomically PQ=11 1259 */ 1260 static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) 1261 { 1262 XiveSource *xsrc = XIVE_SOURCE(opaque); 1263 uint32_t offset = addr & 0xFFF; 1264 uint32_t srcno = addr >> xsrc->esb_shift; 1265 uint64_t ret = -1; 1266 1267 /* In a two pages ESB MMIO setting, trigger page should not be read */ 1268 if (xive_source_is_trigger_page(xsrc, addr)) { 1269 qemu_log_mask(LOG_GUEST_ERROR, 1270 "XIVE: invalid load on IRQ %d trigger page at " 1271 "0x%"HWADDR_PRIx"\n", srcno, addr); 1272 return -1; 1273 } 1274 1275 switch (offset) { 1276 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: 1277 ret = xive_source_esb_eoi(xsrc, srcno); 1278 1279 /* Forward the source event notification for routing */ 1280 if (ret) { 1281 trace_xive_source_notify(srcno); 1282 xive_source_notify(xsrc, srcno); 1283 } 1284 break; 1285 1286 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: 1287 ret = xive_source_esb_get(xsrc, srcno); 1288 break; 1289 1290 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 1291 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 1292 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 1293 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 1294 ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); 1295 break; 1296 default: 1297 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n", 1298 offset); 1299 } 1300 1301 trace_xive_source_esb_read(addr, srcno, ret); 1302 1303 return ret; 1304 } 1305 1306 /* 1307 * ESB MMIO stores 1308 * Trigger page Management/EOI page 1309 * 1310 * ESB MMIO setting 2 pages 1 or 2 pages 1311 * 1312 * 0x000 .. 0x3FF Trigger Trigger 1313 * 0x400 .. 0x7FF Trigger EOI 1314 * 0x800 .. 0xBFF Trigger undefined 1315 * 0xC00 .. 0xCFF Trigger PQ=00 1316 * 0xD00 .. 0xDFF Trigger PQ=01 1317 * 0xE00 .. 0xDFF Trigger PQ=10 1318 * 0xF00 .. 0xDFF Trigger PQ=11 1319 */ 1320 static void xive_source_esb_write(void *opaque, hwaddr addr, 1321 uint64_t value, unsigned size) 1322 { 1323 XiveSource *xsrc = XIVE_SOURCE(opaque); 1324 uint32_t offset = addr & 0xFFF; 1325 uint32_t srcno = addr >> xsrc->esb_shift; 1326 bool notify = false; 1327 1328 trace_xive_source_esb_write(addr, srcno, value); 1329 1330 /* In a two pages ESB MMIO setting, trigger page only triggers */ 1331 if (xive_source_is_trigger_page(xsrc, addr)) { 1332 notify = xive_source_esb_trigger(xsrc, srcno); 1333 goto out; 1334 } 1335 1336 switch (offset) { 1337 case 0 ... 0x3FF: 1338 notify = xive_source_esb_trigger(xsrc, srcno); 1339 break; 1340 1341 case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF: 1342 if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) { 1343 qemu_log_mask(LOG_GUEST_ERROR, 1344 "XIVE: invalid Store EOI for IRQ %d\n", srcno); 1345 return; 1346 } 1347 1348 notify = xive_source_esb_eoi(xsrc, srcno); 1349 break; 1350 1351 /* 1352 * This is an internal offset used to inject triggers when the PQ 1353 * state bits are not controlled locally. Such as for LSIs when 1354 * under ABT mode. 1355 */ 1356 case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF: 1357 notify = true; 1358 break; 1359 1360 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 1361 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 1362 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 1363 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 1364 xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); 1365 break; 1366 1367 default: 1368 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n", 1369 offset); 1370 return; 1371 } 1372 1373 out: 1374 /* Forward the source event notification for routing */ 1375 if (notify) { 1376 xive_source_notify(xsrc, srcno); 1377 } else { 1378 trace_xive_source_blocked(srcno); 1379 } 1380 } 1381 1382 static const MemoryRegionOps xive_source_esb_ops = { 1383 .read = xive_source_esb_read, 1384 .write = xive_source_esb_write, 1385 .endianness = DEVICE_BIG_ENDIAN, 1386 .valid = { 1387 .min_access_size = 1, 1388 .max_access_size = 8, 1389 }, 1390 .impl = { 1391 .min_access_size = 1, 1392 .max_access_size = 8, 1393 }, 1394 }; 1395 1396 void xive_source_set_irq(void *opaque, int srcno, int val) 1397 { 1398 XiveSource *xsrc = XIVE_SOURCE(opaque); 1399 bool notify = false; 1400 1401 if (xive_source_irq_is_lsi(xsrc, srcno)) { 1402 if (val) { 1403 notify = xive_source_lsi_trigger(xsrc, srcno); 1404 } else { 1405 xive_source_set_asserted(xsrc, srcno, false); 1406 } 1407 } else { 1408 if (val) { 1409 notify = xive_source_esb_trigger(xsrc, srcno); 1410 } 1411 } 1412 1413 /* Forward the source event notification for routing */ 1414 if (notify) { 1415 xive_source_notify(xsrc, srcno); 1416 } 1417 } 1418 1419 void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, GString *buf) 1420 { 1421 for (unsigned i = 0; i < xsrc->nr_irqs; i++) { 1422 uint8_t pq = xive_source_esb_get(xsrc, i); 1423 1424 if (pq == XIVE_ESB_OFF) { 1425 continue; 1426 } 1427 1428 g_string_append_printf(buf, " %08x %s %c%c%c\n", i + offset, 1429 xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", 1430 pq & XIVE_ESB_VAL_P ? 'P' : '-', 1431 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 1432 xive_source_is_asserted(xsrc, i) ? 'A' : ' '); 1433 } 1434 } 1435 1436 static void xive_source_reset(void *dev) 1437 { 1438 XiveSource *xsrc = XIVE_SOURCE(dev); 1439 1440 /* Do not clear the LSI bitmap */ 1441 1442 memset(xsrc->status, xsrc->reset_pq, xsrc->nr_irqs); 1443 } 1444 1445 static void xive_source_realize(DeviceState *dev, Error **errp) 1446 { 1447 XiveSource *xsrc = XIVE_SOURCE(dev); 1448 uint64_t esb_len = xive_source_esb_len(xsrc); 1449 1450 assert(xsrc->xive); 1451 1452 if (!xsrc->nr_irqs) { 1453 error_setg(errp, "Number of interrupt needs to be greater than 0"); 1454 return; 1455 } 1456 1457 if (xsrc->esb_shift != XIVE_ESB_4K && 1458 xsrc->esb_shift != XIVE_ESB_4K_2PAGE && 1459 xsrc->esb_shift != XIVE_ESB_64K && 1460 xsrc->esb_shift != XIVE_ESB_64K_2PAGE) { 1461 error_setg(errp, "Invalid ESB shift setting"); 1462 return; 1463 } 1464 1465 xsrc->status = g_malloc0(xsrc->nr_irqs); 1466 xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); 1467 1468 memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len); 1469 memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc), 1470 &xive_source_esb_ops, xsrc, "xive.esb-emulated", 1471 esb_len); 1472 memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated); 1473 1474 qemu_register_reset(xive_source_reset, dev); 1475 } 1476 1477 static const VMStateDescription vmstate_xive_source = { 1478 .name = TYPE_XIVE_SOURCE, 1479 .version_id = 1, 1480 .minimum_version_id = 1, 1481 .fields = (const VMStateField[]) { 1482 VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), 1483 VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), 1484 VMSTATE_END_OF_LIST() 1485 }, 1486 }; 1487 1488 /* 1489 * The default XIVE interrupt source setting for the ESB MMIOs is two 1490 * 64k pages without Store EOI, to be in sync with KVM. 1491 */ 1492 static const Property xive_source_properties[] = { 1493 DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0), 1494 DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0), 1495 DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE), 1496 /* 1497 * By default, PQs are initialized to 0b01 (Q=1) which corresponds 1498 * to "ints off" 1499 */ 1500 DEFINE_PROP_UINT8("reset-pq", XiveSource, reset_pq, XIVE_ESB_OFF), 1501 DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER, 1502 XiveNotifier *), 1503 }; 1504 1505 static void xive_source_class_init(ObjectClass *klass, const void *data) 1506 { 1507 DeviceClass *dc = DEVICE_CLASS(klass); 1508 1509 dc->desc = "XIVE Interrupt Source"; 1510 device_class_set_props(dc, xive_source_properties); 1511 dc->realize = xive_source_realize; 1512 dc->vmsd = &vmstate_xive_source; 1513 /* 1514 * Reason: part of XIVE interrupt controller, needs to be wired up, 1515 * e.g. by spapr_xive_instance_init(). 1516 */ 1517 dc->user_creatable = false; 1518 } 1519 1520 static const TypeInfo xive_source_info = { 1521 .name = TYPE_XIVE_SOURCE, 1522 .parent = TYPE_DEVICE, 1523 .instance_size = sizeof(XiveSource), 1524 .class_init = xive_source_class_init, 1525 }; 1526 1527 /* 1528 * XiveEND helpers 1529 */ 1530 1531 void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, GString *buf) 1532 { 1533 uint64_t qaddr_base = xive_end_qaddr(end); 1534 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1535 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1536 uint32_t qentries = 1 << (qsize + 10); 1537 int i; 1538 1539 /* 1540 * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window 1541 */ 1542 g_string_append_printf(buf, " [ "); 1543 qindex = (qindex - (width - 1)) & (qentries - 1); 1544 for (i = 0; i < width; i++) { 1545 uint64_t qaddr = qaddr_base + (qindex << 2); 1546 uint32_t qdata = -1; 1547 1548 if (dma_memory_read(&address_space_memory, qaddr, 1549 &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { 1550 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" 1551 HWADDR_PRIx "\n", qaddr); 1552 return; 1553 } 1554 g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "", 1555 be32_to_cpu(qdata)); 1556 qindex = (qindex + 1) & (qentries - 1); 1557 } 1558 g_string_append_c(buf, ']'); 1559 } 1560 1561 void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, GString *buf) 1562 { 1563 uint64_t qaddr_base = xive_end_qaddr(end); 1564 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1565 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); 1566 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1567 uint32_t qentries = 1 << (qsize + 10); 1568 1569 uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6); 1570 uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6); 1571 uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); 1572 uint8_t pq; 1573 1574 if (!xive_end_is_valid(end)) { 1575 return; 1576 } 1577 1578 pq = xive_get_field32(END_W1_ESn, end->w1); 1579 1580 g_string_append_printf(buf, 1581 " %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x", 1582 end_idx, 1583 pq & XIVE_ESB_VAL_P ? 'P' : '-', 1584 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 1585 xive_end_is_valid(end) ? 'v' : '-', 1586 xive_end_is_enqueue(end) ? 'q' : '-', 1587 xive_end_is_notify(end) ? 'n' : '-', 1588 xive_end_is_backlog(end) ? 'b' : '-', 1589 xive_end_is_escalate(end) ? 'e' : '-', 1590 xive_end_is_uncond_escalation(end) ? 'u' : '-', 1591 xive_end_is_silent_escalation(end) ? 's' : '-', 1592 xive_end_is_firmware(end) ? 'f' : '-', 1593 priority, nvt_blk, nvt_idx); 1594 1595 if (qaddr_base) { 1596 g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d", 1597 qaddr_base, qindex, qentries, qgen); 1598 xive_end_queue_pic_print_info(end, 6, buf); 1599 } 1600 g_string_append_c(buf, '\n'); 1601 } 1602 1603 static void xive_end_enqueue(XiveEND *end, uint32_t data) 1604 { 1605 uint64_t qaddr_base = xive_end_qaddr(end); 1606 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1607 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1608 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); 1609 1610 uint64_t qaddr = qaddr_base + (qindex << 2); 1611 uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); 1612 uint32_t qentries = 1 << (qsize + 10); 1613 1614 if (dma_memory_write(&address_space_memory, qaddr, 1615 &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { 1616 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" 1617 HWADDR_PRIx "\n", qaddr); 1618 return; 1619 } 1620 1621 qindex = (qindex + 1) & (qentries - 1); 1622 if (qindex == 0) { 1623 qgen ^= 1; 1624 end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen); 1625 } 1626 end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); 1627 } 1628 1629 void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, GString *buf) 1630 { 1631 XiveEAS *eas = (XiveEAS *) &end->w4; 1632 uint8_t pq; 1633 1634 if (!xive_end_is_escalate(end)) { 1635 return; 1636 } 1637 1638 pq = xive_get_field32(END_W1_ESe, end->w1); 1639 1640 g_string_append_printf(buf, " %08x %c%c %c%c end:%02x/%04x data:%08x\n", 1641 end_idx, 1642 pq & XIVE_ESB_VAL_P ? 'P' : '-', 1643 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 1644 xive_eas_is_valid(eas) ? 'V' : ' ', 1645 xive_eas_is_masked(eas) ? 'M' : ' ', 1646 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), 1647 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), 1648 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); 1649 } 1650 1651 /* 1652 * XIVE Router (aka. Virtualization Controller or IVRE) 1653 */ 1654 1655 int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1656 XiveEAS *eas) 1657 { 1658 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1659 1660 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); 1661 } 1662 1663 static 1664 int xive_router_get_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1665 uint8_t *pq) 1666 { 1667 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1668 1669 return xrc->get_pq(xrtr, eas_blk, eas_idx, pq); 1670 } 1671 1672 static 1673 int xive_router_set_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1674 uint8_t *pq) 1675 { 1676 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1677 1678 return xrc->set_pq(xrtr, eas_blk, eas_idx, pq); 1679 } 1680 1681 int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, 1682 XiveEND *end) 1683 { 1684 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1685 1686 return xrc->get_end(xrtr, end_blk, end_idx, end); 1687 } 1688 1689 int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, 1690 XiveEND *end, uint8_t word_number) 1691 { 1692 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1693 1694 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); 1695 } 1696 1697 int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, 1698 XiveNVT *nvt) 1699 { 1700 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1701 1702 return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt); 1703 } 1704 1705 int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, 1706 XiveNVT *nvt, uint8_t word_number) 1707 { 1708 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1709 1710 return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number); 1711 } 1712 1713 static int xive_router_get_block_id(XiveRouter *xrtr) 1714 { 1715 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1716 1717 return xrc->get_block_id(xrtr); 1718 } 1719 1720 static void xive_router_realize(DeviceState *dev, Error **errp) 1721 { 1722 XiveRouter *xrtr = XIVE_ROUTER(dev); 1723 1724 assert(xrtr->xfb); 1725 } 1726 1727 static void xive_router_end_notify_handler(XiveRouter *xrtr, XiveEAS *eas) 1728 { 1729 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1730 1731 return xrc->end_notify(xrtr, eas); 1732 } 1733 1734 /* 1735 * Encode the HW CAM line in the block group mode format : 1736 * 1737 * chip << 19 | 0000000 0 0001 thread (7Bit) 1738 */ 1739 static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) 1740 { 1741 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 1742 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 1743 uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr)); 1744 1745 return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f)); 1746 } 1747 1748 uint32_t xive_get_vpgroup_size(uint32_t nvp_index) 1749 { 1750 /* 1751 * Group size is a power of 2. The position of the first 0 1752 * (starting with the least significant bits) in the NVP index 1753 * gives the size of the group. 1754 */ 1755 int first_zero = cto32(nvp_index); 1756 if (first_zero >= 31) { 1757 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x", 1758 nvp_index); 1759 return 0; 1760 } 1761 1762 return 1U << (first_zero + 1); 1763 } 1764 1765 uint8_t xive_get_group_level(bool crowd, bool ignore, 1766 uint32_t nvp_blk, uint32_t nvp_index) 1767 { 1768 int first_zero; 1769 uint8_t level; 1770 1771 if (!ignore) { 1772 g_assert(!crowd); 1773 return 0; 1774 } 1775 1776 first_zero = cto32(nvp_index); 1777 if (first_zero >= 31) { 1778 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x", 1779 nvp_index); 1780 return 0; 1781 } 1782 1783 level = (first_zero + 1) & 0b1111; 1784 if (crowd) { 1785 uint32_t blk; 1786 1787 /* crowd level is bit position of first 0 from the right in nvp_blk */ 1788 first_zero = cto32(nvp_blk); 1789 if (first_zero >= 31) { 1790 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd block 0x%08x", 1791 nvp_blk); 1792 return 0; 1793 } 1794 blk = first_zero + 1; 1795 1796 /* 1797 * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported. 1798 * HW will encode level 4 as the value 3. See xive2_pgofnext(). 1799 */ 1800 switch (blk) { 1801 case 1: 1802 case 2: 1803 break; 1804 case 4: 1805 blk = 3; 1806 break; 1807 default: 1808 g_assert_not_reached(); 1809 } 1810 1811 /* Crowd level bits reside in upper 2 bits of the 6 bit group level */ 1812 level |= blk << 4; 1813 } 1814 return level; 1815 } 1816 1817 /* 1818 * The thread context register words are in big-endian format. 1819 */ 1820 int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, 1821 uint8_t format, 1822 uint8_t nvt_blk, uint32_t nvt_idx, 1823 bool cam_ignore, uint32_t logic_serv) 1824 { 1825 uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx); 1826 uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); 1827 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); 1828 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); 1829 uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); 1830 1831 /* 1832 * TODO (PowerNV): ignore mode. The low order bits of the NVT 1833 * identifier are ignored in the "CAM" match. 1834 */ 1835 1836 if (format == 0) { 1837 if (cam_ignore == true) { 1838 /* 1839 * F=0 & i=1: Logical server notification (bits ignored at 1840 * the end of the NVT identifier) 1841 */ 1842 qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", 1843 nvt_blk, nvt_idx); 1844 return -1; 1845 } 1846 1847 /* F=0 & i=0: Specific NVT notification */ 1848 1849 /* PHYS ring */ 1850 if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) && 1851 cam == xive_tctx_hw_cam_line(xptr, tctx)) { 1852 return TM_QW3_HV_PHYS; 1853 } 1854 1855 /* HV POOL ring */ 1856 if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) && 1857 cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) { 1858 return TM_QW2_HV_POOL; 1859 } 1860 1861 /* OS ring */ 1862 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && 1863 cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) { 1864 return TM_QW1_OS; 1865 } 1866 } else { 1867 /* F=1 : User level Event-Based Branch (EBB) notification */ 1868 1869 /* USER ring */ 1870 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && 1871 (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) && 1872 (be32_to_cpu(qw0w2) & TM_QW0W2_VU) && 1873 (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) { 1874 return TM_QW0_USER; 1875 } 1876 } 1877 return -1; 1878 } 1879 1880 /* 1881 * This is our simple Xive Presenter Engine model. It is merged in the 1882 * Router as it does not require an extra object. 1883 */ 1884 bool xive_presenter_match(XiveFabric *xfb, uint8_t format, 1885 uint8_t nvt_blk, uint32_t nvt_idx, 1886 bool crowd, bool cam_ignore, uint8_t priority, 1887 uint32_t logic_serv, XiveTCTXMatch *match) 1888 { 1889 XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); 1890 1891 memset(match, 0, sizeof(*match)); 1892 1893 /* 1894 * Ask the machine to scan the interrupt controllers for a match. 1895 * 1896 * For VP-specific notification, we expect at most one match and 1897 * one call to the presenters is all we need (abbreviated notify 1898 * sequence documented by the architecture). 1899 * 1900 * For VP-group notification, match_nvt() is the equivalent of the 1901 * "histogram" and "poll" commands sent to the power bus to the 1902 * presenters. 'count' could be more than one, but we always 1903 * select the first match for now. 'precluded' tells if (at least) 1904 * one thread matches but can't take the interrupt now because 1905 * it's running at a more favored priority. We return the 1906 * information to the router so that it can take appropriate 1907 * actions (backlog, escalation, broadcast, etc...) 1908 * 1909 * If we were to implement a better way of dispatching the 1910 * interrupt in case of multiple matches (instead of the first 1911 * match), we would need a heuristic to elect a thread (for 1912 * example, the hardware keeps track of an 'age' in the TIMA) and 1913 * a new command to the presenters (the equivalent of the "assign" 1914 * power bus command in the documented full notify sequence. 1915 */ 1916 return xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, 1917 priority, logic_serv, match); 1918 } 1919 1920 /* 1921 * Notification using the END ESe/ESn bit (Event State Buffer for 1922 * escalation and notification). Provide further coalescing in the 1923 * Router. 1924 */ 1925 static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk, 1926 uint32_t end_idx, XiveEND *end, 1927 uint32_t end_esmask) 1928 { 1929 uint8_t pq = xive_get_field32(end_esmask, end->w1); 1930 bool notify = xive_esb_trigger(&pq); 1931 1932 if (pq != xive_get_field32(end_esmask, end->w1)) { 1933 end->w1 = xive_set_field32(end_esmask, end->w1, pq); 1934 xive_router_write_end(xrtr, end_blk, end_idx, end, 1); 1935 } 1936 1937 /* ESe/n[Q]=1 : end of notification */ 1938 return notify; 1939 } 1940 1941 /* 1942 * An END trigger can come from an event trigger (IPI or HW) or from 1943 * another chip. We don't model the PowerBus but the END trigger 1944 * message has the same parameters than in the function below. 1945 */ 1946 void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) 1947 { 1948 XiveEND end; 1949 uint8_t priority; 1950 uint8_t format; 1951 uint8_t nvt_blk; 1952 uint32_t nvt_idx; 1953 XiveNVT nvt; 1954 XiveTCTXMatch match; 1955 1956 uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); 1957 uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); 1958 uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w); 1959 1960 /* END cache lookup */ 1961 if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) { 1962 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, 1963 end_idx); 1964 return; 1965 } 1966 1967 if (!xive_end_is_valid(&end)) { 1968 trace_xive_router_end_notify(end_blk, end_idx, end_data); 1969 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", 1970 end_blk, end_idx); 1971 return; 1972 } 1973 1974 if (xive_end_is_enqueue(&end)) { 1975 xive_end_enqueue(&end, end_data); 1976 /* Enqueuing event data modifies the EQ toggle and index */ 1977 xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); 1978 } 1979 1980 /* 1981 * When the END is silent, we skip the notification part. 1982 */ 1983 if (xive_end_is_silent_escalation(&end)) { 1984 goto do_escalation; 1985 } 1986 1987 /* 1988 * The W7 format depends on the F bit in W6. It defines the type 1989 * of the notification : 1990 * 1991 * F=0 : single or multiple NVT notification 1992 * F=1 : User level Event-Based Branch (EBB) notification, no 1993 * priority 1994 */ 1995 format = xive_get_field32(END_W6_FORMAT_BIT, end.w6); 1996 priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7); 1997 1998 /* The END is masked */ 1999 if (format == 0 && priority == 0xff) { 2000 return; 2001 } 2002 2003 /* 2004 * Check the END ESn (Event State Buffer for notification) for 2005 * even further coalescing in the Router 2006 */ 2007 if (!xive_end_is_notify(&end)) { 2008 /* ESn[Q]=1 : end of notification */ 2009 if (!xive_router_end_es_notify(xrtr, end_blk, end_idx, 2010 &end, END_W1_ESn)) { 2011 return; 2012 } 2013 } 2014 2015 /* 2016 * Follows IVPE notification 2017 */ 2018 nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6); 2019 nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6); 2020 2021 /* NVT cache lookup */ 2022 if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { 2023 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n", 2024 nvt_blk, nvt_idx); 2025 return; 2026 } 2027 2028 if (!xive_nvt_is_valid(&nvt)) { 2029 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n", 2030 nvt_blk, nvt_idx); 2031 return; 2032 } 2033 2034 /* TODO: Auto EOI. */ 2035 /* we don't support VP-group notification on P9, so precluded is not used */ 2036 if (xive_presenter_match(xrtr->xfb, format, nvt_blk, nvt_idx, 2037 false /* crowd */, 2038 xive_get_field32(END_W7_F0_IGNORE, end.w7), 2039 priority, 2040 xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), 2041 &match)) { 2042 trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, 0); 2043 xive_tctx_pipr_update(match.tctx, match.ring, priority, 0); 2044 return; 2045 } 2046 2047 /* 2048 * If no matching NVT is dispatched on a HW thread : 2049 * - specific VP: update the NVT structure if backlog is activated 2050 * - logical server : forward request to IVPE (not supported) 2051 */ 2052 if (xive_end_is_backlog(&end)) { 2053 uint8_t ipb; 2054 2055 if (format == 1) { 2056 qemu_log_mask(LOG_GUEST_ERROR, 2057 "XIVE: END %x/%x invalid config: F1 & backlog\n", 2058 end_blk, end_idx); 2059 return; 2060 } 2061 /* 2062 * Record the IPB in the associated NVT structure for later 2063 * use. The presenter will resend the interrupt when the vCPU 2064 * is dispatched again on a HW thread. 2065 */ 2066 ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | 2067 xive_priority_to_ipb(priority); 2068 nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb); 2069 xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); 2070 2071 /* 2072 * On HW, follows a "Broadcast Backlog" to IVPEs 2073 */ 2074 } 2075 2076 do_escalation: 2077 /* 2078 * If activated, escalate notification using the ESe PQ bits and 2079 * the EAS in w4-5 2080 */ 2081 if (!xive_end_is_escalate(&end)) { 2082 return; 2083 } 2084 2085 /* 2086 * Check the END ESe (Event State Buffer for escalation) for even 2087 * further coalescing in the Router 2088 */ 2089 if (!xive_end_is_uncond_escalation(&end)) { 2090 /* ESe[Q]=1 : end of notification */ 2091 if (!xive_router_end_es_notify(xrtr, end_blk, end_idx, 2092 &end, END_W1_ESe)) { 2093 return; 2094 } 2095 } 2096 2097 trace_xive_router_end_escalate(end_blk, end_idx, 2098 (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4), 2099 (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4), 2100 (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5)); 2101 /* 2102 * The END trigger becomes an Escalation trigger 2103 */ 2104 xive_router_end_notify_handler(xrtr, (XiveEAS *) &end.w4); 2105 } 2106 2107 void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) 2108 { 2109 XiveRouter *xrtr = XIVE_ROUTER(xn); 2110 uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); 2111 uint32_t eas_idx = XIVE_EAS_INDEX(lisn); 2112 XiveEAS eas; 2113 2114 /* EAS cache lookup */ 2115 if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { 2116 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn); 2117 return; 2118 } 2119 2120 if (!pq_checked) { 2121 bool notify; 2122 uint8_t pq; 2123 2124 /* PQ cache lookup */ 2125 if (xive_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) { 2126 /* Set FIR */ 2127 g_assert_not_reached(); 2128 } 2129 2130 notify = xive_esb_trigger(&pq); 2131 2132 if (xive_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) { 2133 /* Set FIR */ 2134 g_assert_not_reached(); 2135 } 2136 2137 if (!notify) { 2138 return; 2139 } 2140 } 2141 2142 if (!xive_eas_is_valid(&eas)) { 2143 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn); 2144 return; 2145 } 2146 2147 if (xive_eas_is_masked(&eas)) { 2148 /* Notification completed */ 2149 return; 2150 } 2151 2152 /* 2153 * The event trigger becomes an END trigger 2154 */ 2155 xive_router_end_notify_handler(xrtr, &eas); 2156 } 2157 2158 static const Property xive_router_properties[] = { 2159 DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb, 2160 TYPE_XIVE_FABRIC, XiveFabric *), 2161 }; 2162 2163 static void xive_router_class_init(ObjectClass *klass, const void *data) 2164 { 2165 DeviceClass *dc = DEVICE_CLASS(klass); 2166 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 2167 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); 2168 2169 dc->desc = "XIVE Router Engine"; 2170 device_class_set_props(dc, xive_router_properties); 2171 /* Parent is SysBusDeviceClass. No need to call its realize hook */ 2172 dc->realize = xive_router_realize; 2173 xnc->notify = xive_router_notify; 2174 2175 /* By default, the router handles END triggers locally */ 2176 xrc->end_notify = xive_router_end_notify; 2177 } 2178 2179 static const TypeInfo xive_router_info = { 2180 .name = TYPE_XIVE_ROUTER, 2181 .parent = TYPE_SYS_BUS_DEVICE, 2182 .abstract = true, 2183 .instance_size = sizeof(XiveRouter), 2184 .class_size = sizeof(XiveRouterClass), 2185 .class_init = xive_router_class_init, 2186 .interfaces = (const InterfaceInfo[]) { 2187 { TYPE_XIVE_NOTIFIER }, 2188 { TYPE_XIVE_PRESENTER }, 2189 { } 2190 } 2191 }; 2192 2193 void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, GString *buf) 2194 { 2195 if (!xive_eas_is_valid(eas)) { 2196 return; 2197 } 2198 2199 g_string_append_printf(buf, " %08x %s end:%02x/%04x data:%08x\n", 2200 lisn, xive_eas_is_masked(eas) ? "M" : " ", 2201 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), 2202 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), 2203 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); 2204 } 2205 2206 /* 2207 * END ESB MMIO loads 2208 */ 2209 static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size) 2210 { 2211 XiveENDSource *xsrc = XIVE_END_SOURCE(opaque); 2212 uint32_t offset = addr & 0xFFF; 2213 uint8_t end_blk; 2214 uint32_t end_idx; 2215 XiveEND end; 2216 uint32_t end_esmask; 2217 uint8_t pq; 2218 uint64_t ret = -1; 2219 2220 /* 2221 * The block id should be deduced from the load address on the END 2222 * ESB MMIO but our model only supports a single block per XIVE chip. 2223 */ 2224 end_blk = xive_router_get_block_id(xsrc->xrtr); 2225 end_idx = addr >> (xsrc->esb_shift + 1); 2226 2227 trace_xive_end_source_read(end_blk, end_idx, addr); 2228 2229 if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { 2230 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, 2231 end_idx); 2232 return -1; 2233 } 2234 2235 if (!xive_end_is_valid(&end)) { 2236 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", 2237 end_blk, end_idx); 2238 return -1; 2239 } 2240 2241 end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe; 2242 pq = xive_get_field32(end_esmask, end.w1); 2243 2244 switch (offset) { 2245 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: 2246 ret = xive_esb_eoi(&pq); 2247 2248 /* Forward the source event notification for routing ?? */ 2249 break; 2250 2251 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: 2252 ret = pq; 2253 break; 2254 2255 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 2256 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 2257 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 2258 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 2259 ret = xive_esb_set(&pq, (offset >> 8) & 0x3); 2260 break; 2261 default: 2262 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n", 2263 offset); 2264 return -1; 2265 } 2266 2267 if (pq != xive_get_field32(end_esmask, end.w1)) { 2268 end.w1 = xive_set_field32(end_esmask, end.w1, pq); 2269 xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); 2270 } 2271 2272 return ret; 2273 } 2274 2275 /* 2276 * END ESB MMIO stores are invalid 2277 */ 2278 static void xive_end_source_write(void *opaque, hwaddr addr, 2279 uint64_t value, unsigned size) 2280 { 2281 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%" 2282 HWADDR_PRIx"\n", addr); 2283 } 2284 2285 static const MemoryRegionOps xive_end_source_ops = { 2286 .read = xive_end_source_read, 2287 .write = xive_end_source_write, 2288 .endianness = DEVICE_BIG_ENDIAN, 2289 .valid = { 2290 .min_access_size = 1, 2291 .max_access_size = 8, 2292 }, 2293 .impl = { 2294 .min_access_size = 1, 2295 .max_access_size = 8, 2296 }, 2297 }; 2298 2299 static void xive_end_source_realize(DeviceState *dev, Error **errp) 2300 { 2301 XiveENDSource *xsrc = XIVE_END_SOURCE(dev); 2302 2303 assert(xsrc->xrtr); 2304 2305 if (!xsrc->nr_ends) { 2306 error_setg(errp, "Number of interrupt needs to be greater than 0"); 2307 return; 2308 } 2309 2310 if (xsrc->esb_shift != XIVE_ESB_4K && 2311 xsrc->esb_shift != XIVE_ESB_64K) { 2312 error_setg(errp, "Invalid ESB shift setting"); 2313 return; 2314 } 2315 2316 /* 2317 * Each END is assigned an even/odd pair of MMIO pages, the even page 2318 * manages the ESn field while the odd page manages the ESe field. 2319 */ 2320 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), 2321 &xive_end_source_ops, xsrc, "xive.end", 2322 (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); 2323 } 2324 2325 static const Property xive_end_source_properties[] = { 2326 DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0), 2327 DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K), 2328 DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER, 2329 XiveRouter *), 2330 }; 2331 2332 static void xive_end_source_class_init(ObjectClass *klass, const void *data) 2333 { 2334 DeviceClass *dc = DEVICE_CLASS(klass); 2335 2336 dc->desc = "XIVE END Source"; 2337 device_class_set_props(dc, xive_end_source_properties); 2338 dc->realize = xive_end_source_realize; 2339 /* 2340 * Reason: part of XIVE interrupt controller, needs to be wired up, 2341 * e.g. by spapr_xive_instance_init(). 2342 */ 2343 dc->user_creatable = false; 2344 } 2345 2346 static const TypeInfo xive_end_source_info = { 2347 .name = TYPE_XIVE_END_SOURCE, 2348 .parent = TYPE_DEVICE, 2349 .instance_size = sizeof(XiveENDSource), 2350 .class_init = xive_end_source_class_init, 2351 }; 2352 2353 /* 2354 * XIVE Notifier 2355 */ 2356 static const TypeInfo xive_notifier_info = { 2357 .name = TYPE_XIVE_NOTIFIER, 2358 .parent = TYPE_INTERFACE, 2359 .class_size = sizeof(XiveNotifierClass), 2360 }; 2361 2362 /* 2363 * XIVE Presenter 2364 */ 2365 static const TypeInfo xive_presenter_info = { 2366 .name = TYPE_XIVE_PRESENTER, 2367 .parent = TYPE_INTERFACE, 2368 .class_size = sizeof(XivePresenterClass), 2369 }; 2370 2371 /* 2372 * XIVE Fabric 2373 */ 2374 static const TypeInfo xive_fabric_info = { 2375 .name = TYPE_XIVE_FABRIC, 2376 .parent = TYPE_INTERFACE, 2377 .class_size = sizeof(XiveFabricClass), 2378 }; 2379 2380 static void xive_register_types(void) 2381 { 2382 type_register_static(&xive_fabric_info); 2383 type_register_static(&xive_source_info); 2384 type_register_static(&xive_notifier_info); 2385 type_register_static(&xive_presenter_info); 2386 type_register_static(&xive_router_info); 2387 type_register_static(&xive_end_source_info); 2388 type_register_static(&xive_tctx_info); 2389 } 2390 2391 type_init(xive_register_types) 2392