/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert a priority number to an Interrupt Pending Buffer (IPB)
 * register, which indicates a pending interrupt at the priority
 * corresponding to the bit number
 */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification.
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}

static void ipb_update(uint8_t *regs, uint8_t priority)
{
    regs[TM_IPB] |= priority_to_ipb(priority);
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
}

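/*
 * Illustrative example (with XIVE_PRIORITY_MAX == 7, as defined in
 * xive_regs.h): a pending priority 0 event sets IPB bit 7 (0x80) and
 * a priority 5 event sets bit 2 (0x04). With IPB = 0x84,
 * clz32(0x84 << 24) == 0, so PIPR reports 0, the most favored of the
 * two pending priorities. An empty IPB yields the "no interrupt"
 * priority 0xff.
 */
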
static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0; /* Not supported */
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}

static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;
    }

    return (nsr << 8) | regs[TM_CPPR];
}

static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}

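/*
 * Illustrative flow: with CPPR = 0xff (no masking), a priority 3
 * event raises PIPR to 3 via ipb_update(). Since 3 < 0xff,
 * xive_tctx_notify() sets the exception bit in the NSR and raises the
 * output line. The acknowledge load that follows (xive_tctx_accept)
 * moves PIPR into CPPR, clears the IPB bit and the NSR exception bit,
 * and returns (NSR << 8) | CPPR to the CPU.
 */
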
static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    return *((uint32_t *) &ring[TM_WORD2]);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */

static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                      hwaddr offset, unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw registers values
 *
 * Registers accessibility bits :
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */

static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   0, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2,   2, 2, 0, 2,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}

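/*
 * Illustrative example (assuming TM_SHIFT == 16, i.e. 64K TIMA
 * pages, as defined in xive_regs.h): a 4-byte store at TIMA offset
 * 0x20010 decodes to the OS page (page_offset 2) and reg_offset 0x10
 * (TM_QW1_OS). In xive_tm_os_view, only the TM_CPPR byte at offset
 * 0x11 is writable, so the resulting store mask is 0x00ff0000. The
 * same access done as a load yields 0xffffffff since all four bytes
 * are readable from the OS view.
 */
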
static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4-byte or 8-byte stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4-byte or 8-byte loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* filter out reserved values */
    return ret & mask;
}

/*
 * The TM context is mapped twice within each page. Stores and loads
 * to the first mapping below 2K write and read the specified values
 * without modification. The second mapping above 2K performs specific
 * state changes (side effects) in addition to setting/returning the
 * interrupt management area context of the processor thread.
 */
static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, uint64_t value, unsigned size)
{
    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
    xive_tctx_notify(tctx, TM_QW1_OS);
}

static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
                               uint32_t *nvt_idx, bool *vo)
{
    if (nvt_blk) {
        *nvt_blk = xive_nvt_blk(cam);
    }
    if (nvt_idx) {
        *nvt_idx = xive_nvt_idx(cam);
    }
    if (vo) {
        *vo = !!(cam & TM_QW1W2_VO);
    }
}

static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
                                     uint32_t *nvt_idx, bool *vo)
{
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t cam = be32_to_cpu(qw1w2);

    xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo);
    return qw1w2;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2)
{
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}

static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                    hwaddr offset, unsigned size)
{
    uint32_t qw1w2;
    uint32_t qw1w2_new;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    bool vo;

    qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
                      nvt_blk, nvt_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
    xive_tctx_set_os_cam(tctx, qw1w2_new);
    return qw1w2;
}

/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
typedef struct XiveTmOp {
    uint8_t  page_offset;
    uint32_t op_offset;
    unsigned size;
    void     (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset,
                              uint64_t value, unsigned size);
    uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                             unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }
    return NULL;
}

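/*
 * Dispatch example: a 1-byte store to TM_QW1_OS + TM_CPPR from the OS
 * page matches the first entry of xive_tm_operations and is routed to
 * xive_tm_set_os_cppr() rather than the raw write path. The same
 * store issued from the HW page (page_offset 0) also matches, since a
 * more privileged page may use the operations of the less privileged
 * ones.
 */
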
/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                        uint64_t value, unsigned size)
{
    const XiveTmOp *xto;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the region above 2K
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(xptr, tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(xptr, tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                           unsigned size)
{
    const XiveTmOp *xto;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the region above 2K
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        return xto->read_handler(xptr, tctx, offset, size);
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        return xto->read_handler(xptr, tctx, offset, size);
    }

    /*
     * Finish with raw access to the register values
     */
    return xive_tm_raw_read(tctx, offset, size);
}

static void xive_tm_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

const MemoryRegionOps xive_tm_ops = {
    .read = xive_tm_read,
    .write = xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR],
                           be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index;
    int i;

    /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
     * are hot plugged or unplugged.
     */
    if (!tctx) {
        return;
    }

    cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

void xive_tctx_reset(XiveTCTX *tctx)
{
    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Error *local_err = NULL;

    assert(tctx->cs);

    cpu = POWERPC_CPU(tctx->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
        tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_connect(tctx, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static int vmstate_xive_tctx_pre_save(void *opaque)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        /*
         * Required for hotplugged CPU, for which the state comes
         * after all states of the machine.
         */
        kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static Property xive_tctx_properties[] = {
    DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->vmsd = &vmstate_xive_tctx;
    dc->props = xive_tctx_properties;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by xive_tctx_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_tctx_info = {
    .name          = TYPE_XIVE_TCTX,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init    = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
    object_unref(obj);
    object_property_set_link(obj, cpu, "cpu", &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return obj;

error:
    object_unparent(obj);
    error_propagate(errp, local_err);
    return NULL;
}

void xive_tctx_destroy(XiveTCTX *tctx)
{
    Object *obj = OBJECT(tctx);

    object_unparent(obj);
}

/*
 * XIVE ESB helpers
 */

static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

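/*
 * Summary of the PQ transitions implemented above (P = pending,
 * Q = queued); "forward" means the function returns true and the
 * event is passed on:
 *
 *   PQ    trigger              EOI
 *   00 -> 10, forward          00, no action
 *   10 -> 11, coalesce         00, no action
 *   11 -> 11, coalesce         10, forward
 *   01 -> 01, ignore (off)     01, ignore (off)
 */
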
/*
 * XIVE Interrupt Source (or IVSE)
 */

uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

/*
 * Forward the source event notification to the Router
 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno);
    }
}

/*
 * In a two-page ESB MMIO setting, even page is the trigger page, odd
 * page is for management
 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}

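/*
 * Illustrative decode (assuming esb_shift == XIVE_ESB_64K_2PAGE,
 * i.e. 17): each source owns a 128K span and srcno = addr >> 17. For
 * IRQ number 3, the span starts at 0x60000; the even 64K page at
 * 0x60000 is the trigger page and the odd page at 0x70000 is the
 * management page. addr_is_even() tests bit (esb_shift - 1) of the
 * address, i.e. bit 16 here.
 */
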
/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1              EOI and return 0|1
 * 0x400 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two-page ESB MMIO setting, trigger page should not be read */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    return ret;
}

/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    /* In a two-page ESB MMIO setting, trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

    /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    assert(xsrc->xive);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    if (!kvm_irqchip_in_kernel()) {
        memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                              &xive_source_esb_ops, xsrc, "xive.esb",
                              (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
    }

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
                     XiveNotifier *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE Interrupt Source";
    dc->props   = xive_source_properties;
    dc->realize = xive_source_realize;
    dc->vmsd    = &vmstate_xive_source;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_source_info = {
    .name          = TYPE_XIVE_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init    = xive_source_class_init,
};

/*
 * XiveEND helpers
 */

void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESn, end->w1);

    monitor_printf(mon, " %08x %c%c %c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_end_is_valid(end)    ? 'v' : '-',
                   xive_end_is_enqueue(end)  ? 'q' : '-',
                   xive_end_is_notify(end)   ? 'n' : '-',
                   xive_end_is_backlog(end)  ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   xive_end_is_uncond_escalation(end) ? 'u' : '-',
                   xive_end_is_silent_escalation(end) ? 's' : '-',
                   priority, nvt_blk, nvt_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}

static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}

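/*
 * Illustrative wrap-around: with qsize 2 the queue holds 4096
 * entries. When qindex reaches 4095, the next xive_end_enqueue()
 * writes the entry, wraps qindex back to 0 and flips the generation
 * bit in END word 1. The generation bit is also stored in the top bit
 * of each queue entry, which lets the consumer distinguish freshly
 * written entries from stale ones of the previous pass.
 */
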
void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
                                 Monitor *mon)
{
    XiveEAS *eas = (XiveEAS *) &end->w4;
    uint8_t pq;

    if (!xive_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESe, end->w1);

    monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_eas_is_valid(eas) ? 'V' : ' ',
                   xive_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t)  xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_tctx(xrtr, cs);
}

/*
 * Encode the HW CAM line in the block group mode format :
 *
 *   chip << 19 | 0000000 0 0001 thread (7-bit)
 */
static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;

    return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f));
}

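/*
 * Worked example: for a thread with PIR 0x0208, the chip id is
 * (0x0208 >> 8) & 0xf = 2 and the thread id is 0x88 (bit 7 forced to
 * 1, low 7 bits taken from the PIR), giving the CAM line
 * xive_nvt_cam_line(2, 0x88).
 */
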
/*
 * The thread context register words are in big-endian format.
 */
int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                              uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
 * of a single NVT notification, the process is abbreviated and the
 * thread is signaled if a match is found. In case of a logical server
 * notification (bits ignored at the end of the NVT identifier), the
 * IVPE and IVRE select a winning thread using different filters. This
 * involves 2 or 3 exchanges on the PowerBus that the model does not
 * support.
 *
 * The parameters represent what is sent on the PowerBus
 */
static bool xive_presenter_notify(uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveFabric *xfb = XIVE_FABRIC(qdev_get_machine());
    XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    int count;

    /*
     * Ask the machine to scan the interrupt controllers for a match
     */
    count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
                           priority, logic_serv, &match);
    if (count < 0) {
        return false;
    }

    /* handle CPU exception delivery */
    if (count) {
        ipb_update(&match.tctx->regs[match.ring], priority);
        xive_tctx_notify(match.tctx, match.ring);
    }

    return !!count;
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

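/*
 * Coalescing example: with ESn initially 00, the first notification
 * flips it to 10 and returns true, so the event is forwarded to the
 * IVPE. While ESn stays at 10 or 11, further notifications only set
 * the Q bit and return false, until a management load on the END ESB
 * page (xive_end_source_read below) resets the state.
 */
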
/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    bool found;

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(format, nvt_blk, nvt_idx,
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /*
         * Record the IPB in the associated NVT structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | priority_to_ipb(priority);
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
                           xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
                           xive_get_field32(END_W5_ESC_END_DATA,  end.w5));
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    /*
     * The IVRE checks the State Bit Cache at this point. We skip the
     * SBC lookup because the state bits of the sources are modeled
     * internally in QEMU.
     */

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA,  eas.w));
}

static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc    = "XIVE Router Engine";
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name          = TYPE_XIVE_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .class_size    = sizeof(XiveRouterClass),
    .class_init    = xive_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t)  xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    end_blk = xsrc->block_id;
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

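/*
 * Address decode example (assuming esb_shift == XIVE_ESB_64K): each
 * END owns a pair of 64K pages, so end_idx = addr >> 17. A load from
 * the even page updates the ESn (notification) bits while the odd
 * page updates the ESe (escalation) bits of END word 1.
 */
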
/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
                     XiveRouter *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE END Source";
    dc->props   = xive_end_source_properties;
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name          = TYPE_XIVE_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init    = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

/*
 * XIVE Presenter
 */
static const TypeInfo xive_presenter_info = {
    .name = TYPE_XIVE_PRESENTER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XivePresenterClass),
};

/*
 * XIVE Fabric
 */
static const TypeInfo xive_fabric_info = {
    .name = TYPE_XIVE_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveFabricClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_fabric_info);
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_presenter_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)