/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"

uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_config(xrtr);
}

void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, " %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. qindex ] window
     */
    g_string_append_printf(buf, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "",
                               be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    g_string_append_printf(buf, "]");
}
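
/*
 * Queue geometry, for illustration: QSIZE encodes the number of 4-byte
 * EQ entries as 2^(QSIZE + 10), so QSIZE=0 describes a 1024-entry (4KB)
 * queue and QSIZE=4 a 16k-entry (64KB) queue. The printed window ends
 * on the current queue index, which is flagged with '^'.
 */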

void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);

    g_string_append_printf(buf,
                           " %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c "
                           "prio:%d nvp:%02x/%04x",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_end_is_valid(end)    ? 'v' : '-',
                           xive2_end_is_enqueue(end)  ? 'q' : '-',
                           xive2_end_is_notify(end)   ? 'n' : '-',
                           xive2_end_is_backlog(end)  ? 'b' : '-',
                           xive2_end_is_precluded_escalation(end) ? 'p' : '-',
                           xive2_end_is_escalate(end) ? 'e' : '-',
                           xive2_end_is_escalate_end(end) ? 'N' : '-',
                           xive2_end_is_uncond_escalation(end) ? 'u' : '-',
                           xive2_end_is_silent_escalation(end) ? 's' : '-',
                           xive2_end_is_firmware1(end) ? 'f' : '-',
                           xive2_end_is_firmware2(end) ? 'F' : '-',
                           xive2_end_is_ignore(end) ? 'i' : '-',
                           xive2_end_is_crowd(end) ? 'c' : '-',
                           priority, nvp_blk, nvp_idx);

    if (qaddr_base) {
        g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                               qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, buf);
    }
    g_string_append_c(buf, '\n');
}

void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  GString *buf)
{
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    g_string_append_printf(buf, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_eas_is_valid(eas) ? 'v' : ' ',
                           xive2_eas_is_masked(eas) ? 'M' : ' ',
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x",
                           nvp_idx, eq_blk, eq_idx,
                           xive_get_field32(NVP2_W2_IPB, nvp->w2));
    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        g_string_append_printf(buf, " CPPR:%02x",
                               xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            g_string_append_printf(buf, " CO:%04x",
                                   xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    g_string_append_c(buf, '\n');
}

static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
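
/*
 * EQ entry layout, for illustration: each entry is the big-endian word
 * (qgen << 31 | data & 0x7fffffff), so event data 0x1234 enqueued under
 * generation 1 lands in guest memory as 0x80001234. The generation bit
 * flips whenever qindex wraps to 0, which lets the consumer tell fresh
 * entries from stale ones.
 */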

/*
 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
 *
 * TIMA Gen2 VP “save & restore” (S&R) indicated by H bit next to V bit
 *
 *   - if a context is enabled with the H bit set, the VP context
 *     information is retrieved from the NVP structure (“check out”)
 *     and stored back on a context pull (“check in”), the SW receives
 *     the same context pull information as on P9
 *
 *   - the H bit cannot be changed while the V bit is set, i.e. a
 *     context cannot be set up in the TIMA and then be “pushed” into
 *     the NVP by changing the H bit while the context is enabled
 */

static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    Xive2Nvp nvp;
    uint8_t *regs = &tctx->regs[TM_QW1_OS];

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_hw(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_co(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
        xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: NVP %x/%x invalid checkout Thread %x\n",
                      nvp_blk, nvp_idx, pir);
        return;
    }

    nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
    nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
    nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

    nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
    /* NVP2_W1_CO_THRID_VALID only set once */
    nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}

static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
                                uint32_t *nvp_idx, bool *vo, bool *ho)
{
    *nvp_blk = xive2_nvp_blk(cam);
    *nvp_idx = xive2_nvp_idx(cam);
    *vo = !!(cam & TM2_QW1W2_VO);
    *ho = !!(cam & TM2_QW1W2_HO);
}
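
/*
 * OS CAM word layout, for illustration (assuming the usual TM2_QW1W2
 * definitions, i.e. V in the top bit, H next to it and the NVP CAM
 * line in the low-order bits): a word of 0xc0000005 decodes as VO=1,
 * HO=1 and NVP 0/5, i.e. an enabled context with "save & restore" for
 * NVP index 5 of block 0.
 */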

uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2_new;
    uint32_t cam = be32_to_cpu(qw1w2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_save;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);

    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
        xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
    }

    xive_tctx_reset_os_signal(tctx);
    return qw1w2;
}

static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                         uint8_t nvp_blk, uint32_t nvp_idx,
                                         Xive2Nvp *nvp)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t cppr;

    if (!xive2_nvp_is_hw(nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return 0;
    }

    cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
    nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);

    tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
    /* we don't model LSMFB */

    nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);

    /*
     * Checkout privilege: 0:OS, 1:Pool, 2:Hard
     *
     * TODO: we only support OS push/pull
     */
    nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);

    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);

    /* return restored CPPR to generate a CPU exception if needed */
    return cppr;
}

static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx,
                                   bool do_restore)
{
    Xive2Nvp nvp;
    uint8_t ipb;

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Automatically restore thread context registers */
    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
        do_restore) {
        xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
    }

    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }
    /*
     * Always call xive_tctx_ipb_update(). Even if no escalation was
     * triggered, there could be a pending interrupt which was saved
     * when the context was pulled and that we need to take into
     * account by recalculating the PIPR (which is not saved/restored).
     * It will also raise the External interrupt signal if needed.
     */
    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}

/*
 * Updating the OS CAM line can trigger a resend of interrupts
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_restore;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);

    /* First update the thread context */
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
                               do_restore);
    }
}
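
/*
 * Expected OS context life cycle, as a sketch (the load32/store32
 * helpers and the TM_SPC_PULL_OS_CTX special offset are illustrative
 * here, not part of this model):
 *
 *   push:  store32(TM_QW1_OS + TM_WORD2,
 *                  TM2_QW1W2_VO | TM2_QW1W2_HO | xive2_nvp_cam_line(blk, idx));
 *          -> xive2_tm_push_os_ctx() checks the context out of the NVP
 *   pull:  old_cam = load32(TM_SPC_PULL_OS_CTX);
 *          -> xive2_tm_pull_os_ctx() clears VO and checks the context
 *             back in to the NVP
 */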

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                         Xive2Eas *eas)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static
int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static
int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                         Xive2End *end)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                           Xive2End *end, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                         Xive2Nvp *nvp)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
}

int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                           Xive2Nvp *nvp, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}

static int xive2_router_get_block_id(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

/*
 * Encode the HW CAM line with a 7-bit or 8-bit thread id. The thread id
 * width and block id width are configurable at the IC level.
 *
 *     chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
 *     chipid << 24 | 0000 0000 0000 0001 threadid   (8Bit)
 */
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift =
        xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
    uint8_t tid_mask = (1 << tid_shift) - 1;

    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}
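
/*
 * Worked example (values derived from the code above): with the 7-bit
 * config and PIR 0x25, the thread id word is 1 << 7 | (0x25 & 0x7f),
 * i.e. 0xa5; with the 8-bit config and PIR 0x125 it is 1 << 8 | 0x25,
 * i.e. 0x125. xive2_nvp_cam_line() then folds in the block id.
 */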

/*
 * The thread context register words are in big-endian format.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

static void xive2_router_realize(DeviceState *dev, Error **errp)
{
    Xive2Router *xrtr = XIVE2_ROUTER(dev);

    assert(xrtr->xfb);
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
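
/*
 * Reminder of the ESB PQ state machine applied above (as modeled by
 * the generic xive_esb_trigger() helper): a trigger moves 00 (reset)
 * to 10 (pending) and notifies; 10 and 11 both move to 11 (queued)
 * without a new notification; 01 (off) absorbs the event. Hence the
 * "ESe/n[Q]=1 : end of notification" comment above.
 */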

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found;
    Xive2Nvp nvp;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification:
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    /* NVP cache lookup */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
                      nvp_blk, nvp_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                                  xive2_end_is_ignore(&end),
                                  priority,
                                  xive_get_field32(END2_W7_F1_LOG_SERVER_ID,
                                                   end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVP is dispatched on a HW thread:
     * - specific VP: update the NVP structure if backlog is activated
     * - logical server: forward request to IVPE (not supported)
     */
    if (xive2_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }

        /*
         * Record the IPB in the associated NVP structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
              xive_priority_to_ipb(priority);
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field32(END2_W4_END_BLOCK, end.w4),
                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                            xive_get_field32(END2_W5_ESC_END_DATA, end.w5));
}
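
/*
 * Backlog recording, for illustration (assuming the generic
 * xive_priority_to_ipb() mapping of priority P to IPB bit 7 - P): a
 * backlogged priority-2 event ORs 0x20 into the NVP W2 IPB field,
 * which xive2_tctx_need_resend() replays when the vCPU is dispatched
 * again.
 */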

void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field64(EAS2_END_BLOCK, eas.w),
                            xive_get_field64(EAS2_END_INDEX, eas.w),
                            xive_get_field64(EAS2_END_DATA, eas.w));
}
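
/*
 * LISN decode, for illustration (assuming the XIVE_EAS_BLOCK/INDEX
 * split with the block id in the top nibble): LISN 0x10000020 selects
 * EAS block 0x1, index 0x20.
 */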

static Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive2_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc = "XIVE2 Router Engine";
    device_class_set_props(dc, xive2_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive2_router_realize;
    xnc->notify = xive2_router_notify;
}

static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}
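
/*
 * END ESB address map, for illustration (derived from the decode
 * above): each END owns an even/odd pair of pages of (1 << esb_shift)
 * bytes, so with 64K pages END #3 has its ESn (even) page at offset
 * 3 * 128K = 0x60000 and its ESe (odd) page at 0x70000, with
 * end_idx = addr >> (esb_shift + 1).
 */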

static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x cannot EQ inject on ESe\n",
                          end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}

static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void xive2_end_source_realize(DeviceState *dev, Error **errp)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive2_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive2_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    device_class_set_props(dc, xive2_end_source_properties);
    dc->realize = xive2_end_source_realize;
    dc->user_creatable = false;
}

static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};

static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)