/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"

uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_config(xrtr);
}

void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, "  %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
                                    Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. qindex ] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}

void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);

    monitor_printf(mon,
                   "  %08x %c%c %c%c%c%c%c%c%c%c%c%c prio:%d nvp:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_end_is_valid(end)    ? 'v' : '-',
                   xive2_end_is_enqueue(end)  ? 'q' : '-',
                   xive2_end_is_notify(end)   ? 'n' : '-',
                   xive2_end_is_backlog(end)  ? 'b' : '-',
                   xive2_end_is_escalate(end) ? 'e' : '-',
                   xive2_end_is_escalate_end(end) ? 'N' : '-',
                   xive2_end_is_uncond_escalation(end) ? 'u' : '-',
                   xive2_end_is_silent_escalation(end) ? 's' : '-',
                   xive2_end_is_firmware1(end) ? 'f' : '-',
                   xive2_end_is_firmware2(end) ? 'F' : '-',
                   priority, nvp_blk, nvp_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}
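
/*
 * Note on the EQ geometry used by the helpers above (an illustration
 * of the arithmetic, not extra specification): the queue holds 4-byte
 * entries and W3 QSIZE encodes its length as 1 << (QSIZE + 10)
 * entries, so QSIZE=0 is 1K entries, i.e. one 4KB page, and QSIZE=4
 * is 16K entries (64KB).
 */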

void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  Monitor *mon)
{
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_eas_is_valid(eas) ? 'v' : ' ',
                   xive2_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
 *
 * TIMA Gen2 VP "save & restore" (S&R) indicated by H bit next to V bit
 *
 *   - if a context is enabled with the H bit set, the VP context
 *     information is retrieved from the NVP structure ("check out")
 *     and stored back on a context pull ("check in"), the SW receives
 *     the same context pull information as on P9
 *
 *   - the H bit cannot be changed while the V bit is set, i.e. a
 *     context cannot be set up in the TIMA and then be "pushed" into
 *     the NVP by changing the H bit while the context is enabled
 */
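
/*
 * Sketch of the S&R flow as modeled by the helpers below (a summary
 * of this file, not additional architecture detail): an OS context
 * push with H=1 "checks out" the context, i.e.
 * xive2_tctx_restore_os_ctx() reloads CPPR from NVP W2, and a
 * context pull "checks in", i.e. xive2_tctx_save_os_ctx() writes
 * IPB, CPPR and LSMFB back to NVP W2 and clears the checkout bit
 * in NVP W1.
 */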

static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    Xive2Nvp nvp;
    uint8_t *regs = &tctx->regs[TM_QW1_OS];

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_hw(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_co(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
        xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: NVP %x/%x invalid checkout Thread %x\n",
                      nvp_blk, nvp_idx, pir);
        return;
    }

    nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
    nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
    nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

    nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
    /* NVP2_W1_CO_THRID_VALID only set once */
    nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}

static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
                                uint32_t *nvp_idx, bool *vo, bool *ho)
{
    *nvp_blk = xive2_nvp_blk(cam);
    *nvp_idx = xive2_nvp_idx(cam);
    *vo = !!(cam & TM2_QW1W2_VO);
    *ho = !!(cam & TM2_QW1W2_HO);
}

uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2_new;
    uint32_t cam = be32_to_cpu(qw1w2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_save;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);

    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
        xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
    }

    return qw1w2;
}

static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                         uint8_t nvp_blk, uint32_t nvp_idx,
                                         Xive2Nvp *nvp)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t cppr;

    if (!xive2_nvp_is_hw(nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return 0;
    }

    cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
    nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);

    tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
    /* we don't model LSMFB */

    nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);

    /*
     * Checkout privilege: 0:OS, 1:Pool, 2:Hard
     *
     * TODO: we only support OS push/pull
     */
    nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);

    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);

    /* return restored CPPR to generate a CPU exception if needed */
    return cppr;
}
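
/*
 * For reference, the OS CAM word decoded above packs, from the most
 * significant bits down: the VO (valid) and HO (S&R) flags, then the
 * NVP CAM line which xive2_nvp_blk()/xive2_nvp_idx() split into a
 * block id and an index (see xive2_regs.h for the exact widths).
 */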

static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx,
                                   bool do_restore)
{
    Xive2Nvp nvp;
    uint8_t ipb;
    uint8_t cppr = 0;

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Automatically restore thread context registers */
    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
        do_restore) {
        cppr = xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
    }

    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }

    /* An IPB or CPPR change can trigger a resend */
    if (ipb || cppr) {
        xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
    }
}

/*
 * Updating the OS CAM line can trigger a resend of an interrupt
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_restore;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);

    /* First update the thread context */
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
                               do_restore);
    }
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */
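
/*
 * Big picture of the routing path implemented below (a restatement
 * of this file's flow, not extra specification): a source event
 * selects an EAS, which routes it to an END; the END enqueues the
 * event data and notifies the NVP it targets; if no matching thread
 * is dispatched, the event is either backlogged in the NVP or
 * escalated through another END.
 */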

int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                         Xive2Eas *eas)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static
int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static
int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                         Xive2End *end)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                           Xive2End *end, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                         Xive2Nvp *nvp)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
}

int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                           Xive2Nvp *nvp, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}

static int xive2_router_get_block_id(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

/*
 * Encode the HW CAM line with a 7-bit or 8-bit thread id. The thread id
 * width and block id width are configurable at the IC level.
 *
 *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
 *    chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
 */
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift = 7;
    uint8_t tid_mask = (1 << tid_shift) - 1;

    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}
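
/*
 * Worked example for the 7-bit form used above (illustrative values):
 * with block id 0 and PIR 0x25, the thread part is
 * (1 << 7) | (0x25 & 0x7f) = 0xa5, i.e. the '1' marker bit sits
 * directly above the thread id as in the diagram. The model always
 * uses tid_shift = 7; the 8-bit variant is not implemented here.
 */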

/*
 * The thread context register words are in big-endian format.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

static void xive2_router_realize(DeviceState *dev, Error **errp)
{
    Xive2Router *xrtr = XIVE2_ROUTER(dev);

    assert(xrtr->xfb);
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
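
/*
 * Reminder of the ESB PQ semantics relied upon above (see
 * xive_esb_trigger() in xive.c): a trigger on PQ=00 (reset) moves to
 * PQ=10 (pending) and notifies; a trigger on PQ=10 or PQ=11 queues
 * the event (PQ=11) without a new notification; PQ=01 (off) swallows
 * the trigger entirely.
 */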

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found;
    Xive2Nvp nvp;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification:
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    /* NVP cache lookup */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
                      nvp_blk, nvp_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                                  xive_get_field32(END2_W6_IGNORE, end.w6),
                                  priority,
                                  xive_get_field32(END2_W7_F1_LOG_SERVER_ID,
                                                   end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVP is dispatched on a HW thread:
     *  - specific VP: update the NVP structure if backlog is activated
     *  - logical server: forward request to IVPE (not supported)
     */
    if (xive2_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }

        /*
         * Record the IPB in the associated NVP structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
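        /*
         * For illustration: xive_priority_to_ipb() maps priority p to
         * the single IPB bit 0x80 >> p, so a backlogged priority 0
         * event sets bit 0x80 and priority 7 sets bit 0x01.
         */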
        ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
              xive_priority_to_ipb(priority);
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field32(END2_W4_END_BLOCK, end.w4),
                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                            xive_get_field32(END2_W5_ESC_END_DATA, end.w5));
}

void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field64(EAS2_END_BLOCK, eas.w),
                            xive_get_field64(EAS2_END_INDEX, eas.w),
                            xive_get_field64(EAS2_END_DATA, eas.w));
}
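
/*
 * For reference, the XIVE_EAS_BLOCK()/XIVE_EAS_INDEX() helpers used
 * above split a global LISN into a 4-bit EAS block id in the top
 * nibble and a 28-bit index below it, e.g. LISN 0x10000042 resolves
 * to EAS 1/0x42.
 */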

static Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive2_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc    = "XIVE2 Router Engine";
    device_class_set_props(dc, xive2_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive2_router_realize;
    xnc->notify = xive2_router_notify;
}

static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}
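
/*
 * Address map of the END ESB MMIO region, restated from the decoding
 * above: each END owns an even/odd pair of ESB pages, so with
 * esb_shift = 16 (64KB pages) END k starts at byte offset k << 17,
 * its even page driving ESn (notification) and its odd page driving
 * ESe (escalation).
 */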

static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x cannot EQ inject on ESe\n",
                          end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}

static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive2_end_source_realize(DeviceState *dev, Error **errp)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive2_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive2_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE END Source";
    device_class_set_props(dc, xive2_end_source_properties);
    dc->realize = xive2_end_source_realize;
}

static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};

static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)
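
/*
 * Instantiation sketch (illustrative; the wiring lives in the chip
 * or machine models, not in this file): an END source is expected to
 * be created and linked with qdev, e.g.:
 *
 *    Object *obj = object_new(TYPE_XIVE2_END_SOURCE);
 *    object_property_set_int(obj, "nr-ends", nr_ends, &error_fatal);
 *    object_property_set_link(obj, "xive", OBJECT(xrtr), &error_fatal);
 *    qdev_realize(DEVICE(obj), NULL, &error_fatal);
 *
 * after which xsrc->esb_mmio is mapped into the machine's address
 * space.
 */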