/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "system/cpus.h"
#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"
#include "trace.h"

uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_config(xrtr);
}

static int xive2_router_get_block_id(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
{
    uint64_t cache_addr;

    cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
        xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
    cache_addr <<= 8; /* aligned on a cache line pair */
    return cache_addr;
}

static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
{
    uint32_t val = 0;
    uint8_t *ptr, i;

    if (priority > 7) {
        return 0;
    }

    /*
     * The per-priority backlog counters are 24-bit and the structure
     * is stored in big endian. NVGC is 32-bytes long, so 24-bytes from
     * w2, which fits 8 priorities * 24-bits per priority.
     */
    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
    for (i = 0; i < 3; i++, ptr++) {
        val = (val << 8) + *ptr;
    }
    return val;
}

static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority,
                                   uint32_t val)
{
    uint8_t *ptr, i;
    uint32_t shift;

    if (priority > 7) {
        return;
    }

    if (val > 0xFFFFFF) {
        val = 0xFFFFFF;
    }
    /*
     * The per-priority backlog counters are 24-bit and the structure
     * is stored in big endian
     */
    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
    for (i = 0; i < 3; i++, ptr++) {
        shift = 8 * (2 - i);
        *ptr = (val >> shift) & 0xFF;
    }
}

void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, " %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    g_string_append_printf(buf, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        g_string_append_printf(buf, "%s%08x ", i == width - 1 ?
"^" : "", 128 be32_to_cpu(qdata)); 129 qindex = (qindex + 1) & (qentries - 1); 130 } 131 g_string_append_printf(buf, "]"); 132 } 133 134 void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf) 135 { 136 uint64_t qaddr_base = xive2_end_qaddr(end); 137 uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); 138 uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); 139 uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); 140 uint32_t qentries = 1 << (qsize + 10); 141 142 uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6); 143 uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6); 144 uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7); 145 uint8_t pq; 146 147 if (!xive2_end_is_valid(end)) { 148 return; 149 } 150 151 pq = xive_get_field32(END2_W1_ESn, end->w1); 152 153 g_string_append_printf(buf, 154 " %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c " 155 "prio:%d nvp:%02x/%04x", 156 end_idx, 157 pq & XIVE_ESB_VAL_P ? 'P' : '-', 158 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 159 xive2_end_is_valid(end) ? 'v' : '-', 160 xive2_end_is_enqueue(end) ? 'q' : '-', 161 xive2_end_is_notify(end) ? 'n' : '-', 162 xive2_end_is_backlog(end) ? 'b' : '-', 163 xive2_end_is_precluded_escalation(end) ? 'p' : '-', 164 xive2_end_is_escalate(end) ? 'e' : '-', 165 xive2_end_is_escalate_end(end) ? 'N' : '-', 166 xive2_end_is_uncond_escalation(end) ? 'u' : '-', 167 xive2_end_is_silent_escalation(end) ? 's' : '-', 168 xive2_end_is_firmware1(end) ? 'f' : '-', 169 xive2_end_is_firmware2(end) ? 'F' : '-', 170 xive2_end_is_ignore(end) ? 'i' : '-', 171 xive2_end_is_crowd(end) ? 'c' : '-', 172 priority, nvp_blk, nvp_idx); 173 174 if (qaddr_base) { 175 g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d", 176 qaddr_base, qindex, qentries, qgen); 177 xive2_end_queue_pic_print_info(end, 6, buf); 178 } 179 g_string_append_c(buf, '\n'); 180 } 181 182 void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx, 183 GString *buf) 184 { 185 Xive2Eas *eas = (Xive2Eas *) &end->w4; 186 uint8_t pq; 187 188 if (!xive2_end_is_escalate(end)) { 189 return; 190 } 191 192 pq = xive_get_field32(END2_W1_ESe, end->w1); 193 194 g_string_append_printf(buf, " %08x %c%c %c%c end:%02x/%04x data:%08x\n", 195 end_idx, 196 pq & XIVE_ESB_VAL_P ? 'P' : '-', 197 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 198 xive2_eas_is_valid(eas) ? 'v' : ' ', 199 xive2_eas_is_masked(eas) ? 
'M' : ' ',
                           (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
{
    uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
    uint64_t cache_line = xive2_nvp_reporting_addr(nvp);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
                           nvp_idx, eq_blk, eq_idx,
                           xive_get_field32(NVP2_W2_IPB, nvp->w2),
                           xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
    if (cache_line) {
        g_string_append_printf(buf, " reporting CL:%016"PRIx64, cache_line);
    }

    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        g_string_append_printf(buf, " CPPR:%02x",
                               xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            g_string_append_printf(buf, " CO:%04x",
                                   xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    g_string_append_c(buf, '\n');
}

void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
{
    uint8_t i;

    if (!xive2_nvgc_is_valid(nvgc)) {
        return;
    }

    g_string_append_printf(buf, " %08x PGoNext:%02x bklog: ", nvgc_idx,
                           xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
    for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
        g_string_append_printf(buf, "[%d]=0x%x ",
                               i, xive2_nvgc_get_backlog(nvgc, i));
    }
    g_string_append_printf(buf, "\n");
}

static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
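/*
 * Illustrative note (not taken from the XIVE2 specification text): each
 * EQ entry written by xive2_end_enqueue() above is a big-endian word
 * whose MSB is the current generation bit and whose lower 31 bits carry
 * the END data, i.e. roughly:
 *
 *     entry = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
 *
 * With a queue of "qentries" slots, once qindex wraps back to 0 the
 * generation bit is toggled, which is how the consumer distinguishes
 * freshly written entries from stale ones left over from the previous
 * lap around the queue.
 */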
/*
 * Scan the group chain and return the highest priority and group
 * level of pending group interrupts.
 */
static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr,
                                            uint8_t nvp_blk, uint32_t nvp_idx,
                                            uint8_t first_group,
                                            uint8_t *out_level)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t nvgc_idx, mask;
    uint32_t current_level, count;
    uint8_t prio;
    Xive2Nvgc nvgc;

    for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) {
        current_level = first_group & 0xF;

        while (current_level) {
            mask = (1 << current_level) - 1;
            nvgc_idx = nvp_idx & ~mask;
            nvgc_idx |= mask >> 1;

            if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n",
                              nvp_blk, nvgc_idx);
                return 0xFF;
            }
            if (!xive2_nvgc_is_valid(&nvgc)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n",
                              nvp_blk, nvgc_idx);
                return 0xFF;
            }

            count = xive2_nvgc_get_backlog(&nvgc, prio);
            if (count) {
                *out_level = current_level;
                return prio;
            }
            current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0xF;
        }
    }
    return 0xFF;
}

static void xive2_presenter_backlog_decr(XivePresenter *xptr,
                                         uint8_t nvp_blk, uint32_t nvp_idx,
                                         uint8_t group_prio,
                                         uint8_t group_level)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t nvgc_idx, mask, count;
    Xive2Nvgc nvgc;

    group_level &= 0xF;
    mask = (1 << group_level) - 1;
    nvgc_idx = nvp_idx & ~mask;
    nvgc_idx |= mask >> 1;

    if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n",
                      nvp_blk, nvgc_idx);
        return;
    }
    if (!xive2_nvgc_is_valid(&nvgc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n",
                      nvp_blk, nvgc_idx);
        return;
    }
    count = xive2_nvgc_get_backlog(&nvgc, group_prio);
    if (!count) {
        return;
    }
    xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1);
    xive2_router_write_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
 *
 * TIMA Gen2 VP "save & restore" (S&R) indicated by H bit next to V bit
 *
 *   - if a context is enabled with the H bit set, the VP context
 *     information is retrieved from the NVP structure ("check out")
 *     and stored back on a context pull ("check in"), the SW receives
 *     the same context pull information as on P9
 *
 *   - the H bit cannot be changed while the V bit is set, i.e.
a 373 * context cannot be set up in the TIMA and then be “pushed” into 374 * the NVP by changing the H bit while the context is enabled 375 */ 376 377 static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, 378 uint8_t nvp_blk, uint32_t nvp_idx, 379 uint8_t ring) 380 { 381 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 382 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 383 Xive2Nvp nvp; 384 uint8_t *regs = &tctx->regs[ring]; 385 386 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 387 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", 388 nvp_blk, nvp_idx); 389 return; 390 } 391 392 if (!xive2_nvp_is_valid(&nvp)) { 393 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 394 nvp_blk, nvp_idx); 395 return; 396 } 397 398 if (!xive2_nvp_is_hw(&nvp)) { 399 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n", 400 nvp_blk, nvp_idx); 401 return; 402 } 403 404 if (!xive2_nvp_is_co(&nvp)) { 405 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n", 406 nvp_blk, nvp_idx); 407 return; 408 } 409 410 if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) && 411 xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) { 412 qemu_log_mask(LOG_GUEST_ERROR, 413 "XIVE: NVP %x/%x invalid checkout Thread %x\n", 414 nvp_blk, nvp_idx, pir); 415 return; 416 } 417 418 nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]); 419 nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]); 420 if (nvp.w0 & NVP2_W0_L) { 421 /* 422 * Typically not used. If LSMFB is restored with 0, it will 423 * force a backlog rescan 424 */ 425 nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]); 426 } 427 if (nvp.w0 & NVP2_W0_G) { 428 nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]); 429 } 430 if (nvp.w0 & NVP2_W0_T) { 431 nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]); 432 } 433 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); 434 435 nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0); 436 /* NVP2_W1_CO_THRID_VALID only set once */ 437 nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF); 438 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1); 439 } 440 441 static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk, 442 uint32_t *nvp_idx, bool *valid, bool *hw) 443 { 444 *nvp_blk = xive2_nvp_blk(cam); 445 *nvp_idx = xive2_nvp_idx(cam); 446 *valid = !!(cam & TM2_W2_VALID); 447 *hw = !!(cam & TM2_W2_HW); 448 } 449 450 /* 451 * Encode the HW CAM line with 7bit or 8bit thread id. The thread id 452 * width and block id width is configurable at the IC level. 453 * 454 * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit) 455 * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit) 456 */ 457 static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) 458 { 459 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 460 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 461 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 462 uint8_t blk = xive2_router_get_block_id(xrtr); 463 uint8_t tid_shift = 464 xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 
8 : 7; 465 uint8_t tid_mask = (1 << tid_shift) - 1; 466 467 return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); 468 } 469 470 static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, 471 hwaddr offset, unsigned size, uint8_t ring) 472 { 473 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 474 uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]); 475 uint32_t cam = be32_to_cpu(target_ringw2); 476 uint8_t nvp_blk; 477 uint32_t nvp_idx; 478 uint8_t cur_ring; 479 bool valid; 480 bool do_save; 481 482 xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save); 483 484 if (!valid) { 485 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n", 486 nvp_blk, nvp_idx); 487 } 488 489 /* Invalidate CAM line of requested ring and all lower rings */ 490 for (cur_ring = TM_QW0_USER; cur_ring <= ring; 491 cur_ring += XIVE_TM_RING_SIZE) { 492 uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]); 493 uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0); 494 memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4); 495 } 496 497 if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { 498 xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring); 499 } 500 501 /* 502 * Lower external interrupt line of requested ring and below except for 503 * USER, which doesn't exist. 504 */ 505 for (cur_ring = TM_QW1_OS; cur_ring <= ring; 506 cur_ring += XIVE_TM_RING_SIZE) { 507 xive_tctx_reset_signal(tctx, cur_ring); 508 } 509 return target_ringw2; 510 } 511 512 uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, 513 hwaddr offset, unsigned size) 514 { 515 return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS); 516 } 517 518 #define REPORT_LINE_GEN1_SIZE 16 519 520 static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data, 521 uint8_t size) 522 { 523 uint8_t *regs = tctx->regs; 524 525 g_assert(size == REPORT_LINE_GEN1_SIZE); 526 memset(data, 0, size); 527 /* 528 * See xive architecture for description of what is saved. It is 529 * hand-picked information to fit in 16 bytes. 
530 */ 531 data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR]; 532 data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR]; 533 data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB]; 534 data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB]; 535 data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT]; 536 data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS]; 537 data[0x6] = 0xFF; 538 data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80; 539 data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1; 540 data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2; 541 data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3); 542 data[0x8] = regs[TM_QW1_OS + TM_NSR]; 543 data[0x9] = regs[TM_QW1_OS + TM_CPPR]; 544 data[0xA] = regs[TM_QW1_OS + TM_IPB]; 545 data[0xB] = regs[TM_QW1_OS + TM_LGS]; 546 if (regs[TM_QW0_USER + TM_WORD2] & 0x80) { 547 /* 548 * Logical server extension, except VU bit replaced by EB bit 549 * from NSR 550 */ 551 data[0xC] = regs[TM_QW0_USER + TM_WORD2]; 552 data[0xC] &= ~0x80; 553 data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80; 554 data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1]; 555 data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2]; 556 data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3]; 557 } 558 } 559 560 static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, 561 hwaddr offset, uint64_t value, 562 unsigned size, uint8_t ring) 563 { 564 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 565 uint32_t hw_cam, nvp_idx, xive2_cfg, reserved; 566 uint8_t nvp_blk; 567 Xive2Nvp nvp; 568 uint64_t phys_addr; 569 MemTxResult result; 570 571 hw_cam = xive2_tctx_hw_cam_line(xptr, tctx); 572 nvp_blk = xive2_nvp_blk(hw_cam); 573 nvp_idx = xive2_nvp_idx(hw_cam); 574 575 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 576 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", 577 nvp_blk, nvp_idx); 578 return; 579 } 580 581 if (!xive2_nvp_is_valid(&nvp)) { 582 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 583 nvp_blk, nvp_idx); 584 return; 585 } 586 587 xive2_cfg = xive2_router_get_config(xrtr); 588 589 phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */ 590 if (xive2_cfg & XIVE2_GEN1_TIMA_OS) { 591 uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE]; 592 593 xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE); 594 result = dma_memory_write(&address_space_memory, phys_addr, 595 pull_ctxt, REPORT_LINE_GEN1_SIZE, 596 MEMTXATTRS_UNSPECIFIED); 597 assert(result == MEMTX_OK); 598 } else { 599 result = dma_memory_write(&address_space_memory, phys_addr, 600 &tctx->regs, sizeof(tctx->regs), 601 MEMTXATTRS_UNSPECIFIED); 602 assert(result == MEMTX_OK); 603 reserved = 0xFFFFFFFF; 604 result = dma_memory_write(&address_space_memory, phys_addr + 12, 605 &reserved, sizeof(reserved), 606 MEMTXATTRS_UNSPECIFIED); 607 assert(result == MEMTX_OK); 608 } 609 610 /* the rest is similar to pull context to registers */ 611 xive2_tm_pull_ctx(xptr, tctx, offset, size, ring); 612 } 613 614 void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, 615 hwaddr offset, uint64_t value, unsigned size) 616 { 617 xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS); 618 } 619 620 621 void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, 622 hwaddr offset, uint64_t value, unsigned size) 623 { 624 xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); 625 } 626 627 static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, 628 uint8_t nvp_blk, uint32_t nvp_idx, 629 Xive2Nvp *nvp) 630 { 631 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 632 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 633 uint8_t 
cppr; 634 635 if (!xive2_nvp_is_hw(nvp)) { 636 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n", 637 nvp_blk, nvp_idx); 638 return 0; 639 } 640 641 cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2); 642 nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0); 643 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2); 644 645 tctx->regs[TM_QW1_OS + TM_CPPR] = cppr; 646 tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2); 647 tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2); 648 tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2); 649 650 nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1); 651 nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1); 652 nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir); 653 654 /* 655 * Checkout privilege: 0:OS, 1:Pool, 2:Hard 656 * 657 * TODO: we only support OS push/pull 658 */ 659 nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); 660 661 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1); 662 663 /* return restored CPPR to generate a CPU exception if needed */ 664 return cppr; 665 } 666 667 static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, 668 uint8_t nvp_blk, uint32_t nvp_idx, 669 bool do_restore) 670 { 671 XivePresenter *xptr = XIVE_PRESENTER(xrtr); 672 uint8_t ipb; 673 uint8_t backlog_level; 674 uint8_t group_level; 675 uint8_t first_group; 676 uint8_t backlog_prio; 677 uint8_t group_prio; 678 uint8_t *regs = &tctx->regs[TM_QW1_OS]; 679 Xive2Nvp nvp; 680 681 /* 682 * Grab the associated thread interrupt context registers in the 683 * associated NVP 684 */ 685 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 686 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", 687 nvp_blk, nvp_idx); 688 return; 689 } 690 691 if (!xive2_nvp_is_valid(&nvp)) { 692 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 693 nvp_blk, nvp_idx); 694 return; 695 } 696 697 /* Automatically restore thread context registers */ 698 if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && 699 do_restore) { 700 xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp); 701 } 702 703 ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); 704 if (ipb) { 705 nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0); 706 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); 707 } 708 regs[TM_IPB] |= ipb; 709 backlog_prio = xive_ipb_to_pipr(ipb); 710 backlog_level = 0; 711 712 first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); 713 if (first_group && regs[TM_LSMFB] < backlog_prio) { 714 group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx, 715 first_group, &group_level); 716 regs[TM_LSMFB] = group_prio; 717 if (regs[TM_LGS] && group_prio < backlog_prio) { 718 /* VP can take a group interrupt */ 719 xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx, 720 group_prio, group_level); 721 backlog_prio = group_prio; 722 backlog_level = group_level; 723 } 724 } 725 726 /* 727 * Compute the PIPR based on the restored state. 728 * It will raise the External interrupt signal if needed. 
729 */ 730 xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level); 731 } 732 733 /* 734 * Updating the OS CAM line can trigger a resend of interrupt 735 */ 736 void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, 737 hwaddr offset, uint64_t value, unsigned size) 738 { 739 uint32_t cam; 740 uint32_t qw1w2; 741 uint64_t qw1dw1; 742 uint8_t nvp_blk; 743 uint32_t nvp_idx; 744 bool vo; 745 bool do_restore; 746 747 /* First update the thead context */ 748 switch (size) { 749 case 4: 750 cam = value; 751 qw1w2 = cpu_to_be32(cam); 752 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); 753 break; 754 case 8: 755 cam = value >> 32; 756 qw1dw1 = cpu_to_be64(value); 757 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8); 758 break; 759 default: 760 g_assert_not_reached(); 761 } 762 763 xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore); 764 765 /* Check the interrupt pending bits */ 766 if (vo) { 767 xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx, 768 do_restore); 769 } 770 } 771 772 static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, 773 uint32_t *nvp_blk, uint32_t *nvp_idx) 774 { 775 uint32_t w2, cam; 776 777 w2 = xive_tctx_word2(&tctx->regs[ring]); 778 switch (ring) { 779 case TM_QW1_OS: 780 if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) { 781 return -1; 782 } 783 cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2); 784 break; 785 case TM_QW2_HV_POOL: 786 if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) { 787 return -1; 788 } 789 cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2); 790 break; 791 case TM_QW3_HV_PHYS: 792 if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) { 793 return -1; 794 } 795 cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx); 796 break; 797 default: 798 return -1; 799 } 800 *nvp_blk = xive2_nvp_blk(cam); 801 *nvp_idx = xive2_nvp_idx(cam); 802 return 0; 803 } 804 805 static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) 806 { 807 uint8_t *regs = &tctx->regs[ring]; 808 Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); 809 uint8_t old_cppr, backlog_prio, first_group, group_level = 0; 810 uint8_t pipr_min, lsmfb_min, ring_min; 811 bool group_enabled; 812 uint32_t nvp_blk, nvp_idx; 813 Xive2Nvp nvp; 814 int rc; 815 816 trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, 817 regs[TM_IPB], regs[TM_PIPR], 818 cppr, regs[TM_NSR]); 819 820 if (cppr > XIVE_PRIORITY_MAX) { 821 cppr = 0xff; 822 } 823 824 old_cppr = regs[TM_CPPR]; 825 regs[TM_CPPR] = cppr; 826 827 /* 828 * Recompute the PIPR based on local pending interrupts. It will 829 * be adjusted below if needed in case of pending group interrupts. 830 */ 831 pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); 832 group_enabled = !!regs[TM_LGS]; 833 lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff; 834 ring_min = ring; 835 836 /* PHYS updates also depend on POOL values */ 837 if (ring == TM_QW3_HV_PHYS) { 838 uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL]; 839 840 /* POOL values only matter if POOL ctx is valid */ 841 if (pregs[TM_WORD2] & 0x80) { 842 843 uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]); 844 uint8_t pool_lsmfb = pregs[TM_LSMFB]; 845 846 /* 847 * Determine highest priority interrupt and 848 * remember which ring has it. 
849 */ 850 if (pool_pipr < pipr_min) { 851 pipr_min = pool_pipr; 852 if (pool_pipr < lsmfb_min) { 853 ring_min = TM_QW2_HV_POOL; 854 } 855 } 856 857 /* Values needed for group priority calculation */ 858 if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { 859 group_enabled = true; 860 lsmfb_min = pool_lsmfb; 861 if (lsmfb_min < pipr_min) { 862 ring_min = TM_QW2_HV_POOL; 863 } 864 } 865 } 866 } 867 regs[TM_PIPR] = pipr_min; 868 869 rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); 870 if (rc) { 871 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n"); 872 return; 873 } 874 875 if (cppr < old_cppr) { 876 /* 877 * FIXME: check if there's a group interrupt being presented 878 * and if the new cppr prevents it. If so, then the group 879 * interrupt needs to be re-added to the backlog and 880 * re-triggered (see re-trigger END info in the NVGC 881 * structure) 882 */ 883 } 884 885 if (group_enabled && 886 lsmfb_min < cppr && 887 lsmfb_min < regs[TM_PIPR]) { 888 /* 889 * Thread has seen a group interrupt with a higher priority 890 * than the new cppr or pending local interrupt. Check the 891 * backlog 892 */ 893 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 894 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", 895 nvp_blk, nvp_idx); 896 return; 897 } 898 899 if (!xive2_nvp_is_valid(&nvp)) { 900 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 901 nvp_blk, nvp_idx); 902 return; 903 } 904 905 first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); 906 if (!first_group) { 907 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 908 nvp_blk, nvp_idx); 909 return; 910 } 911 912 backlog_prio = xive2_presenter_backlog_scan(tctx->xptr, 913 nvp_blk, nvp_idx, 914 first_group, &group_level); 915 tctx->regs[ring_min + TM_LSMFB] = backlog_prio; 916 if (backlog_prio != 0xFF) { 917 xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, 918 backlog_prio, group_level); 919 regs[TM_PIPR] = backlog_prio; 920 } 921 } 922 /* CPPR has changed, check if we need to raise a pending exception */ 923 xive_tctx_notify(tctx, ring_min, group_level); 924 } 925 926 void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, 927 hwaddr offset, uint64_t value, unsigned size) 928 { 929 xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); 930 } 931 932 void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, 933 hwaddr offset, uint64_t value, unsigned size) 934 { 935 xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); 936 } 937 938 static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) 939 { 940 uint8_t *regs = &tctx->regs[ring]; 941 942 regs[TM_T] = target; 943 } 944 945 void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, 946 hwaddr offset, uint64_t value, unsigned size) 947 { 948 xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff); 949 } 950 951 /* 952 * XIVE Router (aka. 
Virtualization Controller or IVRE) 953 */ 954 955 int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, 956 Xive2Eas *eas) 957 { 958 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 959 960 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); 961 } 962 963 static 964 int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, 965 uint8_t *pq) 966 { 967 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 968 969 return xrc->get_pq(xrtr, eas_blk, eas_idx, pq); 970 } 971 972 static 973 int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, 974 uint8_t *pq) 975 { 976 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 977 978 return xrc->set_pq(xrtr, eas_blk, eas_idx, pq); 979 } 980 981 int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, 982 Xive2End *end) 983 { 984 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 985 986 return xrc->get_end(xrtr, end_blk, end_idx, end); 987 } 988 989 int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, 990 Xive2End *end, uint8_t word_number) 991 { 992 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 993 994 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); 995 } 996 997 int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, 998 Xive2Nvp *nvp) 999 { 1000 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1001 1002 return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp); 1003 } 1004 1005 int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, 1006 Xive2Nvp *nvp, uint8_t word_number) 1007 { 1008 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1009 1010 return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number); 1011 } 1012 1013 int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd, 1014 uint8_t nvgc_blk, uint32_t nvgc_idx, 1015 Xive2Nvgc *nvgc) 1016 { 1017 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1018 1019 return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); 1020 } 1021 1022 int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, 1023 uint8_t nvgc_blk, uint32_t nvgc_idx, 1024 Xive2Nvgc *nvgc) 1025 { 1026 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1027 1028 return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); 1029 } 1030 1031 static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2, 1032 uint32_t vp_mask) 1033 { 1034 return (cam1 & vp_mask) == (cam2 & vp_mask); 1035 } 1036 1037 /* 1038 * The thread context register words are in big-endian format. 
1039 */ 1040 int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, 1041 uint8_t format, 1042 uint8_t nvt_blk, uint32_t nvt_idx, 1043 bool cam_ignore, uint32_t logic_serv) 1044 { 1045 uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx); 1046 uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); 1047 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); 1048 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); 1049 uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); 1050 1051 uint32_t vp_mask = 0xFFFFFFFF; 1052 1053 if (format == 0) { 1054 /* 1055 * i=0: Specific NVT notification 1056 * i=1: VP-group notification (bits ignored at the end of the 1057 * NVT identifier) 1058 */ 1059 if (cam_ignore) { 1060 vp_mask = ~(xive_get_vpgroup_size(nvt_idx) - 1); 1061 } 1062 1063 /* For VP-group notifications, threads with LGS=0 are excluded */ 1064 1065 /* PHYS ring */ 1066 if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) && 1067 !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) && 1068 xive2_vp_match_mask(cam, 1069 xive2_tctx_hw_cam_line(xptr, tctx), 1070 vp_mask)) { 1071 return TM_QW3_HV_PHYS; 1072 } 1073 1074 /* HV POOL ring */ 1075 if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) && 1076 !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) && 1077 xive2_vp_match_mask(cam, 1078 xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2), 1079 vp_mask)) { 1080 return TM_QW2_HV_POOL; 1081 } 1082 1083 /* OS ring */ 1084 if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && 1085 !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) && 1086 xive2_vp_match_mask(cam, 1087 xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2), 1088 vp_mask)) { 1089 return TM_QW1_OS; 1090 } 1091 } else { 1092 /* F=1 : User level Event-Based Branch (EBB) notification */ 1093 1094 /* FIXME: what if cam_ignore and LGS = 0 ? */ 1095 /* USER ring */ 1096 if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && 1097 (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) && 1098 (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) && 1099 (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) { 1100 return TM_QW0_USER; 1101 } 1102 } 1103 return -1; 1104 } 1105 1106 bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) 1107 { 1108 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ 1109 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; 1110 uint8_t *alt_regs = &tctx->regs[alt_ring]; 1111 1112 /* 1113 * The xive2_presenter_tctx_match() above tells if there's a match 1114 * but for VP-group notification, we still need to look at the 1115 * priority to know if the thread can take the interrupt now or if 1116 * it is precluded. 1117 */ 1118 if (priority < alt_regs[TM_CPPR]) { 1119 return false; 1120 } 1121 return true; 1122 } 1123 1124 void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority) 1125 { 1126 uint8_t *regs = &tctx->regs[ring]; 1127 1128 /* 1129 * Called by the router during a VP-group notification when the 1130 * thread matches but can't take the interrupt because it's 1131 * already running at a more favored priority. It then stores the 1132 * new interrupt priority in the LSMFB field. 1133 */ 1134 regs[TM_LSMFB] = priority; 1135 } 1136 1137 static void xive2_router_realize(DeviceState *dev, Error **errp) 1138 { 1139 Xive2Router *xrtr = XIVE2_ROUTER(dev); 1140 1141 assert(xrtr->xfb); 1142 } 1143 1144 /* 1145 * Notification using the END ESe/ESn bit (Event State Buffer for 1146 * escalation and notification). Profide further coalescing in the 1147 * Router. 
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found, precluded;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                          xive2_end_is_ignore(&end),
                          priority,
                          xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7),
                          &precluded);

    /* TODO: Auto EOI.
*/ 1247 1248 if (found) { 1249 return; 1250 } 1251 1252 /* 1253 * If no matching NVP is dispatched on a HW thread : 1254 * - specific VP: update the NVP structure if backlog is activated 1255 * - VP-group: update the backlog counter for that priority in the NVG 1256 */ 1257 if (xive2_end_is_backlog(&end)) { 1258 1259 if (format == 1) { 1260 qemu_log_mask(LOG_GUEST_ERROR, 1261 "XIVE: END %x/%x invalid config: F1 & backlog\n", 1262 end_blk, end_idx); 1263 return; 1264 } 1265 1266 if (!xive2_end_is_ignore(&end)) { 1267 uint8_t ipb; 1268 Xive2Nvp nvp; 1269 1270 /* NVP cache lookup */ 1271 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 1272 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n", 1273 nvp_blk, nvp_idx); 1274 return; 1275 } 1276 1277 if (!xive2_nvp_is_valid(&nvp)) { 1278 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n", 1279 nvp_blk, nvp_idx); 1280 return; 1281 } 1282 1283 /* 1284 * Record the IPB in the associated NVP structure for later 1285 * use. The presenter will resend the interrupt when the vCPU 1286 * is dispatched again on a HW thread. 1287 */ 1288 ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | 1289 xive_priority_to_ipb(priority); 1290 nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); 1291 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); 1292 } else { 1293 Xive2Nvgc nvg; 1294 uint32_t backlog; 1295 1296 /* For groups, the per-priority backlog counters are in the NVG */ 1297 if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) { 1298 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n", 1299 nvp_blk, nvp_idx); 1300 return; 1301 } 1302 1303 if (!xive2_nvgc_is_valid(&nvg)) { 1304 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", 1305 nvp_blk, nvp_idx); 1306 return; 1307 } 1308 1309 /* 1310 * Increment the backlog counter for that priority. 1311 * We only call broadcast the first time the counter is 1312 * incremented. broadcast will set the LSMFB field of the TIMA of 1313 * relevant threads so that they know an interrupt is pending. 
1314 */ 1315 backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1; 1316 xive2_nvgc_set_backlog(&nvg, priority, backlog); 1317 xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg); 1318 1319 if (backlog == 1) { 1320 XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); 1321 xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority); 1322 1323 if (!xive2_end_is_precluded_escalation(&end)) { 1324 /* 1325 * The interrupt will be picked up when the 1326 * matching thread lowers its priority level 1327 */ 1328 return; 1329 } 1330 } 1331 } 1332 } 1333 1334 do_escalation: 1335 /* 1336 * If activated, escalate notification using the ESe PQ bits and 1337 * the EAS in w4-5 1338 */ 1339 if (!xive2_end_is_escalate(&end)) { 1340 return; 1341 } 1342 1343 /* 1344 * Check the END ESe (Event State Buffer for escalation) for even 1345 * further coalescing in the Router 1346 */ 1347 if (!xive2_end_is_uncond_escalation(&end)) { 1348 /* ESe[Q]=1 : end of escalation notification */ 1349 if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx, 1350 &end, END2_W1_ESe)) { 1351 return; 1352 } 1353 } 1354 1355 /* 1356 * The END trigger becomes an Escalation trigger 1357 */ 1358 xive2_router_end_notify(xrtr, 1359 xive_get_field32(END2_W4_END_BLOCK, end.w4), 1360 xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), 1361 xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); 1362 } 1363 1364 void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) 1365 { 1366 Xive2Router *xrtr = XIVE2_ROUTER(xn); 1367 uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); 1368 uint32_t eas_idx = XIVE_EAS_INDEX(lisn); 1369 Xive2Eas eas; 1370 1371 /* EAS cache lookup */ 1372 if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { 1373 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn); 1374 return; 1375 } 1376 1377 if (!pq_checked) { 1378 bool notify; 1379 uint8_t pq; 1380 1381 /* PQ cache lookup */ 1382 if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) { 1383 /* Set FIR */ 1384 g_assert_not_reached(); 1385 } 1386 1387 notify = xive_esb_trigger(&pq); 1388 1389 if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) { 1390 /* Set FIR */ 1391 g_assert_not_reached(); 1392 } 1393 1394 if (!notify) { 1395 return; 1396 } 1397 } 1398 1399 if (!xive2_eas_is_valid(&eas)) { 1400 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn); 1401 return; 1402 } 1403 1404 if (xive2_eas_is_masked(&eas)) { 1405 /* Notification completed */ 1406 return; 1407 } 1408 1409 /* 1410 * The event trigger becomes an END trigger 1411 */ 1412 xive2_router_end_notify(xrtr, 1413 xive_get_field64(EAS2_END_BLOCK, eas.w), 1414 xive_get_field64(EAS2_END_INDEX, eas.w), 1415 xive_get_field64(EAS2_END_DATA, eas.w)); 1416 } 1417 1418 static const Property xive2_router_properties[] = { 1419 DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb, 1420 TYPE_XIVE_FABRIC, XiveFabric *), 1421 }; 1422 1423 static void xive2_router_class_init(ObjectClass *klass, void *data) 1424 { 1425 DeviceClass *dc = DEVICE_CLASS(klass); 1426 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1427 1428 dc->desc = "XIVE2 Router Engine"; 1429 device_class_set_props(dc, xive2_router_properties); 1430 /* Parent is SysBusDeviceClass. 
No need to call its realize hook */ 1431 dc->realize = xive2_router_realize; 1432 xnc->notify = xive2_router_notify; 1433 } 1434 1435 static const TypeInfo xive2_router_info = { 1436 .name = TYPE_XIVE2_ROUTER, 1437 .parent = TYPE_SYS_BUS_DEVICE, 1438 .abstract = true, 1439 .instance_size = sizeof(Xive2Router), 1440 .class_size = sizeof(Xive2RouterClass), 1441 .class_init = xive2_router_class_init, 1442 .interfaces = (InterfaceInfo[]) { 1443 { TYPE_XIVE_NOTIFIER }, 1444 { TYPE_XIVE_PRESENTER }, 1445 { } 1446 } 1447 }; 1448 1449 static inline bool addr_is_even(hwaddr addr, uint32_t shift) 1450 { 1451 return !((addr >> shift) & 1); 1452 } 1453 1454 static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size) 1455 { 1456 Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque); 1457 uint32_t offset = addr & 0xFFF; 1458 uint8_t end_blk; 1459 uint32_t end_idx; 1460 Xive2End end; 1461 uint32_t end_esmask; 1462 uint8_t pq; 1463 uint64_t ret; 1464 1465 /* 1466 * The block id should be deduced from the load address on the END 1467 * ESB MMIO but our model only supports a single block per XIVE chip. 1468 */ 1469 end_blk = xive2_router_get_block_id(xsrc->xrtr); 1470 end_idx = addr >> (xsrc->esb_shift + 1); 1471 1472 if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { 1473 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, 1474 end_idx); 1475 return -1; 1476 } 1477 1478 if (!xive2_end_is_valid(&end)) { 1479 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", 1480 end_blk, end_idx); 1481 return -1; 1482 } 1483 1484 end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn : 1485 END2_W1_ESe; 1486 pq = xive_get_field32(end_esmask, end.w1); 1487 1488 switch (offset) { 1489 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: 1490 ret = xive_esb_eoi(&pq); 1491 1492 /* Forward the source event notification for routing ?? */ 1493 break; 1494 1495 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: 1496 ret = pq; 1497 break; 1498 1499 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 1500 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 1501 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 1502 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 1503 ret = xive_esb_set(&pq, (offset >> 8) & 0x3); 1504 break; 1505 default: 1506 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n", 1507 offset); 1508 return -1; 1509 } 1510 1511 if (pq != xive_get_field32(end_esmask, end.w1)) { 1512 end.w1 = xive_set_field32(end_esmask, end.w1, pq); 1513 xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); 1514 } 1515 1516 return ret; 1517 } 1518 1519 static void xive2_end_source_write(void *opaque, hwaddr addr, 1520 uint64_t value, unsigned size) 1521 { 1522 Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque); 1523 uint32_t offset = addr & 0xFFF; 1524 uint8_t end_blk; 1525 uint32_t end_idx; 1526 Xive2End end; 1527 uint32_t end_esmask; 1528 uint8_t pq; 1529 bool notify = false; 1530 1531 /* 1532 * The block id should be deduced from the load address on the END 1533 * ESB MMIO but our model only supports a single block per XIVE chip. 
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router ? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x cannot EQ inject on ESe\n",
                          end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}
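/*
 * Worked example (illustrative, assuming the default 64K ESB page size
 * below, i.e. esb_shift = 16): an access at offset 0x20000 in the
 * "xive.end" region has (addr >> 16) & 1 == 0, so it targets the even
 * page (ESn, notification) of END index 0x20000 >> 17 = 1, whereas an
 * access at 0x30000 hits the odd page (ESe, escalation) of that same
 * END. The low 12 bits (addr & 0xFFF) then select the ESB command
 * decoded by the read/write handlers above.
 */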
static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void xive2_end_source_realize(DeviceState *dev, Error **errp)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts must be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive2_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static const Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
};

static void xive2_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    device_class_set_props(dc, xive2_end_source_properties);
    dc->realize = xive2_end_source_realize;
    dc->user_creatable = false;
}

static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};

static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)
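/*
 * Usage sketch (illustrative, not part of this model): a machine or chip
 * model embedding a concrete Xive2Router subclass is expected to set the
 * "xive-fabric" link before realize, which xive2_router_realize() asserts,
 * roughly along the lines of:
 *
 *     object_property_set_link(OBJECT(xive), "xive-fabric",
 *                              OBJECT(machine), &error_abort);
 *
 * Similarly, a Xive2EndSource is wired to its router through the "xive"
 * link property declared above.
 */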