1 /* 2 * QEMU PowerPC XIVE2 interrupt controller model (POWER10) 3 * 4 * Copyright (c) 2019-2024, IBM Corporation.. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/log.h" 11 #include "qemu/module.h" 12 #include "qapi/error.h" 13 #include "target/ppc/cpu.h" 14 #include "system/cpus.h" 15 #include "system/dma.h" 16 #include "hw/qdev-properties.h" 17 #include "hw/ppc/xive.h" 18 #include "hw/ppc/xive2.h" 19 #include "hw/ppc/xive2_regs.h" 20 #include "trace.h" 21 22 uint32_t xive2_router_get_config(Xive2Router *xrtr) 23 { 24 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 25 26 return xrc->get_config(xrtr); 27 } 28 29 static int xive2_router_get_block_id(Xive2Router *xrtr) 30 { 31 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 32 33 return xrc->get_block_id(xrtr); 34 } 35 36 static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp) 37 { 38 uint64_t cache_addr; 39 40 cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 | 41 xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7); 42 cache_addr <<= 8; /* aligned on a cache line pair */ 43 return cache_addr; 44 } 45 46 static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority) 47 { 48 uint32_t val = 0; 49 uint8_t *ptr, i; 50 51 if (priority > 7) { 52 return 0; 53 } 54 55 /* 56 * The per-priority backlog counters are 24-bit and the structure 57 * is stored in big endian. NVGC is 32-bytes long, so 24-bytes from 58 * w2, which fits 8 priorities * 24-bits per priority. 59 */ 60 ptr = (uint8_t *)&nvgc->w2 + priority * 3; 61 for (i = 0; i < 3; i++, ptr++) { 62 val = (val << 8) + *ptr; 63 } 64 return val; 65 } 66 67 static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, 68 uint32_t val) 69 { 70 uint8_t *ptr, i; 71 uint32_t shift; 72 73 if (priority > 7) { 74 return; 75 } 76 77 if (val > 0xFFFFFF) { 78 val = 0xFFFFFF; 79 } 80 /* 81 * The per-priority backlog counters are 24-bit and the structure 82 * is stored in big endian 83 */ 84 ptr = (uint8_t *)&nvgc->w2 + priority * 3; 85 for (i = 0; i < 3; i++, ptr++) { 86 shift = 8 * (2 - i); 87 *ptr = (val >> shift) & 0xFF; 88 } 89 } 90 91 uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, 92 bool crowd, 93 uint8_t blk, uint32_t idx, 94 uint16_t offset, uint16_t val) 95 { 96 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 97 uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); 98 uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); 99 Xive2Nvgc nvgc; 100 uint32_t count, old_count; 101 102 if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) { 103 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n", 104 crowd ? "NVC" : "NVG", blk, idx); 105 return -1; 106 } 107 if (!xive2_nvgc_is_valid(&nvgc)) { 108 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx); 109 return -1; 110 } 111 112 old_count = xive2_nvgc_get_backlog(&nvgc, priority); 113 count = old_count; 114 /* 115 * op: 116 * 0b00 => increment 117 * 0b01 => decrement 118 * 0b1- => read 119 */ 120 if (op == 0b00 || op == 0b01) { 121 if (op == 0b00) { 122 count += val; 123 } else { 124 if (count > val) { 125 count -= val; 126 } else { 127 count = 0; 128 } 129 } 130 xive2_nvgc_set_backlog(&nvgc, priority, count); 131 xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc); 132 } 133 trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count); 134 return old_count; 135 } 136 137 uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, 138 uint8_t blk, uint32_t idx, 139 uint16_t offset) 140 { 141 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 142 uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); 143 uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); 144 Xive2Nvp nvp; 145 uint8_t ipb, old_ipb, rc; 146 147 if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) { 148 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx); 149 return -1; 150 } 151 if (!xive2_nvp_is_valid(&nvp)) { 152 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx); 153 return -1; 154 } 155 156 old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); 157 ipb = old_ipb; 158 /* 159 * op: 160 * 0b00 => set priority bit 161 * 0b01 => reset priority bit 162 * 0b1- => read 163 */ 164 if (op == 0b00 || op == 0b01) { 165 if (op == 0b00) { 166 ipb |= xive_priority_to_ipb(priority); 167 } else { 168 ipb &= ~xive_priority_to_ipb(priority); 169 } 170 nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); 171 xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2); 172 } 173 rc = !!(old_ipb & xive_priority_to_ipb(priority)); 174 trace_xive_nvp_backlog_op(blk, idx, op, priority, rc); 175 return rc; 176 } 177 178 void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) 179 { 180 if (!xive2_eas_is_valid(eas)) { 181 return; 182 } 183 184 g_string_append_printf(buf, " %08x %s end:%02x/%04x data:%08x\n", 185 lisn, xive2_eas_is_masked(eas) ? "M" : " ", 186 (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w), 187 (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w), 188 (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w)); 189 } 190 191 #define XIVE2_QSIZE_CHUNK_CL 128 192 #define XIVE2_QSIZE_CHUNK_4k 4096 193 /* Calculate max number of queue entries for an END */ 194 static uint32_t xive2_end_get_qentries(Xive2End *end) 195 { 196 uint32_t w3 = end->w3; 197 uint32_t qsize = xive_get_field32(END2_W3_QSIZE, w3); 198 if (xive_get_field32(END2_W3_CL, w3)) { 199 g_assert(qsize <= 4); 200 return (XIVE2_QSIZE_CHUNK_CL << qsize) / sizeof(uint32_t); 201 } else { 202 g_assert(qsize <= 12); 203 return (XIVE2_QSIZE_CHUNK_4k << qsize) / sizeof(uint32_t); 204 } 205 } 206 207 void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf) 208 { 209 uint64_t qaddr_base = xive2_end_qaddr(end); 210 uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); 211 uint32_t qentries = xive2_end_get_qentries(end); 212 int i; 213 214 /* 215 * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window 216 */ 217 g_string_append_printf(buf, " [ "); 218 qindex = (qindex - (width - 1)) & (qentries - 1); 219 for (i = 0; i < width; i++) { 220 uint64_t qaddr = qaddr_base + (qindex << 2); 221 uint32_t qdata = -1; 222 223 if (dma_memory_read(&address_space_memory, qaddr, &qdata, 224 sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { 225 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" 226 HWADDR_PRIx "\n", qaddr); 227 return; 228 } 229 g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "", 230 be32_to_cpu(qdata)); 231 qindex = (qindex + 1) & (qentries - 1); 232 } 233 g_string_append_printf(buf, "]"); 234 } 235 236 void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf) 237 { 238 uint64_t qaddr_base = xive2_end_qaddr(end); 239 uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); 240 uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); 241 uint32_t qentries = xive2_end_get_qentries(end); 242 243 uint32_t nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6); 244 uint32_t nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6); 245 uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7); 246 uint8_t pq; 247 248 if (!xive2_end_is_valid(end)) { 249 return; 250 } 251 252 pq = xive_get_field32(END2_W1_ESn, end->w1); 253 254 g_string_append_printf(buf, 255 " %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c " 256 "prio:%d nvp:%02x/%04x", 257 end_idx, 258 pq & XIVE_ESB_VAL_P ? 'P' : '-', 259 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 260 xive2_end_is_valid(end) ? 'v' : '-', 261 xive2_end_is_enqueue(end) ? 'q' : '-', 262 xive2_end_is_notify(end) ? 'n' : '-', 263 xive2_end_is_backlog(end) ? 'b' : '-', 264 xive2_end_is_precluded_escalation(end) ? 'p' : '-', 265 xive2_end_is_escalate(end) ? 'e' : '-', 266 xive2_end_is_escalate_end(end) ? 'N' : '-', 267 xive2_end_is_uncond_escalation(end) ? 'u' : '-', 268 xive2_end_is_silent_escalation(end) ? 's' : '-', 269 xive2_end_is_firmware1(end) ? 'f' : '-', 270 xive2_end_is_firmware2(end) ? 'F' : '-', 271 xive2_end_is_ignore(end) ? 'i' : '-', 272 xive2_end_is_crowd(end) ? 'c' : '-', 273 priority, nvx_blk, nvx_idx); 274 275 if (qaddr_base) { 276 g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d", 277 qaddr_base, qindex, qentries, qgen); 278 xive2_end_queue_pic_print_info(end, 6, buf); 279 } 280 g_string_append_c(buf, '\n'); 281 } 282 283 void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx, 284 GString *buf) 285 { 286 Xive2Eas *eas = (Xive2Eas *) &end->w4; 287 uint8_t pq; 288 289 if (!xive2_end_is_escalate(end)) { 290 return; 291 } 292 293 pq = xive_get_field32(END2_W1_ESe, end->w1); 294 295 g_string_append_printf(buf, " %08x %c%c %c%c end:%02x/%04x data:%08x\n", 296 end_idx, 297 pq & XIVE_ESB_VAL_P ? 'P' : '-', 298 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 299 xive2_eas_is_valid(eas) ? 'v' : ' ', 300 xive2_eas_is_masked(eas) ? 'M' : ' ', 301 (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w), 302 (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w), 303 (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w)); 304 } 305 306 void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf) 307 { 308 uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5); 309 uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5); 310 uint64_t cache_line = xive2_nvp_reporting_addr(nvp); 311 312 if (!xive2_nvp_is_valid(nvp)) { 313 return; 314 } 315 316 g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x PGoFirst:%02x", 317 nvp_idx, eq_blk, eq_idx, 318 xive_get_field32(NVP2_W2_IPB, nvp->w2), 319 xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0)); 320 if (cache_line) { 321 g_string_append_printf(buf, " reporting CL:%016"PRIx64, cache_line); 322 } 323 324 /* 325 * When the NVP is HW controlled, more fields are updated 326 */ 327 if (xive2_nvp_is_hw(nvp)) { 328 g_string_append_printf(buf, " CPPR:%02x", 329 xive_get_field32(NVP2_W2_CPPR, nvp->w2)); 330 if (xive2_nvp_is_co(nvp)) { 331 g_string_append_printf(buf, " CO:%04x", 332 xive_get_field32(NVP2_W1_CO_THRID, nvp->w1)); 333 } 334 } 335 g_string_append_c(buf, '\n'); 336 } 337 338 void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf) 339 { 340 uint8_t i; 341 342 if (!xive2_nvgc_is_valid(nvgc)) { 343 return; 344 } 345 346 g_string_append_printf(buf, " %08x PGoNext:%02x bklog: ", nvgc_idx, 347 xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0)); 348 for (i = 0; i <= XIVE_PRIORITY_MAX; i++) { 349 g_string_append_printf(buf, "[%d]=0x%x ", 350 i, xive2_nvgc_get_backlog(nvgc, i)); 351 } 352 g_string_append_printf(buf, "\n"); 353 } 354 355 static void xive2_end_enqueue(Xive2End *end, uint32_t data) 356 { 357 uint64_t qaddr_base = xive2_end_qaddr(end); 358 uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); 359 uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); 360 361 uint64_t qaddr = qaddr_base + (qindex << 2); 362 uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); 363 uint32_t qentries = xive2_end_get_qentries(end); 364 365 if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata), 366 MEMTXATTRS_UNSPECIFIED)) { 367 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" 368 HWADDR_PRIx "\n", qaddr); 369 return; 370 } 371 372 qindex = (qindex + 1) & (qentries - 1); 373 if (qindex == 0) { 374 qgen ^= 1; 375 end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen); 376 377 /* Set gen flipped to 1, it gets reset on a cache watch operation */ 378 end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, 1); 379 } 380 end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); 381 } 382 383 static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx, 384 uint8_t next_level) 385 { 386 uint32_t mask, next_idx; 387 uint8_t next_blk; 388 389 /* 390 * Adjust the block and index of a VP for the next group/crowd 391 * size (PGofFirst/PGofNext field in the NVP and NVGC structures). 392 * 393 * The 6-bit group level is split into a 2-bit crowd and 4-bit 394 * group levels. Encoding is similar. However, we don't support 395 * crowd size of 8. So a crowd level of 0b11 is bumped to a crowd 396 * size of 16. 397 */ 398 next_blk = NVx_CROWD_LVL(next_level); 399 if (next_blk == 3) { 400 next_blk = 4; 401 } 402 mask = (1 << next_blk) - 1; 403 *nvgc_blk &= ~mask; 404 *nvgc_blk |= mask >> 1; 405 406 next_idx = NVx_GROUP_LVL(next_level); 407 mask = (1 << next_idx) - 1; 408 *nvgc_idx &= ~mask; 409 *nvgc_idx |= mask >> 1; 410 } 411 412 /* 413 * Scan the group chain and return the highest priority and group 414 * level of pending group interrupts. 415 */ 416 static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr, 417 uint8_t nvx_blk, uint32_t nvx_idx, 418 uint8_t first_group, 419 uint8_t *out_level) 420 { 421 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 422 uint32_t nvgc_idx; 423 uint32_t current_level, count; 424 uint8_t nvgc_blk, prio; 425 Xive2Nvgc nvgc; 426 427 for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) { 428 current_level = first_group & 0x3F; 429 nvgc_blk = nvx_blk; 430 nvgc_idx = nvx_idx; 431 432 while (current_level) { 433 xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level); 434 435 if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level), 436 nvgc_blk, nvgc_idx, &nvgc)) { 437 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", 438 nvgc_blk, nvgc_idx); 439 return 0xFF; 440 } 441 if (!xive2_nvgc_is_valid(&nvgc)) { 442 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", 443 nvgc_blk, nvgc_idx); 444 return 0xFF; 445 } 446 447 count = xive2_nvgc_get_backlog(&nvgc, prio); 448 if (count) { 449 *out_level = current_level; 450 return prio; 451 } 452 current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F; 453 } 454 } 455 return 0xFF; 456 } 457 458 static void xive2_presenter_backlog_decr(XivePresenter *xptr, 459 uint8_t nvx_blk, uint32_t nvx_idx, 460 uint8_t group_prio, 461 uint8_t group_level) 462 { 463 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 464 uint32_t nvgc_idx, count; 465 uint8_t nvgc_blk; 466 Xive2Nvgc nvgc; 467 468 nvgc_blk = nvx_blk; 469 nvgc_idx = nvx_idx; 470 xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level); 471 472 if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level), 473 nvgc_blk, nvgc_idx, &nvgc)) { 474 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", 475 nvgc_blk, nvgc_idx); 476 return; 477 } 478 if (!xive2_nvgc_is_valid(&nvgc)) { 479 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", 480 nvgc_blk, nvgc_idx); 481 return; 482 } 483 count = xive2_nvgc_get_backlog(&nvgc, group_prio); 484 if (!count) { 485 return; 486 } 487 xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1); 488 xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level), 489 nvgc_blk, nvgc_idx, &nvgc); 490 } 491 492 /* 493 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode 494 * 495 * TIMA Gen2 VP “save & restore” (S&R) indicated by H bit next to V bit 496 * 497 * - if a context is enabled with the H bit set, the VP context 498 * information is retrieved from the NVP structure (“check out”) 499 * and stored back on a context pull (“check in”), the SW receives 500 * the same context pull information as on P9 501 * 502 * - the H bit cannot be changed while the V bit is set, i.e. a 503 * context cannot be set up in the TIMA and then be “pushed” into 504 * the NVP by changing the H bit while the context is enabled 505 */ 506 507 static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, 508 uint8_t nvp_blk, uint32_t nvp_idx, 509 uint8_t ring) 510 { 511 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 512 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 513 Xive2Nvp nvp; 514 uint8_t *regs = &tctx->regs[ring]; 515 516 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 517 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", 518 nvp_blk, nvp_idx); 519 return; 520 } 521 522 if (!xive2_nvp_is_valid(&nvp)) { 523 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 524 nvp_blk, nvp_idx); 525 return; 526 } 527 528 if (!xive2_nvp_is_hw(&nvp)) { 529 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n", 530 nvp_blk, nvp_idx); 531 return; 532 } 533 534 if (!xive2_nvp_is_co(&nvp)) { 535 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n", 536 nvp_blk, nvp_idx); 537 return; 538 } 539 540 if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) && 541 xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) { 542 qemu_log_mask(LOG_GUEST_ERROR, 543 "XIVE: NVP %x/%x invalid checkout Thread %x\n", 544 nvp_blk, nvp_idx, pir); 545 return; 546 } 547 548 nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]); 549 nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]); 550 if (nvp.w0 & NVP2_W0_L) { 551 /* 552 * Typically not used. If LSMFB is restored with 0, it will 553 * force a backlog rescan 554 */ 555 nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]); 556 } 557 if (nvp.w0 & NVP2_W0_G) { 558 nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]); 559 } 560 if (nvp.w0 & NVP2_W0_T) { 561 nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]); 562 } 563 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); 564 565 nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0); 566 /* NVP2_W1_CO_THRID_VALID only set once */ 567 nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF); 568 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1); 569 } 570 571 static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk, 572 uint32_t *nvp_idx, bool *valid, bool *hw) 573 { 574 *nvp_blk = xive2_nvp_blk(cam); 575 *nvp_idx = xive2_nvp_idx(cam); 576 *valid = !!(cam & TM2_W2_VALID); 577 *hw = !!(cam & TM2_W2_HW); 578 } 579 580 /* 581 * Encode the HW CAM line with 7bit or 8bit thread id. The thread id 582 * width and block id width is configurable at the IC level. 583 * 584 * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit) 585 * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit) 586 */ 587 static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) 588 { 589 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 590 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 591 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 592 uint8_t blk = xive2_router_get_block_id(xrtr); 593 uint8_t tid_shift = 594 xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7; 595 uint8_t tid_mask = (1 << tid_shift) - 1; 596 597 return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); 598 } 599 600 static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, 601 hwaddr offset, unsigned size, uint8_t ring) 602 { 603 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 604 uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]); 605 uint32_t cam = be32_to_cpu(target_ringw2); 606 uint8_t nvp_blk; 607 uint32_t nvp_idx; 608 uint8_t cur_ring; 609 bool valid; 610 bool do_save; 611 612 xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save); 613 614 if (!valid) { 615 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n", 616 nvp_blk, nvp_idx); 617 } 618 619 /* Invalidate CAM line of requested ring and all lower rings */ 620 for (cur_ring = TM_QW0_USER; cur_ring <= ring; 621 cur_ring += XIVE_TM_RING_SIZE) { 622 uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]); 623 uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0); 624 memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4); 625 } 626 627 if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { 628 xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring); 629 } 630 631 /* 632 * Lower external interrupt line of requested ring and below except for 633 * USER, which doesn't exist. 634 */ 635 for (cur_ring = TM_QW1_OS; cur_ring <= ring; 636 cur_ring += XIVE_TM_RING_SIZE) { 637 xive_tctx_reset_signal(tctx, cur_ring); 638 } 639 return target_ringw2; 640 } 641 642 uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, 643 hwaddr offset, unsigned size) 644 { 645 return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS); 646 } 647 648 #define REPORT_LINE_GEN1_SIZE 16 649 650 static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data, 651 uint8_t size) 652 { 653 uint8_t *regs = tctx->regs; 654 655 g_assert(size == REPORT_LINE_GEN1_SIZE); 656 memset(data, 0, size); 657 /* 658 * See xive architecture for description of what is saved. It is 659 * hand-picked information to fit in 16 bytes. 660 */ 661 data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR]; 662 data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR]; 663 data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB]; 664 data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB]; 665 data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT]; 666 data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS]; 667 data[0x6] = 0xFF; 668 data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80; 669 data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1; 670 data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2; 671 data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3); 672 data[0x8] = regs[TM_QW1_OS + TM_NSR]; 673 data[0x9] = regs[TM_QW1_OS + TM_CPPR]; 674 data[0xA] = regs[TM_QW1_OS + TM_IPB]; 675 data[0xB] = regs[TM_QW1_OS + TM_LGS]; 676 if (regs[TM_QW0_USER + TM_WORD2] & 0x80) { 677 /* 678 * Logical server extension, except VU bit replaced by EB bit 679 * from NSR 680 */ 681 data[0xC] = regs[TM_QW0_USER + TM_WORD2]; 682 data[0xC] &= ~0x80; 683 data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80; 684 data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1]; 685 data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2]; 686 data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3]; 687 } 688 } 689 690 static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, 691 hwaddr offset, uint64_t value, 692 unsigned size, uint8_t ring) 693 { 694 Xive2Router *xrtr = XIVE2_ROUTER(xptr); 695 uint32_t hw_cam, nvp_idx, xive2_cfg, reserved; 696 uint8_t nvp_blk; 697 Xive2Nvp nvp; 698 uint64_t phys_addr; 699 MemTxResult result; 700 701 hw_cam = xive2_tctx_hw_cam_line(xptr, tctx); 702 nvp_blk = xive2_nvp_blk(hw_cam); 703 nvp_idx = xive2_nvp_idx(hw_cam); 704 705 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 706 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", 707 nvp_blk, nvp_idx); 708 return; 709 } 710 711 if (!xive2_nvp_is_valid(&nvp)) { 712 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 713 nvp_blk, nvp_idx); 714 return; 715 } 716 717 xive2_cfg = xive2_router_get_config(xrtr); 718 719 phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */ 720 if (xive2_cfg & XIVE2_GEN1_TIMA_OS) { 721 uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE]; 722 723 xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE); 724 result = dma_memory_write(&address_space_memory, phys_addr, 725 pull_ctxt, REPORT_LINE_GEN1_SIZE, 726 MEMTXATTRS_UNSPECIFIED); 727 assert(result == MEMTX_OK); 728 } else { 729 result = dma_memory_write(&address_space_memory, phys_addr, 730 &tctx->regs, sizeof(tctx->regs), 731 MEMTXATTRS_UNSPECIFIED); 732 assert(result == MEMTX_OK); 733 reserved = 0xFFFFFFFF; 734 result = dma_memory_write(&address_space_memory, phys_addr + 12, 735 &reserved, sizeof(reserved), 736 MEMTXATTRS_UNSPECIFIED); 737 assert(result == MEMTX_OK); 738 } 739 740 /* the rest is similar to pull context to registers */ 741 xive2_tm_pull_ctx(xptr, tctx, offset, size, ring); 742 } 743 744 void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, 745 hwaddr offset, uint64_t value, unsigned size) 746 { 747 xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS); 748 } 749 750 751 void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, 752 hwaddr offset, uint64_t value, unsigned size) 753 { 754 xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); 755 } 756 757 static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, 758 uint8_t nvp_blk, uint32_t nvp_idx, 759 Xive2Nvp *nvp) 760 { 761 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 762 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 763 uint8_t cppr; 764 765 if (!xive2_nvp_is_hw(nvp)) { 766 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n", 767 nvp_blk, nvp_idx); 768 return 0; 769 } 770 771 cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2); 772 nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0); 773 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2); 774 775 tctx->regs[TM_QW1_OS + TM_CPPR] = cppr; 776 tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2); 777 tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2); 778 tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2); 779 780 nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1); 781 nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1); 782 nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir); 783 784 /* 785 * Checkout privilege: 0:OS, 1:Pool, 2:Hard 786 * 787 * TODO: we only support OS push/pull 788 */ 789 nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); 790 791 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1); 792 793 /* return restored CPPR to generate a CPU exception if needed */ 794 return cppr; 795 } 796 797 static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, 798 uint8_t nvp_blk, uint32_t nvp_idx, 799 bool do_restore) 800 { 801 XivePresenter *xptr = XIVE_PRESENTER(xrtr); 802 uint8_t ipb; 803 uint8_t backlog_level; 804 uint8_t group_level; 805 uint8_t first_group; 806 uint8_t backlog_prio; 807 uint8_t group_prio; 808 uint8_t *regs = &tctx->regs[TM_QW1_OS]; 809 Xive2Nvp nvp; 810 811 /* 812 * Grab the associated thread interrupt context registers in the 813 * associated NVP 814 */ 815 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 816 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", 817 nvp_blk, nvp_idx); 818 return; 819 } 820 821 if (!xive2_nvp_is_valid(&nvp)) { 822 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 823 nvp_blk, nvp_idx); 824 return; 825 } 826 827 /* Automatically restore thread context registers */ 828 if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && 829 do_restore) { 830 xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp); 831 } 832 833 ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); 834 if (ipb) { 835 nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0); 836 xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); 837 } 838 /* IPB bits in the backlog are merged with the TIMA IPB bits */ 839 regs[TM_IPB] |= ipb; 840 backlog_prio = xive_ipb_to_pipr(regs[TM_IPB]); 841 backlog_level = 0; 842 843 first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); 844 if (first_group && regs[TM_LSMFB] < backlog_prio) { 845 group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx, 846 first_group, &group_level); 847 regs[TM_LSMFB] = group_prio; 848 if (regs[TM_LGS] && group_prio < backlog_prio) { 849 /* VP can take a group interrupt */ 850 xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx, 851 group_prio, group_level); 852 backlog_prio = group_prio; 853 backlog_level = group_level; 854 } 855 } 856 857 /* 858 * Compute the PIPR based on the restored state. 859 * It will raise the External interrupt signal if needed. 860 */ 861 xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level); 862 } 863 864 /* 865 * Updating the OS CAM line can trigger a resend of interrupt 866 */ 867 void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, 868 hwaddr offset, uint64_t value, unsigned size) 869 { 870 uint32_t cam; 871 uint32_t qw1w2; 872 uint64_t qw1dw1; 873 uint8_t nvp_blk; 874 uint32_t nvp_idx; 875 bool vo; 876 bool do_restore; 877 878 /* First update the thead context */ 879 switch (size) { 880 case 4: 881 cam = value; 882 qw1w2 = cpu_to_be32(cam); 883 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); 884 break; 885 case 8: 886 cam = value >> 32; 887 qw1dw1 = cpu_to_be64(value); 888 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8); 889 break; 890 default: 891 g_assert_not_reached(); 892 } 893 894 xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore); 895 896 /* Check the interrupt pending bits */ 897 if (vo) { 898 xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx, 899 do_restore); 900 } 901 } 902 903 static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, 904 uint32_t *nvp_blk, uint32_t *nvp_idx) 905 { 906 uint32_t w2, cam; 907 908 w2 = xive_tctx_word2(&tctx->regs[ring]); 909 switch (ring) { 910 case TM_QW1_OS: 911 if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) { 912 return -1; 913 } 914 cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2); 915 break; 916 case TM_QW2_HV_POOL: 917 if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) { 918 return -1; 919 } 920 cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2); 921 break; 922 case TM_QW3_HV_PHYS: 923 if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) { 924 return -1; 925 } 926 cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx); 927 break; 928 default: 929 return -1; 930 } 931 *nvp_blk = xive2_nvp_blk(cam); 932 *nvp_idx = xive2_nvp_idx(cam); 933 return 0; 934 } 935 936 static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) 937 { 938 uint8_t *regs = &tctx->regs[ring]; 939 Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); 940 uint8_t old_cppr, backlog_prio, first_group, group_level = 0; 941 uint8_t pipr_min, lsmfb_min, ring_min; 942 bool group_enabled; 943 uint32_t nvp_blk, nvp_idx; 944 Xive2Nvp nvp; 945 int rc; 946 947 trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, 948 regs[TM_IPB], regs[TM_PIPR], 949 cppr, regs[TM_NSR]); 950 951 if (cppr > XIVE_PRIORITY_MAX) { 952 cppr = 0xff; 953 } 954 955 old_cppr = regs[TM_CPPR]; 956 regs[TM_CPPR] = cppr; 957 958 /* 959 * Recompute the PIPR based on local pending interrupts. It will 960 * be adjusted below if needed in case of pending group interrupts. 961 */ 962 pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); 963 group_enabled = !!regs[TM_LGS]; 964 lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff; 965 ring_min = ring; 966 967 /* PHYS updates also depend on POOL values */ 968 if (ring == TM_QW3_HV_PHYS) { 969 uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL]; 970 971 /* POOL values only matter if POOL ctx is valid */ 972 if (pregs[TM_WORD2] & 0x80) { 973 974 uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]); 975 uint8_t pool_lsmfb = pregs[TM_LSMFB]; 976 977 /* 978 * Determine highest priority interrupt and 979 * remember which ring has it. 980 */ 981 if (pool_pipr < pipr_min) { 982 pipr_min = pool_pipr; 983 if (pool_pipr < lsmfb_min) { 984 ring_min = TM_QW2_HV_POOL; 985 } 986 } 987 988 /* Values needed for group priority calculation */ 989 if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { 990 group_enabled = true; 991 lsmfb_min = pool_lsmfb; 992 if (lsmfb_min < pipr_min) { 993 ring_min = TM_QW2_HV_POOL; 994 } 995 } 996 } 997 } 998 999 /* PIPR should not be set to a value greater than CPPR */ 1000 regs[TM_PIPR] = (pipr_min > cppr) ? cppr : pipr_min; 1001 1002 rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); 1003 if (rc) { 1004 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n"); 1005 return; 1006 } 1007 1008 if (cppr < old_cppr) { 1009 /* 1010 * FIXME: check if there's a group interrupt being presented 1011 * and if the new cppr prevents it. If so, then the group 1012 * interrupt needs to be re-added to the backlog and 1013 * re-triggered (see re-trigger END info in the NVGC 1014 * structure) 1015 */ 1016 } 1017 1018 if (group_enabled && 1019 lsmfb_min < cppr && 1020 lsmfb_min < regs[TM_PIPR]) { 1021 /* 1022 * Thread has seen a group interrupt with a higher priority 1023 * than the new cppr or pending local interrupt. Check the 1024 * backlog 1025 */ 1026 if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { 1027 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", 1028 nvp_blk, nvp_idx); 1029 return; 1030 } 1031 1032 if (!xive2_nvp_is_valid(&nvp)) { 1033 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 1034 nvp_blk, nvp_idx); 1035 return; 1036 } 1037 1038 first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); 1039 if (!first_group) { 1040 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", 1041 nvp_blk, nvp_idx); 1042 return; 1043 } 1044 1045 backlog_prio = xive2_presenter_backlog_scan(tctx->xptr, 1046 nvp_blk, nvp_idx, 1047 first_group, &group_level); 1048 tctx->regs[ring_min + TM_LSMFB] = backlog_prio; 1049 if (backlog_prio != 0xFF) { 1050 xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, 1051 backlog_prio, group_level); 1052 regs[TM_PIPR] = backlog_prio; 1053 } 1054 } 1055 /* CPPR has changed, check if we need to raise a pending exception */ 1056 xive_tctx_notify(tctx, ring_min, group_level); 1057 } 1058 1059 void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, 1060 hwaddr offset, uint64_t value, unsigned size) 1061 { 1062 xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); 1063 } 1064 1065 void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, 1066 hwaddr offset, uint64_t value, unsigned size) 1067 { 1068 xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); 1069 } 1070 1071 static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) 1072 { 1073 uint8_t *regs = &tctx->regs[ring]; 1074 1075 regs[TM_T] = target; 1076 } 1077 1078 void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, 1079 hwaddr offset, uint64_t value, unsigned size) 1080 { 1081 xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff); 1082 } 1083 1084 /* 1085 * XIVE Router (aka. Virtualization Controller or IVRE) 1086 */ 1087 1088 int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1089 Xive2Eas *eas) 1090 { 1091 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1092 1093 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); 1094 } 1095 1096 static 1097 int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1098 uint8_t *pq) 1099 { 1100 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1101 1102 return xrc->get_pq(xrtr, eas_blk, eas_idx, pq); 1103 } 1104 1105 static 1106 int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1107 uint8_t *pq) 1108 { 1109 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1110 1111 return xrc->set_pq(xrtr, eas_blk, eas_idx, pq); 1112 } 1113 1114 int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, 1115 Xive2End *end) 1116 { 1117 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1118 1119 return xrc->get_end(xrtr, end_blk, end_idx, end); 1120 } 1121 1122 int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx, 1123 Xive2End *end, uint8_t word_number) 1124 { 1125 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1126 1127 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); 1128 } 1129 1130 int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, 1131 Xive2Nvp *nvp) 1132 { 1133 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1134 1135 return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp); 1136 } 1137 1138 int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, 1139 Xive2Nvp *nvp, uint8_t word_number) 1140 { 1141 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1142 1143 return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number); 1144 } 1145 1146 int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd, 1147 uint8_t nvgc_blk, uint32_t nvgc_idx, 1148 Xive2Nvgc *nvgc) 1149 { 1150 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1151 1152 return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); 1153 } 1154 1155 int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, 1156 uint8_t nvgc_blk, uint32_t nvgc_idx, 1157 Xive2Nvgc *nvgc) 1158 { 1159 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); 1160 1161 return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); 1162 } 1163 1164 static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2, 1165 uint32_t vp_mask) 1166 { 1167 return (cam1 & vp_mask) == (cam2 & vp_mask); 1168 } 1169 1170 static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd) 1171 { 1172 uint8_t block_mask = 0b1111; 1173 1174 /* 3 supported crowd sizes: 2, 4, 16 */ 1175 if (crowd) { 1176 uint32_t size = xive_get_vpgroup_size(nvt_blk); 1177 1178 if (size != 2 && size != 4 && size != 16) { 1179 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of %d", 1180 size); 1181 return block_mask; 1182 } 1183 block_mask &= ~(size - 1); 1184 } 1185 return block_mask; 1186 } 1187 1188 static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore) 1189 { 1190 uint32_t index_mask = 0xFFFFFF; /* 24 bits */ 1191 1192 if (cam_ignore) { 1193 uint32_t size = xive_get_vpgroup_size(nvt_index); 1194 1195 if (size < 2) { 1196 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group size of %d", 1197 size); 1198 return index_mask; 1199 } 1200 index_mask &= ~(size - 1); 1201 } 1202 return index_mask; 1203 } 1204 1205 /* 1206 * The thread context register words are in big-endian format. 1207 */ 1208 int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, 1209 uint8_t format, 1210 uint8_t nvt_blk, uint32_t nvt_idx, 1211 bool crowd, bool cam_ignore, 1212 uint32_t logic_serv) 1213 { 1214 uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx); 1215 uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); 1216 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); 1217 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); 1218 uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); 1219 1220 uint32_t index_mask, vp_mask; 1221 uint8_t block_mask; 1222 1223 if (format == 0) { 1224 /* 1225 * i=0: Specific NVT notification 1226 * i=1: VP-group notification (bits ignored at the end of the 1227 * NVT identifier) 1228 */ 1229 block_mask = xive2_get_vp_block_mask(nvt_blk, crowd); 1230 index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore); 1231 vp_mask = xive2_nvp_cam_line(block_mask, index_mask); 1232 1233 /* For VP-group notifications, threads with LGS=0 are excluded */ 1234 1235 /* PHYS ring */ 1236 if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) && 1237 !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) && 1238 xive2_vp_match_mask(cam, 1239 xive2_tctx_hw_cam_line(xptr, tctx), 1240 vp_mask)) { 1241 return TM_QW3_HV_PHYS; 1242 } 1243 1244 /* HV POOL ring */ 1245 if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) && 1246 !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) && 1247 xive2_vp_match_mask(cam, 1248 xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2), 1249 vp_mask)) { 1250 return TM_QW2_HV_POOL; 1251 } 1252 1253 /* OS ring */ 1254 if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && 1255 !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) && 1256 xive2_vp_match_mask(cam, 1257 xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2), 1258 vp_mask)) { 1259 return TM_QW1_OS; 1260 } 1261 } else { 1262 /* F=1 : User level Event-Based Branch (EBB) notification */ 1263 1264 /* FIXME: what if cam_ignore and LGS = 0 ? */ 1265 /* USER ring */ 1266 if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && 1267 (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) && 1268 (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) && 1269 (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) { 1270 return TM_QW0_USER; 1271 } 1272 } 1273 return -1; 1274 } 1275 1276 bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) 1277 { 1278 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ 1279 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; 1280 uint8_t *alt_regs = &tctx->regs[alt_ring]; 1281 1282 /* 1283 * The xive2_presenter_tctx_match() above tells if there's a match 1284 * but for VP-group notification, we still need to look at the 1285 * priority to know if the thread can take the interrupt now or if 1286 * it is precluded. 1287 */ 1288 if (priority < alt_regs[TM_PIPR]) { 1289 return false; 1290 } 1291 return true; 1292 } 1293 1294 void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority) 1295 { 1296 uint8_t *regs = &tctx->regs[ring]; 1297 1298 /* 1299 * Called by the router during a VP-group notification when the 1300 * thread matches but can't take the interrupt because it's 1301 * already running at a more favored priority. It then stores the 1302 * new interrupt priority in the LSMFB field. 1303 */ 1304 regs[TM_LSMFB] = priority; 1305 } 1306 1307 static void xive2_router_realize(DeviceState *dev, Error **errp) 1308 { 1309 Xive2Router *xrtr = XIVE2_ROUTER(dev); 1310 1311 assert(xrtr->xfb); 1312 } 1313 1314 /* 1315 * Notification using the END ESe/ESn bit (Event State Buffer for 1316 * escalation and notification). Profide further coalescing in the 1317 * Router. 1318 */ 1319 static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk, 1320 uint32_t end_idx, Xive2End *end, 1321 uint32_t end_esmask) 1322 { 1323 uint8_t pq = xive_get_field32(end_esmask, end->w1); 1324 bool notify = xive_esb_trigger(&pq); 1325 1326 if (pq != xive_get_field32(end_esmask, end->w1)) { 1327 end->w1 = xive_set_field32(end_esmask, end->w1, pq); 1328 xive2_router_write_end(xrtr, end_blk, end_idx, end, 1); 1329 } 1330 1331 /* ESe/n[Q]=1 : end of notification */ 1332 return notify; 1333 } 1334 1335 /* 1336 * An END trigger can come from an event trigger (IPI or HW) or from 1337 * another chip. We don't model the PowerBus but the END trigger 1338 * message has the same parameters than in the function below. 1339 */ 1340 static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, 1341 uint32_t end_idx, uint32_t end_data) 1342 { 1343 Xive2End end; 1344 uint8_t priority; 1345 uint8_t format; 1346 bool found, precluded; 1347 uint8_t nvx_blk; 1348 uint32_t nvx_idx; 1349 1350 /* END cache lookup */ 1351 if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) { 1352 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, 1353 end_idx); 1354 return; 1355 } 1356 1357 if (!xive2_end_is_valid(&end)) { 1358 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", 1359 end_blk, end_idx); 1360 return; 1361 } 1362 1363 if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) { 1364 qemu_log_mask(LOG_GUEST_ERROR, 1365 "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n"); 1366 return; 1367 } 1368 1369 if (xive2_end_is_enqueue(&end)) { 1370 xive2_end_enqueue(&end, end_data); 1371 /* Enqueuing event data modifies the EQ toggle and index */ 1372 xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1); 1373 } 1374 1375 /* 1376 * When the END is silent, we skip the notification part. 1377 */ 1378 if (xive2_end_is_silent_escalation(&end)) { 1379 goto do_escalation; 1380 } 1381 1382 /* 1383 * The W7 format depends on the F bit in W6. It defines the type 1384 * of the notification : 1385 * 1386 * F=0 : single or multiple NVP notification 1387 * F=1 : User level Event-Based Branch (EBB) notification, no 1388 * priority 1389 */ 1390 format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6); 1391 priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7); 1392 1393 /* The END is masked */ 1394 if (format == 0 && priority == 0xff) { 1395 return; 1396 } 1397 1398 /* 1399 * Check the END ESn (Event State Buffer for notification) for 1400 * even further coalescing in the Router 1401 */ 1402 if (!xive2_end_is_notify(&end)) { 1403 /* ESn[Q]=1 : end of notification */ 1404 if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx, 1405 &end, END2_W1_ESn)) { 1406 return; 1407 } 1408 } 1409 1410 /* 1411 * Follows IVPE notification 1412 */ 1413 nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6); 1414 nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6); 1415 1416 found = xive_presenter_notify(xrtr->xfb, format, nvx_blk, nvx_idx, 1417 xive2_end_is_crowd(&end), xive2_end_is_ignore(&end), 1418 priority, 1419 xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), 1420 &precluded); 1421 1422 /* TODO: Auto EOI. */ 1423 1424 if (found) { 1425 return; 1426 } 1427 1428 /* 1429 * If no matching NVP is dispatched on a HW thread : 1430 * - specific VP: update the NVP structure if backlog is activated 1431 * - VP-group: update the backlog counter for that priority in the NVG 1432 */ 1433 if (xive2_end_is_backlog(&end)) { 1434 1435 if (format == 1) { 1436 qemu_log_mask(LOG_GUEST_ERROR, 1437 "XIVE: END %x/%x invalid config: F1 & backlog\n", 1438 end_blk, end_idx); 1439 return; 1440 } 1441 1442 if (!xive2_end_is_ignore(&end)) { 1443 uint8_t ipb; 1444 Xive2Nvp nvp; 1445 1446 /* NVP cache lookup */ 1447 if (xive2_router_get_nvp(xrtr, nvx_blk, nvx_idx, &nvp)) { 1448 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n", 1449 nvx_blk, nvx_idx); 1450 return; 1451 } 1452 1453 if (!xive2_nvp_is_valid(&nvp)) { 1454 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n", 1455 nvx_blk, nvx_idx); 1456 return; 1457 } 1458 1459 /* 1460 * Record the IPB in the associated NVP structure for later 1461 * use. The presenter will resend the interrupt when the vCPU 1462 * is dispatched again on a HW thread. 1463 */ 1464 ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | 1465 xive_priority_to_ipb(priority); 1466 nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); 1467 xive2_router_write_nvp(xrtr, nvx_blk, nvx_idx, &nvp, 2); 1468 } else { 1469 Xive2Nvgc nvgc; 1470 uint32_t backlog; 1471 bool crowd; 1472 1473 crowd = xive2_end_is_crowd(&end); 1474 1475 /* 1476 * For groups and crowds, the per-priority backlog 1477 * counters are stored in the NVG/NVC structures 1478 */ 1479 if (xive2_router_get_nvgc(xrtr, crowd, 1480 nvx_blk, nvx_idx, &nvgc)) { 1481 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n", 1482 crowd ? "NVC" : "NVG", nvx_blk, nvx_idx); 1483 return; 1484 } 1485 1486 if (!xive2_nvgc_is_valid(&nvgc)) { 1487 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", 1488 nvx_blk, nvx_idx); 1489 return; 1490 } 1491 1492 /* 1493 * Increment the backlog counter for that priority. 1494 * We only call broadcast the first time the counter is 1495 * incremented. broadcast will set the LSMFB field of the TIMA of 1496 * relevant threads so that they know an interrupt is pending. 1497 */ 1498 backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1; 1499 xive2_nvgc_set_backlog(&nvgc, priority, backlog); 1500 xive2_router_write_nvgc(xrtr, crowd, nvx_blk, nvx_idx, &nvgc); 1501 1502 if (backlog == 1) { 1503 XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); 1504 xfc->broadcast(xrtr->xfb, nvx_blk, nvx_idx, 1505 xive2_end_is_crowd(&end), 1506 xive2_end_is_ignore(&end), 1507 priority); 1508 1509 if (!xive2_end_is_precluded_escalation(&end)) { 1510 /* 1511 * The interrupt will be picked up when the 1512 * matching thread lowers its priority level 1513 */ 1514 return; 1515 } 1516 } 1517 } 1518 } 1519 1520 do_escalation: 1521 /* 1522 * If activated, escalate notification using the ESe PQ bits and 1523 * the EAS in w4-5 1524 */ 1525 if (!xive2_end_is_escalate(&end)) { 1526 return; 1527 } 1528 1529 /* 1530 * Check the END ESe (Event State Buffer for escalation) for even 1531 * further coalescing in the Router 1532 */ 1533 if (!xive2_end_is_uncond_escalation(&end)) { 1534 /* ESe[Q]=1 : end of escalation notification */ 1535 if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx, 1536 &end, END2_W1_ESe)) { 1537 return; 1538 } 1539 } 1540 1541 /* 1542 * The END trigger becomes an Escalation trigger 1543 */ 1544 xive2_router_end_notify(xrtr, 1545 xive_get_field32(END2_W4_END_BLOCK, end.w4), 1546 xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), 1547 xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); 1548 } 1549 1550 void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) 1551 { 1552 Xive2Router *xrtr = XIVE2_ROUTER(xn); 1553 uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); 1554 uint32_t eas_idx = XIVE_EAS_INDEX(lisn); 1555 Xive2Eas eas; 1556 1557 /* EAS cache lookup */ 1558 if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { 1559 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn); 1560 return; 1561 } 1562 1563 if (!pq_checked) { 1564 bool notify; 1565 uint8_t pq; 1566 1567 /* PQ cache lookup */ 1568 if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) { 1569 /* Set FIR */ 1570 g_assert_not_reached(); 1571 } 1572 1573 notify = xive_esb_trigger(&pq); 1574 1575 if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) { 1576 /* Set FIR */ 1577 g_assert_not_reached(); 1578 } 1579 1580 if (!notify) { 1581 return; 1582 } 1583 } 1584 1585 if (!xive2_eas_is_valid(&eas)) { 1586 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn); 1587 return; 1588 } 1589 1590 if (xive2_eas_is_masked(&eas)) { 1591 /* Notification completed */ 1592 return; 1593 } 1594 1595 /* 1596 * The event trigger becomes an END trigger 1597 */ 1598 xive2_router_end_notify(xrtr, 1599 xive_get_field64(EAS2_END_BLOCK, eas.w), 1600 xive_get_field64(EAS2_END_INDEX, eas.w), 1601 xive_get_field64(EAS2_END_DATA, eas.w)); 1602 } 1603 1604 static const Property xive2_router_properties[] = { 1605 DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb, 1606 TYPE_XIVE_FABRIC, XiveFabric *), 1607 }; 1608 1609 static void xive2_router_class_init(ObjectClass *klass, const void *data) 1610 { 1611 DeviceClass *dc = DEVICE_CLASS(klass); 1612 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1613 1614 dc->desc = "XIVE2 Router Engine"; 1615 device_class_set_props(dc, xive2_router_properties); 1616 /* Parent is SysBusDeviceClass. No need to call its realize hook */ 1617 dc->realize = xive2_router_realize; 1618 xnc->notify = xive2_router_notify; 1619 } 1620 1621 static const TypeInfo xive2_router_info = { 1622 .name = TYPE_XIVE2_ROUTER, 1623 .parent = TYPE_SYS_BUS_DEVICE, 1624 .abstract = true, 1625 .instance_size = sizeof(Xive2Router), 1626 .class_size = sizeof(Xive2RouterClass), 1627 .class_init = xive2_router_class_init, 1628 .interfaces = (const InterfaceInfo[]) { 1629 { TYPE_XIVE_NOTIFIER }, 1630 { TYPE_XIVE_PRESENTER }, 1631 { } 1632 } 1633 }; 1634 1635 static inline bool addr_is_even(hwaddr addr, uint32_t shift) 1636 { 1637 return !((addr >> shift) & 1); 1638 } 1639 1640 static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size) 1641 { 1642 Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque); 1643 uint32_t offset = addr & 0xFFF; 1644 uint8_t end_blk; 1645 uint32_t end_idx; 1646 Xive2End end; 1647 uint32_t end_esmask; 1648 uint8_t pq; 1649 uint64_t ret; 1650 1651 /* 1652 * The block id should be deduced from the load address on the END 1653 * ESB MMIO but our model only supports a single block per XIVE chip. 1654 */ 1655 end_blk = xive2_router_get_block_id(xsrc->xrtr); 1656 end_idx = addr >> (xsrc->esb_shift + 1); 1657 1658 if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { 1659 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, 1660 end_idx); 1661 return -1; 1662 } 1663 1664 if (!xive2_end_is_valid(&end)) { 1665 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", 1666 end_blk, end_idx); 1667 return -1; 1668 } 1669 1670 end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn : 1671 END2_W1_ESe; 1672 pq = xive_get_field32(end_esmask, end.w1); 1673 1674 switch (offset) { 1675 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: 1676 ret = xive_esb_eoi(&pq); 1677 1678 /* Forward the source event notification for routing ?? */ 1679 break; 1680 1681 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: 1682 ret = pq; 1683 break; 1684 1685 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 1686 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 1687 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 1688 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 1689 ret = xive_esb_set(&pq, (offset >> 8) & 0x3); 1690 break; 1691 default: 1692 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n", 1693 offset); 1694 return -1; 1695 } 1696 1697 if (pq != xive_get_field32(end_esmask, end.w1)) { 1698 end.w1 = xive_set_field32(end_esmask, end.w1, pq); 1699 xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); 1700 } 1701 1702 return ret; 1703 } 1704 1705 static void xive2_end_source_write(void *opaque, hwaddr addr, 1706 uint64_t value, unsigned size) 1707 { 1708 Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque); 1709 uint32_t offset = addr & 0xFFF; 1710 uint8_t end_blk; 1711 uint32_t end_idx; 1712 Xive2End end; 1713 uint32_t end_esmask; 1714 uint8_t pq; 1715 bool notify = false; 1716 1717 /* 1718 * The block id should be deduced from the load address on the END 1719 * ESB MMIO but our model only supports a single block per XIVE chip. 1720 */ 1721 end_blk = xive2_router_get_block_id(xsrc->xrtr); 1722 end_idx = addr >> (xsrc->esb_shift + 1); 1723 1724 if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { 1725 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, 1726 end_idx); 1727 return; 1728 } 1729 1730 if (!xive2_end_is_valid(&end)) { 1731 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", 1732 end_blk, end_idx); 1733 return; 1734 } 1735 1736 end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn : 1737 END2_W1_ESe; 1738 pq = xive_get_field32(end_esmask, end.w1); 1739 1740 switch (offset) { 1741 case 0 ... 0x3FF: 1742 notify = xive_esb_trigger(&pq); 1743 break; 1744 1745 case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF: 1746 /* TODO: can we check StoreEOI availability from the router ? */ 1747 notify = xive_esb_eoi(&pq); 1748 break; 1749 1750 case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF: 1751 if (end_esmask == END2_W1_ESe) { 1752 qemu_log_mask(LOG_GUEST_ERROR, 1753 "XIVE: END %x/%x can not EQ inject on ESe\n", 1754 end_blk, end_idx); 1755 return; 1756 } 1757 notify = true; 1758 break; 1759 1760 default: 1761 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n", 1762 offset); 1763 return; 1764 } 1765 1766 if (pq != xive_get_field32(end_esmask, end.w1)) { 1767 end.w1 = xive_set_field32(end_esmask, end.w1, pq); 1768 xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); 1769 } 1770 1771 /* TODO: Forward the source event notification for routing */ 1772 if (notify) { 1773 ; 1774 } 1775 } 1776 1777 static const MemoryRegionOps xive2_end_source_ops = { 1778 .read = xive2_end_source_read, 1779 .write = xive2_end_source_write, 1780 .endianness = DEVICE_BIG_ENDIAN, 1781 .valid = { 1782 .min_access_size = 1, 1783 .max_access_size = 8, 1784 }, 1785 .impl = { 1786 .min_access_size = 1, 1787 .max_access_size = 8, 1788 }, 1789 }; 1790 1791 static void xive2_end_source_realize(DeviceState *dev, Error **errp) 1792 { 1793 Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev); 1794 1795 assert(xsrc->xrtr); 1796 1797 if (!xsrc->nr_ends) { 1798 error_setg(errp, "Number of interrupt needs to be greater than 0"); 1799 return; 1800 } 1801 1802 if (xsrc->esb_shift != XIVE_ESB_4K && 1803 xsrc->esb_shift != XIVE_ESB_64K) { 1804 error_setg(errp, "Invalid ESB shift setting"); 1805 return; 1806 } 1807 1808 /* 1809 * Each END is assigned an even/odd pair of MMIO pages, the even page 1810 * manages the ESn field while the odd page manages the ESe field. 1811 */ 1812 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), 1813 &xive2_end_source_ops, xsrc, "xive.end", 1814 (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); 1815 } 1816 1817 static const Property xive2_end_source_properties[] = { 1818 DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0), 1819 DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K), 1820 DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER, 1821 Xive2Router *), 1822 }; 1823 1824 static void xive2_end_source_class_init(ObjectClass *klass, const void *data) 1825 { 1826 DeviceClass *dc = DEVICE_CLASS(klass); 1827 1828 dc->desc = "XIVE END Source"; 1829 device_class_set_props(dc, xive2_end_source_properties); 1830 dc->realize = xive2_end_source_realize; 1831 dc->user_creatable = false; 1832 } 1833 1834 static const TypeInfo xive2_end_source_info = { 1835 .name = TYPE_XIVE2_END_SOURCE, 1836 .parent = TYPE_DEVICE, 1837 .instance_size = sizeof(Xive2EndSource), 1838 .class_init = xive2_end_source_class_init, 1839 }; 1840 1841 static void xive2_register_types(void) 1842 { 1843 type_register_static(&xive2_router_info); 1844 type_register_static(&xive2_end_source_info); 1845 } 1846 1847 type_init(xive2_register_types) 1848