1 /* 2 * QEMU PowerPC XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2018, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/module.h" 13 #include "qapi/error.h" 14 #include "target/ppc/cpu.h" 15 #include "sysemu/cpus.h" 16 #include "sysemu/dma.h" 17 #include "sysemu/reset.h" 18 #include "hw/qdev-properties.h" 19 #include "monitor/monitor.h" 20 #include "hw/irq.h" 21 #include "hw/ppc/xive.h" 22 #include "hw/ppc/xive_regs.h" 23 24 /* 25 * XIVE Thread Interrupt Management context 26 */ 27 28 /* 29 * Convert a priority number to an Interrupt Pending Buffer (IPB) 30 * register, which indicates a pending interrupt at the priority 31 * corresponding to the bit number 32 */ 33 static uint8_t priority_to_ipb(uint8_t priority) 34 { 35 return priority > XIVE_PRIORITY_MAX ? 36 0 : 1 << (XIVE_PRIORITY_MAX - priority); 37 } 38 39 /* 40 * Convert an Interrupt Pending Buffer (IPB) register to a Pending 41 * Interrupt Priority Register (PIPR), which contains the priority of 42 * the most favored pending notification. 43 */ 44 static uint8_t ipb_to_pipr(uint8_t ibp) 45 { 46 return ibp ? clz32((uint32_t)ibp << 24) : 0xff; 47 } 48 49 static void ipb_update(uint8_t *regs, uint8_t priority) 50 { 51 regs[TM_IPB] |= priority_to_ipb(priority); 52 regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); 53 } 54 55 static uint8_t exception_mask(uint8_t ring) 56 { 57 switch (ring) { 58 case TM_QW1_OS: 59 return TM_QW1_NSR_EO; 60 case TM_QW3_HV_PHYS: 61 return TM_QW3_NSR_HE; 62 default: 63 g_assert_not_reached(); 64 } 65 } 66 67 static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) 68 { 69 switch (ring) { 70 case TM_QW0_USER: 71 return 0; /* Not supported */ 72 case TM_QW1_OS: 73 return tctx->os_output; 74 case TM_QW2_HV_POOL: 75 case TM_QW3_HV_PHYS: 76 return tctx->hv_output; 77 default: 78 return 0; 79 } 80 } 81 82 static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) 83 { 84 uint8_t *regs = &tctx->regs[ring]; 85 uint8_t nsr = regs[TM_NSR]; 86 uint8_t mask = exception_mask(ring); 87 88 qemu_irq_lower(xive_tctx_output(tctx, ring)); 89 90 if (regs[TM_NSR] & mask) { 91 uint8_t cppr = regs[TM_PIPR]; 92 93 regs[TM_CPPR] = cppr; 94 95 /* Reset the pending buffer bit */ 96 regs[TM_IPB] &= ~priority_to_ipb(cppr); 97 regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); 98 99 /* Drop Exception bit */ 100 regs[TM_NSR] &= ~mask; 101 } 102 103 return (nsr << 8) | regs[TM_CPPR]; 104 } 105 106 static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) 107 { 108 uint8_t *regs = &tctx->regs[ring]; 109 110 if (regs[TM_PIPR] < regs[TM_CPPR]) { 111 switch (ring) { 112 case TM_QW1_OS: 113 regs[TM_NSR] |= TM_QW1_NSR_EO; 114 break; 115 case TM_QW3_HV_PHYS: 116 regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6); 117 break; 118 default: 119 g_assert_not_reached(); 120 } 121 qemu_irq_raise(xive_tctx_output(tctx, ring)); 122 } 123 } 124 125 static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) 126 { 127 if (cppr > XIVE_PRIORITY_MAX) { 128 cppr = 0xff; 129 } 130 131 tctx->regs[ring + TM_CPPR] = cppr; 132 133 /* CPPR has changed, check if we need to raise a pending exception */ 134 xive_tctx_notify(tctx, ring); 135 } 136 137 static inline uint32_t xive_tctx_word2(uint8_t *ring) 138 { 139 return *((uint32_t *) &ring[TM_WORD2]); 140 } 141 142 /* 143 * XIVE Thread Interrupt Management Area (TIMA) 144 */ 145 146 static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset, 147 uint64_t value, unsigned size) 148 { 149 xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); 150 } 151 152 static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size) 153 { 154 return xive_tctx_accept(tctx, TM_QW3_HV_PHYS); 155 } 156 157 static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset, 158 unsigned size) 159 { 160 uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); 161 uint32_t qw2w2; 162 163 qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0); 164 memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); 165 return qw2w2; 166 } 167 168 static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset, 169 uint64_t value, unsigned size) 170 { 171 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff; 172 } 173 174 static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size) 175 { 176 return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff; 177 } 178 179 /* 180 * Define an access map for each page of the TIMA that we will use in 181 * the memory region ops to filter values when doing loads and stores 182 * of raw registers values 183 * 184 * Registers accessibility bits : 185 * 186 * 0x0 - no access 187 * 0x1 - write only 188 * 0x2 - read only 189 * 0x3 - read/write 190 */ 191 192 static const uint8_t xive_tm_hw_view[] = { 193 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ 194 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ 195 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 196 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ 197 }; 198 199 static const uint8_t xive_tm_hv_view[] = { 200 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ 201 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ 202 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 203 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ 204 }; 205 206 static const uint8_t xive_tm_os_view[] = { 207 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ 208 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ 209 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ 210 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ 211 }; 212 213 static const uint8_t xive_tm_user_view[] = { 214 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */ 215 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ 216 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ 217 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ 218 }; 219 220 /* 221 * Overall TIMA access map for the thread interrupt management context 222 * registers 223 */ 224 static const uint8_t *xive_tm_views[] = { 225 [XIVE_TM_HW_PAGE] = xive_tm_hw_view, 226 [XIVE_TM_HV_PAGE] = xive_tm_hv_view, 227 [XIVE_TM_OS_PAGE] = xive_tm_os_view, 228 [XIVE_TM_USER_PAGE] = xive_tm_user_view, 229 }; 230 231 /* 232 * Computes a register access mask for a given offset in the TIMA 233 */ 234 static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write) 235 { 236 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; 237 uint8_t reg_offset = offset & 0x3F; 238 uint8_t reg_mask = write ? 0x1 : 0x2; 239 uint64_t mask = 0x0; 240 int i; 241 242 for (i = 0; i < size; i++) { 243 if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) { 244 mask |= (uint64_t) 0xff << (8 * (size - i - 1)); 245 } 246 } 247 248 return mask; 249 } 250 251 static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, 252 unsigned size) 253 { 254 uint8_t ring_offset = offset & 0x30; 255 uint8_t reg_offset = offset & 0x3F; 256 uint64_t mask = xive_tm_mask(offset, size, true); 257 int i; 258 259 /* 260 * Only 4 or 8 bytes stores are allowed and the User ring is 261 * excluded 262 */ 263 if (size < 4 || !mask || ring_offset == TM_QW0_USER) { 264 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" 265 HWADDR_PRIx"\n", offset); 266 return; 267 } 268 269 /* 270 * Use the register offset for the raw values and filter out 271 * reserved values 272 */ 273 for (i = 0; i < size; i++) { 274 uint8_t byte_mask = (mask >> (8 * (size - i - 1))); 275 if (byte_mask) { 276 tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) & 277 byte_mask; 278 } 279 } 280 } 281 282 static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) 283 { 284 uint8_t ring_offset = offset & 0x30; 285 uint8_t reg_offset = offset & 0x3F; 286 uint64_t mask = xive_tm_mask(offset, size, false); 287 uint64_t ret; 288 int i; 289 290 /* 291 * Only 4 or 8 bytes loads are allowed and the User ring is 292 * excluded 293 */ 294 if (size < 4 || !mask || ring_offset == TM_QW0_USER) { 295 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" 296 HWADDR_PRIx"\n", offset); 297 return -1; 298 } 299 300 /* Use the register offset for the raw values */ 301 ret = 0; 302 for (i = 0; i < size; i++) { 303 ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1)); 304 } 305 306 /* filter out reserved values */ 307 return ret & mask; 308 } 309 310 /* 311 * The TM context is mapped twice within each page. Stores and loads 312 * to the first mapping below 2K write and read the specified values 313 * without modification. The second mapping above 2K performs specific 314 * state changes (side effects) in addition to setting/returning the 315 * interrupt management area context of the processor thread. 316 */ 317 static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size) 318 { 319 return xive_tctx_accept(tctx, TM_QW1_OS); 320 } 321 322 static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset, 323 uint64_t value, unsigned size) 324 { 325 xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); 326 } 327 328 /* 329 * Adjust the IPB to allow a CPU to process event queues of other 330 * priorities during one physical interrupt cycle. 331 */ 332 static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset, 333 uint64_t value, unsigned size) 334 { 335 ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff); 336 xive_tctx_notify(tctx, TM_QW1_OS); 337 } 338 339 /* 340 * Define a mapping of "special" operations depending on the TIMA page 341 * offset and the size of the operation. 342 */ 343 typedef struct XiveTmOp { 344 uint8_t page_offset; 345 uint32_t op_offset; 346 unsigned size; 347 void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value, 348 unsigned size); 349 uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size); 350 } XiveTmOp; 351 352 static const XiveTmOp xive_tm_operations[] = { 353 /* 354 * MMIOs below 2K : raw values and special operations without side 355 * effects 356 */ 357 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, 358 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL }, 359 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL }, 360 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll }, 361 362 /* MMIOs above 2K : special operations with side effects */ 363 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, 364 { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL }, 365 { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg }, 366 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx }, 367 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx }, 368 }; 369 370 static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write) 371 { 372 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; 373 uint32_t op_offset = offset & 0xFFF; 374 int i; 375 376 for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) { 377 const XiveTmOp *xto = &xive_tm_operations[i]; 378 379 /* Accesses done from a more privileged TIMA page is allowed */ 380 if (xto->page_offset >= page_offset && 381 xto->op_offset == op_offset && 382 xto->size == size && 383 ((write && xto->write_handler) || (!write && xto->read_handler))) { 384 return xto; 385 } 386 } 387 return NULL; 388 } 389 390 /* 391 * TIMA MMIO handlers 392 */ 393 void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, 394 unsigned size) 395 { 396 const XiveTmOp *xto; 397 398 /* 399 * TODO: check V bit in Q[0-3]W2 400 */ 401 402 /* 403 * First, check for special operations in the 2K region 404 */ 405 if (offset & 0x800) { 406 xto = xive_tm_find_op(offset, size, true); 407 if (!xto) { 408 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA" 409 "@%"HWADDR_PRIx"\n", offset); 410 } else { 411 xto->write_handler(tctx, offset, value, size); 412 } 413 return; 414 } 415 416 /* 417 * Then, for special operations in the region below 2K. 418 */ 419 xto = xive_tm_find_op(offset, size, true); 420 if (xto) { 421 xto->write_handler(tctx, offset, value, size); 422 return; 423 } 424 425 /* 426 * Finish with raw access to the register values 427 */ 428 xive_tm_raw_write(tctx, offset, value, size); 429 } 430 431 uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size) 432 { 433 const XiveTmOp *xto; 434 435 /* 436 * TODO: check V bit in Q[0-3]W2 437 */ 438 439 /* 440 * First, check for special operations in the 2K region 441 */ 442 if (offset & 0x800) { 443 xto = xive_tm_find_op(offset, size, false); 444 if (!xto) { 445 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" 446 "@%"HWADDR_PRIx"\n", offset); 447 return -1; 448 } 449 return xto->read_handler(tctx, offset, size); 450 } 451 452 /* 453 * Then, for special operations in the region below 2K. 454 */ 455 xto = xive_tm_find_op(offset, size, false); 456 if (xto) { 457 return xto->read_handler(tctx, offset, size); 458 } 459 460 /* 461 * Finish with raw access to the register values 462 */ 463 return xive_tm_raw_read(tctx, offset, size); 464 } 465 466 static void xive_tm_write(void *opaque, hwaddr offset, 467 uint64_t value, unsigned size) 468 { 469 XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu); 470 471 xive_tctx_tm_write(tctx, offset, value, size); 472 } 473 474 static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size) 475 { 476 XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu); 477 478 return xive_tctx_tm_read(tctx, offset, size); 479 } 480 481 const MemoryRegionOps xive_tm_ops = { 482 .read = xive_tm_read, 483 .write = xive_tm_write, 484 .endianness = DEVICE_BIG_ENDIAN, 485 .valid = { 486 .min_access_size = 1, 487 .max_access_size = 8, 488 }, 489 .impl = { 490 .min_access_size = 1, 491 .max_access_size = 8, 492 }, 493 }; 494 495 static char *xive_tctx_ring_print(uint8_t *ring) 496 { 497 uint32_t w2 = xive_tctx_word2(ring); 498 499 return g_strdup_printf("%02x %02x %02x %02x %02x " 500 "%02x %02x %02x %08x", 501 ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB], 502 ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR], 503 be32_to_cpu(w2)); 504 } 505 506 static const char * const xive_tctx_ring_names[] = { 507 "USER", "OS", "POOL", "PHYS", 508 }; 509 510 void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon) 511 { 512 int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1; 513 int i; 514 515 if (kvm_irqchip_in_kernel()) { 516 Error *local_err = NULL; 517 518 kvmppc_xive_cpu_synchronize_state(tctx, &local_err); 519 if (local_err) { 520 error_report_err(local_err); 521 return; 522 } 523 } 524 525 monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR" 526 " W2\n", cpu_index); 527 528 for (i = 0; i < XIVE_TM_RING_COUNT; i++) { 529 char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]); 530 monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index, 531 xive_tctx_ring_names[i], s); 532 g_free(s); 533 } 534 } 535 536 static void xive_tctx_reset(void *dev) 537 { 538 XiveTCTX *tctx = XIVE_TCTX(dev); 539 540 memset(tctx->regs, 0, sizeof(tctx->regs)); 541 542 /* Set some defaults */ 543 tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; 544 tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; 545 tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; 546 547 /* 548 * Initialize PIPR to 0xFF to avoid phantom interrupts when the 549 * CPPR is first set. 550 */ 551 tctx->regs[TM_QW1_OS + TM_PIPR] = 552 ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); 553 tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = 554 ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); 555 } 556 557 static void xive_tctx_realize(DeviceState *dev, Error **errp) 558 { 559 XiveTCTX *tctx = XIVE_TCTX(dev); 560 PowerPCCPU *cpu; 561 CPUPPCState *env; 562 Object *obj; 563 Error *local_err = NULL; 564 565 obj = object_property_get_link(OBJECT(dev), "cpu", &local_err); 566 if (!obj) { 567 error_propagate(errp, local_err); 568 error_prepend(errp, "required link 'cpu' not found: "); 569 return; 570 } 571 572 cpu = POWERPC_CPU(obj); 573 tctx->cs = CPU(obj); 574 575 env = &cpu->env; 576 switch (PPC_INPUT(env)) { 577 case PPC_FLAGS_INPUT_POWER9: 578 tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT]; 579 tctx->os_output = env->irq_inputs[POWER9_INPUT_INT]; 580 break; 581 582 default: 583 error_setg(errp, "XIVE interrupt controller does not support " 584 "this CPU bus model"); 585 return; 586 } 587 588 /* Connect the presenter to the VCPU (required for CPU hotplug) */ 589 if (kvm_irqchip_in_kernel()) { 590 kvmppc_xive_cpu_connect(tctx, &local_err); 591 if (local_err) { 592 error_propagate(errp, local_err); 593 return; 594 } 595 } 596 597 qemu_register_reset(xive_tctx_reset, dev); 598 } 599 600 static void xive_tctx_unrealize(DeviceState *dev, Error **errp) 601 { 602 qemu_unregister_reset(xive_tctx_reset, dev); 603 } 604 605 static int vmstate_xive_tctx_pre_save(void *opaque) 606 { 607 Error *local_err = NULL; 608 609 if (kvm_irqchip_in_kernel()) { 610 kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err); 611 if (local_err) { 612 error_report_err(local_err); 613 return -1; 614 } 615 } 616 617 return 0; 618 } 619 620 static int vmstate_xive_tctx_post_load(void *opaque, int version_id) 621 { 622 Error *local_err = NULL; 623 624 if (kvm_irqchip_in_kernel()) { 625 /* 626 * Required for hotplugged CPU, for which the state comes 627 * after all states of the machine. 628 */ 629 kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err); 630 if (local_err) { 631 error_report_err(local_err); 632 return -1; 633 } 634 } 635 636 return 0; 637 } 638 639 static const VMStateDescription vmstate_xive_tctx = { 640 .name = TYPE_XIVE_TCTX, 641 .version_id = 1, 642 .minimum_version_id = 1, 643 .pre_save = vmstate_xive_tctx_pre_save, 644 .post_load = vmstate_xive_tctx_post_load, 645 .fields = (VMStateField[]) { 646 VMSTATE_BUFFER(regs, XiveTCTX), 647 VMSTATE_END_OF_LIST() 648 }, 649 }; 650 651 static void xive_tctx_class_init(ObjectClass *klass, void *data) 652 { 653 DeviceClass *dc = DEVICE_CLASS(klass); 654 655 dc->desc = "XIVE Interrupt Thread Context"; 656 dc->realize = xive_tctx_realize; 657 dc->unrealize = xive_tctx_unrealize; 658 dc->vmsd = &vmstate_xive_tctx; 659 } 660 661 static const TypeInfo xive_tctx_info = { 662 .name = TYPE_XIVE_TCTX, 663 .parent = TYPE_DEVICE, 664 .instance_size = sizeof(XiveTCTX), 665 .class_init = xive_tctx_class_init, 666 }; 667 668 Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp) 669 { 670 Error *local_err = NULL; 671 Object *obj; 672 673 obj = object_new(TYPE_XIVE_TCTX); 674 object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort); 675 object_unref(obj); 676 object_property_add_const_link(obj, "cpu", cpu, &error_abort); 677 object_property_set_bool(obj, true, "realized", &local_err); 678 if (local_err) { 679 goto error; 680 } 681 682 return obj; 683 684 error: 685 object_unparent(obj); 686 error_propagate(errp, local_err); 687 return NULL; 688 } 689 690 /* 691 * XIVE ESB helpers 692 */ 693 694 static uint8_t xive_esb_set(uint8_t *pq, uint8_t value) 695 { 696 uint8_t old_pq = *pq & 0x3; 697 698 *pq &= ~0x3; 699 *pq |= value & 0x3; 700 701 return old_pq; 702 } 703 704 static bool xive_esb_trigger(uint8_t *pq) 705 { 706 uint8_t old_pq = *pq & 0x3; 707 708 switch (old_pq) { 709 case XIVE_ESB_RESET: 710 xive_esb_set(pq, XIVE_ESB_PENDING); 711 return true; 712 case XIVE_ESB_PENDING: 713 case XIVE_ESB_QUEUED: 714 xive_esb_set(pq, XIVE_ESB_QUEUED); 715 return false; 716 case XIVE_ESB_OFF: 717 xive_esb_set(pq, XIVE_ESB_OFF); 718 return false; 719 default: 720 g_assert_not_reached(); 721 } 722 } 723 724 static bool xive_esb_eoi(uint8_t *pq) 725 { 726 uint8_t old_pq = *pq & 0x3; 727 728 switch (old_pq) { 729 case XIVE_ESB_RESET: 730 case XIVE_ESB_PENDING: 731 xive_esb_set(pq, XIVE_ESB_RESET); 732 return false; 733 case XIVE_ESB_QUEUED: 734 xive_esb_set(pq, XIVE_ESB_PENDING); 735 return true; 736 case XIVE_ESB_OFF: 737 xive_esb_set(pq, XIVE_ESB_OFF); 738 return false; 739 default: 740 g_assert_not_reached(); 741 } 742 } 743 744 /* 745 * XIVE Interrupt Source (or IVSE) 746 */ 747 748 uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno) 749 { 750 assert(srcno < xsrc->nr_irqs); 751 752 return xsrc->status[srcno] & 0x3; 753 } 754 755 uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq) 756 { 757 assert(srcno < xsrc->nr_irqs); 758 759 return xive_esb_set(&xsrc->status[srcno], pq); 760 } 761 762 /* 763 * Returns whether the event notification should be forwarded. 764 */ 765 static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) 766 { 767 uint8_t old_pq = xive_source_esb_get(xsrc, srcno); 768 769 xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; 770 771 switch (old_pq) { 772 case XIVE_ESB_RESET: 773 xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING); 774 return true; 775 default: 776 return false; 777 } 778 } 779 780 /* 781 * Returns whether the event notification should be forwarded. 782 */ 783 static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) 784 { 785 bool ret; 786 787 assert(srcno < xsrc->nr_irqs); 788 789 ret = xive_esb_trigger(&xsrc->status[srcno]); 790 791 if (xive_source_irq_is_lsi(xsrc, srcno) && 792 xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) { 793 qemu_log_mask(LOG_GUEST_ERROR, 794 "XIVE: queued an event on LSI IRQ %d\n", srcno); 795 } 796 797 return ret; 798 } 799 800 /* 801 * Returns whether the event notification should be forwarded. 802 */ 803 static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) 804 { 805 bool ret; 806 807 assert(srcno < xsrc->nr_irqs); 808 809 ret = xive_esb_eoi(&xsrc->status[srcno]); 810 811 /* 812 * LSI sources do not set the Q bit but they can still be 813 * asserted, in which case we should forward a new event 814 * notification 815 */ 816 if (xive_source_irq_is_lsi(xsrc, srcno) && 817 xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { 818 ret = xive_source_lsi_trigger(xsrc, srcno); 819 } 820 821 return ret; 822 } 823 824 /* 825 * Forward the source event notification to the Router 826 */ 827 static void xive_source_notify(XiveSource *xsrc, int srcno) 828 { 829 XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); 830 831 if (xnc->notify) { 832 xnc->notify(xsrc->xive, srcno); 833 } 834 } 835 836 /* 837 * In a two pages ESB MMIO setting, even page is the trigger page, odd 838 * page is for management 839 */ 840 static inline bool addr_is_even(hwaddr addr, uint32_t shift) 841 { 842 return !((addr >> shift) & 1); 843 } 844 845 static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr) 846 { 847 return xive_source_esb_has_2page(xsrc) && 848 addr_is_even(addr, xsrc->esb_shift - 1); 849 } 850 851 /* 852 * ESB MMIO loads 853 * Trigger page Management/EOI page 854 * 855 * ESB MMIO setting 2 pages 1 or 2 pages 856 * 857 * 0x000 .. 0x3FF -1 EOI and return 0|1 858 * 0x400 .. 0x7FF -1 EOI and return 0|1 859 * 0x800 .. 0xBFF -1 return PQ 860 * 0xC00 .. 0xCFF -1 return PQ and atomically PQ=00 861 * 0xD00 .. 0xDFF -1 return PQ and atomically PQ=01 862 * 0xE00 .. 0xDFF -1 return PQ and atomically PQ=10 863 * 0xF00 .. 0xDFF -1 return PQ and atomically PQ=11 864 */ 865 static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) 866 { 867 XiveSource *xsrc = XIVE_SOURCE(opaque); 868 uint32_t offset = addr & 0xFFF; 869 uint32_t srcno = addr >> xsrc->esb_shift; 870 uint64_t ret = -1; 871 872 /* In a two pages ESB MMIO setting, trigger page should not be read */ 873 if (xive_source_is_trigger_page(xsrc, addr)) { 874 qemu_log_mask(LOG_GUEST_ERROR, 875 "XIVE: invalid load on IRQ %d trigger page at " 876 "0x%"HWADDR_PRIx"\n", srcno, addr); 877 return -1; 878 } 879 880 switch (offset) { 881 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: 882 ret = xive_source_esb_eoi(xsrc, srcno); 883 884 /* Forward the source event notification for routing */ 885 if (ret) { 886 xive_source_notify(xsrc, srcno); 887 } 888 break; 889 890 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: 891 ret = xive_source_esb_get(xsrc, srcno); 892 break; 893 894 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 895 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 896 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 897 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 898 ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); 899 break; 900 default: 901 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n", 902 offset); 903 } 904 905 return ret; 906 } 907 908 /* 909 * ESB MMIO stores 910 * Trigger page Management/EOI page 911 * 912 * ESB MMIO setting 2 pages 1 or 2 pages 913 * 914 * 0x000 .. 0x3FF Trigger Trigger 915 * 0x400 .. 0x7FF Trigger EOI 916 * 0x800 .. 0xBFF Trigger undefined 917 * 0xC00 .. 0xCFF Trigger PQ=00 918 * 0xD00 .. 0xDFF Trigger PQ=01 919 * 0xE00 .. 0xDFF Trigger PQ=10 920 * 0xF00 .. 0xDFF Trigger PQ=11 921 */ 922 static void xive_source_esb_write(void *opaque, hwaddr addr, 923 uint64_t value, unsigned size) 924 { 925 XiveSource *xsrc = XIVE_SOURCE(opaque); 926 uint32_t offset = addr & 0xFFF; 927 uint32_t srcno = addr >> xsrc->esb_shift; 928 bool notify = false; 929 930 /* In a two pages ESB MMIO setting, trigger page only triggers */ 931 if (xive_source_is_trigger_page(xsrc, addr)) { 932 notify = xive_source_esb_trigger(xsrc, srcno); 933 goto out; 934 } 935 936 switch (offset) { 937 case 0 ... 0x3FF: 938 notify = xive_source_esb_trigger(xsrc, srcno); 939 break; 940 941 case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF: 942 if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) { 943 qemu_log_mask(LOG_GUEST_ERROR, 944 "XIVE: invalid Store EOI for IRQ %d\n", srcno); 945 return; 946 } 947 948 notify = xive_source_esb_eoi(xsrc, srcno); 949 break; 950 951 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 952 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 953 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 954 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 955 xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); 956 break; 957 958 default: 959 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n", 960 offset); 961 return; 962 } 963 964 out: 965 /* Forward the source event notification for routing */ 966 if (notify) { 967 xive_source_notify(xsrc, srcno); 968 } 969 } 970 971 static const MemoryRegionOps xive_source_esb_ops = { 972 .read = xive_source_esb_read, 973 .write = xive_source_esb_write, 974 .endianness = DEVICE_BIG_ENDIAN, 975 .valid = { 976 .min_access_size = 8, 977 .max_access_size = 8, 978 }, 979 .impl = { 980 .min_access_size = 8, 981 .max_access_size = 8, 982 }, 983 }; 984 985 void xive_source_set_irq(void *opaque, int srcno, int val) 986 { 987 XiveSource *xsrc = XIVE_SOURCE(opaque); 988 bool notify = false; 989 990 if (xive_source_irq_is_lsi(xsrc, srcno)) { 991 if (val) { 992 notify = xive_source_lsi_trigger(xsrc, srcno); 993 } else { 994 xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; 995 } 996 } else { 997 if (val) { 998 notify = xive_source_esb_trigger(xsrc, srcno); 999 } 1000 } 1001 1002 /* Forward the source event notification for routing */ 1003 if (notify) { 1004 xive_source_notify(xsrc, srcno); 1005 } 1006 } 1007 1008 void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon) 1009 { 1010 int i; 1011 1012 for (i = 0; i < xsrc->nr_irqs; i++) { 1013 uint8_t pq = xive_source_esb_get(xsrc, i); 1014 1015 if (pq == XIVE_ESB_OFF) { 1016 continue; 1017 } 1018 1019 monitor_printf(mon, " %08x %s %c%c%c\n", i + offset, 1020 xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", 1021 pq & XIVE_ESB_VAL_P ? 'P' : '-', 1022 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', 1023 xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' '); 1024 } 1025 } 1026 1027 static void xive_source_reset(void *dev) 1028 { 1029 XiveSource *xsrc = XIVE_SOURCE(dev); 1030 1031 /* Do not clear the LSI bitmap */ 1032 1033 /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */ 1034 memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs); 1035 } 1036 1037 static void xive_source_realize(DeviceState *dev, Error **errp) 1038 { 1039 XiveSource *xsrc = XIVE_SOURCE(dev); 1040 Object *obj; 1041 Error *local_err = NULL; 1042 1043 obj = object_property_get_link(OBJECT(dev), "xive", &local_err); 1044 if (!obj) { 1045 error_propagate(errp, local_err); 1046 error_prepend(errp, "required link 'xive' not found: "); 1047 return; 1048 } 1049 1050 xsrc->xive = XIVE_NOTIFIER(obj); 1051 1052 if (!xsrc->nr_irqs) { 1053 error_setg(errp, "Number of interrupt needs to be greater than 0"); 1054 return; 1055 } 1056 1057 if (xsrc->esb_shift != XIVE_ESB_4K && 1058 xsrc->esb_shift != XIVE_ESB_4K_2PAGE && 1059 xsrc->esb_shift != XIVE_ESB_64K && 1060 xsrc->esb_shift != XIVE_ESB_64K_2PAGE) { 1061 error_setg(errp, "Invalid ESB shift setting"); 1062 return; 1063 } 1064 1065 xsrc->status = g_malloc0(xsrc->nr_irqs); 1066 xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); 1067 1068 if (!kvm_irqchip_in_kernel()) { 1069 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), 1070 &xive_source_esb_ops, xsrc, "xive.esb", 1071 (1ull << xsrc->esb_shift) * xsrc->nr_irqs); 1072 } 1073 1074 qemu_register_reset(xive_source_reset, dev); 1075 } 1076 1077 static const VMStateDescription vmstate_xive_source = { 1078 .name = TYPE_XIVE_SOURCE, 1079 .version_id = 1, 1080 .minimum_version_id = 1, 1081 .fields = (VMStateField[]) { 1082 VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), 1083 VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), 1084 VMSTATE_END_OF_LIST() 1085 }, 1086 }; 1087 1088 /* 1089 * The default XIVE interrupt source setting for the ESB MMIOs is two 1090 * 64k pages without Store EOI, to be in sync with KVM. 1091 */ 1092 static Property xive_source_properties[] = { 1093 DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0), 1094 DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0), 1095 DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE), 1096 DEFINE_PROP_END_OF_LIST(), 1097 }; 1098 1099 static void xive_source_class_init(ObjectClass *klass, void *data) 1100 { 1101 DeviceClass *dc = DEVICE_CLASS(klass); 1102 1103 dc->desc = "XIVE Interrupt Source"; 1104 dc->props = xive_source_properties; 1105 dc->realize = xive_source_realize; 1106 dc->vmsd = &vmstate_xive_source; 1107 } 1108 1109 static const TypeInfo xive_source_info = { 1110 .name = TYPE_XIVE_SOURCE, 1111 .parent = TYPE_DEVICE, 1112 .instance_size = sizeof(XiveSource), 1113 .class_init = xive_source_class_init, 1114 }; 1115 1116 /* 1117 * XiveEND helpers 1118 */ 1119 1120 void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) 1121 { 1122 uint64_t qaddr_base = xive_end_qaddr(end); 1123 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1124 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1125 uint32_t qentries = 1 << (qsize + 10); 1126 int i; 1127 1128 /* 1129 * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window 1130 */ 1131 monitor_printf(mon, " [ "); 1132 qindex = (qindex - (width - 1)) & (qentries - 1); 1133 for (i = 0; i < width; i++) { 1134 uint64_t qaddr = qaddr_base + (qindex << 2); 1135 uint32_t qdata = -1; 1136 1137 if (dma_memory_read(&address_space_memory, qaddr, &qdata, 1138 sizeof(qdata))) { 1139 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" 1140 HWADDR_PRIx "\n", qaddr); 1141 return; 1142 } 1143 monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "", 1144 be32_to_cpu(qdata)); 1145 qindex = (qindex + 1) & (qentries - 1); 1146 } 1147 } 1148 1149 void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon) 1150 { 1151 uint64_t qaddr_base = xive_end_qaddr(end); 1152 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1153 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); 1154 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1155 uint32_t qentries = 1 << (qsize + 10); 1156 1157 uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); 1158 uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); 1159 1160 if (!xive_end_is_valid(end)) { 1161 return; 1162 } 1163 1164 monitor_printf(mon, " %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64 1165 "% 6d/%5d ^%d", end_idx, 1166 xive_end_is_valid(end) ? 'v' : '-', 1167 xive_end_is_enqueue(end) ? 'q' : '-', 1168 xive_end_is_notify(end) ? 'n' : '-', 1169 xive_end_is_backlog(end) ? 'b' : '-', 1170 xive_end_is_escalate(end) ? 'e' : '-', 1171 priority, nvt, qaddr_base, qindex, qentries, qgen); 1172 1173 xive_end_queue_pic_print_info(end, 6, mon); 1174 monitor_printf(mon, "]\n"); 1175 } 1176 1177 static void xive_end_enqueue(XiveEND *end, uint32_t data) 1178 { 1179 uint64_t qaddr_base = xive_end_qaddr(end); 1180 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); 1181 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 1182 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); 1183 1184 uint64_t qaddr = qaddr_base + (qindex << 2); 1185 uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); 1186 uint32_t qentries = 1 << (qsize + 10); 1187 1188 if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) { 1189 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" 1190 HWADDR_PRIx "\n", qaddr); 1191 return; 1192 } 1193 1194 qindex = (qindex + 1) & (qentries - 1); 1195 if (qindex == 0) { 1196 qgen ^= 1; 1197 end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen); 1198 } 1199 end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); 1200 } 1201 1202 /* 1203 * XIVE Router (aka. Virtualization Controller or IVRE) 1204 */ 1205 1206 int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, 1207 XiveEAS *eas) 1208 { 1209 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1210 1211 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); 1212 } 1213 1214 int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, 1215 XiveEND *end) 1216 { 1217 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1218 1219 return xrc->get_end(xrtr, end_blk, end_idx, end); 1220 } 1221 1222 int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, 1223 XiveEND *end, uint8_t word_number) 1224 { 1225 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1226 1227 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); 1228 } 1229 1230 int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, 1231 XiveNVT *nvt) 1232 { 1233 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1234 1235 return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt); 1236 } 1237 1238 int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, 1239 XiveNVT *nvt, uint8_t word_number) 1240 { 1241 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1242 1243 return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number); 1244 } 1245 1246 XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs) 1247 { 1248 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); 1249 1250 return xrc->get_tctx(xrtr, cs); 1251 } 1252 1253 /* 1254 * Encode the HW CAM line in the block group mode format : 1255 * 1256 * chip << 19 | 0000000 0 0001 thread (7Bit) 1257 */ 1258 static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx) 1259 { 1260 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; 1261 uint32_t pir = env->spr_cb[SPR_PIR].default_value; 1262 1263 return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f)); 1264 } 1265 1266 /* 1267 * The thread context register words are in big-endian format. 1268 */ 1269 static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format, 1270 uint8_t nvt_blk, uint32_t nvt_idx, 1271 bool cam_ignore, uint32_t logic_serv) 1272 { 1273 uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx); 1274 uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); 1275 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); 1276 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); 1277 uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); 1278 1279 /* 1280 * TODO (PowerNV): ignore mode. The low order bits of the NVT 1281 * identifier are ignored in the "CAM" match. 1282 */ 1283 1284 if (format == 0) { 1285 if (cam_ignore == true) { 1286 /* 1287 * F=0 & i=1: Logical server notification (bits ignored at 1288 * the end of the NVT identifier) 1289 */ 1290 qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", 1291 nvt_blk, nvt_idx); 1292 return -1; 1293 } 1294 1295 /* F=0 & i=0: Specific NVT notification */ 1296 1297 /* PHYS ring */ 1298 if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) && 1299 cam == xive_tctx_hw_cam_line(tctx)) { 1300 return TM_QW3_HV_PHYS; 1301 } 1302 1303 /* HV POOL ring */ 1304 if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) && 1305 cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) { 1306 return TM_QW2_HV_POOL; 1307 } 1308 1309 /* OS ring */ 1310 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && 1311 cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) { 1312 return TM_QW1_OS; 1313 } 1314 } else { 1315 /* F=1 : User level Event-Based Branch (EBB) notification */ 1316 1317 /* USER ring */ 1318 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && 1319 (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) && 1320 (be32_to_cpu(qw0w2) & TM_QW0W2_VU) && 1321 (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) { 1322 return TM_QW0_USER; 1323 } 1324 } 1325 return -1; 1326 } 1327 1328 typedef struct XiveTCTXMatch { 1329 XiveTCTX *tctx; 1330 uint8_t ring; 1331 } XiveTCTXMatch; 1332 1333 static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format, 1334 uint8_t nvt_blk, uint32_t nvt_idx, 1335 bool cam_ignore, uint8_t priority, 1336 uint32_t logic_serv, XiveTCTXMatch *match) 1337 { 1338 CPUState *cs; 1339 1340 /* 1341 * TODO (PowerNV): handle chip_id overwrite of block field for 1342 * hardwired CAM compares 1343 */ 1344 1345 CPU_FOREACH(cs) { 1346 XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs); 1347 int ring; 1348 1349 /* 1350 * HW checks that the CPU is enabled in the Physical Thread 1351 * Enable Register (PTER). 1352 */ 1353 1354 /* 1355 * Check the thread context CAM lines and record matches. We 1356 * will handle CPU exception delivery later 1357 */ 1358 ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx, 1359 cam_ignore, logic_serv); 1360 /* 1361 * Save the context and follow on to catch duplicates, that we 1362 * don't support yet. 1363 */ 1364 if (ring != -1) { 1365 if (match->tctx) { 1366 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread " 1367 "context NVT %x/%x\n", nvt_blk, nvt_idx); 1368 return false; 1369 } 1370 1371 match->ring = ring; 1372 match->tctx = tctx; 1373 } 1374 } 1375 1376 if (!match->tctx) { 1377 qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n", 1378 nvt_blk, nvt_idx); 1379 return false; 1380 } 1381 1382 return true; 1383 } 1384 1385 /* 1386 * This is our simple Xive Presenter Engine model. It is merged in the 1387 * Router as it does not require an extra object. 1388 * 1389 * It receives notification requests sent by the IVRE to find one 1390 * matching NVT (or more) dispatched on the processor threads. In case 1391 * of a single NVT notification, the process is abreviated and the 1392 * thread is signaled if a match is found. In case of a logical server 1393 * notification (bits ignored at the end of the NVT identifier), the 1394 * IVPE and IVRE select a winning thread using different filters. This 1395 * involves 2 or 3 exchanges on the PowerBus that the model does not 1396 * support. 1397 * 1398 * The parameters represent what is sent on the PowerBus 1399 */ 1400 static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format, 1401 uint8_t nvt_blk, uint32_t nvt_idx, 1402 bool cam_ignore, uint8_t priority, 1403 uint32_t logic_serv) 1404 { 1405 XiveNVT nvt; 1406 XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; 1407 bool found; 1408 1409 /* NVT cache lookup */ 1410 if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { 1411 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n", 1412 nvt_blk, nvt_idx); 1413 return; 1414 } 1415 1416 if (!xive_nvt_is_valid(&nvt)) { 1417 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n", 1418 nvt_blk, nvt_idx); 1419 return; 1420 } 1421 1422 found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore, 1423 priority, logic_serv, &match); 1424 if (found) { 1425 ipb_update(&match.tctx->regs[match.ring], priority); 1426 xive_tctx_notify(match.tctx, match.ring); 1427 return; 1428 } 1429 1430 /* Record the IPB in the associated NVT structure */ 1431 ipb_update((uint8_t *) &nvt.w4, priority); 1432 xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); 1433 1434 /* 1435 * If no matching NVT is dispatched on a HW thread : 1436 * - update the NVT structure if backlog is activated 1437 * - escalate (ESe PQ bits and EAS in w4-5) if escalation is 1438 * activated 1439 */ 1440 } 1441 1442 /* 1443 * An END trigger can come from an event trigger (IPI or HW) or from 1444 * another chip. We don't model the PowerBus but the END trigger 1445 * message has the same parameters than in the function below. 1446 */ 1447 static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, 1448 uint32_t end_idx, uint32_t end_data) 1449 { 1450 XiveEND end; 1451 uint8_t priority; 1452 uint8_t format; 1453 1454 /* END cache lookup */ 1455 if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) { 1456 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, 1457 end_idx); 1458 return; 1459 } 1460 1461 if (!xive_end_is_valid(&end)) { 1462 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", 1463 end_blk, end_idx); 1464 return; 1465 } 1466 1467 if (xive_end_is_enqueue(&end)) { 1468 xive_end_enqueue(&end, end_data); 1469 /* Enqueuing event data modifies the EQ toggle and index */ 1470 xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); 1471 } 1472 1473 /* 1474 * The W7 format depends on the F bit in W6. It defines the type 1475 * of the notification : 1476 * 1477 * F=0 : single or multiple NVT notification 1478 * F=1 : User level Event-Based Branch (EBB) notification, no 1479 * priority 1480 */ 1481 format = xive_get_field32(END_W6_FORMAT_BIT, end.w6); 1482 priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7); 1483 1484 /* The END is masked */ 1485 if (format == 0 && priority == 0xff) { 1486 return; 1487 } 1488 1489 /* 1490 * Check the END ESn (Event State Buffer for notification) for 1491 * even futher coalescing in the Router 1492 */ 1493 if (!xive_end_is_notify(&end)) { 1494 uint8_t pq = xive_get_field32(END_W1_ESn, end.w1); 1495 bool notify = xive_esb_trigger(&pq); 1496 1497 if (pq != xive_get_field32(END_W1_ESn, end.w1)) { 1498 end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq); 1499 xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); 1500 } 1501 1502 /* ESn[Q]=1 : end of notification */ 1503 if (!notify) { 1504 return; 1505 } 1506 } 1507 1508 /* 1509 * Follows IVPE notification 1510 */ 1511 xive_presenter_notify(xrtr, format, 1512 xive_get_field32(END_W6_NVT_BLOCK, end.w6), 1513 xive_get_field32(END_W6_NVT_INDEX, end.w6), 1514 xive_get_field32(END_W7_F0_IGNORE, end.w7), 1515 priority, 1516 xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7)); 1517 1518 /* TODO: Auto EOI. */ 1519 } 1520 1521 void xive_router_notify(XiveNotifier *xn, uint32_t lisn) 1522 { 1523 XiveRouter *xrtr = XIVE_ROUTER(xn); 1524 uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn); 1525 uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn); 1526 XiveEAS eas; 1527 1528 /* EAS cache lookup */ 1529 if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { 1530 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn); 1531 return; 1532 } 1533 1534 /* 1535 * The IVRE checks the State Bit Cache at this point. We skip the 1536 * SBC lookup because the state bits of the sources are modeled 1537 * internally in QEMU. 1538 */ 1539 1540 if (!xive_eas_is_valid(&eas)) { 1541 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn); 1542 return; 1543 } 1544 1545 if (xive_eas_is_masked(&eas)) { 1546 /* Notification completed */ 1547 return; 1548 } 1549 1550 /* 1551 * The event trigger becomes an END trigger 1552 */ 1553 xive_router_end_notify(xrtr, 1554 xive_get_field64(EAS_END_BLOCK, eas.w), 1555 xive_get_field64(EAS_END_INDEX, eas.w), 1556 xive_get_field64(EAS_END_DATA, eas.w)); 1557 } 1558 1559 static void xive_router_class_init(ObjectClass *klass, void *data) 1560 { 1561 DeviceClass *dc = DEVICE_CLASS(klass); 1562 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1563 1564 dc->desc = "XIVE Router Engine"; 1565 xnc->notify = xive_router_notify; 1566 } 1567 1568 static const TypeInfo xive_router_info = { 1569 .name = TYPE_XIVE_ROUTER, 1570 .parent = TYPE_SYS_BUS_DEVICE, 1571 .abstract = true, 1572 .class_size = sizeof(XiveRouterClass), 1573 .class_init = xive_router_class_init, 1574 .interfaces = (InterfaceInfo[]) { 1575 { TYPE_XIVE_NOTIFIER }, 1576 { } 1577 } 1578 }; 1579 1580 void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon) 1581 { 1582 if (!xive_eas_is_valid(eas)) { 1583 return; 1584 } 1585 1586 monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n", 1587 lisn, xive_eas_is_masked(eas) ? "M" : " ", 1588 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), 1589 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), 1590 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); 1591 } 1592 1593 /* 1594 * END ESB MMIO loads 1595 */ 1596 static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size) 1597 { 1598 XiveENDSource *xsrc = XIVE_END_SOURCE(opaque); 1599 uint32_t offset = addr & 0xFFF; 1600 uint8_t end_blk; 1601 uint32_t end_idx; 1602 XiveEND end; 1603 uint32_t end_esmask; 1604 uint8_t pq; 1605 uint64_t ret = -1; 1606 1607 end_blk = xsrc->block_id; 1608 end_idx = addr >> (xsrc->esb_shift + 1); 1609 1610 if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { 1611 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, 1612 end_idx); 1613 return -1; 1614 } 1615 1616 if (!xive_end_is_valid(&end)) { 1617 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", 1618 end_blk, end_idx); 1619 return -1; 1620 } 1621 1622 end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe; 1623 pq = xive_get_field32(end_esmask, end.w1); 1624 1625 switch (offset) { 1626 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: 1627 ret = xive_esb_eoi(&pq); 1628 1629 /* Forward the source event notification for routing ?? */ 1630 break; 1631 1632 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: 1633 ret = pq; 1634 break; 1635 1636 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: 1637 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: 1638 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: 1639 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: 1640 ret = xive_esb_set(&pq, (offset >> 8) & 0x3); 1641 break; 1642 default: 1643 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n", 1644 offset); 1645 return -1; 1646 } 1647 1648 if (pq != xive_get_field32(end_esmask, end.w1)) { 1649 end.w1 = xive_set_field32(end_esmask, end.w1, pq); 1650 xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); 1651 } 1652 1653 return ret; 1654 } 1655 1656 /* 1657 * END ESB MMIO stores are invalid 1658 */ 1659 static void xive_end_source_write(void *opaque, hwaddr addr, 1660 uint64_t value, unsigned size) 1661 { 1662 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%" 1663 HWADDR_PRIx"\n", addr); 1664 } 1665 1666 static const MemoryRegionOps xive_end_source_ops = { 1667 .read = xive_end_source_read, 1668 .write = xive_end_source_write, 1669 .endianness = DEVICE_BIG_ENDIAN, 1670 .valid = { 1671 .min_access_size = 8, 1672 .max_access_size = 8, 1673 }, 1674 .impl = { 1675 .min_access_size = 8, 1676 .max_access_size = 8, 1677 }, 1678 }; 1679 1680 static void xive_end_source_realize(DeviceState *dev, Error **errp) 1681 { 1682 XiveENDSource *xsrc = XIVE_END_SOURCE(dev); 1683 Object *obj; 1684 Error *local_err = NULL; 1685 1686 obj = object_property_get_link(OBJECT(dev), "xive", &local_err); 1687 if (!obj) { 1688 error_propagate(errp, local_err); 1689 error_prepend(errp, "required link 'xive' not found: "); 1690 return; 1691 } 1692 1693 xsrc->xrtr = XIVE_ROUTER(obj); 1694 1695 if (!xsrc->nr_ends) { 1696 error_setg(errp, "Number of interrupt needs to be greater than 0"); 1697 return; 1698 } 1699 1700 if (xsrc->esb_shift != XIVE_ESB_4K && 1701 xsrc->esb_shift != XIVE_ESB_64K) { 1702 error_setg(errp, "Invalid ESB shift setting"); 1703 return; 1704 } 1705 1706 /* 1707 * Each END is assigned an even/odd pair of MMIO pages, the even page 1708 * manages the ESn field while the odd page manages the ESe field. 1709 */ 1710 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), 1711 &xive_end_source_ops, xsrc, "xive.end", 1712 (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); 1713 } 1714 1715 static Property xive_end_source_properties[] = { 1716 DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0), 1717 DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0), 1718 DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K), 1719 DEFINE_PROP_END_OF_LIST(), 1720 }; 1721 1722 static void xive_end_source_class_init(ObjectClass *klass, void *data) 1723 { 1724 DeviceClass *dc = DEVICE_CLASS(klass); 1725 1726 dc->desc = "XIVE END Source"; 1727 dc->props = xive_end_source_properties; 1728 dc->realize = xive_end_source_realize; 1729 } 1730 1731 static const TypeInfo xive_end_source_info = { 1732 .name = TYPE_XIVE_END_SOURCE, 1733 .parent = TYPE_DEVICE, 1734 .instance_size = sizeof(XiveENDSource), 1735 .class_init = xive_end_source_class_init, 1736 }; 1737 1738 /* 1739 * XIVE Notifier 1740 */ 1741 static const TypeInfo xive_notifier_info = { 1742 .name = TYPE_XIVE_NOTIFIER, 1743 .parent = TYPE_INTERFACE, 1744 .class_size = sizeof(XiveNotifierClass), 1745 }; 1746 1747 static void xive_register_types(void) 1748 { 1749 type_register_static(&xive_source_info); 1750 type_register_static(&xive_notifier_info); 1751 type_register_static(&xive_router_info); 1752 type_register_static(&xive_end_source_info); 1753 type_register_static(&xive_tctx_info); 1754 } 1755 1756 type_init(xive_register_types) 1757