/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/reset.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "trace.h"

/*
 * XIVE Virtualization Controller BAR and Thread Management BAR that we
 * use for the ESB pages and the TIMA pages
 */
#define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE   0x0006030203180000ull

/*
 * The allocation of VP blocks is a complex operation in OPAL and the
 * VP identifiers have a relation with the number of HW chips, the
 * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
 * controller model does not have the same constraints and can use a
 * simple mapping scheme of the CPU vcpu_id.
 *
 * These identifiers are never returned to the OS.
 */

#define SPAPR_XIVE_NVT_BASE 0x400

/*
 * sPAPR NVT and END indexing helpers
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}

static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    assert(cpu);

    if (out_nvt_blk) {
        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_nvt_idx) {
        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
    }
}

static int spapr_xive_target_to_nvt(uint32_t target,
                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
    return 0;
}

/*
 * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
 * priorities per CPU
 */
int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
                             uint32_t *out_server, uint8_t *out_prio)
{
    assert(end_blk == SPAPR_XIVE_BLOCK_ID);

    if (out_server) {
        *out_server = end_idx >> 3;
    }

    if (out_prio) {
        *out_prio = end_idx & 0x7;
    }
    return 0;
}

static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    assert(cpu);

    if (out_end_blk) {
        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_end_idx) {
        *out_end_idx = (cpu->vcpu_id << 3) + prio;
    }
}

static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
    return 0;
}
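/*
 * Worked example of the mappings above (illustrative): with
 * SPAPR_XIVE_NVT_BASE = 0x400, vcpu_id 2 maps to NVT index 0x402 in
 * block SPAPR_XIVE_BLOCK_ID. For the ENDs, priority 5 on that vCPU
 * maps to end_idx = (2 << 3) + 5 = 21, and the reverse split gives
 * server = 21 >> 3 = 2 and prio = 21 & 0x7 = 5.
 */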
/*
 * On sPAPR machines, use a simplified output for the XIVE END
 * structure dumping only the information related to the OS EQ.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          GString *buf)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    g_string_append_printf(buf, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                           spapr_xive_nvt_to_target(0, nvt),
                           priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, buf);
}

/*
 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
 */
#define spapr_xive_in_kernel(xive) \
    (kvm_irqchip_in_kernel() && (xive)->fd != -1)

static void spapr_xive_pic_print_info(SpaprXive *xive, GString *buf)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    g_string_append_printf(buf, "  LISN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        g_string_append_printf(buf, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, buf);
            }
        }
        g_string_append_c(buf, '\n');
    }
}
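/*
 * Reading the dump above (illustrative): the PQ column shows the ESB
 * state bits ('P' presented, 'Q' queued, '-' clear), 'A' flags an
 * asserted LSI and 'M' flags a masked EAS. Masked entries have no
 * CPU/PRIO and EQ columns since they are not routed to an END.
 */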
"M" : " ", 191 (int) xive_get_field64(EAS_END_DATA, eas->w)); 192 193 if (!xive_eas_is_masked(eas)) { 194 uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); 195 XiveEND *end; 196 197 assert(end_idx < xive->nr_ends); 198 end = &xive->endt[end_idx]; 199 200 if (xive_end_is_valid(end)) { 201 spapr_xive_end_pic_print_info(xive, end, buf); 202 } 203 204 } 205 g_string_append_c(buf, '\n'); 206 } 207 } 208 209 void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable) 210 { 211 memory_region_set_enabled(&xive->source.esb_mmio, enable); 212 memory_region_set_enabled(&xive->tm_mmio, enable); 213 214 /* Disable the END ESBs until a guest OS makes use of them */ 215 memory_region_set_enabled(&xive->end_source.esb_mmio, false); 216 } 217 218 static void spapr_xive_tm_write(void *opaque, hwaddr offset, 219 uint64_t value, unsigned size) 220 { 221 XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx; 222 223 xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size); 224 } 225 226 static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size) 227 { 228 XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx; 229 230 return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size); 231 } 232 233 const MemoryRegionOps spapr_xive_tm_ops = { 234 .read = spapr_xive_tm_read, 235 .write = spapr_xive_tm_write, 236 .endianness = DEVICE_BIG_ENDIAN, 237 .valid = { 238 .min_access_size = 1, 239 .max_access_size = 8, 240 }, 241 .impl = { 242 .min_access_size = 1, 243 .max_access_size = 8, 244 }, 245 }; 246 247 static void spapr_xive_end_reset(XiveEND *end) 248 { 249 memset(end, 0, sizeof(*end)); 250 251 /* switch off the escalation and notification ESBs */ 252 end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q); 253 } 254 255 static void spapr_xive_reset(void *dev) 256 { 257 SpaprXive *xive = SPAPR_XIVE(dev); 258 int i; 259 260 /* 261 * The XiveSource has its own reset handler, which mask off all 262 * IRQs (!P|Q) 263 */ 264 265 /* Mask all valid EASs in the IRQ number space. */ 266 for (i = 0; i < xive->nr_irqs; i++) { 267 XiveEAS *eas = &xive->eat[i]; 268 if (xive_eas_is_valid(eas)) { 269 eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED); 270 } else { 271 eas->w = 0; 272 } 273 } 274 275 /* Clear all ENDs */ 276 for (i = 0; i < xive->nr_ends; i++) { 277 spapr_xive_end_reset(&xive->endt[i]); 278 } 279 } 280 281 static void spapr_xive_instance_init(Object *obj) 282 { 283 SpaprXive *xive = SPAPR_XIVE(obj); 284 285 object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE); 286 287 object_initialize_child(obj, "end_source", &xive->end_source, 288 TYPE_XIVE_END_SOURCE); 289 290 /* Not connected to the KVM XIVE device */ 291 xive->fd = -1; 292 } 293 294 static void spapr_xive_realize(DeviceState *dev, Error **errp) 295 { 296 SpaprXive *xive = SPAPR_XIVE(dev); 297 SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive); 298 XiveSource *xsrc = &xive->source; 299 XiveENDSource *end_xsrc = &xive->end_source; 300 Error *local_err = NULL; 301 302 /* Set by spapr_irq_init() */ 303 g_assert(xive->nr_irqs); 304 g_assert(xive->nr_ends); 305 306 sxc->parent_realize(dev, &local_err); 307 if (local_err) { 308 error_propagate(errp, local_err); 309 return; 310 } 311 312 /* 313 * Initialize the internal sources, for IPIs and virtual devices. 
static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);

    /* Not connected to the KVM XIVE device */
    xive->fd = -1;
}

static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    /* Set by spapr_irq_init() */
    g_assert(xive->nr_irqs);
    g_assert(xive->nr_ends);

    sxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    /*
     * Initialize the END ESB source
     */
    object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Set the mapping address of the END ESB pages after the source ESBs */
    xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));

    qemu_register_reset(spapr_xive_reset, dev);

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
                          xive, "xive.tima", 4ull << TM_SHIFT);

    /*
     * Map all regions. These will be enabled or disabled at reset and
     * can also be overridden by KVM memory regions if active
     */
    memory_region_add_subregion(get_system_memory(), xive->vc_base,
                                &xsrc->esb_mmio);
    memory_region_add_subregion(get_system_memory(), xive->end_base,
                                &end_xsrc->esb_mmio);
    memory_region_add_subregion(get_system_memory(), xive->tm_base,
                                &xive->tm_mmio);
}
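/*
 * Resulting guest physical layout with the default BAR values
 * (illustrative; the ESB window length depends on nr_irqs and the ESB
 * page shift):
 *
 *   vc_base  = 0x0006010000000000 : source ESB pages
 *   end_base = vc_base + xive_source_esb_len(xsrc) : END ESB pages
 *   tm_base  = 0x0006030203180000 : four TIMA pages, one per ring,
 *              of which the OS and USER pages are advertised to the
 *              guest in the device tree
 */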
static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, XiveEAS *eas)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (eas_idx >= xive->nr_irqs) {
        return -1;
    }

    *eas = xive->eat[eas_idx];
    return 0;
}

static int spapr_xive_get_end(XiveRouter *xrtr,
                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    return 0;
}

static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
                                uint32_t end_idx, XiveEND *end,
                                uint8_t word_number)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    return 0;
}

static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* TODO: should we assert() if we can't find an NVT ? */
        return -1;
    }

    /*
     * sPAPR does not maintain a NVT table. Return that the NVT is
     * valid if we have found a matching CPU
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}

static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    /*
     * We don't need to write back to the NVTs because the sPAPR
     * machine should never hit a non-scheduled NVT. It should never
     * get called.
     */
    g_assert_not_reached();
}

static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                                uint8_t nvt_blk, uint32_t nvt_idx,
                                bool cam_ignore, uint8_t priority,
                                uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
        int ring;

        /*
         * Skip partially initialized vCPUs. This can happen when
         * vCPUs are hotplugged.
         */
        if (!tctx) {
            continue;
        }

        /*
         * Check the thread context CAM lines and record matches.
         */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the matching thread interrupt context and follow on to
         * check for duplicates which are invalid.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return -1;
            }

            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    return count;
}

static uint32_t spapr_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /*
     * Let's claim GEN1 TIMA format. If running with KVM on P10, the
     * correct answer is deep in the hardware and not accessible to
     * us. But it shouldn't matter as it only affects the presenter
     * as seen by a guest OS.
     */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
{
    return SPAPR_XIVE_BLOCK_ID;
}

static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    assert(SPAPR_XIVE_BLOCK_ID == blk);

    *pq = xive_source_esb_get(&xive->source, idx);
    return 0;
}

static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    assert(SPAPR_XIVE_BLOCK_ID == blk);

    *pq = xive_source_esb_set(&xive->source, idx, *pq);
    return 0;
}

static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};

static int vmstate_spapr_xive_pre_save(void *opaque)
{
    SpaprXive *xive = SPAPR_XIVE(opaque);

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_pre_save(xive);
    }

    return 0;
}
/*
 * Called by the sPAPR IRQ backend 'post_load' method at the machine
 * level.
 */
static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_post_load(xive, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level */
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
                                bool lsi, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;

    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_claim_irq(lisn, lsi);

    if (xive_eas_is_valid(&xive->eat[lisn])) {
        error_setg(errp, "IRQ %d is not free", lisn);
        return -EBUSY;
    }

    /*
     * Set default values when allocating an IRQ number
     */
    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
    }

    return 0;
}

static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_free_irq(lisn);

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}

static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
    DEFINE_PROP_END_OF_LIST(),
};

static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
    if (!obj) {
        return -1;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);
    return 0;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
{
    uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}
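/*
 * Illustrative: for the first vCPU (vcpu_id 0), spapr_xive_cpu_to_nvt()
 * yields NVT 0/0x400, so the reset handler below loads the OS CAM line
 * with TM_QW1W2_VO | xive_nvt_cam_line(0, 0x400), telling the presenter
 * which virtual processor is dispatched on this thread.
 */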
static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu)
{
    XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    uint8_t nvt_blk;
    uint32_t nvt_idx;

    xive_tctx_reset(tctx);

    /*
     * When a Virtual Processor is scheduled to run on a HW thread,
     * the hypervisor pushes its identifier in the OS CAM line.
     * Emulate the same behavior under QEMU.
     */
    spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);

    xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
}

static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
                                        PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    xive_tctx_destroy(spapr_cpu->tctx);
    spapr_cpu->tctx = NULL;
}

static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    trace_spapr_xive_set_irq(irq, val);

    if (spapr_xive_in_kernel(xive)) {
        kvmppc_xive_source_set_irq(&xive->source, irq, val);
    } else {
        xive_source_set_irq(&xive->source, irq, val);
    }
}

static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    CPUState *cs;
    g_autoptr(GString) buf = g_string_new("");
    g_autoptr(HumanReadableText) info = NULL;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, buf);
    }
    spapr_xive_pic_print_info(xive, buf);

    info = human_readable_text_from_str(buf);
    monitor_puts(mon, info->human_readable_text);
}

static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
                          void *fdt, uint32_t phandle)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    int node;
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(SPAPR_IRQ_IPI),
        cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
    };
    /*
     * EQ size - the sizes of pages supported by the system 4K, 64K,
     * 2M, 16M. We only advertise 64K for the moment.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16), /* 64K */
    };
    /*
     * QEMU/KVM only needs to define a single range to reserve the
     * escalation priority. A priority bitmask would have been more
     * appropriate.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(xive->hv_prio),        /* start */
        cpu_to_be32(0xff - xive->hv_prio), /* count */
    };

    /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller. */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}
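/*
 * The node built above looks roughly as follows in the guest device
 * tree (illustrative, with the default BARs and hv_prio = 7):
 *
 *   interrupt-controller@<tm_base + user page> {
 *       device_type = "power-ivpe";
 *       compatible = "ibm,power-ivpe";
 *       reg = <user TIMA page, 64K, OS TIMA page, 64K>;
 *       ibm,xive-eq-sizes = <16>;
 *       ibm,xive-lisn-ranges = <SPAPR_IRQ_IPI, SPAPR_IRQ_IPI + nr_servers>;
 *       interrupt-controller;
 *       #interrupt-cells = <2>;
 *   };
 */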
static int spapr_xive_activate(SpaprInterruptController *intc,
                               uint32_t nr_servers, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (kvm_enabled()) {
        int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
                                    errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(xive, true);

    return 0;
}

static void spapr_xive_deactivate(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    spapr_xive_mmio_set_enabled(xive, false);

    if (spapr_xive_in_kernel(xive)) {
        kvmppc_xive_disconnect(intc);
    }
}

static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
{
    return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
}

static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);

    dc->desc = "sPAPR XIVE Interrupt Controller";
    device_class_set_props(dc, spapr_xive_properties);
    device_class_set_parent_realize(dc, spapr_xive_realize,
                                    &sxc->parent_realize);
    dc->vmsd = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_pq = spapr_xive_get_pq;
    xrc->set_pq = spapr_xive_set_pq;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_block_id = spapr_xive_get_block_id;

    sicc->activate = spapr_xive_activate;
    sicc->deactivate = spapr_xive_deactivate;
    sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    sicc->claim_irq = spapr_xive_claim_irq;
    sicc->free_irq = spapr_xive_free_irq;
    sicc->set_irq = spapr_xive_set_irq;
    sicc->print_info = spapr_xive_print_info;
    sicc->dt = spapr_xive_dt;
    sicc->post_load = spapr_xive_post_load;

    xpc->match_nvt = spapr_xive_match_nvt;
    xpc->get_config = spapr_xive_presenter_get_config;
    xpc->in_kernel = spapr_xive_in_kernel_xptr;
}

static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
    .class_size = sizeof(SpaprXiveClass),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_SPAPR_INTC },
        { }
    },
};

static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)
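/*
 * Illustrative sketch (done in the sPAPR IRQ backend, not in this
 * file): the machine instantiates the device roughly as
 *
 *   DeviceState *dev = qdev_new(TYPE_SPAPR_XIVE);
 *   qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs);
 *   qdev_prop_set_uint32(dev, "nr-ends", nr_ends);
 *   qdev_realize_and_unref(dev, NULL, &error_fatal);
 *
 * after which spapr_xive_hcall_init() at the end of this file
 * registers the hcalls below.
 */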
/*
 * XIVE hcalls
 *
 * The terminology used by the XIVE hcalls is the following :
 *
 *   TARGET vCPU number
 *   EQ     Event Queue assigned by OS to receive event data
 *   ESB    page for source interrupt management
 *   LISN   Logical Interrupt Source Number identifying a source in the
 *          machine
 *   EISN   Effective Interrupt Source Number used by guest OS to
 *          identify source in the guest
 *
 * The EAS, END, NVT structures are not exposed.
 */

/*
 * On POWER9, the KVM XIVE device uses priority 7 for the escalation
 * interrupts. So we only allow the guest to use priorities [0..6].
 */
static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
{
    return priority >= xive->hv_prio;
}

/*
 * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
 * real address of the MMIO page through which the Event State Buffer
 * entry associated with the value of the "lisn" parameter is managed.
 *
 * Parameters:
 * Input
 * - R4: "flags"
 *         Bits 0-63 reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned
 *       by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output
 * - R4: "flags"
 *         Bits 0-59: Reserved
 *         Bit 60: H_INT_ESB must be used for Event State Buffer
 *                 management
 *         Bit 61: 1 == LSI  0 == MSI
 *         Bit 62: the full function page supports trigger
 *         Bit 63: Store EOI Supported
 * - R5: Logical Real address of full function Event State Buffer
 *       management page, -1 if H_INT_ESB hcall flag is set to 1.
 * - R6: Logical Real Address of trigger only Event State Buffer
 *       management page or -1.
 * - R7: Power of 2 page size for the ESB management pages returned in
 *       R5 and R6.
 */

#define SPAPR_XIVE_SRC_H_INT_ESB  PPC_BIT(60) /* ESB manage with H_INT_ESB */
#define SPAPR_XIVE_SRC_LSI        PPC_BIT(61) /* Virtual LSI type */
#define SPAPR_XIVE_SRC_TRIGGER    PPC_BIT(62) /* Trigger and management
                                                 on same page */
#define SPAPR_XIVE_SRC_STORE_EOI  PPC_BIT(63) /* Store EOI support */

static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    trace_spapr_xive_get_source_info(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
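/*
 * Worked example (illustrative): with 2-page ESBs and esb_shift = 17
 * (a 64K trigger page plus a 64K management page per source), the
 * hcall above returns a page size of esb_shift - 1 = 16 in R7, the
 * management page address in R5 and the trigger page address in R6.
 */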
/*
 * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
 * Interrupt Source to a target. The Logical Interrupt Source is
 * designated with the "lisn" parameter and the target is designated
 * with the "target" and "priority" parameters. Upon return from the
 * hcall(), no additional interrupts will be directed to the old EQ.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-61: Reserved
 *         Bit 62: set the "eisn" in the EAS
 *         Bit 63: masks the interrupt source in the hardware interrupt
 *                 control structure. An interrupt masked by this mechanism
 *                 will be dropped, but its source state bits will still be
 *                 set. There is no race-free way of unmasking and restoring
 *                 the source. Thus this should only be used for interrupts
 *                 that are also masked at the source, and only in cases
 *                 where the interrupt is not meant to be used for a long
 *                 time, for example because no valid target exists for it
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned by
 *       the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R7: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R8: "eisn" is the guest EISN associated with the "lisn"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
#define SPAPR_XIVE_SRC_MASK     PPC_BIT(63)

static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong target = args[2];
    target_ulong priority = args[3];
    target_ulong eisn = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* priority 0xff is used to reset the EAS */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}
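/*
 * Illustrative routing example: H_INT_SET_SOURCE_CONFIG with target 1,
 * priority 5 and the SET_EISN flag stores EAS_END_INDEX =
 * (1 << 3) + 5 = 13 and EAS_END_DATA = eisn in the EAS; the router
 * later uses these fields to locate the END and to push the EISN into
 * its event queue.
 */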
/*
 * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine which
 * target/priority pair is assigned to the specified Logical Interrupt
 * Source.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - R4: Target to which the specified Logical Interrupt Source is
 *       assigned
 * - R5: Priority to which the specified Logical Interrupt Source is
 *       assigned
 * - R6: EISN for the specified Logical Interrupt Source (this will be
 *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_get_source_config(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
/*
 * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
 * address of the notification management page associated with the
 * specified target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: Logical real address of notification page
 * - R5: Power of 2 page size of the notification page
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_info(flags, target, priority);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
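/*
 * Worked example (illustrative): the END ESB pages follow the source
 * ESB pages and each END owns a pair of pages. With esb_shift = 16
 * (64K pages), the notification page for END index 21 returned above
 * is end_base + (1ull << 17) * 21.
 */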
/*
 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ for
 * a given "target" and "priority". It is also used to set the
 * notification config associated with the EQ. An EQ size of 0 is
 * used to reset the EQ config for a given target and priority. If
 * resetting the EQ config, the END associated with the given "target"
 * and "priority" will be changed to disable queueing.
 *
 * Upon return from the hcall(), no additional interrupts will be
 * directed to the old EQ (if one was set). The old EQ (if one was
 * set) should be investigated for interrupts that occurred prior to
 * or during the hcall().
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Unconditional Notify (n) per the XIVE spec
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R7: "eventQueue": The logical real address of the start of the EQ
 * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)

static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */

    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* reset queue and disable queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ. We should also check that the queue
         * has been zeroed by the OS.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* "target" should have been validated above */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /*
     * Ensure the priority and target are correctly set (they will not
     * be right after allocation)
     */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
             xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * The generation bit for the END starts at 1 and the END page
     * offset counter starts at 0.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
             xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /*
     * TODO: issue syncs required to ensure all in-flight interrupts
     * are complete on the old END
     */

out:
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Update END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}
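/*
 * Worked example (illustrative): a 64K EQ (qsize = 16) is encoded as
 * END_W0_QSIZE = 16 - 12 = 4. Each event is a 4-byte word, so the
 * queue holds 1 << (4 + 10) = 16384 entries, matching the "qentries"
 * computation used when dumping the END state.
 */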
1408 */ 1409 eq = address_space_map(CPU(cpu)->as, qpage, &plen, true, 1410 MEMTXATTRS_UNSPECIFIED); 1411 if (plen != 1 << qsize) { 1412 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%" 1413 HWADDR_PRIx "\n", qpage); 1414 return H_P4; 1415 } 1416 address_space_unmap(CPU(cpu)->as, eq, plen, true, plen); 1417 } 1418 1419 /* "target" should have been validated above */ 1420 if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) { 1421 g_assert_not_reached(); 1422 } 1423 1424 /* 1425 * Ensure the priority and target are correctly set (they will not 1426 * be right after allocation) 1427 */ 1428 end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) | 1429 xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx); 1430 end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority); 1431 1432 if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) { 1433 end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY); 1434 } else { 1435 end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY); 1436 } 1437 1438 /* 1439 * The generation bit for the END starts at 1 and The END page 1440 * offset counter starts at 0. 1441 */ 1442 end.w1 = cpu_to_be32(END_W1_GENERATION) | 1443 xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul); 1444 end.w0 |= cpu_to_be32(END_W0_VALID); 1445 1446 /* 1447 * TODO: issue syncs required to ensure all in-flight interrupts 1448 * are complete on the old END 1449 */ 1450 1451 out: 1452 if (spapr_xive_in_kernel(xive)) { 1453 Error *local_err = NULL; 1454 1455 kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err); 1456 if (local_err) { 1457 error_report_err(local_err); 1458 return H_HARDWARE; 1459 } 1460 } 1461 1462 /* Update END */ 1463 memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND)); 1464 return H_SUCCESS; 1465 } 1466 1467 /* 1468 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get a EQ for a given 1469 * target and priority. 1470 * 1471 * Parameters: 1472 * Input: 1473 * - R4: "flags" 1474 * Bits 0-62: Reserved 1475 * Bit 63: Debug: Return debug data 1476 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or 1477 * "ibm,ppc-interrupt-gserver#s" 1478 * - R6: "priority" is a valid priority not in 1479 * "ibm,plat-res-int-priorities" 1480 * 1481 * Output: 1482 * - R4: "flags": 1483 * Bits 0-61: Reserved 1484 * Bit 62: The value of Event Queue Generation Number (g) per 1485 * the XIVE spec if "Debug" = 1 1486 * Bit 63: The value of Unconditional Notify (n) per the XIVE spec 1487 * - R5: The logical real address of the start of the EQ 1488 * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes" 1489 * - R7: The value of Event Queue Offset Counter per XIVE spec 1490 * if "Debug" = 1, else 0 1491 * 1492 */ 1493 1494 #define SPAPR_XIVE_END_DEBUG PPC_BIT(63) 1495 1496 static target_ulong h_int_get_queue_config(PowerPCCPU *cpu, 1497 SpaprMachineState *spapr, 1498 target_ulong opcode, 1499 target_ulong *args) 1500 { 1501 SpaprXive *xive = spapr->xive; 1502 target_ulong flags = args[0]; 1503 target_ulong target = args[1]; 1504 target_ulong priority = args[2]; 1505 XiveEND *end; 1506 uint8_t end_blk; 1507 uint32_t end_idx; 1508 1509 trace_spapr_xive_get_queue_config(flags, target, priority); 1510 1511 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { 1512 return H_FUNCTION; 1513 } 1514 1515 if (flags & ~SPAPR_XIVE_END_DEBUG) { 1516 return H_PARAMETER; 1517 } 1518 1519 /* 1520 * H_STATE should be returned if a H_INT_RESET is in progress. 
/*
 * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
 * reporting cache line pair for the calling thread. The reporting
 * cache lines will contain the OS interrupt context when the OS
 * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
 * interrupt. The reporting cache lines can be reset by inputting -1
 * in "reportingLine". Issuing the CI store byte without reporting
 * cache lines registered will result in the data not being accessible
 * to the OS.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "reportingLine": The logical real address of the reporting cache
 *       line pair
 *
 * Output:
 * - None
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    target_ulong flags = args[0];

    trace_spapr_xive_set_os_reporting_line(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_SET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

/*
 * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
 * real address of the reporting cache line pair set for the input
 * "target". If no reporting cache line pair has been set, -1 is
 * returned.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "reportingLine": The logical real address of the reporting
 *       cache line pair
 *
 * Output:
 * - R4: The logical real address of the reporting line if set, else -1
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    target_ulong flags = args[0];

    trace_spapr_xive_get_os_reporting_line(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_GET_OS_REPORTING_LINE */
    return H_FUNCTION;
}
/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page for the input "lisn". This hcall is only supported for LISNs
 * that have the ESB hcall flag set to 1 when returned from hcall()
 * H_INT_GET_SOURCE_INFO.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Store: Store=1, store operation, else load operation
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */

#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong offset = args[2];
    target_ulong data = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    trace_spapr_xive_esb(flags, lisn, offset, data);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (spapr_xive_in_kernel(xive)) {
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE),
                          MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}
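/*
 * Illustrative guest usage: an OS told to use H_INT_ESB for a LISN
 * (e.g. an emulated LSI) acknowledges it with a load, roughly
 *
 *   R4 = 0 (load), R5 = lisn, R6 = <EOI offset in the ESB page>;
 *
 * the returned value reflects the previous PQ state, letting the OS
 * decide whether a re-trigger is needed while the level is asserted.
 */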
/*
 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
 * ensure any in-flight events for the input lisn are in the event
 * queue.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - None
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    trace_spapr_xive_sync(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /*
     * This is not real hardware. Nothing to be done unless running
     * under KVM
     */

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_sync_source(xive, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}
/*
 * The H_INT_RESET hcall() is used to reset all of the partition's
 * interrupt exploitation structures to their initial state. This
 * means losing all previously set interrupt state set via
 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 *
 * Output:
 * - None
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                SpaprMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];

    trace_spapr_xive_reset(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    device_cold_reset(DEVICE(xive));

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_reset(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}