/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/reset.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"

/*
 * XIVE Virtualization Controller BAR and Thread Management BAR that we
 * use for the ESB pages and the TIMA pages
 */
#define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE   0x0006030203180000ull

/*
 * The allocation of VP blocks is a complex operation in OPAL and the
 * VP identifiers have a relation with the number of HW chips, the
 * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
 * controller model does not have the same constraints and can use a
 * simple mapping scheme of the CPU vcpu_id
 *
 * These identifiers are never returned to the OS.
 */

#define SPAPR_XIVE_NVT_BASE 0x400

/*
 * sPAPR NVT and END indexing helpers
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}

static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    assert(cpu);

    if (out_nvt_blk) {
        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_nvt_idx) {
        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
    }
}

static int spapr_xive_target_to_nvt(uint32_t target,
                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
    return 0;
}

/*
 * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
 * priorities per CPU
 */
int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
                             uint32_t *out_server, uint8_t *out_prio)
{
    assert(end_blk == SPAPR_XIVE_BLOCK_ID);

    if (out_server) {
        *out_server = end_idx >> 3;
    }

    if (out_prio) {
        *out_prio = end_idx & 0x7;
    }
    return 0;
}

static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    assert(cpu);

    if (out_end_blk) {
        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_end_idx) {
        *out_end_idx = (cpu->vcpu_id << 3) + prio;
    }
}

static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
    return 0;
}
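/*
 * Worked example (illustrative only): with the scheme above, vCPU 2
 * gets NVT 0/0x402 (SPAPR_XIVE_NVT_BASE + 2) and its priority 5 END
 * has index (2 << 3) + 5 = 0x15. Conversely, END index 0x15 maps
 * back to server 0x15 >> 3 = 2 and priority 0x15 & 0x7 = 5.
 */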
/*
 * On sPAPR machines, use a simplified output for the XIVE END
 * structure dumping only the information related to the OS EQ.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
}

void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "  LISN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}
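/*
 * A typical line of the dump above would look like (illustrative,
 * spacing approximate):
 *
 *   LISN         PQ    EISN     CPU/PRIO EQ
 *   00001200 MSI --    00001200   0/   6 ...
 *
 * i.e. a valid, unmasked MSI with both PQ bits clear, routed to the
 * priority 6 OS queue of CPU 0.
 */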
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* Disable the END ESBs until a guest OS makes use of them */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}

static void spapr_xive_tm_write(void *opaque, hwaddr offset,
                                uint64_t value, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

const MemoryRegionOps spapr_xive_tm_ops = {
    .read = spapr_xive_tm_read,
    .write = spapr_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}

static void spapr_xive_reset(void *dev)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * The XiveSource has its own reset handler, which masks off all
     * IRQs (!P|Q)
     */

    /* Mask all valid EASs in the IRQ number space. */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}

static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);

    /* Not connected to the KVM XIVE device */
    xive->fd = -1;
}

static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    sxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (!xive->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (!xive->nr_ends) {
        error_setg(errp, "Number of ENDs needs to be greater than 0");
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, &local_err)) {
        error_propagate(errp, local_err);
        return;
    }
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);

    /*
     * Initialize the END ESB source
     */
    object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, &local_err)) {
        error_propagate(errp, local_err);
        return;
    }
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);

    /* Set the mapping address of the END ESB pages after the source ESBs */
    xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));

    qemu_register_reset(spapr_xive_reset, dev);

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
                          xive, "xive.tima", 4ull << TM_SHIFT);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);

    /*
     * Map all regions. These will be enabled or disabled at reset and
     * can also be overridden by KVM memory regions if active
     */
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
}
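/*
 * Resulting guest physical layout (illustrative, with the default
 * BARs):
 *
 *   vc_base  : source ESB pages, one (or a pair) per LISN
 *   end_base : END ESB pages, laid out right after the source ESBs
 *   tm_base  : the four TIMA pages (HW, HV pool, OS, User)
 */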
static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, XiveEAS *eas)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (eas_idx >= xive->nr_irqs) {
        return -1;
    }

    *eas = xive->eat[eas_idx];
    return 0;
}

static int spapr_xive_get_end(XiveRouter *xrtr,
                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    return 0;
}

static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
                                uint32_t end_idx, XiveEND *end,
                                uint8_t word_number)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    return 0;
}

static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* TODO: should we assert() if we can't find a NVT ? */
        return -1;
    }

    /*
     * sPAPR does not maintain a NVT table. Return that the NVT is
     * valid if we have found a matching CPU
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}

static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    /*
     * We don't need to write back to the NVTs because the sPAPR
     * machine should never hit a non-scheduled NVT. It should never
     * get called.
     */
    g_assert_not_reached();
}
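/*
 * These accessors back the generic event routing implemented in
 * hw/intc/xive.c: roughly, an event on LISN 'i' looks up eat[i]
 * (get_eas), follows EAS_END_INDEX to the END table (get_end),
 * enqueues the EAS_END_DATA word in the guest EQ and updates the
 * queue index (write_end), and finally resolves the END's NVT to a
 * vCPU thread context to notify (get_nvt).
 */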
static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                                uint8_t nvt_blk, uint32_t nvt_idx,
                                bool cam_ignore, uint8_t priority,
                                uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
        int ring;

        /*
         * Skip partially initialized vCPUs. This can happen when
         * vCPUs are hotplugged.
         */
        if (!tctx) {
            continue;
        }

        /*
         * Check the thread context CAM lines and record matches.
         */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the matching thread interrupt context and follow on to
         * check for duplicates which are invalid.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return -1;
            }

            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    return count;
}

static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
{
    return SPAPR_XIVE_BLOCK_ID;
}

static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};

static int vmstate_spapr_xive_pre_save(void *opaque)
{
    if (kvm_irqchip_in_kernel()) {
        return kvmppc_xive_pre_save(SPAPR_XIVE(opaque));
    }

    return 0;
}

/*
 * Called by the sPAPR IRQ backend 'post_load' method at the machine
 * level.
 */
static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
{
    if (kvm_irqchip_in_kernel()) {
        return kvmppc_xive_post_load(SPAPR_XIVE(intc), version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
                                bool lsi, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;

    assert(lisn < xive->nr_irqs);

    if (xive_eas_is_valid(&xive->eat[lisn])) {
        error_setg(errp, "IRQ %d is not free", lisn);
        return -EBUSY;
    }

    /*
     * Set default values when allocating an IRQ number
     */
    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (kvm_irqchip_in_kernel()) {
        return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
    }

    return 0;
}

static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    assert(lisn < xive->nr_irqs);

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}
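/*
 * Illustrative lifecycle of a LISN (hypothetical numbers): the sPAPR
 * IRQ backend claims LISN 0x1000 for a device, which sets
 * EAS_VALID|EAS_MASKED in eat[0x1000]; the guest then routes and
 * unmasks it with H_INT_SET_SOURCE_CONFIG (see below). Freeing the
 * IRQ simply clears EAS_VALID, leaving the other EAS bits stale but
 * ignored.
 */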
static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};

static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
    if (!obj) {
        return -1;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);
    return 0;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
{
    uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}

static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu)
{
    XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    uint8_t nvt_blk;
    uint32_t nvt_idx;

    xive_tctx_reset(tctx);

    /*
     * When a Virtual Processor is scheduled to run on a HW thread,
     * the hypervisor pushes its identifier in the OS CAM line.
     * Emulate the same behavior under QEMU.
     */
    spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);

    xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
}
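/*
 * For example (illustrative): on reset of vCPU 2, its NVT identifier
 * 0/0x402 is pushed in the OS CAM line of its thread context with the
 * VO (valid) bit set, which is what a hypervisor would do when
 * dispatching the virtual processor on a HW thread. Presented events
 * then match against this CAM line in spapr_xive_match_nvt().
 */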
static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
                                        PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    xive_tctx_destroy(spapr_cpu->tctx);
    spapr_cpu->tctx = NULL;
}

static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_source_set_irq(&xive->source, irq, val);
    } else {
        xive_source_set_irq(&xive->source, irq, val);
    }
}

static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
    }

    spapr_xive_pic_print_info(xive, mon);
}

static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
                          void *fdt, uint32_t phandle)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    int node;
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(SPAPR_IRQ_IPI),
        cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
    };
    /*
     * EQ size - the sizes of pages supported by the system 4K, 64K,
     * 2M, 16M. We only advertise 64K for the moment.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16), /* 64K */
    };
    /*
     * The following array is in sync with the reserved priorities
     * defined by the 'spapr_xive_priority_is_reserved' routine.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(7),    /* start */
        cpu_to_be32(0xf8), /* count */
    };

    /* Thread Interrupt Management Area: User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller. */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}

static int spapr_xive_activate(SpaprInterruptController *intc,
                               uint32_t nr_servers, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (kvm_enabled()) {
        int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
                                    errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(xive, true);

    return 0;
}

static void spapr_xive_deactivate(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    spapr_xive_mmio_set_enabled(xive, false);

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_disconnect(intc);
    }
}
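/*
 * The device tree node built by spapr_xive_dt() looks roughly like
 * this (illustrative, the unit address depends on tm_base):
 *
 *   interrupt-controller@... {
 *       device_type = "power-ivpe";
 *       compatible = "ibm,power-ivpe";
 *       reg = < User TIMA page  0x10000
 *               OS TIMA page    0x10000 >;
 *       ibm,xive-eq-sizes = <16>;
 *       ibm,xive-lisn-ranges = <start end>;
 *       interrupt-controller;
 *       #interrupt-cells = <2>;
 *   };
 */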
static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);

    dc->desc = "sPAPR XIVE Interrupt Controller";
    device_class_set_props(dc, spapr_xive_properties);
    device_class_set_parent_realize(dc, spapr_xive_realize,
                                    &sxc->parent_realize);
    dc->vmsd = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_block_id = spapr_xive_get_block_id;

    sicc->activate = spapr_xive_activate;
    sicc->deactivate = spapr_xive_deactivate;
    sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    sicc->claim_irq = spapr_xive_claim_irq;
    sicc->free_irq = spapr_xive_free_irq;
    sicc->set_irq = spapr_xive_set_irq;
    sicc->print_info = spapr_xive_print_info;
    sicc->dt = spapr_xive_dt;
    sicc->post_load = spapr_xive_post_load;

    xpc->match_nvt = spapr_xive_match_nvt;
}

static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
    .class_size = sizeof(SpaprXiveClass),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_SPAPR_INTC },
        { }
    },
};

static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)

/*
 * XIVE hcalls
 *
 * The terminology used by the XIVE hcalls is the following:
 *
 *   TARGET vCPU number
 *   EQ     Event Queue assigned by OS to receive event data
 *   ESB    page for source interrupt management
 *   LISN   Logical Interrupt Source Number identifying a source in the
 *          machine
 *   EISN   Effective Interrupt Source Number used by guest OS to
 *          identify source in the guest
 *
 * The EAS, END, NVT structures are not exposed.
 */

/*
 * Linux hosts under OPAL reserve priority 7 for their own escalation
 * interrupts (DD2.X POWER9). So we only allow the guest to use
 * priorities [0..6].
 */
static bool spapr_xive_priority_is_reserved(uint8_t priority)
{
    switch (priority) {
    case 0 ... 6:
        return false;
    case 7: /* OPAL escalation queue */
    default:
        return true;
    }
}
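/*
 * This matches the "ibm,plat-res-int-priorities" property built in
 * spapr_xive_dt(): start 7, count 0xf8 (248), i.e. priorities 7-254
 * are reserved by the hypervisor and only 0-6 are available to the
 * guest OS.
 */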
/*
 * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
 * real address of the MMIO page through which the Event State Buffer
 * entry associated with the value of the "lisn" parameter is managed.
 *
 * Parameters:
 * Input
 * - R4: "flags"
 *         Bits 0-63 reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned
 *       by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output
 * - R4: "flags"
 *         Bits 0-59: Reserved
 *         Bit 60: H_INT_ESB must be used for Event State Buffer
 *                 management
 *         Bit 61: 1 == LSI  0 == MSI
 *         Bit 62: the full function page supports trigger
 *         Bit 63: Store EOI Supported
 * - R5: Logical Real address of full function Event State Buffer
 *       management page, -1 if H_INT_ESB hcall flag is set to 1.
 * - R6: Logical Real Address of trigger only Event State Buffer
 *       management page or -1.
 * - R7: Power of 2 page size for the ESB management pages returned in
 *       R5 and R6.
 */

#define SPAPR_XIVE_SRC_H_INT_ESB  PPC_BIT(60) /* ESB manage with H_INT_ESB */
#define SPAPR_XIVE_SRC_LSI        PPC_BIT(61) /* Virtual LSI type */
#define SPAPR_XIVE_SRC_TRIGGER    PPC_BIT(62) /* Trigger and management
                                                 on same page */
#define SPAPR_XIVE_SRC_STORE_EOI  PPC_BIT(63) /* Store EOI support */

static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
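/*
 * Illustrative return values: for an emulated 2-page MSI source with
 * esb_shift = 17 (a 64K management page next to a 64K trigger page),
 * a guest calling this hcall gets R4 = 0 (no Store EOI, trigger on
 * its own page), R5/R6 = the management/trigger page addresses in the
 * VC space, and R7 = 16 (64K pages). For an LSI, R5 and R6 are -1 and
 * bit 60 of R4 directs the guest to H_INT_ESB instead.
 */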
/*
 * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
 * Interrupt Source to a target. The Logical Interrupt Source is
 * designated with the "lisn" parameter and the target is designated
 * with the "target" and "priority" parameters. Upon return from the
 * hcall(), no additional interrupts will be directed to the old EQ.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-61: Reserved
 *         Bit 62: set the "eisn" in the EAS
 *         Bit 63: masks the interrupt source in the hardware interrupt
 *                 control structure. An interrupt masked by this mechanism
 *                 will be dropped, but its source state bits will still be
 *                 set. There is no race-free way of unmasking and restoring
 *                 the source. Thus this should only be used in interrupts
 *                 that are also masked at the source, and only in cases
 *                 where the interrupt is not meant to be used for a large
 *                 amount of time because, for example, no valid target
 *                 exists for it
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned by
 *       the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R7: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R8: "eisn" is the guest EISN associated with the "lisn"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
#define SPAPR_XIVE_SRC_MASK     PPC_BIT(63)

static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong target = args[2];
    target_ulong priority = args[3];
    target_ulong eisn = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* priority 0xff is used to reset the EAS */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}
/*
 * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine the
 * target/priority pair assigned to the specified Logical Interrupt
 * Source.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - R4: Target to which the specified Logical Interrupt Source is
 *       assigned
 * - R5: Priority to which the specified Logical Interrupt Source is
 *       assigned
 * - R6: EISN for the specified Logical Interrupt Source (this will be
 *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
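/*
 * Note that the target and priority are not stored in the EAS: they
 * are recovered above by following EAS_END_INDEX to the END and
 * reading back its NVT index (W6) and priority (W7), relying on the
 * simple vcpu_id/priority END indexing scheme of this model.
 */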
/*
 * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
 * address of the notification management page associated with the
 * specified target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: Logical real address of notification page
 * - R5: Power of 2 page size of the notification page
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
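/*
 * Illustrative computation: with a 2-page END ESB source
 * (esb_shift = 16), the notification management page of END 'n' is at
 * end_base + n * 128K, and R5 returns e.g. 16 for a 64K event queue
 * (END_W0_QSIZE of 4, plus 12).
 */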
/*
 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ
 * for a given "target" and "priority". It is also used to set the
 * notification config associated with the EQ. An EQ size of 0 is
 * used to reset the EQ config for a given target and priority. If
 * resetting the EQ config, the END associated with the given "target"
 * and "priority" will be changed to disable queueing.
 *
 * Upon return from the hcall(), no additional interrupts will be
 * directed to the old EQ (if one was set). The old EQ (if one was
 * set) should be investigated for interrupts that occurred prior to
 * or during the hcall().
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Unconditional Notify (n) per the XIVE spec
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R7: "eventQueue": The logical real address of the start of the EQ
 * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)

static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* reset queue and disable queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ. We should also check that the queue
         * has been zeroed by the OS.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* "target" should have been validated above */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /*
     * Ensure the priority and target are correctly set (they will not
     * be right after allocation)
     */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * The generation bit for the END starts at 1 and the END page
     * offset counter starts at 0.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /*
     * TODO: issue syncs required to ensure all in-flight interrupts
     * are complete on the old END
     */

out:
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Update END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}
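/*
 * Example of the encoding above (illustrative): a guest registering a
 * 64K event queue passes qsize = 16, which is stored as
 * END_W0_QSIZE = 16 - 12 = 4; the queue then holds
 * 1 << (4 + 10) = 16384 4-byte entries, matching the 'qentries'
 * computation used when dumping the END in the monitor.
 */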
/*
 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
 * target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Debug: Return debug data
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: "flags":
 *         Bits 0-61: Reserved
 *         Bit 62: The value of Event Queue Generation Number (g) per
 *                 the XIVE spec if "Debug" = 1
 *         Bit 63: The value of Unconditional Notify (n) per the XIVE spec
 * - R5: The logical real address of the start of the EQ
 * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
 * - R7: The value of Event Queue Offset Counter per XIVE spec
 *       if "Debug" = 1, else 0
 *
 */

#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)

static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Load R7 with the event queue offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}
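/*
 * The generation bit returned with Debug=1 is the 'g' bit the OS
 * compares against the one found in EQ entries: H_INT_SET_QUEUE_CONFIG
 * initializes it to 1 and it flips each time the event queue wraps,
 * so the OS can tell freshly written entries from stale ones.
 */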
/*
 * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
 * reporting cache line pair for the calling thread. The reporting
 * cache lines will contain the OS interrupt context when the OS
 * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
 * interrupt. The reporting cache lines can be reset by inputting -1
 * in "reportingLine". Issuing the CI store byte without reporting
 * cache lines registered will result in the data not being accessible
 * to the OS.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "reportingLine": The logical real address of the reporting cache
 *       line pair
 *
 * Output:
 * - None
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_SET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

/*
 * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
 * real address of the reporting cache line pair set for the input
 * "target". If no reporting cache line pair has been set, -1 is
 * returned.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "reportingLine": The logical real address of the reporting
 *       cache line pair
 *
 * Output:
 * - R4: The logical real address of the reporting line if set, else -1
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_GET_OS_REPORTING_LINE */
    return H_FUNCTION;
}
/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page for the input "lisn". This hcall is only supported for LISNs
 * that have the ESB hcall flag set to 1 when returned from hcall()
 * H_INT_GET_SOURCE_INFO.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Store: Store=1, store operation, else load operation
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */

#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong offset = args[2];
    target_ulong data = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (kvm_irqchip_in_kernel()) {
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}
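/*
 * Under KVM, going through H_INT_ESB (rather than a direct MMIO
 * mapping) matters for LSIs: the access is forwarded to the in-kernel
 * device, which can re-trigger the interrupt on EOI if the level is
 * still asserted, as noted in h_int_get_source_info() above.
 */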
/*
 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
 * ensure any in-flight events for the input LISN are in the event
 * queue.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - None
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /*
     * This is not real hardware. Nothing to be done unless running
     * under KVM
     */

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_sync_source(xive, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}
/*
 * The H_INT_RESET hcall() is used to reset all of the partition's
 * interrupt exploitation structures to their initial state. This
 * means losing all previously set interrupt state set via
 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 *
 * Output:
 * - None
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                SpaprMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    device_legacy_reset(DEVICE(xive));

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_reset(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}