/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/reset.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"

/*
 * XIVE Virtualization Controller BAR and Thread Management BAR that we
 * use for the ESB pages and the TIMA pages
 */
#define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE   0x0006030203180000ull

/*
 * The allocation of VP blocks is a complex operation in OPAL and the
 * VP identifiers have a relation with the number of HW chips, the
 * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
 * controller model does not have the same constraints and can use a
 * simple mapping scheme of the CPU vcpu_id.
 *
 * These identifiers are never returned to the OS.
 */

#define SPAPR_XIVE_NVT_BASE 0x400

/*
 * sPAPR NVT and END indexing helpers
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}

static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    assert(cpu);

    if (out_nvt_blk) {
        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_nvt_idx) {
        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
    }
}

static int spapr_xive_target_to_nvt(uint32_t target,
                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
    return 0;
}

/*
 * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
 * priorities per CPU
 */
int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
                             uint32_t *out_server, uint8_t *out_prio)
{
    assert(end_blk == SPAPR_XIVE_BLOCK_ID);

    if (out_server) {
        *out_server = end_idx >> 3;
    }

    if (out_prio) {
        *out_prio = end_idx & 0x7;
    }
    return 0;
}

static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    assert(cpu);

    if (out_end_blk) {
        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_end_idx) {
        *out_end_idx = (cpu->vcpu_id << 3) + prio;
    }
}

static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
    return 0;
}

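/*
 * Worked example of the mapping above (illustrative values, derived
 * from the helpers themselves): a vCPU with vcpu_id 4 maps to NVT
 * index SPAPR_XIVE_NVT_BASE + 4 = 0x404 in block SPAPR_XIVE_BLOCK_ID,
 * and its priority 5 END is end_idx = (4 << 3) + 5 = 0x25. The
 * reverse mapping recovers server 0x25 >> 3 = 4 and priority
 * 0x25 & 0x7 = 5.
 */
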
/*
 * On sPAPR machines, use a simplified output for the XIVE END
 * structure dumping only the information related to the OS EQ.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
}

void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "  LISN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}

void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* Disable the END ESBs until a guest OS makes use of them */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}

static void spapr_xive_tm_write(void *opaque, hwaddr offset,
                                uint64_t value, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

const MemoryRegionOps spapr_xive_tm_ops = {
    .read = spapr_xive_tm_read,
    .write = spapr_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}

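/*
 * Reminder (a sketch of the ESB semantics, per the XIVE spec): each
 * source has a 2-bit P/Q state. PQ=00 means the source is enabled and
 * will notify on the next trigger, PQ=10 means an event is pending,
 * PQ=11 means pending with further triggers coalesced, and PQ=01
 * means the source is off. Setting the END ESn/ESe Q bits above thus
 * silences the notification and escalation ESBs after reset.
 */
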
static void spapr_xive_reset(void *dev)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * The XiveSource has its own reset handler, which masks off all
     * IRQs (!P|Q)
     */

    /* Mask all valid EASs in the IRQ number space. */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}

static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, sizeof(xive->source),
                            TYPE_XIVE_SOURCE, &error_abort, NULL);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);

    /* Not connected to the KVM XIVE device */
    xive->fd = -1;
}

static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    if (!xive->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (!xive->nr_ends) {
        error_setg(errp, "Number of ENDs needs to be greater than 0");
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);

    /*
     * Initialize the END ESB source
     */
    object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);

    /* Set the mapping address of the END ESB pages after the source ESBs */
    xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));

    qemu_register_reset(spapr_xive_reset, dev);

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
                          xive, "xive.tima", 4ull << TM_SHIFT);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);

    /*
     * Map all regions. These will be enabled or disabled at reset and
     * can also be overridden by KVM memory regions if active
     */
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
}

static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, XiveEAS *eas)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (eas_idx >= xive->nr_irqs) {
        return -1;
    }

    *eas = xive->eat[eas_idx];
    return 0;
}

static int spapr_xive_get_end(XiveRouter *xrtr,
                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    return 0;
}

static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
                                uint32_t end_idx, XiveEND *end,
                                uint8_t word_number)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    return 0;
}

static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* TODO: should we assert() if we can't find an NVT ? */
        return -1;
    }

    /*
     * sPAPR does not maintain a NVT table. Return that the NVT is
     * valid if we have found a matching CPU
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}

static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    /*
     * We don't need to write back to the NVTs because the sPAPR
     * machine should never hit a non-scheduled NVT. It should never
     * get called.
     */
    g_assert_not_reached();
}

static XiveTCTX *spapr_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    return spapr_cpu_state(cpu)->tctx;
}

static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                                uint8_t nvt_blk, uint32_t nvt_idx,
                                bool cam_ignore, uint8_t priority,
                                uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
        int ring;

        /*
         * Skip partially initialized vCPUs. This can happen when
         * vCPUs are hotplugged.
         */
        if (!tctx) {
            continue;
        }

        /*
         * Check the thread context CAM lines and record matches.
         */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the matching thread interrupt context and follow on to
         * check for duplicates which are invalid.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return -1;
            }

            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    return count;
}

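/*
 * For illustration (assuming the usual OS CAM encoding): with format
 * 0 and cam_ignore == false, a vCPU matches when the valid OS CAM
 * word in its TIMA equals the CAM line of the event's NVT, i.e.
 * xive_nvt_cam_line(nvt_blk, nvt_idx). Since each vCPU is given a
 * unique NVT index at reset, at most one thread context can match.
 */
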
static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};

static int vmstate_spapr_xive_pre_save(void *opaque)
{
    if (kvm_irqchip_in_kernel()) {
        return kvmppc_xive_pre_save(SPAPR_XIVE(opaque));
    }

    return 0;
}

/*
 * Called by the sPAPR IRQ backend 'post_load' method at the machine
 * level.
 */
static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
{
    if (kvm_irqchip_in_kernel()) {
        return kvmppc_xive_post_load(SPAPR_XIVE(intc), version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
                                bool lsi, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;

    assert(lisn < xive->nr_irqs);

    if (xive_eas_is_valid(&xive->eat[lisn])) {
        error_setg(errp, "IRQ %d is not free", lisn);
        return -EBUSY;
    }

    /*
     * Set default values when allocating an IRQ number
     */
    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (kvm_irqchip_in_kernel()) {
        return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
    }

    return 0;
}

static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    assert(lisn < xive->nr_irqs);

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}

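/*
 * A typical (hypothetical) sequence from the sPAPR machine code, going
 * through the hooks above: a LISN is claimed at device plug time,
 * which marks the EAS valid and masked, and freed at unplug, which
 * clears EAS_VALID:
 *
 *   spapr_xive_claim_irq(intc, lisn, false, &err);   // MSI source
 *   ...
 *   spapr_xive_free_irq(intc, lisn);
 *
 * The source stays masked until the guest configures it with the
 * H_INT_SET_SOURCE_CONFIG hcall below.
 */
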
static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};

static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(xive), errp);
    if (!obj) {
        return -1;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);
    return 0;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
{
    uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}

static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu)
{
    XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    uint8_t nvt_blk;
    uint32_t nvt_idx;

    xive_tctx_reset(tctx);

    /*
     * When a Virtual Processor is scheduled to run on a HW thread,
     * the hypervisor pushes its identifier in the OS CAM line.
     * Emulate the same behavior under QEMU.
     */
    spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);

    xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
}

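/*
 * Worked example (assuming xive_nvt_cam_line() packs the block into
 * the upper bits of the CAM value): the boot vCPU, vcpu_id 0, gets
 * NVT 0x400 in block SPAPR_XIVE_BLOCK_ID, so its OS CAM word is set
 * to TM_QW1W2_VO | cam_line, making the thread visible to
 * spapr_xive_match_nvt() above.
 */
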
static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
                                        PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    xive_tctx_destroy(spapr_cpu->tctx);
    spapr_cpu->tctx = NULL;
}

static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_source_set_irq(&xive->source, irq, val);
    } else {
        xive_source_set_irq(&xive->source, irq, val);
    }
}

static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
    }

    spapr_xive_pic_print_info(xive, mon);
}

static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
                          void *fdt, uint32_t phandle)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    int node;
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(0),
        cpu_to_be32(nr_servers),
    };
    /*
     * EQ size - the sizes of pages supported by the system 4K, 64K,
     * 2M, 16M. We only advertise 64K for the moment.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16), /* 64K */
    };
    /*
     * The following array is in sync with the reserved priorities
     * defined by the 'spapr_xive_priority_is_reserved' routine.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(7),    /* start */
        cpu_to_be32(0xf8), /* count */
    };

    /* Thread Interrupt Management Area: User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller. */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}

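/*
 * Sketch of the resulting device tree node (values for a hypothetical
 * machine with 32 servers and the default TM BAR; the reg property
 * carries the user and OS TIMA pages built above):
 *
 *   interrupt-controller@6030203180000 {
 *           device_type = "power-ivpe";
 *           compatible = "ibm,power-ivpe";
 *           reg = < ...user TIMA page... ...OS TIMA page... >;
 *           ibm,xive-eq-sizes = <0x10>;
 *           ibm,xive-lisn-ranges = <0x0 0x20>;
 *           interrupt-controller;
 *           #interrupt-cells = <0x2>;
 *   };
 */
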
static int spapr_xive_activate(SpaprInterruptController *intc,
                               uint32_t nr_servers, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (kvm_enabled()) {
        int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
                                    errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(xive, true);

    return 0;
}

static void spapr_xive_deactivate(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    spapr_xive_mmio_set_enabled(xive, false);

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_disconnect(intc);
    }
}

static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);

    dc->desc = "sPAPR XIVE Interrupt Controller";
    dc->props = spapr_xive_properties;
    dc->realize = spapr_xive_realize;
    dc->vmsd = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_tctx = spapr_xive_get_tctx;

    sicc->activate = spapr_xive_activate;
    sicc->deactivate = spapr_xive_deactivate;
    sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    sicc->claim_irq = spapr_xive_claim_irq;
    sicc->free_irq = spapr_xive_free_irq;
    sicc->set_irq = spapr_xive_set_irq;
    sicc->print_info = spapr_xive_print_info;
    sicc->dt = spapr_xive_dt;
    sicc->post_load = spapr_xive_post_load;

    xpc->match_nvt = spapr_xive_match_nvt;
}

static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_SPAPR_INTC },
        { }
    },
};

static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)

/*
 * XIVE hcalls
 *
 * The terminology used by the XIVE hcalls is the following:
 *
 *   TARGET vCPU number
 *   EQ     Event Queue assigned by OS to receive event data
 *   ESB    page for source interrupt management
 *   LISN   Logical Interrupt Source Number identifying a source in the
 *          machine
 *   EISN   Effective Interrupt Source Number used by guest OS to
 *          identify source in the guest
 *
 * The EAS, END, NVT structures are not exposed.
 */

/*
 * Linux hosts under OPAL reserve priority 7 for their own escalation
 * interrupts (DD2.X POWER9). So we only allow the guest to use
 * priorities [0..6].
 */
static bool spapr_xive_priority_is_reserved(uint8_t priority)
{
    switch (priority) {
    case 0 ... 6:
        return false;
    case 7: /* OPAL escalation queue */
    default:
        return true;
    }
}

/*
 * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
 * real address of the MMIO page through which the Event State Buffer
 * entry associated with the value of the "lisn" parameter is managed.
 *
 * Parameters:
 * Input
 * - R4: "flags"
 *         Bits 0-63 reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned
 *       by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output
 * - R4: "flags"
 *         Bits 0-59: Reserved
 *         Bit 60: H_INT_ESB must be used for Event State Buffer
 *                 management
 *         Bit 61: 1 == LSI  0 == MSI
 *         Bit 62: the full function page supports trigger
 *         Bit 63: Store EOI Supported
 * - R5: Logical Real address of full function Event State Buffer
 *       management page, -1 if H_INT_ESB hcall flag is set to 1.
 * - R6: Logical Real Address of trigger only Event State Buffer
 *       management page or -1.
 * - R7: Power of 2 page size for the ESB management pages returned in
 *       R5 and R6.
 */

#define SPAPR_XIVE_SRC_H_INT_ESB  PPC_BIT(60) /* ESB manage with H_INT_ESB */
#define SPAPR_XIVE_SRC_LSI        PPC_BIT(61) /* Virtual LSI type */
#define SPAPR_XIVE_SRC_TRIGGER    PPC_BIT(62) /* Trigger and management
                                                 on same page */
#define SPAPR_XIVE_SRC_STORE_EOI  PPC_BIT(63) /* Store EOI support */

static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}

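/*
 * Example return values (illustrative, assuming 2-page 64K ESBs with
 * Store EOI enabled): for an MSI LISN, R4 carries the Store EOI flag,
 * R5 is the ESB management page address, R6 the trigger page address
 * and R7 = esb_shift - 1 = 16. For an LSI, R4 also carries the
 * H_INT_ESB and LSI flags and R5/R6 are -1, forcing the guest to go
 * through the H_INT_ESB hcall.
 */
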
This is necessary under KVM to re-trigger the 925 * interrupt if the level is still asserted 926 */ 927 if (xive_source_irq_is_lsi(xsrc, lisn)) { 928 args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI; 929 } 930 931 if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) { 932 args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn); 933 } else { 934 args[1] = -1; 935 } 936 937 if (xive_source_esb_has_2page(xsrc) && 938 !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) { 939 args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn); 940 } else { 941 args[2] = -1; 942 } 943 944 if (xive_source_esb_has_2page(xsrc)) { 945 args[3] = xsrc->esb_shift - 1; 946 } else { 947 args[3] = xsrc->esb_shift; 948 } 949 950 return H_SUCCESS; 951 } 952 953 /* 954 * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical 955 * Interrupt Source to a target. The Logical Interrupt Source is 956 * designated with the "lisn" parameter and the target is designated 957 * with the "target" and "priority" parameters. Upon return from the 958 * hcall(), no additional interrupts will be directed to the old EQ. 959 * 960 * Parameters: 961 * Input: 962 * - R4: "flags" 963 * Bits 0-61: Reserved 964 * Bit 62: set the "eisn" in the EAS 965 * Bit 63: masks the interrupt source in the hardware interrupt 966 * control structure. An interrupt masked by this mechanism will 967 * be dropped, but it's source state bits will still be 968 * set. There is no race-free way of unmasking and restoring the 969 * source. Thus this should only be used in interrupts that are 970 * also masked at the source, and only in cases where the 971 * interrupt is not meant to be used for a large amount of time 972 * because no valid target exists for it for example 973 * - R5: "lisn" is per "interrupts", "interrupt-map", or 974 * "ibm,xive-lisn-ranges" properties, or as returned by the 975 * ibm,query-interrupt-source-number RTAS call, or as returned by 976 * the H_ALLOCATE_VAS_WINDOW hcall 977 * - R6: "target" is per "ibm,ppc-interrupt-server#s" or 978 * "ibm,ppc-interrupt-gserver#s" 979 * - R7: "priority" is a valid priority not in 980 * "ibm,plat-res-int-priorities" 981 * - R8: "eisn" is the guest EISN associated with the "lisn" 982 * 983 * Output: 984 * - None 985 */ 986 987 #define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62) 988 #define SPAPR_XIVE_SRC_MASK PPC_BIT(63) 989 990 static target_ulong h_int_set_source_config(PowerPCCPU *cpu, 991 SpaprMachineState *spapr, 992 target_ulong opcode, 993 target_ulong *args) 994 { 995 SpaprXive *xive = spapr->xive; 996 XiveEAS eas, new_eas; 997 target_ulong flags = args[0]; 998 target_ulong lisn = args[1]; 999 target_ulong target = args[2]; 1000 target_ulong priority = args[3]; 1001 target_ulong eisn = args[4]; 1002 uint8_t end_blk; 1003 uint32_t end_idx; 1004 1005 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { 1006 return H_FUNCTION; 1007 } 1008 1009 if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) { 1010 return H_PARAMETER; 1011 } 1012 1013 if (lisn >= xive->nr_irqs) { 1014 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", 1015 lisn); 1016 return H_P2; 1017 } 1018 1019 eas = xive->eat[lisn]; 1020 if (!xive_eas_is_valid(&eas)) { 1021 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", 1022 lisn); 1023 return H_P2; 1024 } 1025 1026 /* priority 0xff is used to reset the EAS */ 1027 if (priority == 0xff) { 1028 new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED); 1029 goto out; 1030 } 1031 1032 if (flags & SPAPR_XIVE_SRC_MASK) { 1033 new_eas.w = eas.w | 
    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}

/*
 * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine the
 * target/priority pair assigned to the specified Logical Interrupt
 * Source.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - R4: Target to which the specified Logical Interrupt Source is
 *       assigned
 * - R5: Priority to which the specified Logical Interrupt Source is
 *       assigned
 * - R6: EISN for the specified Logical Interrupt Source (this will be
 *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}

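/*
 * Hypothetical guest-side sequence tying the source hcalls together
 * (register usage per the comments above):
 *
 *   H_INT_GET_SOURCE_INFO(flags = 0, lisn)              -> ESB pages
 *   H_INT_SET_SOURCE_CONFIG(SET_EISN, lisn, target, prio, eisn)
 *   H_INT_GET_SOURCE_CONFIG(flags = 0, lisn)            -> target, prio, eisn
 *
 * Priority 0xff can then be used to reset the EAS to its masked,
 * unconfigured state.
 */
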
/*
 * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
 * address of the notification management page associated with the
 * specified target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: Logical real address of notification page
 * - R5: Power of 2 page size of the notification page
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ
 * for a given "target" and "priority". It is also used to set the
 * notification config associated with the EQ. An EQ size of 0 is
 * used to reset the EQ config for a given target and priority. If
 * resetting the EQ config, the END associated with the given "target"
 * and "priority" will be changed to disable queueing.
 *
 * Upon return from the hcall(), no additional interrupts will be
 * directed to the old EQ (if one was set). The old EQ (if one was
 * set) should be investigated for interrupts that occurred prior to
 * or during the hcall().
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Unconditional Notify (n) per the XIVE spec
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R7: "eventQueue": The logical real address of the start of the EQ
 * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)

static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */

    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* reset queue and disable queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

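    /*
     * Example of the qsize encoding handled above (values derived from
     * this file's own formulas): qsize = 16 requests a 64K EQ, stored
     * as END_W0_QSIZE = 16 - 12 = 4; with 4-byte EQ entries this gives
     * 1 << (4 + 10) = 16384 entries, matching the qentries computation
     * used by the monitor dump above.
     */
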
    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ. We should also check that the queue
         * has been zeroed by the OS.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* "target" should have been validated above */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /*
     * Ensure the priority and target are correctly set (they will not
     * be right after allocation)
     */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * The generation bit for the END starts at 1 and the END page
     * offset counter starts at 0.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /*
     * TODO: issue syncs required to ensure all in-flight interrupts
     * are complete on the old END
     */

out:
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Update END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}

/*
 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
 * target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Debug: Return debug data
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: "flags":
 *         Bits 0-61: Reserved
 *         Bit 62: The value of Event Queue Generation Number (g) per
 *                 the XIVE spec if "Debug" = 1
 *         Bit 63: The value of Unconditional Notify (n) per the XIVE spec
 * - R5: The logical real address of the start of the EQ
 * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
 * - R7: The value of Event Queue Offset Counter per XIVE spec
 *       if "Debug" = 1, else 0
 */

#define SPAPR_XIVE_END_DEBUG     PPC_BIT(63)

static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Load R7 with the event queue offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
 * reporting cache line pair for the calling thread. The reporting
 * cache lines will contain the OS interrupt context when the OS
 * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
 * interrupt. The reporting cache lines can be reset by inputting -1
 * in "reportingLine". Issuing the CI store byte without reporting
 * cache lines registered will result in the data not being accessible
 * to the OS.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "reportingLine": The logical real address of the reporting cache
 *       line pair
 *
 * Output:
 * - None
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_SET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

/*
 * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
 * real address of the reporting cache line pair set for the input
 * "target". If no reporting cache line pair has been set, -1 is
 * returned.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "reportingLine": The logical real address of the reporting
 *       cache line pair
 *
 * Output:
 * - R4: The logical real address of the reporting line if set, else -1
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_GET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page for the input "lisn". This hcall is only supported for LISNs
 * that have the ESB hcall flag set to 1 when returned from hcall()
 * H_INT_GET_SOURCE_INFO.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Store: Store=1, store operation, else load operation
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */

#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong offset = args[2];
    target_ulong data = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (kvm_irqchip_in_kernel()) {
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}

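/*
 * Illustrative use (ESB offsets per the MMIO conventions assumed from
 * hw/ppc/xive.h, e.g. a load at the EOI offset): a guest can EOI an
 * LSI flagged with H_INT_ESB by issuing a load through the hcall,
 *
 *   H_INT_ESB(flags = 0, lisn, esbOffset = XIVE_ESB_LOAD_EOI, 0)
 *
 * which returns the previous PQ state and lets QEMU re-trigger the
 * interrupt if the level is still asserted.
 */
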
/*
 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
 * ensure any in-flight events for the input lisn are in the event
 * queue.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - None
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /*
     * This is not real hardware. Nothing to be done unless running
     * under KVM
     */

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_sync_source(xive, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

/*
 * The H_INT_RESET hcall() is used to reset all of the partition's
 * interrupt exploitation structures to their initial state. This
 * means losing all interrupt state previously set via
 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 *
 * Output:
 * - None
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                SpaprMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    device_reset(DEVICE(xive));

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_reset(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}