/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

/*
 * XIVE Virtualization Controller BAR and Thread Management BAR that we
 * use for the ESB pages and the TIMA pages
 */
#define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE   0x0006030203180000ull

/*
 * The allocation of VP blocks is a complex operation in OPAL and the
 * VP identifiers have a relation with the number of HW chips, the
 * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
 * controller model does not have the same constraints and can use a
 * simple mapping scheme of the CPU vcpu_id.
 *
 * These identifiers are never returned to the OS.
 */

#define SPAPR_XIVE_NVT_BASE 0x400

/*
 * The sPAPR machine has a unique XIVE IC device. Assign a fixed value
 * to the controller block id value. It can nevertheless be changed
 * for testing purposes.
 */
#define SPAPR_XIVE_BLOCK_ID 0x0

/*
 * sPAPR NVT and END indexing helpers
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}

static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    assert(cpu);

    if (out_nvt_blk) {
        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_nvt_idx) {
        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
    }
}

static int spapr_xive_target_to_nvt(uint32_t target,
                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
    return 0;
}

/*
 * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
 * priorities per CPU
 */
static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    assert(cpu);

    if (out_end_blk) {
        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_end_idx) {
        *out_end_idx = (cpu->vcpu_id << 3) + prio;
    }
}

static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
    return 0;
}
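
/*
 * Worked example of the mapping above (illustrative values): vcpu_id 4
 * maps to NVT index 0x404 (SPAPR_XIVE_NVT_BASE + 4), and its END for
 * priority 5 is index (4 << 3) + 5 = 37, both in block
 * SPAPR_XIVE_BLOCK_ID.
 */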

/*
 * On sPAPR machines, use a simplified output for the XIVE END
 * structure dumping only the information related to the OS EQ.
 */
static void spapr_xive_end_pic_print_info(sPAPRXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    monitor_printf(mon, "%3d/%d % 6d/%5d ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]");
}

void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    monitor_printf(mon, "  LSIN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}

static void spapr_xive_map_mmio(sPAPRXive *xive)
{
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
}

void spapr_xive_mmio_set_enabled(sPAPRXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* Disable the END ESBs until a guest OS makes use of them */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}

/*
 * When a Virtual Processor is scheduled to run on a HW thread, the
 * hypervisor pushes its identifier in the OS CAM line. Emulate the
 * same behavior under QEMU.
 */
void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx)
{
    uint8_t  nvt_blk;
    uint32_t nvt_idx;
    uint32_t nvt_cam;

    spapr_xive_cpu_to_nvt(POWERPC_CPU(tctx->cs), &nvt_blk, &nvt_idx);

    nvt_cam = cpu_to_be32(TM_QW1W2_VO | xive_nvt_cam_line(nvt_blk, nvt_idx));
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &nvt_cam, 4);
}

static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}
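
/*
 * Note on the reset value above: per the XIVE ESB state machine,
 * PQ = 01 (Q bit alone) is the "off" state, so the notification (ESn)
 * and escalation (ESe) ESBs of a reset END silently discard events
 * until the guest configures the END with H_INT_SET_QUEUE_CONFIG.
 */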

static void spapr_xive_reset(void *dev)
{
    sPAPRXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * The XiveSource has its own reset handler, which masks off all
     * IRQs (!P|Q)
     */

    /* Mask all valid EASs in the IRQ number space. */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}

static void spapr_xive_instance_init(Object *obj)
{
    sPAPRXive *xive = SPAPR_XIVE(obj);

    object_initialize(&xive->source, sizeof(xive->source), TYPE_XIVE_SOURCE);
    object_property_add_child(obj, "source", OBJECT(&xive->source), NULL);

    object_initialize(&xive->end_source, sizeof(xive->end_source),
                      TYPE_XIVE_END_SOURCE);
    object_property_add_child(obj, "end_source", OBJECT(&xive->end_source),
                              NULL);
}

static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    sPAPRXive *xive = SPAPR_XIVE(dev);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    if (!xive->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (!xive->nr_ends) {
        error_setg(errp, "Number of interrupt ENDs needs to be greater than 0");
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Initialize the END ESB source
     */
    object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Set the mapping address of the END ESB pages after the source ESBs */
    xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
                          "xive.tima", 4ull << TM_SHIFT);

    /* Define all XIVE MMIO regions on SysBus */
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);

    /* Map all regions */
    spapr_xive_map_mmio(xive);

    qemu_register_reset(spapr_xive_reset, dev);
}

static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, XiveEAS *eas)
{
    sPAPRXive *xive = SPAPR_XIVE(xrtr);

    if (eas_idx >= xive->nr_irqs) {
        return -1;
    }

    *eas = xive->eat[eas_idx];
    return 0;
}

static int spapr_xive_get_end(XiveRouter *xrtr,
                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
{
    sPAPRXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    return 0;
}
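
/*
 * The block id arguments of these XiveRouter accessors are ignored:
 * the sPAPR machine has a single XIVE IC, so all entries live in
 * block SPAPR_XIVE_BLOCK_ID and only the index is relevant.
 */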

static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
                                uint32_t end_idx, XiveEND *end,
                                uint8_t word_number)
{
    sPAPRXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    return 0;
}

static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* TODO: should we assert() if we can't find an NVT ? */
        return -1;
    }

    /*
     * sPAPR does not maintain a NVT table. Return that the NVT is
     * valid if we have found a matching CPU
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}

static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    /*
     * We don't need to write back to the NVTs because the sPAPR
     * machine should never hit a non-scheduled NVT. This callback
     * should never get called.
     */
    g_assert_not_reached();
}

static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, sPAPRXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, sPAPRXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, sPAPRXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", sPAPRXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", sPAPRXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", sPAPRXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", sPAPRXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};

static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc = "sPAPR XIVE Interrupt Controller";
    dc->props = spapr_xive_properties;
    dc->realize = spapr_xive_realize;
    dc->vmsd = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
}

static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(sPAPRXive),
    .class_init = spapr_xive_class_init,
};

static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)
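
/*
 * A minimal usage sketch for the claim/free helpers below, assuming
 * machine-level interrupt code that is not part of this file: a LISN
 * is expected to be claimed before the guest configures it, e.g.
 *
 *     if (!spapr_xive_irq_claim(spapr->xive, irq, false)) {
 *         error_setg(errp, "IRQ %d is not free", irq);
 *     }
 *
 * and released again with spapr_xive_irq_free(spapr->xive, irq).
 */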

bool spapr_xive_irq_claim(sPAPRXive *xive, uint32_t lisn, bool lsi)
{
    XiveSource *xsrc = &xive->source;

    if (lisn >= xive->nr_irqs) {
        return false;
    }

    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID);
    xive_source_irq_set(xsrc, lisn, lsi);
    return true;
}

bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn)
{
    XiveSource *xsrc = &xive->source;

    if (lisn >= xive->nr_irqs) {
        return false;
    }

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
    xive_source_irq_set(xsrc, lisn, false);
    return true;
}

/*
 * XIVE hcalls
 *
 * The terminology used by the XIVE hcalls is the following:
 *
 *   TARGET vCPU number
 *   EQ     Event Queue assigned by OS to receive event data
 *   ESB    page for source interrupt management
 *   LISN   Logical Interrupt Source Number identifying a source in the
 *          machine
 *   EISN   Effective Interrupt Source Number used by guest OS to
 *          identify source in the guest
 *
 * The EAS, END, NVT structures are not exposed.
 */

/*
 * Linux hosts under OPAL reserve priority 7 for their own escalation
 * interrupts (DD2.X POWER9). So we only allow the guest to use
 * priorities [0..6].
 */
static bool spapr_xive_priority_is_reserved(uint8_t priority)
{
    switch (priority) {
    case 0 ... 6:
        return false;
    case 7: /* OPAL escalation queue */
    default:
        return true;
    }
}
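
/*
 * The reserved priority range is advertised to the guest through the
 * "ibm,plat-res-int-priorities" property built in spapr_dt_xive()
 * below; the two must be kept in sync.
 */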

/*
 * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
 * real address of the MMIO page through which the Event State Buffer
 * entry associated with the value of the "lisn" parameter is managed.
 *
 * Parameters:
 * Input
 * - R4: "flags"
 *         Bits 0-63 reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned
 *       by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output
 * - R4: "flags"
 *         Bits 0-59: Reserved
 *         Bit 60: H_INT_ESB must be used for Event State Buffer
 *                 management
 *         Bit 61: 1 == LSI  0 == MSI
 *         Bit 62: the full function page supports trigger
 *         Bit 63: Store EOI Supported
 * - R5: Logical Real address of full function Event State Buffer
 *       management page, -1 if H_INT_ESB hcall flag is set to 1.
 * - R6: Logical Real Address of trigger only Event State Buffer
 *       management page or -1.
 * - R7: Power of 2 page size for the ESB management pages returned in
 *       R5 and R6.
 */

#define SPAPR_XIVE_SRC_H_INT_ESB  PPC_BIT(60) /* ESB manage with H_INT_ESB */
#define SPAPR_XIVE_SRC_LSI        PPC_BIT(61) /* Virtual LSI type */
#define SPAPR_XIVE_SRC_TRIGGER    PPC_BIT(62) /* Trigger and management
                                                 on same page */
#define SPAPR_XIVE_SRC_STORE_EOI  PPC_BIT(63) /* Store EOI support */

static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          sPAPRMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
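
/*
 * For example (illustrative values), an MSI backed by a 2-page source
 * with esb_shift = 17 returns the management page address in R5, the
 * trigger page address in R6 and a page size of 2^16 in R7, since
 * each of the two ESB pages is half of the 2^esb_shift region.
 */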

/*
 * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
 * Interrupt Source to a target. The Logical Interrupt Source is
 * designated with the "lisn" parameter and the target is designated
 * with the "target" and "priority" parameters. Upon return from the
 * hcall(), no additional interrupts will be directed to the old EQ.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-61: Reserved
 *         Bit 62: set the "eisn" in the EAS
 *         Bit 63: masks the interrupt source in the hardware interrupt
 *                 control structure. An interrupt masked by this
 *                 mechanism will be dropped, but its source state bits
 *                 will still be set. There is no race-free way of
 *                 unmasking and restoring the source. Thus this should
 *                 only be used for interrupts that are also masked at
 *                 the source, and only in cases where the interrupt is
 *                 not meant to be used for a long period of time, for
 *                 example because no valid target exists for it.
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned by
 *       the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R7: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R8: "eisn" is the guest EISN associated with the "lisn"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
#define SPAPR_XIVE_SRC_MASK     PPC_BIT(63)

static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            sPAPRMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong target = args[2];
    target_ulong priority = args[3];
    target_ulong eisn = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* priority 0xff is used to reset the EAS */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}
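
/*
 * After a successful call, the EAS routes the LISN to the target's
 * END: for example (illustrative values), target 0 and priority 1
 * leave EAS_END_INDEX = (0 << 3) + 1 = 1 and, with the SET_EISN flag,
 * EAS_END_DATA = eisn, which is the event data pushed in the queue.
 */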

/*
 * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine the
 * target/priority pair assigned to the specified Logical Interrupt
 * Source.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - R4: Target to which the specified Logical Interrupt Source is
 *       assigned
 * - R5: Priority to which the specified Logical Interrupt Source is
 *       assigned
 * - R6: EISN for the specified Logical Interrupt Source (this will be
 *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            sPAPRMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
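
/*
 * Note that a source masked with H_INT_SET_SOURCE_CONFIG (or reset
 * with priority 0xff) reads back with a priority of 0xff, mirroring
 * the convention used on the set side.
 */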

/*
 * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
 * address of the notification management page associated with the
 * specified target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: Logical real address of notification page
 * - R5: Power of 2 page size of the notification page
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         sPAPRMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
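
/*
 * The END QSIZE field stores the EQ size as log2(bytes) - 12: a 4K EQ
 * has QSIZE 0, 64K is 4, 2M is 9 and 16M is 12. Hence the "+ 12"
 * above and the "qsize - 12" in H_INT_SET_QUEUE_CONFIG below.
 */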

/*
 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ for
 * a given "target" and "priority". It is also used to set the
 * notification config associated with the EQ. An EQ size of 0 is
 * used to reset the EQ config for a given target and priority. If
 * resetting the EQ config, the END associated with the given "target"
 * and "priority" will be changed to disable queueing.
 *
 * Upon return from the hcall(), no additional interrupts will be
 * directed to the old EQ (if one was set). The old EQ (if one was
 * set) should be investigated for interrupts that occurred prior to
 * or during the hcall().
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Unconditional Notify (n) per the XIVE spec
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R7: "eventQueue": The logical real address of the start of the EQ
 * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)

static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           sPAPRMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* reset queue and disable queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ. We should also check that the queue
         * has been zeroed by the OS.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* "target" should have been validated above */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /*
     * Ensure the priority and target are correctly set (they will not
     * be right after allocation)
     */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * The generation bit for the END starts at 1 and the END page
     * offset counter starts at 0.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /*
     * TODO: issue syncs required to ensure all in-flight interrupts
     * are complete on the old END
     */

out:
    /* Update END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}
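
/*
 * The EQ base address is split across END words 2 and 3: w2 carries
 * bits 32-59 of the address (the top nibble of w2 is reserved) and w3
 * the low 32 bits, which is why the configuration above stores
 * (qpage >> 32) & 0x0fffffff and the read-back below reassembles the
 * two words.
 */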

/*
 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
 * target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Debug: Return debug data
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: "flags":
 *       Bits 0-61: Reserved
 *       Bit 62: The value of Event Queue Generation Number (g) per
 *               the XIVE spec if "Debug" = 1
 *       Bit 63: The value of Unconditional Notify (n) per the XIVE spec
 * - R5: The logical real address of the start of the EQ
 * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
 * - R7: The value of Event Queue Offset Counter per XIVE spec
 *       if "Debug" = 1, else 0
 *
 */

#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)

static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           sPAPRMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        /*
         * Mask after the byte swap: the QSIZE/address bits of w2 are
         * stored big-endian, so masking the raw value would strip the
         * wrong bits on a little-endian host.
         */
        args[1] = (uint64_t) (be32_to_cpu(end->w2) & 0x0fffffff) << 32
            | be32_to_cpu(end->w3);
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Load R7 with the event queue offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
 * reporting cache line pair for the calling thread. The reporting
 * cache lines will contain the OS interrupt context when the OS
 * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
 * interrupt. The reporting cache lines can be reset by inputting -1
 * in "reportingLine". Issuing the CI store byte without reporting
 * cache lines registered will result in the data not being accessible
 * to the OS.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "reportingLine": The logical real address of the reporting cache
 *       line pair
 *
 * Output:
 * - None
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                sPAPRMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_SET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

/*
 * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
 * real address of the reporting cache line pair set for the input
 * "target". If no reporting cache line pair has been set, -1 is
 * returned.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "reportingLine": The logical real address of the reporting
 *       cache line pair
 *
 * Output:
 * - R4: The logical real address of the reporting line if set, else -1
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                sPAPRMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_GET_OS_REPORTING_LINE */
    return H_FUNCTION;
}
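
/*
 * Both OS reporting line hcalls are currently unimplemented and
 * return H_FUNCTION, which the guest is expected to treat as
 * "feature not available".
 */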

/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page for the input "lisn". This hcall is only supported for LISNs
 * that have the ESB hcall flag set to 1 when returned from hcall()
 * H_INT_GET_SOURCE_INFO.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         bit 63: Store: Store=1, store operation, else load operation
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */

#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

static target_ulong h_int_esb(PowerPCCPU *cpu,
                              sPAPRMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong offset = args[2];
    target_ulong data = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

    if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                      (flags & SPAPR_XIVE_ESB_STORE))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                      HWADDR_PRIx "\n", mmio_addr);
        return H_HARDWARE;
    }
    args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    return H_SUCCESS;
}
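
/*
 * The access above goes through the emulated ESB MMIO region rather
 * than touching guest memory directly: dma_memory_rw() on the VC BAR
 * address dispatches to the XiveSource ESB handlers, so the PQ state
 * machine is updated exactly as for a real MMIO access.
 */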

/*
 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
 * ensure any in-flight events for the input lisn are in the event
 * queue.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - None
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               sPAPRMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* This is not real hardware. Nothing to be done */
    return H_SUCCESS;
}

/*
 * The H_INT_RESET hcall() is used to reset all of the partition's
 * interrupt exploitation structures to their initial state. This
 * means losing all interrupt state previously set via
 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 *
 * Output:
 * - None
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                sPAPRMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    sPAPRXive *xive = spapr->xive;
    target_ulong flags = args[0];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    device_reset(DEVICE(xive));
    return H_SUCCESS;
}
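
/*
 * All of the hcalls above are guarded by the OV5_XIVE_EXPLOIT option
 * vector: they return H_FUNCTION unless the guest negotiated the XIVE
 * exploitation mode at CAS time.
 */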

void spapr_xive_hcall_init(sPAPRMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}

void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
                   uint32_t phandle)
{
    sPAPRXive *xive = spapr->xive;
    int node;
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(0),
        cpu_to_be32(nr_servers),
    };
    /*
     * EQ size - the sizes of pages supported by the system 4K, 64K,
     * 2M, 16M. We only advertise 64K for the moment.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16), /* 64K */
    };
    /*
     * The following array is in sync with the reserved priorities
     * defined by the 'spapr_xive_priority_is_reserved' routine.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(7),    /* start */
        cpu_to_be32(0xf8), /* count */
    };
    gchar *nodename;

    /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
    _FDT(node = fdt_add_subnode(fdt, 0, nodename));
    g_free(nodename);

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller. */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}