/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xics.h"
#include "hw/ppc/xics_spapr.h"
#include "hw/qdev-properties.h"
#include "cpu-models.h"
#include "sysemu/kvm.h"

#include "trace.h"

static const TypeInfo spapr_intc_info = {
    .name = TYPE_SPAPR_INTC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(SpaprInterruptControllerClass),
};

void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}

int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area()
     */
    align -= 1;

    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align);
    if (irq == spapr->irq_map_nr) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    return irq + SPAPR_IRQ_MSI;
}

void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}

int spapr_irq_init_kvm(int (*fn)(SpaprInterruptController *, Error **),
                       SpaprInterruptController *intc,
                       Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        if (fn(intc, &local_err) < 0) {
            if (machine_kernel_irqchip_required(machine)) {
                error_prepend(&local_err,
                              "kernel_irqchip requested but unavailable: ");
                error_propagate(errp, local_err);
                return -1;
            }

            /*
             * We failed to initialize the KVM device, fall back to
             * emulated mode
             */
            error_prepend(&local_err,
                          "kernel_irqchip allowed but unavailable: ");
            error_append_hint(&local_err,
                              "Falling back to kernel-irqchip=off\n");
            warn_report_err(local_err);
        }
    }

    return 0;
}

/*
 * XICS IRQ backend.
 */

static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
{
    if (!kvm_irqchip_in_kernel()) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(spapr_cpu_state(cpu)->icp);
        }
    }
    return 0;
}

SpaprIrq spapr_irq_xics = {
    .nr_xirqs = SPAPR_NR_XIRQS,
    .nr_msis = SPAPR_NR_MSIS,
    .xics = true,
    .xive = false,

    .post_load = spapr_irq_post_load_xics,
};

/*
 * XIVE IRQ backend.
 */

static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}

SpaprIrq spapr_irq_xive = {
    .nr_xirqs = SPAPR_NR_XIRQS,
    .nr_msis = SPAPR_NR_MSIS,
    .xics = false,
    .xive = true,

    .post_load = spapr_irq_post_load_xive,
};
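
/*
 * Illustrative sketch, not part of the build: one way a caller (e.g. a
 * PHB model handling an MSI configuration request) might use the MSI
 * allocator above. The function name and the fixed vector count are
 * assumptions made up for this example; the real users live elsewhere
 * in the sPAPR code.
 */
#if 0
static int example_msi_setup(SpaprMachineState *spapr, Error **errp)
{
    /* Grab a naturally aligned block of 4 MSI vectors */
    int first = spapr_irq_msi_alloc(spapr, 4, true, errp);

    if (first < 0) {
        return -1;
    }

    /* ... program the device with IRQs first..first+3 ... */

    /* Return the block to the allocator when MSIs are disabled */
    spapr_irq_msi_free(spapr, first, 4);
    return 0;
}
#endif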

/*
 * Dual XIVE and XICS IRQ backend.
 *
 * Both interrupt mode objects, XIVE and XICS, are created, but the
 * machine starts in legacy interrupt mode (XICS). The mode can be
 * changed by the CAS negotiation process and, in that case, the new
 * mode is activated after an extra machine reset.
 */

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}

static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

/*
 * Define values in sync with the XIVE and XICS backends
 */
SpaprIrq spapr_irq_dual = {
    .nr_xirqs = SPAPR_NR_XIRQS,
    .nr_msis = SPAPR_NR_MSIS,
    .xics = true,
    .xive = true,

    .post_load = spapr_irq_post_load_dual,
};
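
/*
 * Illustrative sketch, not part of the build: how the three SpaprIrq
 * definitions above relate to the machine-level "ic-mode" option. The
 * actual option handling lives in the machine code (spapr.c); the
 * helper below is made up purely to show the mapping.
 */
#if 0
static SpaprIrq *example_ic_mode_to_backend(const char *ic_mode)
{
    if (strcmp(ic_mode, "xics") == 0) {
        return &spapr_irq_xics;            /* XICS only */
    } else if (strcmp(ic_mode, "xive") == 0) {
        return &spapr_irq_xive;            /* XIVE only, needs a POWER9 CPU */
    } else {
        /* "dual": both are built, XICS is used until CAS says otherwise */
        return &spapr_irq_dual;
    }
}
#endif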

static int spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return 0;
        }

        /*
         * A non-P9 machine using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup: DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return -1;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU exiting later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return -1;
    }

    return 0;
}

/*
 * sPAPR IRQ frontend routines for devices
 */
#define ALL_INTCS(spapr_) \
    { SPAPR_INTC((spapr_)->ics), SPAPR_INTC((spapr_)->xive), }

int spapr_irq_cpu_intc_create(SpaprMachineState *spapr,
                              PowerPCCPU *cpu, Error **errp)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;
    int rc;

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            rc = sicc->cpu_intc_create(intc, cpu, errp);
            if (rc < 0) {
                return rc;
            }
        }
    }

    return 0;
}

static void spapr_set_irq(void *opaque, int irq, int level)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(opaque);
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->set_irq(spapr->active_intc, irq, level);
}

void spapr_irq_print_info(SpaprMachineState *spapr, Monitor *mon)
{
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->print_info(spapr->active_intc, mon);
}

void spapr_irq_dt(SpaprMachineState *spapr, uint32_t nr_servers,
                  void *fdt, uint32_t phandle)
{
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->dt(spapr->active_intc, nr_servers, fdt, phandle);
}
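
/*
 * Illustrative sketch, not part of the build: the shape of a backend's
 * class_init wiring up the SpaprInterruptControllerClass handlers that
 * the frontend routines above and below dispatch to. Only the handlers
 * used in this file are shown; the example_* handlers and the
 * SPAPR_INTC_CLASS() cast macro are assumed to exist elsewhere (the
 * real implementations are the XICS and XIVE backends).
 */
#if 0
static void example_intc_class_init(ObjectClass *klass, void *data)
{
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);

    sicc->activate        = example_activate;
    sicc->deactivate      = example_deactivate;
    sicc->cpu_intc_create = example_cpu_intc_create;
    sicc->claim_irq       = example_claim_irq;
    sicc->free_irq        = example_free_irq;
    sicc->set_irq         = example_set_irq;
    sicc->print_info      = example_print_info;
    sicc->dt              = example_dt;
}
#endif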

void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    if (spapr_irq_check(spapr, errp) < 0) {
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    if (spapr->irq->xics) {
        Error *local_err = NULL;
        Object *obj;

        obj = object_new(TYPE_ICS_SPAPR);
        object_property_add_child(OBJECT(spapr), "ics", obj, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                       &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_set_int(obj, spapr->irq->nr_xirqs, "nr-irqs",
                                &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_set_bool(obj, true, "realized", &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        spapr->ics = ICS_SPAPR(obj);
    }

    if (spapr->irq->xive) {
        uint32_t nr_servers = spapr_max_server_number(spapr);
        DeviceState *dev;
        int i;

        dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
        qdev_prop_set_uint32(dev, "nr-irqs",
                             spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
        /*
         * 8 XIVE END structures per CPU, one for each available
         * priority
         */
        qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
        qdev_init_nofail(dev);

        spapr->xive = SPAPR_XIVE(dev);

        /* Enable the CPU IPIs */
        for (i = 0; i < nr_servers; ++i) {
            SpaprInterruptControllerClass *sicc
                = SPAPR_INTC_GET_CLASS(spapr->xive);

            if (sicc->claim_irq(SPAPR_INTC(spapr->xive), SPAPR_IRQ_IPI + i,
                                false, errp) < 0) {
                return;
            }
        }

        spapr_xive_hcall_init(spapr);
    }

    spapr->qirqs = qemu_allocate_irqs(spapr_set_irq, spapr,
                                      spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
}

int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;
    int rc;

    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            rc = sicc->claim_irq(intc, irq, lsi, errp);
            if (rc < 0) {
                return rc;
            }
        }
    }

    return 0;
}

void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i, j;

    assert(irq >= SPAPR_XIRQ_BASE);
    assert((irq + num) <= (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = irq; i < (irq + num); i++) {
        for (j = 0; j < ARRAY_SIZE(intcs); j++) {
            SpaprInterruptController *intc = intcs[j];

            if (intc) {
                SpaprInterruptControllerClass *sicc
                    = SPAPR_INTC_GET_CLASS(intc);
                sicc->free_irq(intc, i);
            }
        }
    }
}

qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    /*
     * This interface is basically for VIO and PHB devices to find the
     * right qemu_irq to manipulate, so we only allow access to the
     * external irqs for now. Currently anything which needs to
     * access the IPIs most naturally gets there via the guest side
     * interfaces; we can change this if we need to in future.
     */
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    if (spapr->ics) {
        assert(ics_valid_irq(spapr->ics, irq));
    }
    if (spapr->xive) {
        assert(irq < spapr->xive->nr_irqs);
        assert(xive_eas_is_valid(&spapr->xive->eat[irq]));
    }

    return spapr->qirqs[irq];
}
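
/*
 * Illustrative sketch, not part of the build: typical device-side use
 * of the frontend above. A VIO or PHB model claims a source number once
 * at setup and later toggles the line through the qemu_irq returned by
 * spapr_qirq(). The source number below is an arbitrary example value.
 */
#if 0
static void example_device_interrupt(SpaprMachineState *spapr, Error **errp)
{
    int irq = SPAPR_XIRQ_BASE + 0x10;   /* example external source number */

    /* Claim the source as level-triggered (LSI) in all backends */
    if (spapr_irq_claim(spapr, irq, true, errp) < 0) {
        return;
    }

    /* Raise and lower the line; this lands in the active backend */
    qemu_set_irq(spapr_qirq(spapr, irq), 1);
    qemu_set_irq(spapr_qirq(spapr, irq), 0);
}
#endif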

int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    spapr_irq_update_active_intc(spapr);
    return spapr->irq->post_load(spapr, version_id);
}

void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    spapr_irq_update_active_intc(spapr);
}

int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = "interrupt-controller";
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s",
                   nodename, fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}

static void set_active_intc(SpaprMachineState *spapr,
                            SpaprInterruptController *new_intc)
{
    SpaprInterruptControllerClass *sicc;

    assert(new_intc);

    if (new_intc == spapr->active_intc) {
        /* Nothing to do */
        return;
    }

    if (spapr->active_intc) {
        sicc = SPAPR_INTC_GET_CLASS(spapr->active_intc);
        if (sicc->deactivate) {
            sicc->deactivate(spapr->active_intc);
        }
    }

    sicc = SPAPR_INTC_GET_CLASS(new_intc);
    if (sicc->activate) {
        sicc->activate(new_intc, &error_fatal);
    }

    spapr->active_intc = new_intc;
}

void spapr_irq_update_active_intc(SpaprMachineState *spapr)
{
    SpaprInterruptController *new_intc;

    if (!spapr->ics) {
        /*
         * XXX before we run CAS, ov5_cas is initialized empty, which
         * indicates XICS, even if we have ic-mode=xive. TODO: clean
         * up the CAS path so that we have a clearer way of handling
         * this.
         */
        new_intc = SPAPR_INTC(spapr->xive);
    } else if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        new_intc = SPAPR_INTC(spapr->xive);
    } else {
        new_intc = SPAPR_INTC(spapr->ics);
    }

    set_active_intc(spapr, new_intc);
}

/*
 * XICS legacy routines - to deprecate one day
 */

static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ics_irq_free(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated contiguously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    return first + ics->offset;
}

#define SPAPR_IRQ_XICS_LEGACY_NR_XIRQS     0x400

SpaprIrq spapr_irq_xics_legacy = {
    .nr_xirqs = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .nr_msis = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .xics = true,
    .xive = false,

    .post_load = spapr_irq_post_load_xics,
};

static void spapr_irq_register_types(void)
{
    type_register_static(&spapr_intc_info);
}

type_init(spapr_irq_register_types)
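
/*
 * Illustrative sketch, not part of the build: how a legacy (pre-4.0)
 * machine type combines spapr_irq_find() with spapr_irq_claim() to
 * allocate an aligned MSI block, instead of using the bitmap allocator
 * at the top of this file. The helper name and the vector count are
 * assumptions for the example only.
 */
#if 0
static int example_legacy_msi_alloc(SpaprMachineState *spapr, Error **errp)
{
    /* Find a block of 4 free XICS source numbers, aligned for MSI */
    int first = spapr_irq_find(spapr, 4, true, errp);
    int i;

    if (first < 0) {
        return -1;
    }

    /* Claim each source so the backend marks it as in use */
    for (i = first; i < first + 4; i++) {
        if (spapr_irq_claim(spapr, i, false, errp) < 0) {
            return -1;
        }
    }

    return first;
}
#endif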