1 /* 2 * QEMU PowerPC sPAPR IRQ interface 3 * 4 * Copyright (c) 2018, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/error-report.h" 13 #include "qapi/error.h" 14 #include "hw/irq.h" 15 #include "hw/ppc/spapr.h" 16 #include "hw/ppc/spapr_cpu_core.h" 17 #include "hw/ppc/spapr_xive.h" 18 #include "hw/ppc/xics.h" 19 #include "hw/ppc/xics_spapr.h" 20 #include "hw/qdev-properties.h" 21 #include "cpu-models.h" 22 #include "sysemu/kvm.h" 23 24 #include "trace.h" 25 26 static const TypeInfo spapr_intc_info = { 27 .name = TYPE_SPAPR_INTC, 28 .parent = TYPE_INTERFACE, 29 .class_size = sizeof(SpaprInterruptControllerClass), 30 }; 31 32 void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis) 33 { 34 spapr->irq_map_nr = nr_msis; 35 spapr->irq_map = bitmap_new(spapr->irq_map_nr); 36 } 37 38 int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align, 39 Error **errp) 40 { 41 int irq; 42 43 /* 44 * The 'align_mask' parameter of bitmap_find_next_zero_area() 45 * should be one less than a power of 2; 0 means no 46 * alignment. 
Adapt the 'align' value of the former allocator 47 * to fit the requirements of bitmap_find_next_zero_area() 48 */ 49 align -= 1; 50 51 irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num, 52 align); 53 if (irq == spapr->irq_map_nr) { 54 error_setg(errp, "can't find a free %d-IRQ block", num); 55 return -1; 56 } 57 58 bitmap_set(spapr->irq_map, irq, num); 59 60 return irq + SPAPR_IRQ_MSI; 61 } 62 63 void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num) 64 { 65 bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num); 66 } 67 68 static void spapr_irq_init_kvm(SpaprMachineState *spapr, 69 SpaprIrq *irq, Error **errp) 70 { 71 MachineState *machine = MACHINE(spapr); 72 Error *local_err = NULL; 73 74 if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) { 75 irq->init_kvm(spapr, &local_err); 76 if (local_err && machine_kernel_irqchip_required(machine)) { 77 error_prepend(&local_err, 78 "kernel_irqchip requested but unavailable: "); 79 error_propagate(errp, local_err); 80 return; 81 } 82 83 if (!local_err) { 84 return; 85 } 86 87 /* 88 * We failed to initialize the KVM device, fallback to 89 * emulated mode 90 */ 91 error_prepend(&local_err, "kernel_irqchip allowed but unavailable: "); 92 error_append_hint(&local_err, "Falling back to kernel-irqchip=off\n"); 93 warn_report_err(local_err); 94 } 95 } 96 97 /* 98 * XICS IRQ backend. 
99 */ 100 101 static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi, 102 Error **errp) 103 { 104 ICSState *ics = spapr->ics; 105 106 assert(ics); 107 assert(ics_valid_irq(ics, irq)); 108 109 if (!ics_irq_free(ics, irq - ics->offset)) { 110 error_setg(errp, "IRQ %d is not free", irq); 111 return -1; 112 } 113 114 ics_set_irq_type(ics, irq - ics->offset, lsi); 115 return 0; 116 } 117 118 static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq) 119 { 120 ICSState *ics = spapr->ics; 121 uint32_t srcno = irq - ics->offset; 122 123 assert(ics_valid_irq(ics, irq)); 124 125 memset(&ics->irqs[srcno], 0, sizeof(ICSIRQState)); 126 } 127 128 static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon) 129 { 130 CPUState *cs; 131 132 CPU_FOREACH(cs) { 133 PowerPCCPU *cpu = POWERPC_CPU(cs); 134 135 icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon); 136 } 137 138 ics_pic_print_info(spapr->ics, mon); 139 } 140 141 static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr, 142 PowerPCCPU *cpu, Error **errp) 143 { 144 Error *local_err = NULL; 145 Object *obj; 146 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 147 148 obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr), 149 &local_err); 150 if (local_err) { 151 error_propagate(errp, local_err); 152 return; 153 } 154 155 spapr_cpu->icp = ICP(obj); 156 } 157 158 static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id) 159 { 160 if (!kvm_irqchip_in_kernel()) { 161 CPUState *cs; 162 CPU_FOREACH(cs) { 163 PowerPCCPU *cpu = POWERPC_CPU(cs); 164 icp_resend(spapr_cpu_state(cpu)->icp); 165 } 166 } 167 return 0; 168 } 169 170 static void spapr_irq_set_irq_xics(void *opaque, int irq, int val) 171 { 172 SpaprMachineState *spapr = opaque; 173 uint32_t srcno = irq - spapr->ics->offset; 174 175 ics_set_irq(spapr->ics, srcno, val); 176 } 177 178 static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp) 179 { 180 Error *local_err = NULL; 181 182 
spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err); 183 if (local_err) { 184 error_propagate(errp, local_err); 185 return; 186 } 187 } 188 189 static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp) 190 { 191 if (kvm_enabled()) { 192 xics_kvm_connect(spapr, errp); 193 } 194 } 195 196 SpaprIrq spapr_irq_xics = { 197 .nr_xirqs = SPAPR_NR_XIRQS, 198 .nr_msis = SPAPR_NR_MSIS, 199 .xics = true, 200 .xive = false, 201 202 .claim = spapr_irq_claim_xics, 203 .free = spapr_irq_free_xics, 204 .print_info = spapr_irq_print_info_xics, 205 .dt_populate = spapr_dt_xics, 206 .cpu_intc_create = spapr_irq_cpu_intc_create_xics, 207 .post_load = spapr_irq_post_load_xics, 208 .reset = spapr_irq_reset_xics, 209 .set_irq = spapr_irq_set_irq_xics, 210 .init_kvm = spapr_irq_init_kvm_xics, 211 }; 212 213 /* 214 * XIVE IRQ backend. 215 */ 216 217 static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi, 218 Error **errp) 219 { 220 return spapr_xive_irq_claim(spapr->xive, irq, lsi, errp); 221 } 222 223 static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq) 224 { 225 spapr_xive_irq_free(spapr->xive, irq); 226 } 227 228 static void spapr_irq_print_info_xive(SpaprMachineState *spapr, 229 Monitor *mon) 230 { 231 CPUState *cs; 232 233 CPU_FOREACH(cs) { 234 PowerPCCPU *cpu = POWERPC_CPU(cs); 235 236 xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon); 237 } 238 239 spapr_xive_pic_print_info(spapr->xive, mon); 240 } 241 242 static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr, 243 PowerPCCPU *cpu, Error **errp) 244 { 245 Error *local_err = NULL; 246 Object *obj; 247 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 248 249 obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err); 250 if (local_err) { 251 error_propagate(errp, local_err); 252 return; 253 } 254 255 spapr_cpu->tctx = XIVE_TCTX(obj); 256 257 /* 258 * (TCG) Early setting the OS CAM line for hotplugged CPUs as they 259 * don't beneficiate from the 
reset of the XIVE IRQ backend 260 */ 261 spapr_xive_set_tctx_os_cam(spapr_cpu->tctx); 262 } 263 264 static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id) 265 { 266 return spapr_xive_post_load(spapr->xive, version_id); 267 } 268 269 static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp) 270 { 271 CPUState *cs; 272 Error *local_err = NULL; 273 274 CPU_FOREACH(cs) { 275 PowerPCCPU *cpu = POWERPC_CPU(cs); 276 277 /* (TCG) Set the OS CAM line of the thread interrupt context. */ 278 spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx); 279 } 280 281 spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err); 282 if (local_err) { 283 error_propagate(errp, local_err); 284 return; 285 } 286 287 /* Activate the XIVE MMIOs */ 288 spapr_xive_mmio_set_enabled(spapr->xive, true); 289 } 290 291 static void spapr_irq_set_irq_xive(void *opaque, int irq, int val) 292 { 293 SpaprMachineState *spapr = opaque; 294 295 if (kvm_irqchip_in_kernel()) { 296 kvmppc_xive_source_set_irq(&spapr->xive->source, irq, val); 297 } else { 298 xive_source_set_irq(&spapr->xive->source, irq, val); 299 } 300 } 301 302 static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp) 303 { 304 if (kvm_enabled()) { 305 kvmppc_xive_connect(spapr->xive, errp); 306 } 307 } 308 309 SpaprIrq spapr_irq_xive = { 310 .nr_xirqs = SPAPR_NR_XIRQS, 311 .nr_msis = SPAPR_NR_MSIS, 312 .xics = false, 313 .xive = true, 314 315 .claim = spapr_irq_claim_xive, 316 .free = spapr_irq_free_xive, 317 .print_info = spapr_irq_print_info_xive, 318 .dt_populate = spapr_dt_xive, 319 .cpu_intc_create = spapr_irq_cpu_intc_create_xive, 320 .post_load = spapr_irq_post_load_xive, 321 .reset = spapr_irq_reset_xive, 322 .set_irq = spapr_irq_set_irq_xive, 323 .init_kvm = spapr_irq_init_kvm_xive, 324 }; 325 326 /* 327 * Dual XIVE and XICS IRQ backend. 328 * 329 * Both interrupt mode, XIVE and XICS, objects are created but the 330 * machine starts in legacy interrupt mode (XICS). 
It can be changed 331 * by the CAS negotiation process and, in that case, the new mode is 332 * activated after an extra machine reset. 333 */ 334 335 /* 336 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the 337 * default. 338 */ 339 static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr) 340 { 341 return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ? 342 &spapr_irq_xive : &spapr_irq_xics; 343 } 344 345 static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi, 346 Error **errp) 347 { 348 Error *local_err = NULL; 349 int ret; 350 351 ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err); 352 if (local_err) { 353 error_propagate(errp, local_err); 354 return ret; 355 } 356 357 ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err); 358 if (local_err) { 359 error_propagate(errp, local_err); 360 return ret; 361 } 362 363 return ret; 364 } 365 366 static void spapr_irq_free_dual(SpaprMachineState *spapr, int irq) 367 { 368 spapr_irq_xics.free(spapr, irq); 369 spapr_irq_xive.free(spapr, irq); 370 } 371 372 static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon) 373 { 374 spapr_irq_current(spapr)->print_info(spapr, mon); 375 } 376 377 static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr, 378 uint32_t nr_servers, void *fdt, 379 uint32_t phandle) 380 { 381 spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle); 382 } 383 384 static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr, 385 PowerPCCPU *cpu, Error **errp) 386 { 387 Error *local_err = NULL; 388 389 spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err); 390 if (local_err) { 391 error_propagate(errp, local_err); 392 return; 393 } 394 395 spapr_irq_xics.cpu_intc_create(spapr, cpu, errp); 396 } 397 398 static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id) 399 { 400 /* 401 * Force a reset of the XIVE backend after migration. The machine 402 * defaults to XICS at startup. 
/*
 * Post-migration hook of the dual backend: pick the mode the source
 * machine had negotiated via CAS and delegate to it.
 */
static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        if (kvm_irqchip_in_kernel()) {
            /* The KVM XICS device was created at startup; remove it first */
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

/*
 * Machine reset of the dual backend: tear down any KVM devices and
 * re-activate whichever mode CAS negotiated (XICS by default).
 */
static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    /* Destroy all KVM devices */
    if (kvm_irqchip_in_kernel()) {
        xics_kvm_disconnect(spapr, &local_err);
        if (local_err) {
            /* NOTE: prepend must follow propagate so it edits *errp */
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XICS disconnect failed: ");
            return;
        }
        kvmppc_xive_disconnect(spapr->xive, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XIVE disconnect failed: ");
            return;
        }
    }

    spapr_irq_current(spapr)->reset(spapr, errp);
}

/* qemu_irq handler: route the line update to the current backend */
static void spapr_irq_set_irq_dual(void *opaque, int irq, int val)
{
    SpaprMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, irq, val);
}

/*
 * Define values in sync with the XIVE and XICS backend
 */
SpaprIrq spapr_irq_dual = {
    .nr_xirqs = SPAPR_NR_XIRQS,
    .nr_msis = SPAPR_NR_MSIS,
    .xics = true,
    .xive = true,

    .claim = spapr_irq_claim_dual,
    .free = spapr_irq_free_dual,
    .print_info = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load = spapr_irq_post_load_dual,
    .reset = spapr_irq_reset_dual,
    .set_irq = spapr_irq_set_irq_dual,
    .init_kvm = NULL, /* should not be used */
};
/*
 * Validate (and possibly adjust) the configured interrupt mode against
 * the CPU model and the KVM capabilities. Returns 0 on success, -1
 * with @errp set on an unsupportable combination. May silently switch
 * spapr->irq from 'dual' to 'xics' on pre-POWER9 machines.
 */
static int spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return 0;
        }

        /*
         * Non-P9 machines using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup : DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return -1;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU to exit later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return -1;
    }

    return 0;
}

/*
 * sPAPR IRQ frontend routines for devices
 */
/*
 * Create the interrupt controller(s) selected by spapr->irq: the ICS
 * for XICS-capable modes, the XIVE device for XIVE-capable modes (both
 * for 'dual'), plus the MSI allocator and the qemu_irq array shared by
 * all backends. On error, @errp is set and the machine must not start.
 */
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    if (spapr_irq_check(spapr, errp) < 0) {
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    if (spapr->irq->xics) {
        Error *local_err = NULL;
        Object *obj;

        obj = object_new(TYPE_ICS_SPAPR);
        /* Parent the ICS under the machine before configuring it */
        object_property_add_child(OBJECT(spapr), "ics", obj, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                       &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_set_int(obj, spapr->irq->nr_xirqs, "nr-irqs",
                                &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_set_bool(obj, true, "realized", &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        spapr->ics = ICS_SPAPR(obj);
    }

    if (spapr->irq->xive) {
        uint32_t nr_servers = spapr_max_server_number(spapr);
        DeviceState *dev;
        int i;

        dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
        /* IRQ space covers the IPIs below SPAPR_XIRQ_BASE as well */
        qdev_prop_set_uint32(dev, "nr-irqs",
                             spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
        /*
         * 8 XIVE END structures per CPU. One for each available
         * priority
         */
        qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
        qdev_init_nofail(dev);

        spapr->xive = SPAPR_XIVE(dev);

        /* Enable the CPU IPIs */
        for (i = 0; i < nr_servers; ++i) {
            if (spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i,
                                     false, errp) < 0) {
                return;
            }
        }

        spapr_xive_hcall_init(spapr);
    }

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
}

/*
 * Claim an external IRQ number for a device. Only numbers in the
 * external range [SPAPR_XIRQ_BASE, SPAPR_XIRQ_BASE + nr_xirqs) are
 * accepted; the backend reports "in use" errors through @errp.
 */
int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    return spapr->irq->claim(spapr, irq, lsi, errp);
}

/* Release a contiguous range of 'num' external IRQs starting at 'irq' */
void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    int i;

    assert(irq >= SPAPR_XIRQ_BASE);
    assert((irq + num) <= (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = irq; i < (irq + num); i++) {
        spapr->irq->free(spapr, i);
    }
}
/* Return the qemu_irq for external IRQ number 'irq' (not IPIs). */
qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    /*
     * This interface is basically for VIO and PHB devices to find the
     * right qemu_irq to manipulate, so we only allow access to the
     * external irqs for now. Currently anything which needs to
     * access the IPIs most naturally gets there via the guest side
     * interfaces, we can change this if we need to in future.
     */
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    /* The IRQ must have been claimed in every active backend */
    if (spapr->ics) {
        assert(ics_valid_irq(spapr->ics, irq));
    }
    if (spapr->xive) {
        assert(irq < spapr->xive->nr_irqs);
        assert(xive_eas_is_valid(&spapr->xive->eat[irq]));
    }

    return spapr->qirqs[irq];
}

/* Post-migration hook: delegate to the active backend */
int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}

/* Machine reset hook: delegate to the active backend, if it has one */
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    /* All MSIs should have been released by the devices at this point */
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}

/*
 * Look up the phandle of the "interrupt-controller" node of @fdt.
 * Returns the phandle or -1 with @errp set.
 */
int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = "interrupt-controller";
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s",
                   nodename, fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}

/*
 * XICS legacy routines - to deprecate one day
 */

/*
 * Find 'num' consecutive free source numbers in the ICS, with the
 * first one aligned on 'alignnum'. Returns the first source number or
 * -1 when no such block exists.
 */
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    /* Only aligned positions are candidates for the block start */
    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ics_irq_free(ics, i)) {
                break;
            }
        }
        /* Inner loop completed without break: all 'num' slots free */
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}
num to support multiple 733 * MSI vectors. MSI-X is not affected by this. 734 * The hint is used for the first IRQ, the rest should 735 * be allocated continuously. 736 */ 737 if (align) { 738 assert((num == 1) || (num == 2) || (num == 4) || 739 (num == 8) || (num == 16) || (num == 32)); 740 first = ics_find_free_block(ics, num, num); 741 } else { 742 first = ics_find_free_block(ics, num, 1); 743 } 744 745 if (first < 0) { 746 error_setg(errp, "can't find a free %d-IRQ block", num); 747 return -1; 748 } 749 750 return first + ics->offset; 751 } 752 753 #define SPAPR_IRQ_XICS_LEGACY_NR_XIRQS 0x400 754 755 SpaprIrq spapr_irq_xics_legacy = { 756 .nr_xirqs = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS, 757 .nr_msis = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS, 758 .xics = true, 759 .xive = false, 760 761 .claim = spapr_irq_claim_xics, 762 .free = spapr_irq_free_xics, 763 .print_info = spapr_irq_print_info_xics, 764 .dt_populate = spapr_dt_xics, 765 .cpu_intc_create = spapr_irq_cpu_intc_create_xics, 766 .post_load = spapr_irq_post_load_xics, 767 .reset = spapr_irq_reset_xics, 768 .set_irq = spapr_irq_set_irq_xics, 769 .init_kvm = spapr_irq_init_kvm_xics, 770 }; 771 772 static void spapr_irq_register_types(void) 773 { 774 type_register_static(&spapr_intc_info); 775 } 776 777 type_init(spapr_irq_register_types) 778