1 /* 2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator 3 * 4 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics 5 * 6 * Copyright (c) 2010,2011 David Gibson, IBM Corporation. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a copy 9 * of this software and associated documentation files (the "Software"), to deal 10 * in the Software without restriction, including without limitation the rights 11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 * copies of the Software, and to permit persons to whom the Software is 13 * furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 24 * THE SOFTWARE. 25 * 26 */ 27 28 #include "qemu/osdep.h" 29 #include "qapi/error.h" 30 #include "trace.h" 31 #include "qemu/timer.h" 32 #include "hw/ppc/xics.h" 33 #include "hw/qdev-properties.h" 34 #include "qemu/error-report.h" 35 #include "qemu/module.h" 36 #include "qapi/visitor.h" 37 #include "migration/vmstate.h" 38 #include "monitor/monitor.h" 39 #include "hw/intc/intc.h" 40 #include "hw/irq.h" 41 #include "sysemu/kvm.h" 42 #include "sysemu/reset.h" 43 #include "target/ppc/cpu.h" 44 45 void icp_pic_print_info(ICPState *icp, Monitor *mon) 46 { 47 int cpu_index; 48 49 /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs 50 * are hot plugged or unplugged. 51 */ 52 if (!icp) { 53 return; 54 } 55 56 cpu_index = icp->cs ? icp->cs->cpu_index : -1; 57 58 if (!icp->output) { 59 return; 60 } 61 62 if (kvm_irqchip_in_kernel()) { 63 icp_synchronize_state(icp); 64 } 65 66 monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n", 67 cpu_index, icp->xirr, icp->xirr_owner, 68 icp->pending_priority, icp->mfrr); 69 } 70 71 void ics_pic_print_info(ICSState *ics, Monitor *mon) 72 { 73 uint32_t i; 74 75 monitor_printf(mon, "ICS %4x..%4x %p\n", 76 ics->offset, ics->offset + ics->nr_irqs - 1, ics); 77 78 if (!ics->irqs) { 79 return; 80 } 81 82 if (kvm_irqchip_in_kernel()) { 83 ics_synchronize_state(ics); 84 } 85 86 for (i = 0; i < ics->nr_irqs; i++) { 87 ICSIRQState *irq = ics->irqs + i; 88 89 if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) { 90 continue; 91 } 92 monitor_printf(mon, " %4x %s %02x %02x\n", 93 ics->offset + i, 94 (irq->flags & XICS_FLAGS_IRQ_LSI) ? 95 "LSI" : "MSI", 96 irq->priority, irq->status); 97 } 98 } 99 100 /* 101 * ICP: Presentation layer 102 */ 103 104 #define XISR_MASK 0x00ffffff 105 #define CPPR_MASK 0xff000000 106 107 #define XISR(icp) (((icp)->xirr) & XISR_MASK) 108 #define CPPR(icp) (((icp)->xirr) >> 24) 109 110 static void ics_reject(ICSState *ics, uint32_t nr); 111 static void ics_eoi(ICSState *ics, uint32_t nr); 112 113 static void icp_check_ipi(ICPState *icp) 114 { 115 if (XISR(icp) && (icp->pending_priority <= icp->mfrr)) { 116 return; 117 } 118 119 trace_xics_icp_check_ipi(icp->cs->cpu_index, icp->mfrr); 120 121 if (XISR(icp) && icp->xirr_owner) { 122 ics_reject(icp->xirr_owner, XISR(icp)); 123 } 124 125 icp->xirr = (icp->xirr & ~XISR_MASK) | XICS_IPI; 126 icp->pending_priority = icp->mfrr; 127 icp->xirr_owner = NULL; 128 qemu_irq_raise(icp->output); 129 } 130 131 void icp_resend(ICPState *icp) 132 { 133 XICSFabric *xi = icp->xics; 134 XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi); 135 136 if (icp->mfrr < CPPR(icp)) { 137 icp_check_ipi(icp); 138 } 139 140 xic->ics_resend(xi); 141 } 142 143 void icp_set_cppr(ICPState *icp, uint8_t cppr) 144 { 145 uint8_t old_cppr; 146 uint32_t old_xisr; 147 148 old_cppr = CPPR(icp); 149 icp->xirr = (icp->xirr & ~CPPR_MASK) | (cppr << 24); 150 151 if (cppr < old_cppr) { 152 if (XISR(icp) && (cppr <= icp->pending_priority)) { 153 old_xisr = XISR(icp); 154 icp->xirr &= ~XISR_MASK; /* Clear XISR */ 155 icp->pending_priority = 0xff; 156 qemu_irq_lower(icp->output); 157 if (icp->xirr_owner) { 158 ics_reject(icp->xirr_owner, old_xisr); 159 icp->xirr_owner = NULL; 160 } 161 } 162 } else { 163 if (!XISR(icp)) { 164 icp_resend(icp); 165 } 166 } 167 } 168 169 void icp_set_mfrr(ICPState *icp, uint8_t mfrr) 170 { 171 icp->mfrr = mfrr; 172 if (mfrr < CPPR(icp)) { 173 icp_check_ipi(icp); 174 } 175 } 176 177 uint32_t icp_accept(ICPState *icp) 178 { 179 uint32_t xirr = icp->xirr; 180 181 qemu_irq_lower(icp->output); 182 icp->xirr = icp->pending_priority << 24; 183 icp->pending_priority = 0xff; 184 icp->xirr_owner = NULL; 185 186 trace_xics_icp_accept(xirr, icp->xirr); 187 188 return xirr; 189 } 190 191 uint32_t icp_ipoll(ICPState *icp, uint32_t *mfrr) 192 { 193 if (mfrr) { 194 *mfrr = icp->mfrr; 195 } 196 return icp->xirr; 197 } 198 199 void icp_eoi(ICPState *icp, uint32_t xirr) 200 { 201 XICSFabric *xi = icp->xics; 202 XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi); 203 ICSState *ics; 204 uint32_t irq; 205 206 /* Send EOI -> ICS */ 207 icp->xirr = (icp->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK); 208 trace_xics_icp_eoi(icp->cs->cpu_index, xirr, icp->xirr); 209 irq = xirr & XISR_MASK; 210 211 ics = xic->ics_get(xi, irq); 212 if (ics) { 213 ics_eoi(ics, irq); 214 } 215 if (!XISR(icp)) { 216 icp_resend(icp); 217 } 218 } 219 220 void icp_irq(ICSState *ics, int server, int nr, uint8_t priority) 221 { 222 ICPState *icp = xics_icp_get(ics->xics, server); 223 224 trace_xics_icp_irq(server, nr, priority); 225 226 if ((priority >= CPPR(icp)) 227 || (XISR(icp) && (icp->pending_priority <= priority))) { 228 ics_reject(ics, nr); 229 } else { 230 if (XISR(icp) && icp->xirr_owner) { 231 ics_reject(icp->xirr_owner, XISR(icp)); 232 icp->xirr_owner = NULL; 233 } 234 icp->xirr = (icp->xirr & ~XISR_MASK) | (nr & XISR_MASK); 235 icp->xirr_owner = ics; 236 icp->pending_priority = priority; 237 trace_xics_icp_raise(icp->xirr, icp->pending_priority); 238 qemu_irq_raise(icp->output); 239 } 240 } 241 242 static int icp_pre_save(void *opaque) 243 { 244 ICPState *icp = opaque; 245 246 if (kvm_irqchip_in_kernel()) { 247 icp_get_kvm_state(icp); 248 } 249 250 return 0; 251 } 252 253 static int icp_post_load(void *opaque, int version_id) 254 { 255 ICPState *icp = opaque; 256 257 if (kvm_irqchip_in_kernel()) { 258 Error *local_err = NULL; 259 int ret; 260 261 ret = icp_set_kvm_state(icp, &local_err); 262 if (ret < 0) { 263 error_report_err(local_err); 264 return ret; 265 } 266 } 267 268 return 0; 269 } 270 271 static const VMStateDescription vmstate_icp_server = { 272 .name = "icp/server", 273 .version_id = 1, 274 .minimum_version_id = 1, 275 .pre_save = icp_pre_save, 276 .post_load = icp_post_load, 277 .fields = (const VMStateField[]) { 278 /* Sanity check */ 279 VMSTATE_UINT32(xirr, ICPState), 280 VMSTATE_UINT8(pending_priority, ICPState), 281 VMSTATE_UINT8(mfrr, ICPState), 282 VMSTATE_END_OF_LIST() 283 }, 284 }; 285 286 void icp_reset(ICPState *icp) 287 { 288 icp->xirr = 0; 289 icp->pending_priority = 0xff; 290 icp->mfrr = 0xff; 291 292 if (kvm_irqchip_in_kernel()) { 293 Error *local_err = NULL; 294 295 icp_set_kvm_state(icp, &local_err); 296 if (local_err) { 297 error_report_err(local_err); 298 } 299 } 300 } 301 302 static void icp_realize(DeviceState *dev, Error **errp) 303 { 304 ICPState *icp = ICP(dev); 305 PowerPCCPU *cpu; 306 CPUPPCState *env; 307 Error *err = NULL; 308 309 assert(icp->xics); 310 assert(icp->cs); 311 312 cpu = POWERPC_CPU(icp->cs); 313 env = &cpu->env; 314 switch (PPC_INPUT(env)) { 315 case PPC_FLAGS_INPUT_POWER7: 316 icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER7_INPUT_INT); 317 break; 318 case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */ 319 icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT); 320 break; 321 322 case PPC_FLAGS_INPUT_970: 323 icp->output = qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT); 324 break; 325 326 default: 327 error_setg(errp, "XICS interrupt controller does not support this CPU bus model"); 328 return; 329 } 330 331 /* Connect the presenter to the VCPU (required for CPU hotplug) */ 332 if (kvm_irqchip_in_kernel()) { 333 icp_kvm_realize(dev, &err); 334 if (err) { 335 error_propagate(errp, err); 336 return; 337 } 338 } 339 /* 340 * The way that pre_2_10_icp is handling is really, really hacky. 341 * We used to have here this call: 342 * 343 * vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp); 344 * 345 * But we were doing: 346 * pre_2_10_vmstate_register_dummy_icp() 347 * this vmstate_register() 348 * pre_2_10_vmstate_unregister_dummy_icp() 349 * 350 * So for a short amount of time we had to vmstate entries with 351 * the same name. This fixes it. 352 */ 353 vmstate_replace_hack_for_ppc(NULL, icp->cs->cpu_index, 354 &vmstate_icp_server, icp); 355 } 356 357 static void icp_unrealize(DeviceState *dev) 358 { 359 ICPState *icp = ICP(dev); 360 361 vmstate_unregister(NULL, &vmstate_icp_server, icp); 362 } 363 364 static Property icp_properties[] = { 365 DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC, 366 XICSFabric *), 367 DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *), 368 DEFINE_PROP_END_OF_LIST(), 369 }; 370 371 static void icp_class_init(ObjectClass *klass, void *data) 372 { 373 DeviceClass *dc = DEVICE_CLASS(klass); 374 375 dc->realize = icp_realize; 376 dc->unrealize = icp_unrealize; 377 device_class_set_props(dc, icp_properties); 378 /* 379 * Reason: part of XICS interrupt controller, needs to be wired up 380 * by icp_create(). 381 */ 382 dc->user_creatable = false; 383 } 384 385 static const TypeInfo icp_info = { 386 .name = TYPE_ICP, 387 .parent = TYPE_DEVICE, 388 .instance_size = sizeof(ICPState), 389 .class_init = icp_class_init, 390 .class_size = sizeof(ICPStateClass), 391 }; 392 393 Object *icp_create(Object *cpu, const char *type, XICSFabric *xi, Error **errp) 394 { 395 Object *obj; 396 397 obj = object_new(type); 398 object_property_add_child(cpu, type, obj); 399 object_unref(obj); 400 object_property_set_link(obj, ICP_PROP_XICS, OBJECT(xi), &error_abort); 401 object_property_set_link(obj, ICP_PROP_CPU, cpu, &error_abort); 402 if (!qdev_realize(DEVICE(obj), NULL, errp)) { 403 object_unparent(obj); 404 obj = NULL; 405 } 406 407 return obj; 408 } 409 410 void icp_destroy(ICPState *icp) 411 { 412 Object *obj = OBJECT(icp); 413 414 object_unparent(obj); 415 } 416 417 /* 418 * ICS: Source layer 419 */ 420 static void ics_resend_msi(ICSState *ics, int srcno) 421 { 422 ICSIRQState *irq = ics->irqs + srcno; 423 424 /* FIXME: filter by server#? */ 425 if (irq->status & XICS_STATUS_REJECTED) { 426 irq->status &= ~XICS_STATUS_REJECTED; 427 if (irq->priority != 0xff) { 428 icp_irq(ics, irq->server, srcno + ics->offset, irq->priority); 429 } 430 } 431 } 432 433 static void ics_resend_lsi(ICSState *ics, int srcno) 434 { 435 ICSIRQState *irq = ics->irqs + srcno; 436 437 if ((irq->priority != 0xff) 438 && (irq->status & XICS_STATUS_ASSERTED) 439 && !(irq->status & XICS_STATUS_SENT)) { 440 irq->status |= XICS_STATUS_SENT; 441 icp_irq(ics, irq->server, srcno + ics->offset, irq->priority); 442 } 443 } 444 445 static void ics_set_irq_msi(ICSState *ics, int srcno, int val) 446 { 447 ICSIRQState *irq = ics->irqs + srcno; 448 449 trace_xics_ics_set_irq_msi(srcno, srcno + ics->offset); 450 451 if (val) { 452 if (irq->priority == 0xff) { 453 irq->status |= XICS_STATUS_MASKED_PENDING; 454 trace_xics_masked_pending(); 455 } else { 456 icp_irq(ics, irq->server, srcno + ics->offset, irq->priority); 457 } 458 } 459 } 460 461 static void ics_set_irq_lsi(ICSState *ics, int srcno, int val) 462 { 463 ICSIRQState *irq = ics->irqs + srcno; 464 465 trace_xics_ics_set_irq_lsi(srcno, srcno + ics->offset); 466 if (val) { 467 irq->status |= XICS_STATUS_ASSERTED; 468 } else { 469 irq->status &= ~XICS_STATUS_ASSERTED; 470 } 471 ics_resend_lsi(ics, srcno); 472 } 473 474 void ics_set_irq(void *opaque, int srcno, int val) 475 { 476 ICSState *ics = (ICSState *)opaque; 477 478 if (kvm_irqchip_in_kernel()) { 479 ics_kvm_set_irq(ics, srcno, val); 480 return; 481 } 482 483 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { 484 ics_set_irq_lsi(ics, srcno, val); 485 } else { 486 ics_set_irq_msi(ics, srcno, val); 487 } 488 } 489 490 static void ics_write_xive_msi(ICSState *ics, int srcno) 491 { 492 ICSIRQState *irq = ics->irqs + srcno; 493 494 if (!(irq->status & XICS_STATUS_MASKED_PENDING) 495 || (irq->priority == 0xff)) { 496 return; 497 } 498 499 irq->status &= ~XICS_STATUS_MASKED_PENDING; 500 icp_irq(ics, irq->server, srcno + ics->offset, irq->priority); 501 } 502 503 static void ics_write_xive_lsi(ICSState *ics, int srcno) 504 { 505 ics_resend_lsi(ics, srcno); 506 } 507 508 void ics_write_xive(ICSState *ics, int srcno, int server, 509 uint8_t priority, uint8_t saved_priority) 510 { 511 ICSIRQState *irq = ics->irqs + srcno; 512 513 irq->server = server; 514 irq->priority = priority; 515 irq->saved_priority = saved_priority; 516 517 trace_xics_ics_write_xive(ics->offset + srcno, srcno, server, priority); 518 519 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { 520 ics_write_xive_lsi(ics, srcno); 521 } else { 522 ics_write_xive_msi(ics, srcno); 523 } 524 } 525 526 static void ics_reject(ICSState *ics, uint32_t nr) 527 { 528 ICSStateClass *isc = ICS_GET_CLASS(ics); 529 ICSIRQState *irq = ics->irqs + nr - ics->offset; 530 531 if (isc->reject) { 532 isc->reject(ics, nr); 533 return; 534 } 535 536 trace_xics_ics_reject(nr, nr - ics->offset); 537 if (irq->flags & XICS_FLAGS_IRQ_MSI) { 538 irq->status |= XICS_STATUS_REJECTED; 539 } else if (irq->flags & XICS_FLAGS_IRQ_LSI) { 540 irq->status &= ~XICS_STATUS_SENT; 541 } 542 } 543 544 void ics_resend(ICSState *ics) 545 { 546 ICSStateClass *isc = ICS_GET_CLASS(ics); 547 int i; 548 549 if (isc->resend) { 550 isc->resend(ics); 551 return; 552 } 553 554 for (i = 0; i < ics->nr_irqs; i++) { 555 /* FIXME: filter by server#? */ 556 if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) { 557 ics_resend_lsi(ics, i); 558 } else { 559 ics_resend_msi(ics, i); 560 } 561 } 562 } 563 564 static void ics_eoi(ICSState *ics, uint32_t nr) 565 { 566 int srcno = nr - ics->offset; 567 ICSIRQState *irq = ics->irqs + srcno; 568 569 trace_xics_ics_eoi(nr); 570 571 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { 572 irq->status &= ~XICS_STATUS_SENT; 573 } 574 } 575 576 static void ics_reset_irq(ICSIRQState *irq) 577 { 578 irq->priority = 0xff; 579 irq->saved_priority = 0xff; 580 } 581 582 static void ics_reset_hold(Object *obj, ResetType type) 583 { 584 ICSState *ics = ICS(obj); 585 g_autofree uint8_t *flags = g_malloc(ics->nr_irqs); 586 int i; 587 588 for (i = 0; i < ics->nr_irqs; i++) { 589 flags[i] = ics->irqs[i].flags; 590 } 591 592 memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs); 593 594 for (i = 0; i < ics->nr_irqs; i++) { 595 ics_reset_irq(ics->irqs + i); 596 ics->irqs[i].flags = flags[i]; 597 } 598 599 if (kvm_irqchip_in_kernel()) { 600 Error *local_err = NULL; 601 602 ics_set_kvm_state(ics, &local_err); 603 if (local_err) { 604 error_report_err(local_err); 605 } 606 } 607 } 608 609 static void ics_reset_handler(void *dev) 610 { 611 device_cold_reset(dev); 612 } 613 614 static void ics_realize(DeviceState *dev, Error **errp) 615 { 616 ICSState *ics = ICS(dev); 617 618 assert(ics->xics); 619 620 if (!ics->nr_irqs) { 621 error_setg(errp, "Number of interrupts needs to be greater 0"); 622 return; 623 } 624 ics->irqs = g_new0(ICSIRQState, ics->nr_irqs); 625 626 qemu_register_reset(ics_reset_handler, ics); 627 } 628 629 static void ics_instance_init(Object *obj) 630 { 631 ICSState *ics = ICS(obj); 632 633 ics->offset = XICS_IRQ_BASE; 634 } 635 636 static int ics_pre_save(void *opaque) 637 { 638 ICSState *ics = opaque; 639 640 if (kvm_irqchip_in_kernel()) { 641 ics_get_kvm_state(ics); 642 } 643 644 return 0; 645 } 646 647 static int ics_post_load(void *opaque, int version_id) 648 { 649 ICSState *ics = opaque; 650 651 if (kvm_irqchip_in_kernel()) { 652 Error *local_err = NULL; 653 int ret; 654 655 ret = ics_set_kvm_state(ics, &local_err); 656 if (ret < 0) { 657 error_report_err(local_err); 658 return ret; 659 } 660 } 661 662 return 0; 663 } 664 665 static const VMStateDescription vmstate_ics_irq = { 666 .name = "ics/irq", 667 .version_id = 2, 668 .minimum_version_id = 1, 669 .fields = (const VMStateField[]) { 670 VMSTATE_UINT32(server, ICSIRQState), 671 VMSTATE_UINT8(priority, ICSIRQState), 672 VMSTATE_UINT8(saved_priority, ICSIRQState), 673 VMSTATE_UINT8(status, ICSIRQState), 674 VMSTATE_UINT8(flags, ICSIRQState), 675 VMSTATE_END_OF_LIST() 676 }, 677 }; 678 679 static const VMStateDescription vmstate_ics = { 680 .name = "ics", 681 .version_id = 1, 682 .minimum_version_id = 1, 683 .pre_save = ics_pre_save, 684 .post_load = ics_post_load, 685 .fields = (const VMStateField[]) { 686 /* Sanity check */ 687 VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL), 688 689 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs, 690 vmstate_ics_irq, 691 ICSIRQState), 692 VMSTATE_END_OF_LIST() 693 }, 694 }; 695 696 static Property ics_properties[] = { 697 DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0), 698 DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC, 699 XICSFabric *), 700 DEFINE_PROP_END_OF_LIST(), 701 }; 702 703 static void ics_class_init(ObjectClass *klass, void *data) 704 { 705 DeviceClass *dc = DEVICE_CLASS(klass); 706 ResettableClass *rc = RESETTABLE_CLASS(klass); 707 708 dc->realize = ics_realize; 709 device_class_set_props(dc, ics_properties); 710 dc->vmsd = &vmstate_ics; 711 /* 712 * Reason: part of XICS interrupt controller, needs to be wired up, 713 * e.g. by spapr_irq_init(). 714 */ 715 dc->user_creatable = false; 716 rc->phases.hold = ics_reset_hold; 717 } 718 719 static const TypeInfo ics_info = { 720 .name = TYPE_ICS, 721 .parent = TYPE_DEVICE, 722 .instance_size = sizeof(ICSState), 723 .instance_init = ics_instance_init, 724 .class_init = ics_class_init, 725 .class_size = sizeof(ICSStateClass), 726 }; 727 728 static const TypeInfo xics_fabric_info = { 729 .name = TYPE_XICS_FABRIC, 730 .parent = TYPE_INTERFACE, 731 .class_size = sizeof(XICSFabricClass), 732 }; 733 734 /* 735 * Exported functions 736 */ 737 ICPState *xics_icp_get(XICSFabric *xi, int server) 738 { 739 XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi); 740 741 return xic->icp_get(xi, server); 742 } 743 744 void ics_set_irq_type(ICSState *ics, int srcno, bool lsi) 745 { 746 assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK)); 747 748 ics->irqs[srcno].flags |= 749 lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI; 750 751 if (kvm_irqchip_in_kernel()) { 752 Error *local_err = NULL; 753 754 ics_reset_irq(ics->irqs + srcno); 755 ics_set_kvm_state_one(ics, srcno, &local_err); 756 if (local_err) { 757 error_report_err(local_err); 758 } 759 } 760 } 761 762 static void xics_register_types(void) 763 { 764 type_register_static(&ics_info); 765 type_register_static(&icp_info); 766 type_register_static(&xics_fabric_info); 767 } 768 769 type_init(xics_register_types) 770