/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"

static int get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    if (cpu) {
        return cpu->parent_obj.cpu_index;
    }

    return -1;
}

void xics_cpu_destroy(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &icp->ss[cs->cpu_index];

    assert(cs->cpu_index < icp->nr_servers);
    assert(cs == ss->cs);

    ss->output = NULL;
    ss->cs = NULL;
}

void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);

    assert(cs->cpu_index < icp->nr_servers);

    ss->cs = cs;

    if (info->cpu_setup) {
        info->cpu_setup(icp, cpu);
    }

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}

/*
 * XICS Common class - parent for emulated XICS and KVM-XICS
 */
static void xics_common_reset(DeviceState *d)
{
    XICSState *icp = XICS_COMMON(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}

static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_irqs;

    visit_type_int(v, name, &value, errp);
}

static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_irqs) {
        error_setg(errp, "Number of interrupts is already set to %u",
                   icp->nr_irqs);
        return;
    }

    assert(info->set_nr_irqs);
    assert(icp->ics);
    info->set_nr_irqs(icp, value, errp);
}

static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_servers;

    visit_type_int(v, name, &value, errp);
}

static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_servers) {
        error_setg(errp, "Number of servers is already set to %u",
                   icp->nr_servers);
        return;
    }

    assert(info->set_nr_servers);
    info->set_nr_servers(icp, value, errp);
}

static void xics_common_initfn(Object *obj)
{
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}

static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}

static const TypeInfo xics_common_info = {
    .name          = TYPE_XICS_COMMON,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size    = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init    = xics_common_class_init,
};

/*
 * ICP: Presentation layer
 */

#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}

static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}

static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

static void icp_dispatch_pre_save(void *opaque)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->pre_save) {
        info->pre_save(ss);
    }
}

static int icp_dispatch_post_load(void *opaque, int version_id)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->post_load) {
        return info->post_load(ss, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

/*
 * ICS: Source layer
 */
static int ics_valid_irq(ICSState *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}

static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}

static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;
    uint8_t flags[ics->nr_irqs];

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
        ics->irqs[i].flags = flags[i];
    }
}

static int ics_post_load(ICSState *ics, int version_id)
{
    int i;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

static void ics_dispatch_pre_save(void *opaque)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->pre_save) {
        info->pre_save(ics);
    }
}

static int ics_dispatch_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->post_load) {
        return info->post_load(ics, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_dispatch_pre_save,
    .post_load = ics_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static void ics_initfn(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }
    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ICSStateClass *isc = ICS_CLASS(klass);

    dc->realize = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
    isc->post_load = ics_post_load;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
    .instance_init = ics_initfn,
};

/*
 * Exported functions
 */
static int xics_find_source(XICSState *icp, int irq)
{
    int sources = 1;
    int src;

    /* FIXME: implement multiple sources */
    for (src = 0; src < sources; ++src) {
        ICSState *ics = &icp->ics[src];
        if (ics_valid_irq(ics, irq)) {
            return src;
        }
    }

    return -1;
}

qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];
        return ics->qirqs[irq - ics->offset];
    }

    return NULL;
}

static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
}

void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
{
    int src = xics_find_source(icp, irq);
    ICSState *ics;

    assert(src >= 0);

    ics = &icp->ics[src];
    ics_set_irq_type(ics, irq - ics->offset, lsi);
}

#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp)
{
    ICSState *ics = &icp->ics[src];
    int irq;

    if (irq_hint) {
        assert(src == xics_find_source(icp, irq_hint));
        if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
            error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
            return -1;
        }
        irq = irq_hint;
    } else {
        irq = ics_find_free_block(ics, 1, 1);
        if (irq < 0) {
            error_setg(errp, "can't allocate IRQ: no IRQ left");
            return -1;
        }
        irq += ics->offset;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    trace_xics_alloc(src, irq);

    return irq;
}

/*
 * Allocate a block of consecutive IRQs, and return the number of the
 * first IRQ in the block. If align == true, the first IRQ number is
 * aligned to num.
 */
int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align,
                     Error **errp)
{
    int i, first = -1;
    ICSState *ics = &icp->ics[src];

    assert(src == 0);
    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }
    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    if (first >= 0) {
        for (i = first; i < first + num; ++i) {
            ics_set_irq_type(ics, i, lsi);
        }
    }
    first += ics->offset;

    trace_xics_alloc_block(src, first, num, lsi, align);

    return first;
}

static void ics_free(ICSState *ics, int srcno, int num)
{
    int i;

    for (i = srcno; i < srcno + num; ++i) {
        if (ICS_IRQ_FREE(ics, i)) {
            trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset);
        }
        memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
    }
}

void xics_free(XICSState *icp, int irq, int num)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];

        /* FIXME: implement multiple sources */
        assert(src == 0);

        trace_xics_ics_free(ics - icp->ics, irq, num);
        ics_free(ics, irq - ics->offset, num);
    }
}

/*
 * Guest interfaces
 */

static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
    return H_SUCCESS;
}

static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = get_cpu_index_by_dt_id(args[0]);
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];
    uint32_t xirr = icp_accept(ss);

    args[0] = xirr;
    args[1] = cpu_get_host_ticks();
    return H_SUCCESS;
}

static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, cs->cpu_index, xirr);
    return H_SUCCESS;
}

static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];

    args[0] = ss->xirr;
    args[1] = ss->mfrr;

    return H_SUCCESS;
}

static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);
    server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/*
 * XICS
 */

static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
{
    icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
}

static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers,
                                Error **errp)
{
    int i;

    icp->nr_servers = nr_servers;

    icp->ss = g_malloc0(icp->nr_servers * sizeof(ICPState));
    for (i = 0; i < icp->nr_servers; i++) {
        char buffer[32];
        object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
        snprintf(buffer, sizeof(buffer), "icp[%d]", i);
        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
                                  errp);
    }
}

static void xics_realize(DeviceState *dev, Error **errp)
{
    XICSState *icp = XICS(dev);
    Error *error = NULL;
    int i;

    if (!icp->nr_servers) {
        error_setg(errp, "Number of servers needs to be greater than 0");
        return;
    }

    /* Registration of global state belongs in realize */
    spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
    spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
    spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
    spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_XIRR_X, h_xirr_x);
    spapr_register_hypercall(H_EOI, h_eoi);
    spapr_register_hypercall(H_IPOLL, h_ipoll);

    object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    for (i = 0; i < icp->nr_servers; i++) {
        object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
        if (error) {
            error_propagate(errp, error);
            return;
        }
    }
}

static void xics_initfn(Object *obj)
{
    XICSState *xics = XICS(obj);

    xics->ics = ICS(object_new(TYPE_ICS));
    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
    xics->ics->icp = xics;
}

static void xics_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    XICSStateClass *xsc = XICS_CLASS(oc);

    dc->realize = xics_realize;
    xsc->set_nr_irqs = xics_set_nr_irqs;
    xsc->set_nr_servers = xics_set_nr_servers;
}

static const TypeInfo xics_info = {
    .name          = TYPE_XICS,
    .parent        = TYPE_XICS_COMMON,
    .instance_size = sizeof(XICSState),
    .class_size    = sizeof(XICSStateClass),
    .class_init    = xics_class_init,
    .instance_init = xics_initfn,
};

static void xics_register_types(void)
{
    type_register_static(&xics_common_info);
    type_register_static(&xics_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)