/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"

/*
 * Map a device-tree CPU id to the matching vCPU's global cpu_index.
 * Returns -1 if no vCPU carries that device-tree id.
 */
static int get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    if (cpu) {
        return cpu->parent_obj.cpu_index;
    }

    return -1;
}

/*
 * Attach a CPU to its per-server ICP (presentation controller) state:
 * run the model-specific setup hook (if any) and wire the ICP output
 * line to the CPU's external interrupt input pin.  One ICP server
 * exists per vCPU, indexed by cpu_index.
 */
void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);

    /* cpu_index doubles as the XICS server number */
    assert(cs->cpu_index < icp->nr_servers);

    if (info->cpu_setup) {
        info->cpu_setup(icp, cpu);
    }

    /* Choose the external-interrupt input matching the CPU bus model */
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}

/*
 * XICS Common class - parent for emulated XICS and KVM-XICS
 */

/* Reset every ICP server, then the (single) interrupt source controller */
static void xics_common_reset(DeviceState *d)
{
    XICSState *icp = XICS_COMMON(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}

/* QOM getter for the "nr_irqs" property */
static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_irqs;

    visit_type_int(v, name, &value, errp);
}

/*
 * QOM setter for "nr_irqs".  Write-once: rejects a second assignment.
 * The actual allocation is delegated to the subclass's set_nr_irqs
 * hook (emulated XICS vs KVM-XICS differ here).
 */
static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_irqs) {
        error_setg(errp, "Number of interrupts is already set to %u",
                   icp->nr_irqs);
        return;
    }

    assert(info->set_nr_irqs);
    assert(icp->ics);
    info->set_nr_irqs(icp, value, errp);
}

/* QOM getter for the "nr_servers" property */
static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_servers;

    visit_type_int(v, name, &value, errp);
}

/*
 * QOM setter for "nr_servers".  Write-once, like "nr_irqs"; server
 * (ICP) array allocation is delegated to the subclass hook.
 */
static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_servers) {
        error_setg(errp, "Number of servers is already set to %u",
                   icp->nr_servers);
        return;
    }

    assert(info->set_nr_servers);
    info->set_nr_servers(icp, value, errp);
}

/* Register the write-once sizing properties on every XICS instance */
static void xics_common_initfn(Object *obj)
{
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}

static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}

static const TypeInfo xics_common_info = {
    .name          = TYPE_XICS_COMMON,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size    = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init    = xics_common_class_init,
};

/*
 * ICP: Presentation layer
 *
 * The XIRR register packs the pending interrupt source number (XISR,
 * low 24 bits) and the current processor priority (CPPR, top 8 bits).
 * Lower numeric priority values are MORE favored; 0xff means "no
 * interrupt / fully open".
 */

#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

/*
 * See whether a pending IPI (MFRR) should be presented to @server.
 * If an interrupt is already pending at a priority at least as
 * favored as the MFRR, do nothing; otherwise bounce the currently
 * pending source back to the ICS and present the IPI instead.
 */
static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    /* Reject the displaced source BEFORE overwriting XISR */
    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

/*
 * Re-deliver anything that may now be presentable to @server:
 * first a possible IPI, then every rejected/asserted ICS source.
 */
static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}

/*
 * Set the Current Processor Priority Register for @server.
 * Raising the priority (numerically lowering CPPR) may force the
 * pending interrupt to be rejected back to the ICS; lowering it may
 * allow a previously rejected interrupt to be re-presented.
 */
static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        /* CPPR became more restrictive: the pending interrupt may no
         * longer qualify.  Clear XISR and drop the output line before
         * handing the source back to the ICS. */
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        /* CPPR became less restrictive: give sources another chance */
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

/*
 * Set the Most Favored Request Register (IPI priority) for @server
 * and present the IPI if it now beats the processor priority.
 */
static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}

/*
 * Accept the pending interrupt (H_XIRR read): return the old XIRR,
 * deassert the output line, and raise CPPR to the accepted
 * interrupt's priority so equal/less favored sources stay blocked.
 */
static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}
/*
 * End-of-interrupt (H_EOI): restore CPPR from the written XIRR value,
 * notify the ICS so LSIs can re-trigger, then re-present anything
 * that is now deliverable.
 */
static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

/*
 * Present source @nr at @priority to @server.  The interrupt is
 * rejected back to the ICS if it does not beat CPPR, or if a more
 * (or equally) favored interrupt is already pending; a less favored
 * pending interrupt is displaced and rejected instead.
 */
static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            /* Displace the currently pending, less favored source */
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

/* Forward migration pre_save to the ICP subclass hook (e.g. KVM sync) */
static void icp_dispatch_pre_save(void *opaque)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->pre_save) {
        info->pre_save(ss);
    }
}

/* Forward migration post_load to the ICP subclass hook */
static int icp_dispatch_post_load(void *opaque, int version_id)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->post_load) {
        return info->post_load(ss, version_id);
    }

    return 0;
}

/* Migration state for one ICP server: XIRR plus the two priorities */
static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

/* Reset one ICP server: nothing pending, priorities fully open (0xff) */
static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure the output line is deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

/*
 * ICS: Source layer
 */

/* True if global IRQ number @nr falls inside this ICS's window */
static int ics_valid_irq(ICSState *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}

/* Re-present a previously rejected MSI, unless it is masked (0xff) */
static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

/*
 * Re-present a level-triggered source: only if it is unmasked, still
 * asserted, and not already sitting in an ICP (SENT).
 */
static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

/*
 * Edge/MSI input handler: a masked source latches MASKED_PENDING and
 * is delivered later when its XIVE is written; otherwise deliver now.
 */
static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

/* Level/LSI input handler: track the line level, then try to deliver */
static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

/* qemu_irq entry point: dispatch on the source's LSI/MSI flag */
static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

/* After a XIVE update, deliver an MSI that was latched while masked */
static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

/* After a XIVE update, re-evaluate a level source */
static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

/*
 * Update a source's XIVE (routing) entry: target server, current
 * priority and the saved priority used by ibm,int-on/int-off.
 */
static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

/* An ICP bounced source @nr back; remember to retry it on resend */
static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}

/* Walk every source and re-present whatever is deliverable */
static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

/*
 * EOI for source @nr: for an LSI, clear SENT so a still-asserted line
 * can be re-presented; MSIs need no action here.
 */
static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

/*
 * Reset the source controller: zero all per-IRQ state and mask every
 * source, but preserve each source's LSI/MSI type flags, which were
 * fixed at wiring time and survive reset.
 * NOTE(review): uses a VLA sized by nr_irqs for the flags snapshot;
 * nr_irqs is set once at configuration time, but a large value would
 * land on the stack.
 */
static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;
    uint8_t flags[ics->nr_irqs];

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
        ics->irqs[i].flags = flags[i];
    }
}

/* After migration, re-present pending interrupts to every server */
static int ics_post_load(ICSState *ics, int version_id)
{
    int i;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

/* Forward migration pre_save to the ICS subclass hook */
static void ics_dispatch_pre_save(void *opaque)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->pre_save) {
        info->pre_save(ics);
    }
}

/* Forward migration post_load to the ICS subclass hook */
static int ics_dispatch_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->post_load) {
        return info->post_load(ics, version_id);
    }

    return 0;
}

/* Per-source migration state */
static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_dispatch_pre_save,
    .post_load = ics_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check: both sides must agree on the IRQ count */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

/* Global IRQ numbers start at XICS_IRQ_BASE; 0..BASE-1 are reserved */
static void ics_initfn(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

/*
 * Realize the ICS: allocate the per-source state array and the
 * qemu_irq inputs.  Requires nr_irqs to have been set beforehand
 * (via the XICS "nr_irqs" property).
 */
static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater 0");
        return;
    }
    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ICSStateClass *isc = ICS_CLASS(klass);

    dc->realize = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
    isc->post_load = ics_post_load;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
    .instance_init = ics_initfn,
};

/*
 * Exported functions
 */

/* Return the index of the ICS owning @irq, or -1 if none does */
static int xics_find_source(XICSState *icp, int irq)
{
    int sources = 1;
    int src;

    /* FIXME: implement multiple sources */
    for (src = 0; src < sources; ++src) {
        ICSState *ics = &icp->ics[src];
        if (ics_valid_irq(ics, irq)) {
            return src;
        }
    }

    return -1;
}

/* Look up the qemu_irq input line for global IRQ number @irq */
qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];
        return ics->qirqs[irq - ics->offset];
    }

    return NULL;
}

/* Record a source's trigger type; may only be set once per source */
static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
}

/* Public wrapper: set the trigger type of global IRQ @irq */
void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
{
    int src = xics_find_source(icp, irq);
    ICSState *ics;

    assert(src >= 0);

    ics = &icp->ics[src];
    ics_set_irq_type(ics, irq - ics->offset, lsi);
}

/* A source is free iff no trigger type has been claimed for it yet */
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

/*
 * Find @num consecutive free sources whose first index is a multiple
 * of @alignnum.  Returns the first source index, or -1 if no such
 * block exists.
 */
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

/*
 * Allocate one IRQ from source controller @src and mark its trigger
 * type.  A non-zero @irq_hint requests that specific global IRQ
 * number; it fails if already in use.  Returns the global IRQ number
 * or -1 (with @errp set) on failure.
 */
int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp)
{
    ICSState *ics = &icp->ics[src];
    int irq;

    if (irq_hint) {
        assert(src == xics_find_source(icp, irq_hint));
        if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
            error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
            return -1;
        }
        irq = irq_hint;
    } else {
        irq = ics_find_free_block(ics, 1, 1);
        if (irq < 0) {
            error_setg(errp, "can't allocate IRQ: no IRQ left");
            return -1;
        }
        irq += ics->offset;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    trace_xics_alloc(src, irq);

    return irq;
}

/*
 * Allocate block of consecutive IRQs, and return the number of the
 * first IRQ in the block.  If align==true, aligns the first IRQ
 * number to num.
 */
int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align,
                     Error **errp)
{
    int i, first = -1;
    ICSState *ics = &icp->ics[src];

    assert(src == 0);
    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }
    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    /* NOTE(review): always true here — the first < 0 case returned above */
    if (first >= 0) {
        for (i = first; i < first + num; ++i) {
            ics_set_irq_type(ics, i, lsi);
        }
    }
    first += ics->offset;

    trace_xics_alloc_block(src, first, num, lsi, align);

    return first;
}

/* Release @num sources starting at @srcno; warns on double-free */
static void ics_free(ICSState *ics, int srcno, int num)
{
    int i;

    for (i = srcno; i < srcno + num; ++i) {
        if (ICS_IRQ_FREE(ics, i)) {
            trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset);
        }
        memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
    }
}

/* Public wrapper: free @num consecutive global IRQs starting at @irq */
void xics_free(XICSState *icp, int irq, int num)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];

        /* FIXME: implement multiple sources */
        assert(src == 0);

        trace_xics_ics_free(ics - icp->ics, irq, num);
        ics_free(ics, irq - ics->offset, num);
    }
}

/*
 * Guest interfaces
 */

/* H_CPPR: set the calling CPU's Current Processor Priority Register */
static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
    return H_SUCCESS;
}

/* H_IPI: set the MFRR of (and possibly IPI) the target server */
static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    /* An unknown dt id maps to -1, which wraps to a huge unsigned
     * value and is caught by the range check below */
    target_ulong server = get_cpu_index_by_dt_id(args[0]);
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

/* H_XIRR: accept the pending interrupt and return the old XIRR */
static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

/* H_XIRR_X: like H_XIRR but also returns a timestamp */
static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];
    uint32_t xirr = icp_accept(ss);

    args[0] = xirr;
    args[1] = cpu_get_host_ticks();
    return H_SUCCESS;
}

/* H_EOI: signal end-of-interrupt with the given XIRR value */
static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, cs->cpu_index, xirr);
    return H_SUCCESS;
}

/* H_IPOLL: non-destructively read the XIRR and MFRR */
static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];

    args[0] = ss->xirr;
    args[1] = ss->mfrr;

    return H_SUCCESS;
}

/* RTAS ibm,set-xive: route an interrupt source to a server/priority */
static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);
    /* -1 from an unknown dt id wraps to a huge uint32_t and fails the
     * nr_servers range check below */
    server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    /* saved_priority mirrors priority so int-on restores this value */
    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/* RTAS ibm,get-xive: read back a source's server and priority */
static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

/*
 * RTAS ibm,int-off: mask a source (priority 0xff), remembering the
 * current priority in saved_priority so ibm,int-on can restore it.
 */
static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/* RTAS ibm,int-on: unmask a source, restoring its saved priority */
static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/*
 * XICS
 */

/* Emulated-XICS hook: size the (single) ICS to @nr_irqs sources */
static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
{
    icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
}

/* Emulated-XICS hook: allocate and QOM-parent one ICP per server */
static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers,
                                Error **errp)
{
    int i;

    icp->nr_servers = nr_servers;

    icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState));
    for (i = 0; i < icp->nr_servers; i++) {
        char buffer[32];
        object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
        snprintf(buffer, sizeof(buffer), "icp[%d]", i);
        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
                                  errp);
    }
}

/*
 * Realize the emulated XICS: register the RTAS calls and hypercalls
 * that make up the guest interface, then realize the ICS and every
 * ICP child.  Requires "nr_servers" to have been set.
 */
static void xics_realize(DeviceState *dev, Error **errp)
{
    XICSState *icp = XICS(dev);
    Error *error = NULL;
    int i;

    if (!icp->nr_servers) {
        error_setg(errp, "Number of servers needs to be greater 0");
        return;
    }

    /* Registration of global state belongs into realize */
    spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
    spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
    spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
    spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_XIRR_X, h_xirr_x);
    spapr_register_hypercall(H_EOI, h_eoi);
    spapr_register_hypercall(H_IPOLL, h_ipoll);

    object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    for (i = 0; i < icp->nr_servers; i++) {
        object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
        if (error) {
            error_propagate(errp, error);
            return;
        }
    }
}

/* Create the child ICS and link it back to its owning XICS */
static void xics_initfn(Object *obj)
{
    XICSState *xics = XICS(obj);

    xics->ics = ICS(object_new(TYPE_ICS));
    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
    xics->ics->icp = xics;
}

static void xics_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    XICSStateClass *xsc = XICS_CLASS(oc);

    dc->realize = xics_realize;
    xsc->set_nr_irqs = xics_set_nr_irqs;
    xsc->set_nr_servers = xics_set_nr_servers;
}

static const TypeInfo xics_info = {
    .name = TYPE_XICS,
    .parent = TYPE_XICS_COMMON,
    .instance_size = sizeof(XICSState),
    .class_size = sizeof(XICSStateClass),
    .class_init = xics_class_init,
    .instance_init = xics_initfn,
};

static void xics_register_types(void)
{
    type_register_static(&xics_common_info);
    type_register_static(&xics_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)