/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"

static int get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    if (cpu) {
        return cpu->parent_obj.cpu_index;
    }

    return -1;
}

void xics_cpu_destroy(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &icp->ss[cs->cpu_index];

    assert(cs->cpu_index < icp->nr_servers);
    assert(cs == ss->cs);

    ss->output = NULL;
    ss->cs = NULL;
}

void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);

    assert(cs->cpu_index < icp->nr_servers);

    ss->cs = cs;

    if (info->cpu_setup) {
        info->cpu_setup(icp, cpu);
    }

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}

/*
 * XICS Common class - parent for emulated XICS and KVM-XICS
 */
static void xics_common_reset(DeviceState *d)
{
    XICSState *icp = XICS_COMMON(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}

static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_irqs;

    visit_type_int(v, name, &value, errp);
}

static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_irqs) {
        error_setg(errp, "Number of interrupts is already set to %u",
                   icp->nr_irqs);
        return;
    }

    assert(info->set_nr_irqs);
    assert(icp->ics);
    info->set_nr_irqs(icp, value, errp);
}

static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_servers;

    visit_type_int(v, name, &value, errp);
}

static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_servers) {
        error_setg(errp, "Number of servers is already set to %u",
                   icp->nr_servers);
        return;
    }

    assert(info->set_nr_servers);
    info->set_nr_servers(icp, value, errp);
}

static void xics_common_initfn(Object *obj)
{
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}

static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}

static const TypeInfo xics_common_info = {
    .name = TYPE_XICS_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init = xics_common_class_init,
};

/*
 * ICP: Presentation layer
 */

/*
 * The XIRR presented by each ICP packs the CPPR (current processor
 * priority) into the top byte and the XISR (pending interrupt source
 * number) into the low 24 bits.
 */
#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}

static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}

static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

static void icp_dispatch_pre_save(void *opaque)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->pre_save) {
        info->pre_save(ss);
    }
}

static int icp_dispatch_post_load(void *opaque, int version_id)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->post_load) {
        return info->post_load(ss, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

/*
 * ICS: Source layer
 */
static int ics_valid_irq(ICSState *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}

static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}

static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;
    uint8_t flags[ics->nr_irqs];

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
        ics->irqs[i].flags = flags[i];
    }
}

static int ics_post_load(ICSState *ics, int version_id)
{
    int i;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

static void ics_dispatch_pre_save(void *opaque)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->pre_save) {
        info->pre_save(ics);
    }
}

static int ics_dispatch_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->post_load) {
        return info->post_load(ics, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_dispatch_pre_save,
    .post_load = ics_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static void ics_initfn(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }
    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ICSStateClass *isc = ICS_CLASS(klass);

    dc->realize = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
    isc->post_load = ics_post_load;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
    .instance_init = ics_initfn,
};

/*
 * Exported functions
 */
static int xics_find_source(XICSState *icp, int irq)
{
    int sources = 1;
    int src;

    /* FIXME: implement multiple sources */
    for (src = 0; src < sources; ++src) {
        ICSState *ics = &icp->ics[src];
        if (ics_valid_irq(ics, irq)) {
            return src;
        }
    }

    return -1;
}

qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];
        return ics->qirqs[irq - ics->offset];
    }

    return NULL;
}

static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
}

#define ICS_IRQ_FREE(ics, srcno) \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int xics_spapr_alloc(XICSState *icp, int src, int irq_hint, bool lsi,
                     Error **errp)
{
    ICSState *ics = &icp->ics[src];
    int irq;

    if (irq_hint) {
        assert(src == xics_find_source(icp, irq_hint));
        if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
            error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
            return -1;
        }
        irq = irq_hint;
    } else {
        irq = ics_find_free_block(ics, 1, 1);
        if (irq < 0) {
            error_setg(errp, "can't allocate IRQ: no IRQ left");
            return -1;
        }
        irq += ics->offset;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    trace_xics_alloc(src, irq);

    return irq;
}

/*
 * Allocate a block of consecutive IRQs and return the number of the
 * first IRQ in the block. If align == true, aligns the first IRQ number
 * to num.
 */
int xics_spapr_alloc_block(XICSState *icp, int src, int num, bool lsi,
                           bool align, Error **errp)
{
    int i, first = -1;
    ICSState *ics = &icp->ics[src];

    assert(src == 0);
    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }
    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    for (i = first; i < first + num; ++i) {
        ics_set_irq_type(ics, i, lsi);
    }
    first += ics->offset;

    trace_xics_alloc_block(src, first, num, lsi, align);

    return first;
}

static void ics_free(ICSState *ics, int srcno, int num)
{
    int i;

    for (i = srcno; i < srcno + num; ++i) {
        if (ICS_IRQ_FREE(ics, i)) {
            trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset);
        }
        memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
    }
}

void xics_spapr_free(XICSState *icp, int irq, int num)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];

        /* FIXME: implement multiple sources */
        assert(src == 0);

        trace_xics_ics_free(ics - icp->ics, irq, num);
        ics_free(ics, irq - ics->offset, num);
    }
}

/*
 * Guest interfaces
 */

static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
    return H_SUCCESS;
}

static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = get_cpu_index_by_dt_id(args[0]);
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];
    uint32_t xirr = icp_accept(ss);

    args[0] = xirr;
    args[1] = cpu_get_host_ticks();
    return H_SUCCESS;
}

static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, cs->cpu_index, xirr);
    return H_SUCCESS;
}

static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];

    args[0] = ss->xirr;
    args[1] = ss->mfrr;

    return H_SUCCESS;
}

static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);
    server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/*
 * XICS
 */

static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
{
    icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
}

static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers,
                                Error **errp)
{
    int i;

    icp->nr_servers = nr_servers;

    icp->ss = g_malloc0(icp->nr_servers * sizeof(ICPState));
    for (i = 0; i < icp->nr_servers; i++) {
        char buffer[32];
        object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
        snprintf(buffer, sizeof(buffer), "icp[%d]", i);
        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
                                  errp);
    }
}

static void xics_spapr_realize(DeviceState *dev, Error **errp)
{
    XICSState *icp = XICS_SPAPR(dev);
    Error *error = NULL;
    int i;

    if (!icp->nr_servers) {
        error_setg(errp, "Number of servers needs to be greater than 0");
        return;
    }

    /* Registration of global state belongs in realize */
    spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
    spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
    spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
    spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_XIRR_X, h_xirr_x);
    spapr_register_hypercall(H_EOI, h_eoi);
    spapr_register_hypercall(H_IPOLL, h_ipoll);

    object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    for (i = 0; i < icp->nr_servers; i++) {
        object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
        if (error) {
            error_propagate(errp, error);
            return;
        }
    }
}

static void xics_spapr_initfn(Object *obj)
{
    XICSState *xics = XICS_SPAPR(obj);

    xics->ics = ICS(object_new(TYPE_ICS));
    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
    xics->ics->icp = xics;
}

static void xics_spapr_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    XICSStateClass *xsc = XICS_SPAPR_CLASS(oc);

    dc->realize = xics_spapr_realize;
    xsc->set_nr_irqs = xics_set_nr_irqs;
    xsc->set_nr_servers = xics_set_nr_servers;
}

static const TypeInfo xics_spapr_info = {
    .name = TYPE_XICS_SPAPR,
    .parent = TYPE_XICS_COMMON,
    .instance_size = sizeof(XICSState),
    .class_size = sizeof(XICSStateClass),
    .class_init = xics_spapr_class_init,
    .instance_init = xics_spapr_initfn,
};

static void xics_register_types(void)
{
    type_register_static(&xics_common_info);
    type_register_static(&xics_spapr_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)
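
/*
 * Usage sketch (illustrative only, not part of this file's build): board
 * or device code that already holds the machine's XICSState typically
 * allocates an MSI-style source with xics_spapr_alloc() and fetches its
 * qemu_irq with xics_get_qirq(), both defined above. The "spapr" pointer
 * and the error-handling policy below are assumptions for the example.
 *
 *     Error *local_err = NULL;
 *     int irq = xics_spapr_alloc(spapr->icp, 0, 0, false, &local_err);
 *
 *     if (irq < 0) {
 *         error_report_err(local_err);
 *     } else {
 *         qemu_irq qirq = xics_get_qirq(spapr->icp, irq);
 *         qemu_irq_pulse(qirq);    (pulse delivers an edge, i.e. MSI-style,
 *                                   interrupt through ics_set_irq)
 *     }
 */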