1 /* 2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator 3 * 4 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics 5 * 6 * Copyright (c) 2010,2011 David Gibson, IBM Corporation. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a copy 9 * of this software and associated documentation files (the "Software"), to deal 10 * in the Software without restriction, including without limitation the rights 11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 * copies of the Software, and to permit persons to whom the Software is 13 * furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 24 * THE SOFTWARE. 
 *
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"

/*
 * Translate a device-tree CPU id into QEMU's global cpu_index.
 * Returns -1 if no vCPU with that device-tree id exists.
 */
static int get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    if (cpu) {
        return cpu->parent_obj.cpu_index;
    }

    return -1;
}

/*
 * Wire a vCPU to its per-server interrupt presenter (ICP): give the
 * concrete backend a chance to do its own per-CPU setup, then latch
 * the CPU's external-interrupt input line so the ICP can raise/lower
 * it when presenting interrupts.
 */
void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);

    /* One ICP state ("server") must exist per vCPU */
    assert(cs->cpu_index < icp->nr_servers);

    if (info->cpu_setup) {
        /* e.g. the KVM backend connects the in-kernel ICP here */
        info->cpu_setup(icp, cpu);
    }

    /* Pick the core's external interrupt pin for this CPU bus model */
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}

/*
 * XICS Common class - parent for emulated XICS and KVM-XICS
 */

/* Reset every presenter (ICP) and the single source controller (ICS) */
static void xics_common_reset(DeviceState *d)
{
    XICSState *icp = XICS_COMMON(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}

/* QOM getter for the "nr_irqs" property */
static void xics_prop_get_nr_irqs(Object *obj, Visitor *v,
                                  void *opaque, const char *name, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_irqs;

    visit_type_int(v, &value, name, errp);
}

/*
 * QOM setter for "nr_irqs".  Write-once: resizing a live ICS is not
 * supported.  The actual allocation is delegated to the concrete
 * subclass (emulated or KVM) via the class hook.
 */
static void xics_prop_set_nr_irqs(Object *obj, Visitor *v,
                                  void *opaque, const char *name, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_irqs) {
        error_setg(errp, "Number of interrupts is already set to %u",
                   icp->nr_irqs);
        return;
    }

    assert(info->set_nr_irqs);
    assert(icp->ics);
    info->set_nr_irqs(icp, value, errp);
}

/* QOM getter for the "nr_servers" property */
static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
                                     void *opaque, const char *name,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_servers;

    visit_type_int(v, &value, name, errp);
}

/* QOM setter for "nr_servers" (one server per vCPU); also write-once */
static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
                                     void *opaque, const char *name,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_servers) {
        error_setg(errp, "Number of servers is already set to %u",
                   icp->nr_servers);
        return;
    }

    assert(info->set_nr_servers);
    info->set_nr_servers(icp, value, errp);
}

static void xics_common_initfn(Object *obj)
{
    /* Both properties are backed by the dispatching accessors above */
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}

static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}

static const TypeInfo xics_common_info = {
    .name          = TYPE_XICS_COMMON,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size    = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init    = xics_common_class_init,
};

/*
 * ICP: Presentation layer
 */

#define XISR_MASK  0x00ffffff
#define CPPR_MASK 0xff000000

/* Fields of the presenter's XIRR: pending source number and priority */
#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

/*
 * Try to present an IPI (source number XICS_IPI) on 'server' at the
 * priority stored in MFRR.  In XICS a numerically smaller priority is
 * more favoured.  If an equally or more favoured interrupt is already
 * pending, the IPI loses and nothing happens; a less favoured pending
 * interrupt is bounced back to its source (rejected) and replaced.
 */
static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        /* Pending interrupt is at least as favoured; keep it */
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        /* Displace the currently pending interrupt back to the ICS */
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

/*
 * Re-deliver anything that may now be presentable on 'server': a
 * pending IPI (if MFRR beats the current CPPR) and any source-side
 * interrupts that were previously rejected.
 */
static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}

/*
 * Update the Current Processor Priority Register (smaller value ==
 * more favoured).  Making CPPR more favoured can mask the interrupt
 * currently latched in XISR, which is then bounced back (rejected) to
 * its source; making CPPR less favoured may let previously blocked
 * interrupts through, so trigger a resend in that case.
 */
static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            /* New CPPR masks the pending interrupt: bounce it back */
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            /* CPPR was relaxed and nothing is pending: look again */
            icp_resend(icp, server);
        }
    }
}

/* Update the Most Favoured Request Register (requested IPI priority) */
static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        /* New IPI priority beats CPPR: try to present the IPI now */
        icp_check_ipi(icp, server);
    }
}

/*
 * CPU accepts the pending interrupt (XIRR load): return the old XIRR
 * value and raise CPPR to the accepted interrupt's priority, so only
 * more favoured interrupts can preempt it until EOI.
 */
static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

/*
 * End of interrupt (XIRR store): restore the CPPR value written back
 * by the OS, notify the source layer, and if nothing is pending any
 * more, re-present whatever is now deliverable.
 */
static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

/*
 * Source layer presents interrupt 'nr' at 'priority' to 'server'.
 * The interrupt is rejected unless it beats both CPPR and any already
 * pending interrupt; when it wins, the previously pending interrupt
 * (if any) is rejected back to the source and the CPU line is raised.
 */
static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

/* Let the concrete ICP class (e.g. KVM) sync state out before saving */
static void icp_dispatch_pre_save(void *opaque)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->pre_save) {
        info->pre_save(ss);
    }
}

/* Push restored state into the concrete ICP class after loading */
static int icp_dispatch_post_load(void *opaque, int version_id)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->post_load) {
        return info->post_load(ss, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    /* Nothing pending, everything masked, no IPI requested */
    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make all outputs are
deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

/*
 * ICS: Source layer
 */

/* True if global interrupt number 'nr' belongs to this source block */
static int ics_valid_irq(ICSState *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}

/* Re-deliver an edge (MSI) interrupt that the presenter rejected */
static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            /* Priority 0xff means masked: don't deliver while masked */
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

/*
 * Re-deliver a level (LSI) interrupt: present it if the line is still
 * asserted, it is unmasked, and it is not already at the presenter.
 */
static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

/* Edge (MSI) input: only the rising edge matters */
static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            /* Masked: latch the edge for delivery when unmasked */
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

/* Level (LSI) input: track the line state, then try to (re)deliver */
static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

/* qemu_irq input handler: dispatch on the source's LSI/MSI type flag */
static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

/* After a XIVE update, deliver an MSI that was latched while masked */
static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

/* After a XIVE update, an LSI just follows the level resend rules */
static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

/*
 * Update an interrupt's routing entry (XIVE): destination server and
 * priority.  'saved_priority' is what a later ibm,int-on will restore
 * after a masking ibm,int-off.
 */
static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

/* Presenter bounced 'nr' back: remember it for a later resend */
static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}

/* Walk all sources and re-deliver whatever is still deliverable */
static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#?
*/
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

/* EOI from the presenter: for an LSI, allow it to be presented again */
static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

/*
 * Reset every source entry to "masked" while preserving the LSI/MSI
 * type flags, which are fixed configuration rather than runtime state.
 */
static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;
    uint8_t flags[ics->nr_irqs];

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
        ics->irqs[i].flags = flags[i];
    }
}

/* After migration, nudge every server so rejected/pending irqs redeliver */
static int ics_post_load(ICSState *ics, int version_id)
{
    int i;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

/* Let the concrete ICS class sync state out before saving */
static void ics_dispatch_pre_save(void *opaque)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->pre_save) {
        info->pre_save(ics);
    }
}

/* Push restored state into the concrete ICS class after loading */
static int ics_dispatch_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->post_load) {
        return info->post_load(ics, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_dispatch_pre_save,
    .post_load = ics_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static void ics_initfn(Object *obj)
{
    ICSState *ics = ICS(obj);

    /* Global interrupt numbers for this source start at XICS_IRQ_BASE */
    ics->offset = XICS_IRQ_BASE;
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater 0");
        return;
    }
    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ICSStateClass *isc = ICS_CLASS(klass);

    dc->realize = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
    isc->post_load = ics_post_load;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
    .instance_init = ics_initfn,
};

/*
 * Exported functions
 */

/* Map a global irq number to the index of the ICS owning it, or -1 */
static int xics_find_source(XICSState *icp, int irq)
{
    int sources = 1;
    int src;

    /* FIXME: implement multiple sources */
    for (src = 0; src < sources; ++src) {
        ICSState *ics = &icp->ics[src];
        if (ics_valid_irq(ics, irq)) {
            return src;
        }
    }

    return -1;
}

/* Return the qemu_irq input for global interrupt 'irq', or NULL */
qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];
        return ics->qirqs[irq - ics->offset];
    }

    return NULL;
}

/* Mark a source as level (LSI) or edge (MSI); allowed only once */
static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
}

/* Public wrapper: set the LSI/MSI type of global interrupt 'irq' */
void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
{
    int src = xics_find_source(icp, irq);
    ICSState *ics;

    assert(src >= 0);

    ics = &icp->ics[src];
    ics_set_irq_type(ics, irq - ics->offset, lsi);
}

/* An irq is free iff no LSI/MSI type flag has been claimed for it */
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

/*
 * Find 'num' consecutive free source numbers with the first aligned
 * to 'alignnum'.  Returns the first source number, or -1 if none fit.
 */
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

/*
 * Allocate one interrupt, optionally at the caller-requested global
 * number 'irq_hint'.  Returns the global irq number, or -1 on failure.
 */
int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi)
{
    ICSState *ics = &icp->ics[src];
    int irq;

    if (irq_hint) {
        assert(src == xics_find_source(icp, irq_hint));
        if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
            trace_xics_alloc_failed_hint(src, irq_hint);
            return -1;
        }
        irq = irq_hint;
    } else {
        irq = ics_find_free_block(ics, 1, 1);
        if (irq < 0) {
            trace_xics_alloc_failed_no_left(src);
            return -1;
        }
        irq += ics->offset;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    trace_xics_alloc(src, irq);

    return irq;
}

/*
 * Allocate block of consecutive IRQs, and return the number of the first IRQ in the block.
 * If align==true, aligns the first IRQ number to num.
745 */ 746 int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align) 747 { 748 int i, first = -1; 749 ICSState *ics = &icp->ics[src]; 750 751 assert(src == 0); 752 /* 753 * MSIMesage::data is used for storing VIRQ so 754 * it has to be aligned to num to support multiple 755 * MSI vectors. MSI-X is not affected by this. 756 * The hint is used for the first IRQ, the rest should 757 * be allocated continuously. 758 */ 759 if (align) { 760 assert((num == 1) || (num == 2) || (num == 4) || 761 (num == 8) || (num == 16) || (num == 32)); 762 first = ics_find_free_block(ics, num, num); 763 } else { 764 first = ics_find_free_block(ics, num, 1); 765 } 766 767 if (first >= 0) { 768 for (i = first; i < first + num; ++i) { 769 ics_set_irq_type(ics, i, lsi); 770 } 771 } 772 first += ics->offset; 773 774 trace_xics_alloc_block(src, first, num, lsi, align); 775 776 return first; 777 } 778 779 static void ics_free(ICSState *ics, int srcno, int num) 780 { 781 int i; 782 783 for (i = srcno; i < srcno + num; ++i) { 784 if (ICS_IRQ_FREE(ics, i)) { 785 trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset); 786 } 787 memset(&ics->irqs[i], 0, sizeof(ICSIRQState)); 788 } 789 } 790 791 void xics_free(XICSState *icp, int irq, int num) 792 { 793 int src = xics_find_source(icp, irq); 794 795 if (src >= 0) { 796 ICSState *ics = &icp->ics[src]; 797 798 /* FIXME: implement multiple sources */ 799 assert(src == 0); 800 801 trace_xics_ics_free(ics - icp->ics, irq, num); 802 ics_free(ics, irq - ics->offset, num); 803 } 804 } 805 806 /* 807 * Guest interfaces 808 */ 809 810 static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr, 811 target_ulong opcode, target_ulong *args) 812 { 813 CPUState *cs = CPU(cpu); 814 target_ulong cppr = args[0]; 815 816 icp_set_cppr(spapr->icp, cs->cpu_index, cppr); 817 return H_SUCCESS; 818 } 819 820 static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr, 821 target_ulong opcode, target_ulong *args) 822 { 823 
target_ulong server = get_cpu_index_by_dt_id(args[0]); 824 target_ulong mfrr = args[1]; 825 826 if (server >= spapr->icp->nr_servers) { 827 return H_PARAMETER; 828 } 829 830 icp_set_mfrr(spapr->icp, server, mfrr); 831 return H_SUCCESS; 832 } 833 834 static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr, 835 target_ulong opcode, target_ulong *args) 836 { 837 CPUState *cs = CPU(cpu); 838 uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index); 839 840 args[0] = xirr; 841 return H_SUCCESS; 842 } 843 844 static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr, 845 target_ulong opcode, target_ulong *args) 846 { 847 CPUState *cs = CPU(cpu); 848 ICPState *ss = &spapr->icp->ss[cs->cpu_index]; 849 uint32_t xirr = icp_accept(ss); 850 851 args[0] = xirr; 852 args[1] = cpu_get_host_ticks(); 853 return H_SUCCESS; 854 } 855 856 static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr, 857 target_ulong opcode, target_ulong *args) 858 { 859 CPUState *cs = CPU(cpu); 860 target_ulong xirr = args[0]; 861 862 icp_eoi(spapr->icp, cs->cpu_index, xirr); 863 return H_SUCCESS; 864 } 865 866 static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr, 867 target_ulong opcode, target_ulong *args) 868 { 869 CPUState *cs = CPU(cpu); 870 ICPState *ss = &spapr->icp->ss[cs->cpu_index]; 871 872 args[0] = ss->xirr; 873 args[1] = ss->mfrr; 874 875 return H_SUCCESS; 876 } 877 878 static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr, 879 uint32_t token, 880 uint32_t nargs, target_ulong args, 881 uint32_t nret, target_ulong rets) 882 { 883 ICSState *ics = spapr->icp->ics; 884 uint32_t nr, server, priority; 885 886 if ((nargs != 3) || (nret != 1)) { 887 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 888 return; 889 } 890 891 nr = rtas_ld(args, 0); 892 server = get_cpu_index_by_dt_id(rtas_ld(args, 1)); 893 priority = rtas_ld(args, 2); 894 895 if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers) 896 || (priority > 0xff)) { 897 
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 898 return; 899 } 900 901 ics_write_xive(ics, nr, server, priority, priority); 902 903 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 904 } 905 906 static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr, 907 uint32_t token, 908 uint32_t nargs, target_ulong args, 909 uint32_t nret, target_ulong rets) 910 { 911 ICSState *ics = spapr->icp->ics; 912 uint32_t nr; 913 914 if ((nargs != 1) || (nret != 3)) { 915 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 916 return; 917 } 918 919 nr = rtas_ld(args, 0); 920 921 if (!ics_valid_irq(ics, nr)) { 922 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 923 return; 924 } 925 926 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 927 rtas_st(rets, 1, ics->irqs[nr - ics->offset].server); 928 rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority); 929 } 930 931 static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr, 932 uint32_t token, 933 uint32_t nargs, target_ulong args, 934 uint32_t nret, target_ulong rets) 935 { 936 ICSState *ics = spapr->icp->ics; 937 uint32_t nr; 938 939 if ((nargs != 1) || (nret != 1)) { 940 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 941 return; 942 } 943 944 nr = rtas_ld(args, 0); 945 946 if (!ics_valid_irq(ics, nr)) { 947 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 948 return; 949 } 950 951 ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff, 952 ics->irqs[nr - ics->offset].priority); 953 954 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 955 } 956 957 static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr, 958 uint32_t token, 959 uint32_t nargs, target_ulong args, 960 uint32_t nret, target_ulong rets) 961 { 962 ICSState *ics = spapr->icp->ics; 963 uint32_t nr; 964 965 if ((nargs != 1) || (nret != 1)) { 966 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 967 return; 968 } 969 970 nr = rtas_ld(args, 0); 971 972 if (!ics_valid_irq(ics, nr)) { 973 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 974 return; 975 } 976 977 ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 978 ics->irqs[nr 
- ics->offset].saved_priority, 979 ics->irqs[nr - ics->offset].saved_priority); 980 981 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 982 } 983 984 /* 985 * XICS 986 */ 987 988 static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp) 989 { 990 icp->nr_irqs = icp->ics->nr_irqs = nr_irqs; 991 } 992 993 static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers, 994 Error **errp) 995 { 996 int i; 997 998 icp->nr_servers = nr_servers; 999 1000 icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState)); 1001 for (i = 0; i < icp->nr_servers; i++) { 1002 char buffer[32]; 1003 object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP); 1004 snprintf(buffer, sizeof(buffer), "icp[%d]", i); 1005 object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]), 1006 errp); 1007 } 1008 } 1009 1010 static void xics_realize(DeviceState *dev, Error **errp) 1011 { 1012 XICSState *icp = XICS(dev); 1013 Error *error = NULL; 1014 int i; 1015 1016 if (!icp->nr_servers) { 1017 error_setg(errp, "Number of servers needs to be greater 0"); 1018 return; 1019 } 1020 1021 /* Registration of global state belongs into realize */ 1022 spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive); 1023 spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive); 1024 spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off); 1025 spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on); 1026 1027 spapr_register_hypercall(H_CPPR, h_cppr); 1028 spapr_register_hypercall(H_IPI, h_ipi); 1029 spapr_register_hypercall(H_XIRR, h_xirr); 1030 spapr_register_hypercall(H_XIRR_X, h_xirr_x); 1031 spapr_register_hypercall(H_EOI, h_eoi); 1032 spapr_register_hypercall(H_IPOLL, h_ipoll); 1033 1034 object_property_set_bool(OBJECT(icp->ics), true, "realized", &error); 1035 if (error) { 1036 error_propagate(errp, error); 1037 return; 1038 } 1039 1040 for (i = 0; i < icp->nr_servers; i++) { 1041 object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error); 
1042 if (error) { 1043 error_propagate(errp, error); 1044 return; 1045 } 1046 } 1047 } 1048 1049 static void xics_initfn(Object *obj) 1050 { 1051 XICSState *xics = XICS(obj); 1052 1053 xics->ics = ICS(object_new(TYPE_ICS)); 1054 object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL); 1055 xics->ics->icp = xics; 1056 } 1057 1058 static void xics_class_init(ObjectClass *oc, void *data) 1059 { 1060 DeviceClass *dc = DEVICE_CLASS(oc); 1061 XICSStateClass *xsc = XICS_CLASS(oc); 1062 1063 dc->realize = xics_realize; 1064 xsc->set_nr_irqs = xics_set_nr_irqs; 1065 xsc->set_nr_servers = xics_set_nr_servers; 1066 } 1067 1068 static const TypeInfo xics_info = { 1069 .name = TYPE_XICS, 1070 .parent = TYPE_XICS_COMMON, 1071 .instance_size = sizeof(XICSState), 1072 .class_size = sizeof(XICSStateClass), 1073 .class_init = xics_class_init, 1074 .instance_init = xics_initfn, 1075 }; 1076 1077 static void xics_register_types(void) 1078 { 1079 type_register_static(&xics_common_info); 1080 type_register_static(&xics_info); 1081 type_register_static(&ics_info); 1082 type_register_static(&icp_info); 1083 } 1084 1085 type_init(xics_register_types) 1086