/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"

/*
 * Translate a device-tree CPU id to QEMU's internal cpu_index.
 * Returns -1 if no vCPU with that device-tree id exists.
 */
int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    if (cpu) {
        return cpu->parent_obj.cpu_index;
    }

    return -1;
}

/*
 * Detach a vCPU from its ICP (presentation controller) server slot,
 * undoing xics_cpu_setup().
 */
void xics_cpu_destroy(XICSState *xics, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &xics->ss[cs->cpu_index];

    assert(cs->cpu_index < xics->nr_servers);
    assert(cs == ss->cs);

    ss->output = NULL;
    ss->cs = NULL;
}

/*
 * Bind a vCPU to its ICP server slot (indexed by cpu_index) and wire the
 * ICP output to the CPU's external interrupt input pin.  Aborts if the CPU
 * bus model is not one XICS knows how to drive.
 */
void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &xics->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);

    assert(cs->cpu_index < xics->nr_servers);

    ss->cs = cs;

    /* Give the concrete backend (emulated or KVM) a chance to hook up */
    if (info->cpu_setup) {
        info->cpu_setup(xics, cpu);
    }

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}

/*
 * XICS Common class - parent for emulated XICS and KVM-XICS
 */

/* Reset every per-server ICP and every registered ICS source block. */
static void xics_common_reset(DeviceState *d)
{
    XICSState *xics = XICS_COMMON(d);
    ICSState *ics;
    int i;

    for (i = 0; i < xics->nr_servers; i++) {
        device_reset(DEVICE(&xics->ss[i]));
    }

    QLIST_FOREACH(ics, &xics->ics, list) {
        device_reset(DEVICE(ics));
    }
}

/* QOM property getter for "nr_irqs". */
static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *xics = XICS_COMMON(obj);
    int64_t value = xics->nr_irqs;

    visit_type_int(v, name, &value, errp);
}

/*
 * QOM property setter for "nr_irqs".  Write-once: rejects the write if a
 * non-zero value has already been set, then delegates to the backend's
 * set_nr_irqs hook.
 */
static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *xics = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (xics->nr_irqs) {
        error_setg(errp, "Number of interrupts is already set to %u",
                   xics->nr_irqs);
        return;
    }

    assert(info->set_nr_irqs);
    info->set_nr_irqs(xics, value, errp);
}

/* QOM property getter for "nr_servers". */
static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *xics = XICS_COMMON(obj);
    int64_t value = xics->nr_servers;

    visit_type_int(v, name, &value, errp);
}

/* QOM property setter for "nr_servers".  Write-once, like nr_irqs above. */
static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *xics = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (xics->nr_servers) {
        error_setg(errp, "Number of servers is already set to %u",
                   xics->nr_servers);
        return;
    }

    assert(info->set_nr_servers);
    info->set_nr_servers(xics, value, errp);
}

static void xics_common_initfn(Object *obj)
{
    XICSState *xics = XICS_COMMON(obj);

    QLIST_INIT(&xics->ics);
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}

static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}

static const TypeInfo xics_common_info = {
    .name          = TYPE_XICS_COMMON,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size    = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init    = xics_common_class_init,
};

/*
 * ICP: Presentation layer
 *
 * The XIRR register packs the pending source number (XISR, low 24 bits)
 * and the current processor priority (CPPR, top 8 bits).  Lower priority
 * values are more favored.
 */

#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

/*
 * Consider raising an IPI on this server.  If an external interrupt is
 * already pending at a priority at least as favored as MFRR, do nothing;
 * otherwise reject the pending source (if any) back to its ICS and make
 * the IPI the pending interrupt.
 */
static void icp_check_ipi(ICPState *ss)
{
    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(ss->cs->cpu_index, ss->mfrr);

    if (XISR(ss) && ss->xirr_owner) {
        ics_reject(ss->xirr_owner, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    ss->xirr_owner = NULL;
    qemu_irq_raise(ss->output);
}

/*
 * Re-deliver any interrupts that were previously rejected or held back:
 * first a possible IPI (if MFRR beats CPPR), then ask every source
 * controller to resend.
 */
static void icp_resend(XICSState *xics, int server)
{
    ICPState *ss = xics->ss + server;
    ICSState *ics;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(ss);
    }
    QLIST_FOREACH(ics, &xics->ics, list) {
        ics_resend(ics);
    }
}

/*
 * Set the Current Processor Priority Register.  Raising the priority
 * (numerically lowering CPPR) may mask the currently pending interrupt,
 * in which case it is rejected back to its source; lowering the priority
 * may allow previously rejected interrupts through, so trigger a resend.
 */
void icp_set_cppr(XICSState *xics, int server, uint8_t cppr)
{
    ICPState *ss = xics->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        /* New CPPR masks the pending interrupt: withdraw and reject it */
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            if (ss->xirr_owner) {
                ics_reject(ss->xirr_owner, old_xisr);
                ss->xirr_owner = NULL;
            }
        }
    } else {
        /* CPPR was lowered: something rejected earlier may now deliver */
        if (!XISR(ss)) {
            icp_resend(xics, server);
        }
    }
}

/*
 * Set the MFRR (inter-processor interrupt request register); an IPI fires
 * if the new value is more favored than the current processor priority.
 */
void icp_set_mfrr(XICSState *xics, int server, uint8_t mfrr)
{
    ICPState *ss = xics->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(ss);
    }
}

/*
 * XIRR read with side effects: accept the pending interrupt.  CPPR is
 * raised to the accepted interrupt's priority and XISR is cleared; the
 * pre-accept XIRR value is returned to the guest.
 */
uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;
    ss->xirr_owner = NULL;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

/* Side-effect-free poll of XIRR (and optionally MFRR). */
uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr)
{
    if (mfrr) {
        *mfrr = ss->mfrr;
    }
    return ss->xirr;
}

/*
 * End-of-interrupt: restore CPPR from the written XIRR value, notify the
 * owning source controller, then resend anything that was held back while
 * the interrupt was being serviced.
 */
void icp_eoi(XICSState *xics, int server, uint32_t xirr)
{
    ICPState *ss = xics->ss + server;
    ICSState *ics;
    uint32_t irq;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    irq = xirr & XISR_MASK;
    QLIST_FOREACH(ics, &xics->ics, list) {
        if (ics_valid_irq(ics, irq)) {
            ics_eoi(ics, irq);
        }
    }
    if (!XISR(ss)) {
        icp_resend(xics, server);
    }
}

/*
 * Deliver interrupt 'nr' from a source controller to a server's ICP.
 * If the server's priority (or an already-pending, more favored
 * interrupt) masks it, reject it back to the ICS; otherwise make it the
 * pending interrupt, displacing (and rejecting) any previous one.
 */
static void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
{
    XICSState *xics = ics->xics;
    ICPState *ss = xics->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(ics, nr);
    } else {
        if (XISR(ss) && ss->xirr_owner) {
            ics_reject(ss->xirr_owner, XISR(ss));
            ss->xirr_owner = NULL;
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->xirr_owner = ics;
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

/* Migration pre-save hook dispatch to the concrete ICP backend. */
static void icp_dispatch_pre_save(void *opaque)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->pre_save) {
        info->pre_save(ss);
    }
}

/* Migration post-load hook dispatch to the concrete ICP backend. */
static int icp_dispatch_post_load(void *opaque, int version_id)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->post_load) {
        return info->post_load(ss, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

/*
 * ICS: Source layer
 */

/* Re-deliver an MSI that was previously rejected by the ICP. */
static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

/*
 * Re-deliver a level-triggered interrupt: send it if it is unmasked,
 * still asserted, and not already in flight (SENT).
 */
static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
    }
}

/*
 * Edge-triggered (MSI) input: a masked interrupt is latched as
 * MASKED_PENDING for delivery when later unmasked; otherwise deliver now.
 */
static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

/* Level-triggered (LSI) input: track the line level, then try to send. */
static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

/* qemu_irq handler for this source block; dispatches on LSI vs MSI. */
static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

/*
 * XIVE update for an MSI: if the source had a latched masked-pending
 * event and is now unmasked, deliver it.
 */
static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}

/* XIVE update for an LSI: just re-evaluate delivery. */
static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}
void ics_write_xive(ICSState *ics, int nr, int server, 505 uint8_t priority, uint8_t saved_priority) 506 { 507 int srcno = nr - ics->offset; 508 ICSIRQState *irq = ics->irqs + srcno; 509 510 irq->server = server; 511 irq->priority = priority; 512 irq->saved_priority = saved_priority; 513 514 trace_xics_ics_write_xive(nr, srcno, server, priority); 515 516 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { 517 write_xive_lsi(ics, srcno); 518 } else { 519 write_xive_msi(ics, srcno); 520 } 521 } 522 523 static void ics_reject(ICSState *ics, int nr) 524 { 525 ICSIRQState *irq = ics->irqs + nr - ics->offset; 526 527 trace_xics_ics_reject(nr, nr - ics->offset); 528 if (irq->flags & XICS_FLAGS_IRQ_MSI) { 529 irq->status |= XICS_STATUS_REJECTED; 530 } else if (irq->flags & XICS_FLAGS_IRQ_LSI) { 531 irq->status &= ~XICS_STATUS_SENT; 532 } 533 } 534 535 static void ics_resend(ICSState *ics) 536 { 537 int i; 538 539 for (i = 0; i < ics->nr_irqs; i++) { 540 /* FIXME: filter by server#? */ 541 if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) { 542 resend_lsi(ics, i); 543 } else { 544 resend_msi(ics, i); 545 } 546 } 547 } 548 549 static void ics_eoi(ICSState *ics, int nr) 550 { 551 int srcno = nr - ics->offset; 552 ICSIRQState *irq = ics->irqs + srcno; 553 554 trace_xics_ics_eoi(nr); 555 556 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { 557 irq->status &= ~XICS_STATUS_SENT; 558 } 559 } 560 561 static void ics_reset(DeviceState *dev) 562 { 563 ICSState *ics = ICS(dev); 564 int i; 565 uint8_t flags[ics->nr_irqs]; 566 567 for (i = 0; i < ics->nr_irqs; i++) { 568 flags[i] = ics->irqs[i].flags; 569 } 570 571 memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs); 572 573 for (i = 0; i < ics->nr_irqs; i++) { 574 ics->irqs[i].priority = 0xff; 575 ics->irqs[i].saved_priority = 0xff; 576 ics->irqs[i].flags = flags[i]; 577 } 578 } 579 580 static int ics_post_load(ICSState *ics, int version_id) 581 { 582 int i; 583 584 for (i = 0; i < ics->xics->nr_servers; i++) { 585 
icp_resend(ics->xics, i); 586 } 587 588 return 0; 589 } 590 591 static void ics_dispatch_pre_save(void *opaque) 592 { 593 ICSState *ics = opaque; 594 ICSStateClass *info = ICS_GET_CLASS(ics); 595 596 if (info->pre_save) { 597 info->pre_save(ics); 598 } 599 } 600 601 static int ics_dispatch_post_load(void *opaque, int version_id) 602 { 603 ICSState *ics = opaque; 604 ICSStateClass *info = ICS_GET_CLASS(ics); 605 606 if (info->post_load) { 607 return info->post_load(ics, version_id); 608 } 609 610 return 0; 611 } 612 613 static const VMStateDescription vmstate_ics_irq = { 614 .name = "ics/irq", 615 .version_id = 2, 616 .minimum_version_id = 1, 617 .fields = (VMStateField[]) { 618 VMSTATE_UINT32(server, ICSIRQState), 619 VMSTATE_UINT8(priority, ICSIRQState), 620 VMSTATE_UINT8(saved_priority, ICSIRQState), 621 VMSTATE_UINT8(status, ICSIRQState), 622 VMSTATE_UINT8(flags, ICSIRQState), 623 VMSTATE_END_OF_LIST() 624 }, 625 }; 626 627 static const VMStateDescription vmstate_ics = { 628 .name = "ics", 629 .version_id = 1, 630 .minimum_version_id = 1, 631 .pre_save = ics_dispatch_pre_save, 632 .post_load = ics_dispatch_post_load, 633 .fields = (VMStateField[]) { 634 /* Sanity check */ 635 VMSTATE_UINT32_EQUAL(nr_irqs, ICSState), 636 637 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs, 638 vmstate_ics_irq, ICSIRQState), 639 VMSTATE_END_OF_LIST() 640 }, 641 }; 642 643 static void ics_initfn(Object *obj) 644 { 645 ICSState *ics = ICS(obj); 646 647 ics->offset = XICS_IRQ_BASE; 648 } 649 650 static void ics_realize(DeviceState *dev, Error **errp) 651 { 652 ICSState *ics = ICS(dev); 653 654 if (!ics->nr_irqs) { 655 error_setg(errp, "Number of interrupts needs to be greater 0"); 656 return; 657 } 658 ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState)); 659 ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs); 660 } 661 662 static void ics_class_init(ObjectClass *klass, void *data) 663 { 664 DeviceClass *dc = DEVICE_CLASS(klass); 665 ICSStateClass *isc 
= ICS_CLASS(klass); 666 667 dc->realize = ics_realize; 668 dc->vmsd = &vmstate_ics; 669 dc->reset = ics_reset; 670 isc->post_load = ics_post_load; 671 } 672 673 static const TypeInfo ics_info = { 674 .name = TYPE_ICS, 675 .parent = TYPE_DEVICE, 676 .instance_size = sizeof(ICSState), 677 .class_init = ics_class_init, 678 .class_size = sizeof(ICSStateClass), 679 .instance_init = ics_initfn, 680 }; 681 682 /* 683 * Exported functions 684 */ 685 ICSState *xics_find_source(XICSState *xics, int irq) 686 { 687 ICSState *ics; 688 689 QLIST_FOREACH(ics, &xics->ics, list) { 690 if (ics_valid_irq(ics, irq)) { 691 return ics; 692 } 693 } 694 return NULL; 695 } 696 697 qemu_irq xics_get_qirq(XICSState *xics, int irq) 698 { 699 ICSState *ics = xics_find_source(xics, irq); 700 701 if (ics) { 702 return ics->qirqs[irq - ics->offset]; 703 } 704 705 return NULL; 706 } 707 708 void ics_set_irq_type(ICSState *ics, int srcno, bool lsi) 709 { 710 assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK)); 711 712 ics->irqs[srcno].flags |= 713 lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI; 714 } 715 716 static void xics_register_types(void) 717 { 718 type_register_static(&xics_common_info); 719 type_register_static(&ics_info); 720 type_register_static(&icp_info); 721 } 722 723 type_init(xics_register_types) 724