/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"

int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    if (cpu) {
        return cpu->parent_obj.cpu_index;
    }

    return -1;
}

void xics_cpu_destroy(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &icp->ss[cs->cpu_index];

    assert(cs->cpu_index < icp->nr_servers);
    assert(cs == ss->cs);

    ss->output = NULL;
    ss->cs = NULL;
}

void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);

    assert(cs->cpu_index < icp->nr_servers);

    ss->cs = cs;

    if (info->cpu_setup) {
        info->cpu_setup(icp, cpu);
    }

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}

/*
 * XICS Common class - parent for emulated XICS and KVM-XICS
 */
static void xics_common_reset(DeviceState *d)
{
    XICSState *icp = XICS_COMMON(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}
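
/*
 * The number of interrupts and the number of interrupt servers are exposed
 * as the QOM properties "nr_irqs" and "nr_servers".  Both are effectively
 * write-once: the setters below refuse a second write and otherwise
 * delegate to the XICSStateClass hooks so that the emulated and KVM
 * back-ends can size their own state.
 */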

static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_irqs;

    visit_type_int(v, name, &value, errp);
}

static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_irqs) {
        error_setg(errp, "Number of interrupts is already set to %u",
                   icp->nr_irqs);
        return;
    }

    assert(info->set_nr_irqs);
    assert(icp->ics);
    info->set_nr_irqs(icp, value, errp);
}

static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_servers;

    visit_type_int(v, name, &value, errp);
}

static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_servers) {
        error_setg(errp, "Number of servers is already set to %u",
                   icp->nr_servers);
        return;
    }

    assert(info->set_nr_servers);
    info->set_nr_servers(icp, value, errp);
}

static void xics_common_initfn(Object *obj)
{
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}

static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}

static const TypeInfo xics_common_info = {
    .name = TYPE_XICS_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init = xics_common_class_init,
};

/*
 * ICP: Presentation layer
 */

#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}
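
/*
 * CPPR update.  Numerically smaller priority values are more favored: when
 * the CPPR is lowered, a pending interrupt that is no longer favored enough
 * is rejected back to the source controller; when it is raised and nothing
 * is pending, the ICS is asked to resend anything it may be holding.
 */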

void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}

uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

static void icp_dispatch_pre_save(void *opaque)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->pre_save) {
        info->pre_save(ss);
    }
}

static int icp_dispatch_post_load(void *opaque, int version_id)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->post_load) {
        return info->post_load(ss, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

/*
 * ICS: Source layer
 */
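
/*
 * Each source is configured as either message/edge signalled (MSI) or level
 * signalled (LSI) via XICS_FLAGS_IRQ_MSI/XICS_FLAGS_IRQ_LSI, and a priority
 * of 0xff means the source is masked.  The status bits track delivery:
 * ASSERTED and SENT are used for LSIs, REJECTED and MASKED_PENDING for MSIs.
 */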

static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

void ics_write_xive(ICSState *ics, int nr, int server,
                    uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}
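
/*
 * Reject/resend protocol: when the presentation layer cannot accept an
 * interrupt, ics_reject() above flags the source (REJECTED for MSIs, SENT
 * cleared for LSIs), and a later ics_resend() - triggered by a CPPR update,
 * an EOI or migration post-load - re-delivers it through icp_irq().
 */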

static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;
    uint8_t flags[ics->nr_irqs];

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
        ics->irqs[i].flags = flags[i];
    }
}

static int ics_post_load(ICSState *ics, int version_id)
{
    int i;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

static void ics_dispatch_pre_save(void *opaque)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->pre_save) {
        info->pre_save(ics);
    }
}

static int ics_dispatch_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->post_load) {
        return info->post_load(ics, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_dispatch_pre_save,
    .post_load = ics_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static void ics_initfn(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }
    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ICSStateClass *isc = ICS_CLASS(klass);

    dc->realize = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
    isc->post_load = ics_post_load;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
    .instance_init = ics_initfn,
};

/*
 * Exported functions
 */
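
/*
 * Callers refer to interrupts by their global number.  xics_find_source()
 * maps a global number onto a source controller (currently only one), and
 * xics_get_qirq() returns the matching qemu_irq for wiring up to a device.
 */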

int xics_find_source(XICSState *icp, int irq)
{
    int sources = 1;
    int src;

    /* FIXME: implement multiple sources */
    for (src = 0; src < sources; ++src) {
        ICSState *ics = &icp->ics[src];
        if (ics_valid_irq(ics, irq)) {
            return src;
        }
    }

    return -1;
}

qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];
        return ics->qirqs[irq - ics->offset];
    }

    return NULL;
}

void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
}

static void xics_register_types(void)
{
    type_register_static(&xics_common_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)
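
/*
 * Illustrative usage sketch (not part of this file): board code typically
 * claims a source number, declares its trigger type and wires the resulting
 * qemu_irq to a device.  "irqnum" below is a hypothetical global interrupt
 * number and "dev" a hypothetical device.
 *
 *     ics_set_irq_type(icp->ics, irqnum - icp->ics->offset, true);
 *     qdev_connect_gpio_out(dev, 0, xics_get_qirq(icp, irqnum));
 */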