/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "hw/hw.h"
#include "trace.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"

/*
 * Wire a CPU's external interrupt input up to its per-server ICP state.
 * The ICP entry is selected by cpu_index, which must be below the
 * nr_servers the XICS was created with.
 */
void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];

    assert(cs->cpu_index < icp->nr_servers);

    /* Pick the interrupt input line matching the CPU bus model */
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        fprintf(stderr, "XICS interrupt controller does not support this CPU "
                "bus model\n");
        abort();
    }
}

/* Reset every per-server presentation controller, then the source layer */
static void xics_reset(DeviceState *d)
{
    XICSState *icp = XICS(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}

/*
 * ICP: Presentation layer
 */

/* XIRR layout: top byte is the CPPR, low 24 bits are the XISR */
#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

/*
 * Present the IPI (at priority ss->mfrr) to a server, displacing any
 * pending interrupt that is less favored.  Note that a numerically
 * lower priority value is more favored.
 */
static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    /* Pending interrupt is at least as favored as the IPI: nothing to do */
    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    /* Bounce the displaced source interrupt back to the ICS */
    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

/* Give deferred interrupts (IPI and source-layer) another chance to present */
static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}

/*
 * Update a server's Current Processor Priority Register.  Moving CPPR
 * to a more favored (lower) value may force rejection of the pending
 * interrupt; moving it the other way may allow delivery of previously
 * rejected ones.
 */
static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        /* New CPPR is more favored: a pending interrupt that no longer
         * qualifies must be handed back to the source layer */
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        /* New CPPR is less favored: retry anything previously held back */
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

/* Update a server's Most Favored Request Register (the IPI priority) */
static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}

/*
 * Accept the pending interrupt: returns the old XIRR value while
 * raising CPPR to the accepted interrupt's priority, clearing XISR
 * and dropping the output line.
 */
static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

/*
 * End-of-interrupt: restore the CPPR carried in the guest-written XIRR
 * value, notify the source layer, then re-present anything that has
 * become eligible.
 */
static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

/*
 * Deliver source interrupt 'nr' at 'priority' to a server.  It is
 * rejected back to the ICS unless it is more favored than both the
 * CPPR and any already-pending interrupt; otherwise it displaces the
 * pending one, which is itself rejected.
 */
static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

/* Migration state for one per-server presentation controller */
static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

/* Reset one ICP: nothing pending, everything masked, output line low */
static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
};

/*
 * ICS: Source layer
 */

/* True iff global irq number 'nr' falls inside this source's range */
static int ics_valid_irq(ICSState *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}

/* Re-deliver a previously rejected, currently unmasked MSI */
static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

/* Re-deliver an asserted, unmasked LSI that is not already in flight */
static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

/* Edge trigger: a masked (priority 0xff) MSI is latched, else delivered */
static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

/* Level trigger: track the asserted state, then (re)deliver if eligible */
static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

/* qemu_irq handler for the source pins: dispatch on LSI/MSI configuration */
static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->islsi[srcno]) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

/* After a set-xive on an MSI: deliver a latched interrupt once unmasked */
static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

/* After a set-xive on an LSI: just re-run the level delivery logic */
static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}
/*
 * Update the server/priority routing of global irq 'nr' and deliver
 * anything the new setting makes deliverable.  'saved_priority' is
 * what a later ibm,int-on will restore.
 */
static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->islsi[srcno]) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

/* Called by the ICP when it cannot (or can no longer) present 'nr' */
static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}

/* Try to redeliver every source interrupt this ICS has held back */
static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->islsi[i]) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

/* EOI: for an LSI, clear SENT so a still-asserted level can resend */
static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->islsi[srcno]) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

/* Reset all sources to the masked (priority 0xff), idle state */
static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
    }
}

/* After migration, re-present whatever should still be pending */
static int ics_post_load(void *opaque, int version_id)
{
    int i;
    ICSState *ics = opaque;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

/* Migration state for one source interrupt */
static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

/* Migration state for the whole source controller */
static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = ics_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

/* Allocate per-source state; nr_irqs has been set by the XICS parent */
static int ics_realize(DeviceState *dev)
{
    ICSState *ics = ICS(dev);

    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->islsi = g_malloc0(ics->nr_irqs * sizeof(bool));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);

    return 0;
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->init = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
}

static TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
};

/*
 * Exported functions
 */

/* Return the qemu_irq for global irq 'irq', or NULL if out of range */
qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    if (!ics_valid_irq(icp->ics, irq)) {
        return NULL;
    }

    return icp->ics->qirqs[irq - icp->ics->offset];
}

/* Configure whether 'irq' is level- (LSI) or message-signalled (MSI) */
void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
{
    assert(ics_valid_irq(icp->ics, irq));

    icp->ics->islsi[irq - icp->ics->offset] = lsi;
}

/*
 * Guest interfaces
 */

/* H_CPPR hypercall: set the calling CPU's current processor priority */
static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
    return H_SUCCESS;
}

/* H_IPI hypercall: set the MFRR of (i.e. send an IPI to) 'server' */
static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = args[0];
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

/* H_XIRR hypercall: accept the most favored pending interrupt */
static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

/* H_EOI hypercall: end-of-interrupt, restoring CPPR from args[0] */
static target_ulong h_eoi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, cs->cpu_index, xirr);
    return H_SUCCESS;
}

/* ibm,set-xive RTAS call: args are (irq, server, priority) */
static void rtas_set_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, -3); /* -3: RTAS parameter error status */
        return;
    }

    nr = rtas_ld(args, 0);
    server = rtas_ld(args, 1);
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, -3);
        return;
    }

    /* set-xive overwrites the saved priority as well */
    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, 0); /* Success */
}

/* ibm,get-xive RTAS call: returns (status, server, priority) for an irq */
static void rtas_get_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    rtas_st(rets, 0, 0); /* Success */
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

/* ibm,int-off RTAS call: mask an irq, remembering its current priority */
static void rtas_int_off(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    /* Mask by writing priority 0xff; the old priority becomes 'saved' */
    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, 0); /* Success */
}

/* ibm,int-on RTAS call: unmask an irq by restoring its saved priority */
static void rtas_int_on(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, 0); /* Success */
}

/*
 * XICS
 */

/*
 * Realize the composite XICS device: register the guest-visible RTAS
 * calls and hypercalls, initialize the child ICS, then create and
 * initialize one child ICP per server.
 */
static void xics_realize(DeviceState *dev, Error **errp)
{
    XICSState *icp = XICS(dev);
    ICSState *ics = icp->ics;
    int i;

    /* Registration of global state belongs into realize */
    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
    spapr_rtas_register("ibm,int-off", rtas_int_off);
    spapr_rtas_register("ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_EOI, h_eoi);

    /* Propagate our configuration down into the source controller */
    ics->nr_irqs = icp->nr_irqs;
    ics->offset = XICS_IRQ_BASE;
    ics->icp = icp;
    qdev_init_nofail(DEVICE(ics));

    icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState));
    for (i = 0; i < icp->nr_servers; i++) {
        char buffer[32];
        object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
        snprintf(buffer, sizeof(buffer), "icp[%d]", i);
        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]), NULL);
        qdev_init_nofail(DEVICE(&icp->ss[i]));
    }
}

/* Instance init: create the child ICS object (realized later) */
static void xics_initfn(Object *obj)
{
    XICSState *xics = XICS(obj);

    xics->ics = ICS(object_new(TYPE_ICS));
    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
}

static Property xics_properties[] = {
    DEFINE_PROP_UINT32("nr_servers", XICSState, nr_servers, -1),
    DEFINE_PROP_UINT32("nr_irqs", XICSState, nr_irqs, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void xics_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = xics_realize;
    dc->props = xics_properties;
    dc->reset = xics_reset;
}

static const TypeInfo xics_info = {
    .name = TYPE_XICS,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_init = xics_class_init,
    .instance_init = xics_initfn,
};

static void xics_register_types(void)
{
    type_register_static(&xics_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)