1 /* 2 * QEMU PowerPC sPAPR XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2019, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/error-report.h" 13 #include "qapi/error.h" 14 #include "target/ppc/cpu.h" 15 #include "sysemu/cpus.h" 16 #include "sysemu/kvm.h" 17 #include "sysemu/runstate.h" 18 #include "hw/ppc/spapr.h" 19 #include "hw/ppc/spapr_cpu_core.h" 20 #include "hw/ppc/spapr_xive.h" 21 #include "hw/ppc/xive.h" 22 #include "kvm_ppc.h" 23 24 #include <sys/ioctl.h> 25 26 /* 27 * Helpers for CPU hotplug 28 * 29 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE 30 */ 31 typedef struct KVMEnabledCPU { 32 unsigned long vcpu_id; 33 QLIST_ENTRY(KVMEnabledCPU) node; 34 } KVMEnabledCPU; 35 36 static QLIST_HEAD(, KVMEnabledCPU) 37 kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus); 38 39 static bool kvm_cpu_is_enabled(CPUState *cs) 40 { 41 KVMEnabledCPU *enabled_cpu; 42 unsigned long vcpu_id = kvm_arch_vcpu_id(cs); 43 44 QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) { 45 if (enabled_cpu->vcpu_id == vcpu_id) { 46 return true; 47 } 48 } 49 return false; 50 } 51 52 static void kvm_cpu_enable(CPUState *cs) 53 { 54 KVMEnabledCPU *enabled_cpu; 55 unsigned long vcpu_id = kvm_arch_vcpu_id(cs); 56 57 enabled_cpu = g_malloc(sizeof(*enabled_cpu)); 58 enabled_cpu->vcpu_id = vcpu_id; 59 QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node); 60 } 61 62 static void kvm_cpu_disable_all(void) 63 { 64 KVMEnabledCPU *enabled_cpu, *next; 65 66 QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) { 67 QLIST_REMOVE(enabled_cpu, node); 68 g_free(enabled_cpu); 69 } 70 } 71 72 /* 73 * XIVE Thread Interrupt Management context (KVM) 74 */ 75 76 int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp) 77 { 78 SpaprXive *xive = SPAPR_XIVE(tctx->xptr); 79 uint64_t state[2]; 80 int ret; 81 82 assert(xive->fd != -1); 83 84 /* word0 and word1 of the OS ring. */ 85 state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]); 86 87 ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state); 88 if (ret != 0) { 89 error_setg_errno(errp, -ret, 90 "XIVE: could not restore KVM state of CPU %ld", 91 kvm_arch_vcpu_id(tctx->cs)); 92 return ret; 93 } 94 95 return 0; 96 } 97 98 int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp) 99 { 100 SpaprXive *xive = SPAPR_XIVE(tctx->xptr); 101 uint64_t state[2] = { 0 }; 102 int ret; 103 104 assert(xive->fd != -1); 105 106 ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state); 107 if (ret != 0) { 108 error_setg_errno(errp, -ret, 109 "XIVE: could not capture KVM state of CPU %ld", 110 kvm_arch_vcpu_id(tctx->cs)); 111 return ret; 112 } 113 114 /* word0 and word1 of the OS ring. */ 115 *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0]; 116 117 return 0; 118 } 119 120 typedef struct { 121 XiveTCTX *tctx; 122 Error **errp; 123 int ret; 124 } XiveCpuGetState; 125 126 static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu, 127 run_on_cpu_data arg) 128 { 129 XiveCpuGetState *s = arg.host_ptr; 130 131 s->ret = kvmppc_xive_cpu_get_state(s->tctx, s->errp); 132 } 133 134 int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp) 135 { 136 XiveCpuGetState s = { 137 .tctx = tctx, 138 .errp = errp, 139 }; 140 141 /* 142 * Kick the vCPU to make sure they are available for the KVM ioctl. 143 */ 144 run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state, 145 RUN_ON_CPU_HOST_PTR(&s)); 146 147 return s.ret; 148 } 149 150 int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp) 151 { 152 ERRP_GUARD(); 153 SpaprXive *xive = SPAPR_XIVE(tctx->xptr); 154 unsigned long vcpu_id; 155 int ret; 156 157 assert(xive->fd != -1); 158 159 /* Check if CPU was hot unplugged and replugged. */ 160 if (kvm_cpu_is_enabled(tctx->cs)) { 161 return 0; 162 } 163 164 vcpu_id = kvm_arch_vcpu_id(tctx->cs); 165 166 ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd, 167 vcpu_id, 0); 168 if (ret < 0) { 169 error_setg_errno(errp, -ret, 170 "XIVE: unable to connect CPU%ld to KVM device", 171 vcpu_id); 172 if (ret == -ENOSPC) { 173 error_append_hint(errp, "Try -smp maxcpus=N with N < %u\n", 174 MACHINE(qdev_get_machine())->smp.max_cpus); 175 } 176 return ret; 177 } 178 179 kvm_cpu_enable(tctx->cs); 180 return 0; 181 } 182 183 /* 184 * XIVE Interrupt Source (KVM) 185 */ 186 187 int kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas, 188 Error **errp) 189 { 190 uint32_t end_idx; 191 uint32_t end_blk; 192 uint8_t priority; 193 uint32_t server; 194 bool masked; 195 uint32_t eisn; 196 uint64_t kvm_src; 197 198 assert(xive_eas_is_valid(eas)); 199 200 end_idx = xive_get_field64(EAS_END_INDEX, eas->w); 201 end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); 202 eisn = xive_get_field64(EAS_END_DATA, eas->w); 203 masked = xive_eas_is_masked(eas); 204 205 spapr_xive_end_to_target(end_blk, end_idx, &server, &priority); 206 207 kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT & 208 KVM_XIVE_SOURCE_PRIORITY_MASK; 209 kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT & 210 KVM_XIVE_SOURCE_SERVER_MASK; 211 kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) & 212 KVM_XIVE_SOURCE_MASKED_MASK; 213 kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) & 214 KVM_XIVE_SOURCE_EISN_MASK; 215 216 return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn, 217 &kvm_src, true, errp); 218 } 219 220 void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp) 221 { 222 kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn, 223 NULL, true, errp); 224 } 225 226 /* 227 * At reset, the interrupt sources are simply created and MASKED. We 228 * only need to inform the KVM XIVE device about their type: LSI or 229 * MSI. 230 */ 231 int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp) 232 { 233 SpaprXive *xive = SPAPR_XIVE(xsrc->xive); 234 uint64_t state = 0; 235 236 assert(xive->fd != -1); 237 238 if (xive_source_irq_is_lsi(xsrc, srcno)) { 239 state |= KVM_XIVE_LEVEL_SENSITIVE; 240 if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { 241 state |= KVM_XIVE_LEVEL_ASSERTED; 242 } 243 } 244 245 return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state, 246 true, errp); 247 } 248 249 static int kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp) 250 { 251 SpaprXive *xive = SPAPR_XIVE(xsrc->xive); 252 int i; 253 254 for (i = 0; i < xsrc->nr_irqs; i++) { 255 int ret; 256 257 if (!xive_eas_is_valid(&xive->eat[i])) { 258 continue; 259 } 260 261 ret = kvmppc_xive_source_reset_one(xsrc, i, errp); 262 if (ret < 0) { 263 return ret; 264 } 265 } 266 267 return 0; 268 } 269 270 /* 271 * This is used to perform the magic loads on the ESB pages, described 272 * in xive.h. 273 * 274 * Memory barriers should not be needed for loads (no store for now). 275 */ 276 static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, 277 uint64_t data, bool write) 278 { 279 uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) + 280 offset; 281 282 if (write) { 283 *addr = cpu_to_be64(data); 284 return -1; 285 } else { 286 /* Prevent the compiler from optimizing away the load */ 287 volatile uint64_t value = be64_to_cpu(*addr); 288 return value; 289 } 290 } 291 292 static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset) 293 { 294 return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3; 295 } 296 297 static void xive_esb_trigger(XiveSource *xsrc, int srcno) 298 { 299 uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno); 300 301 *addr = 0x0; 302 } 303 304 uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, 305 uint64_t data, bool write) 306 { 307 if (write) { 308 return xive_esb_rw(xsrc, srcno, offset, data, 1); 309 } 310 311 /* 312 * Special Load EOI handling for LSI sources. Q bit is never set 313 * and the interrupt should be re-triggered if the level is still 314 * asserted. 315 */ 316 if (xive_source_irq_is_lsi(xsrc, srcno) && 317 offset == XIVE_ESB_LOAD_EOI) { 318 xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00); 319 if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { 320 xive_esb_trigger(xsrc, srcno); 321 } 322 return 0; 323 } else { 324 return xive_esb_rw(xsrc, srcno, offset, 0, 0); 325 } 326 } 327 328 static void kvmppc_xive_source_get_state(XiveSource *xsrc) 329 { 330 SpaprXive *xive = SPAPR_XIVE(xsrc->xive); 331 int i; 332 333 for (i = 0; i < xsrc->nr_irqs; i++) { 334 uint8_t pq; 335 336 if (!xive_eas_is_valid(&xive->eat[i])) { 337 continue; 338 } 339 340 /* Perform a load without side effect to retrieve the PQ bits */ 341 pq = xive_esb_read(xsrc, i, XIVE_ESB_GET); 342 343 /* and save PQ locally */ 344 xive_source_esb_set(xsrc, i, pq); 345 } 346 } 347 348 void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val) 349 { 350 XiveSource *xsrc = opaque; 351 352 if (!xive_source_irq_is_lsi(xsrc, srcno)) { 353 if (!val) { 354 return; 355 } 356 } else { 357 if (val) { 358 xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; 359 } else { 360 xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; 361 } 362 } 363 364 xive_esb_trigger(xsrc, srcno); 365 } 366 367 /* 368 * sPAPR XIVE interrupt controller (KVM) 369 */ 370 int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk, 371 uint32_t end_idx, XiveEND *end, 372 Error **errp) 373 { 374 struct kvm_ppc_xive_eq kvm_eq = { 0 }; 375 uint64_t kvm_eq_idx; 376 uint8_t priority; 377 uint32_t server; 378 int ret; 379 380 assert(xive_end_is_valid(end)); 381 382 /* Encode the tuple (server, prio) as a KVM EQ index */ 383 spapr_xive_end_to_target(end_blk, end_idx, &server, &priority); 384 385 kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT & 386 KVM_XIVE_EQ_PRIORITY_MASK; 387 kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT & 388 KVM_XIVE_EQ_SERVER_MASK; 389 390 ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, 391 &kvm_eq, false, errp); 392 if (ret < 0) { 393 return ret; 394 } 395 396 /* 397 * The EQ index and toggle bit are updated by HW. These are the 398 * only fields from KVM we want to update QEMU with. The other END 399 * fields should already be in the QEMU END table. 400 */ 401 end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) | 402 xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex); 403 404 return 0; 405 } 406 407 int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk, 408 uint32_t end_idx, XiveEND *end, 409 Error **errp) 410 { 411 struct kvm_ppc_xive_eq kvm_eq = { 0 }; 412 uint64_t kvm_eq_idx; 413 uint8_t priority; 414 uint32_t server; 415 416 /* 417 * Build the KVM state from the local END structure. 418 */ 419 420 kvm_eq.flags = 0; 421 if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) { 422 kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY; 423 } 424 425 /* 426 * If the hcall is disabling the EQ, set the size and page address 427 * to zero. When migrating, only valid ENDs are taken into 428 * account. 429 */ 430 if (xive_end_is_valid(end)) { 431 kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12; 432 kvm_eq.qaddr = xive_end_qaddr(end); 433 /* 434 * The EQ toggle bit and index should only be relevant when 435 * restoring the EQ state 436 */ 437 kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1); 438 kvm_eq.qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); 439 } else { 440 kvm_eq.qshift = 0; 441 kvm_eq.qaddr = 0; 442 } 443 444 /* Encode the tuple (server, prio) as a KVM EQ index */ 445 spapr_xive_end_to_target(end_blk, end_idx, &server, &priority); 446 447 kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT & 448 KVM_XIVE_EQ_PRIORITY_MASK; 449 kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT & 450 KVM_XIVE_EQ_SERVER_MASK; 451 452 return 453 kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, 454 &kvm_eq, true, errp); 455 } 456 457 void kvmppc_xive_reset(SpaprXive *xive, Error **errp) 458 { 459 kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET, 460 NULL, true, errp); 461 } 462 463 static int kvmppc_xive_get_queues(SpaprXive *xive, Error **errp) 464 { 465 int i; 466 int ret; 467 468 for (i = 0; i < xive->nr_ends; i++) { 469 if (!xive_end_is_valid(&xive->endt[i])) { 470 continue; 471 } 472 473 ret = kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i, 474 &xive->endt[i], errp); 475 if (ret < 0) { 476 return ret; 477 } 478 } 479 480 return 0; 481 } 482 483 /* 484 * The primary goal of the XIVE VM change handler is to mark the EQ 485 * pages dirty when all XIVE event notifications have stopped. 486 * 487 * Whenever the VM is stopped, the VM change handler sets the source 488 * PQs to PENDING to stop the flow of events and to possibly catch a 489 * triggered interrupt occuring while the VM is stopped. The previous 490 * state is saved in anticipation of a migration. The XIVE controller 491 * is then synced through KVM to flush any in-flight event 492 * notification and stabilize the EQs. 493 * 494 * At this stage, we can mark the EQ page dirty and let a migration 495 * sequence transfer the EQ pages to the destination, which is done 496 * just after the stop state. 497 * 498 * The previous configuration of the sources is restored when the VM 499 * runs again. If an interrupt was queued while the VM was stopped, 500 * simply generate a trigger. 501 */ 502 static void kvmppc_xive_change_state_handler(void *opaque, int running, 503 RunState state) 504 { 505 SpaprXive *xive = opaque; 506 XiveSource *xsrc = &xive->source; 507 Error *local_err = NULL; 508 int i; 509 510 /* 511 * Restore the sources to their initial state. This is called when 512 * the VM resumes after a stop or a migration. 513 */ 514 if (running) { 515 for (i = 0; i < xsrc->nr_irqs; i++) { 516 uint8_t pq; 517 uint8_t old_pq; 518 519 if (!xive_eas_is_valid(&xive->eat[i])) { 520 continue; 521 } 522 523 pq = xive_source_esb_get(xsrc, i); 524 old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8)); 525 526 /* 527 * An interrupt was queued while the VM was stopped, 528 * generate a trigger. 529 */ 530 if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) { 531 xive_esb_trigger(xsrc, i); 532 } 533 } 534 535 return; 536 } 537 538 /* 539 * Mask the sources, to stop the flow of event notifications, and 540 * save the PQs locally in the XiveSource object. The XiveSource 541 * state will be collected later on by its vmstate handler if a 542 * migration is in progress. 543 */ 544 for (i = 0; i < xsrc->nr_irqs; i++) { 545 uint8_t pq; 546 547 if (!xive_eas_is_valid(&xive->eat[i])) { 548 continue; 549 } 550 551 pq = xive_esb_read(xsrc, i, XIVE_ESB_GET); 552 553 /* 554 * PQ is set to PENDING to possibly catch a triggered 555 * interrupt occuring while the VM is stopped (hotplug event 556 * for instance) . 557 */ 558 if (pq != XIVE_ESB_OFF) { 559 pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10); 560 } 561 xive_source_esb_set(xsrc, i, pq); 562 } 563 564 /* 565 * Sync the XIVE controller in KVM, to flush in-flight event 566 * notification that should be enqueued in the EQs and mark the 567 * XIVE EQ pages dirty to collect all updates. 568 */ 569 kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, 570 KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err); 571 if (local_err) { 572 error_report_err(local_err); 573 return; 574 } 575 } 576 577 void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp) 578 { 579 assert(xive->fd != -1); 580 581 /* 582 * When the VM is stopped, the sources are masked and the previous 583 * state is saved in anticipation of a migration. We should not 584 * synchronize the source state in that case else we will override 585 * the saved state. 586 */ 587 if (runstate_is_running()) { 588 kvmppc_xive_source_get_state(&xive->source); 589 } 590 591 /* EAT: there is no extra state to query from KVM */ 592 593 /* ENDT */ 594 kvmppc_xive_get_queues(xive, errp); 595 } 596 597 /* 598 * The SpaprXive 'pre_save' method is called by the vmstate handler of 599 * the SpaprXive model, after the XIVE controller is synced in the VM 600 * change handler. 601 */ 602 int kvmppc_xive_pre_save(SpaprXive *xive) 603 { 604 Error *local_err = NULL; 605 int ret; 606 607 assert(xive->fd != -1); 608 609 /* EAT: there is no extra state to query from KVM */ 610 611 /* ENDT */ 612 ret = kvmppc_xive_get_queues(xive, &local_err); 613 if (ret < 0) { 614 error_report_err(local_err); 615 return ret; 616 } 617 618 return 0; 619 } 620 621 /* 622 * The SpaprXive 'post_load' method is not called by a vmstate 623 * handler. It is called at the sPAPR machine level at the end of the 624 * migration sequence by the sPAPR IRQ backend 'post_load' method, 625 * when all XIVE states have been transferred and loaded. 626 */ 627 int kvmppc_xive_post_load(SpaprXive *xive, int version_id) 628 { 629 Error *local_err = NULL; 630 CPUState *cs; 631 int i; 632 int ret; 633 634 /* The KVM XIVE device should be in use */ 635 assert(xive->fd != -1); 636 637 /* Restore the ENDT first. The targetting depends on it. */ 638 for (i = 0; i < xive->nr_ends; i++) { 639 if (!xive_end_is_valid(&xive->endt[i])) { 640 continue; 641 } 642 643 ret = kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i, 644 &xive->endt[i], &local_err); 645 if (ret < 0) { 646 goto fail; 647 } 648 } 649 650 /* Restore the EAT */ 651 for (i = 0; i < xive->nr_irqs; i++) { 652 if (!xive_eas_is_valid(&xive->eat[i])) { 653 continue; 654 } 655 656 /* 657 * We can only restore the source config if the source has been 658 * previously set in KVM. Since we don't do that for all interrupts 659 * at reset time anymore, let's do it now. 660 */ 661 ret = kvmppc_xive_source_reset_one(&xive->source, i, &local_err); 662 if (ret < 0) { 663 goto fail; 664 } 665 666 ret = kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err); 667 if (ret < 0) { 668 goto fail; 669 } 670 } 671 672 /* 673 * Restore the thread interrupt contexts of initial CPUs. 674 * 675 * The context of hotplugged CPUs is restored later, by the 676 * 'post_load' handler of the XiveTCTX model because they are not 677 * available at the time the SpaprXive 'post_load' method is 678 * called. We can not restore the context of all CPUs in the 679 * 'post_load' handler of XiveTCTX because the machine is not 680 * necessarily connected to the KVM device at that time. 681 */ 682 CPU_FOREACH(cs) { 683 PowerPCCPU *cpu = POWERPC_CPU(cs); 684 685 ret = kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err); 686 if (ret < 0) { 687 goto fail; 688 } 689 } 690 691 /* The source states will be restored when the machine starts running */ 692 return 0; 693 694 fail: 695 error_report_err(local_err); 696 return ret; 697 } 698 699 /* Returns MAP_FAILED on error and sets errno */ 700 static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len, 701 Error **errp) 702 { 703 void *addr; 704 uint32_t page_shift = 16; /* TODO: fix page_shift */ 705 706 addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd, 707 pgoff << page_shift); 708 if (addr == MAP_FAILED) { 709 error_setg_errno(errp, errno, "XIVE: unable to set memory mapping"); 710 } 711 712 return addr; 713 } 714 715 /* 716 * All the XIVE memory regions are now backed by mappings from the KVM 717 * XIVE device. 718 */ 719 int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, 720 Error **errp) 721 { 722 SpaprXive *xive = SPAPR_XIVE(intc); 723 XiveSource *xsrc = &xive->source; 724 size_t esb_len = xive_source_esb_len(xsrc); 725 size_t tima_len = 4ull << TM_SHIFT; 726 CPUState *cs; 727 int fd; 728 void *addr; 729 int ret; 730 731 /* 732 * The KVM XIVE device already in use. This is the case when 733 * rebooting under the XIVE-only interrupt mode. 734 */ 735 if (xive->fd != -1) { 736 return 0; 737 } 738 739 if (!kvmppc_has_cap_xive()) { 740 error_setg(errp, "IRQ_XIVE capability must be present for KVM"); 741 return -1; 742 } 743 744 /* First, create the KVM XIVE device */ 745 fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false); 746 if (fd < 0) { 747 error_setg_errno(errp, -fd, "XIVE: error creating KVM device"); 748 return -1; 749 } 750 xive->fd = fd; 751 752 /* Tell KVM about the # of VCPUs we may have */ 753 if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL, 754 KVM_DEV_XIVE_NR_SERVERS)) { 755 ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, 756 KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true, 757 errp); 758 if (ret < 0) { 759 goto fail; 760 } 761 } 762 763 /* 764 * 1. Source ESB pages - KVM mapping 765 */ 766 addr = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, errp); 767 if (addr == MAP_FAILED) { 768 goto fail; 769 } 770 xsrc->esb_mmap = addr; 771 772 memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc), 773 "xive.esb-kvm", esb_len, xsrc->esb_mmap); 774 memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0, 775 &xsrc->esb_mmio_kvm, 1); 776 777 /* 778 * 2. END ESB pages (No KVM support yet) 779 */ 780 781 /* 782 * 3. TIMA pages - KVM mapping 783 */ 784 addr = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, errp); 785 if (addr == MAP_FAILED) { 786 goto fail; 787 } 788 xive->tm_mmap = addr; 789 790 memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive), 791 "xive.tima", tima_len, xive->tm_mmap); 792 memory_region_add_subregion_overlap(&xive->tm_mmio, 0, 793 &xive->tm_mmio_kvm, 1); 794 795 xive->change = qemu_add_vm_change_state_handler( 796 kvmppc_xive_change_state_handler, xive); 797 798 /* Connect the presenters to the initial VCPUs of the machine */ 799 CPU_FOREACH(cs) { 800 PowerPCCPU *cpu = POWERPC_CPU(cs); 801 802 ret = kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, errp); 803 if (ret < 0) { 804 goto fail; 805 } 806 } 807 808 /* Update the KVM sources */ 809 ret = kvmppc_xive_source_reset(xsrc, errp); 810 if (ret < 0) { 811 goto fail; 812 } 813 814 kvm_kernel_irqchip = true; 815 kvm_msi_via_irqfd_allowed = true; 816 kvm_gsi_direct_mapping = true; 817 return 0; 818 819 fail: 820 kvmppc_xive_disconnect(intc); 821 return -1; 822 } 823 824 void kvmppc_xive_disconnect(SpaprInterruptController *intc) 825 { 826 SpaprXive *xive = SPAPR_XIVE(intc); 827 XiveSource *xsrc; 828 size_t esb_len; 829 830 assert(xive->fd != -1); 831 832 /* Clear the KVM mapping */ 833 xsrc = &xive->source; 834 esb_len = xive_source_esb_len(xsrc); 835 836 if (xsrc->esb_mmap) { 837 memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm); 838 object_unparent(OBJECT(&xsrc->esb_mmio_kvm)); 839 munmap(xsrc->esb_mmap, esb_len); 840 xsrc->esb_mmap = NULL; 841 } 842 843 if (xive->tm_mmap) { 844 memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm); 845 object_unparent(OBJECT(&xive->tm_mmio_kvm)); 846 munmap(xive->tm_mmap, 4ull << TM_SHIFT); 847 xive->tm_mmap = NULL; 848 } 849 850 /* 851 * When the KVM device fd is closed, the KVM device is destroyed 852 * and removed from the list of devices of the VM. The VCPU 853 * presenters are also detached from the device. 854 */ 855 close(xive->fd); 856 xive->fd = -1; 857 858 kvm_kernel_irqchip = false; 859 kvm_msi_via_irqfd_allowed = false; 860 kvm_gsi_direct_mapping = false; 861 862 /* Clear the local list of presenter (hotplug) */ 863 kvm_cpu_disable_all(); 864 865 /* VM Change state handler is not needed anymore */ 866 if (xive->change) { 867 qemu_del_vm_change_state_handler(xive->change); 868 xive->change = NULL; 869 } 870 } 871