/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "kvm_ppc.h"

#include <sys/ioctl.h>

/*
 * Helpers for CPU hotplug
 *
 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
 */
typedef struct KVMEnabledCPU {
    unsigned long vcpu_id;
    QLIST_ENTRY(KVMEnabledCPU) node;
} KVMEnabledCPU;

static QLIST_HEAD(, KVMEnabledCPU)
    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);

static bool kvm_cpu_is_enabled(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
        if (enabled_cpu->vcpu_id == vcpu_id) {
            return true;
        }
    }
    return false;
}

static void kvm_cpu_enable(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
    enabled_cpu->vcpu_id = vcpu_id;
    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
}

static void kvm_cpu_disable_all(void)
{
    KVMEnabledCPU *enabled_cpu, *next;

    QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
        QLIST_REMOVE(enabled_cpu, node);
        g_free(enabled_cpu);
    }
}

/*
 * XIVE Thread Interrupt Management context (KVM)
 */

void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    uint64_t state[2];
    int ret;

    /* The KVM XIVE device is not in use yet */
    if (xive->fd == -1) {
        return;
    }

    /* word0 and word1 of the OS ring. */
    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);

    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not restore KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
    }
}

void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    uint64_t state[2] = { 0 };
    int ret;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not capture KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return;
    }

    /* word0 and word1 of the OS ring. */
    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];
}

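/*
 * A note on KVM_REG_PPC_VP_STATE: the register is 128 bits wide,
 * which is why a uint64_t state[2] is passed to the one_reg calls
 * above, but only the first 64-bit word is exchanged with QEMU: it
 * mirrors word0 and word1 of the thread's OS ring in the TIMA. The
 * second word is currently unused, as far as the KVM API
 * documentation goes.
 */
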
typedef struct {
    XiveTCTX *tctx;
    Error *err;
} XiveCpuGetState;

static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
                                                 run_on_cpu_data arg)
{
    XiveCpuGetState *s = arg.host_ptr;

    kvmppc_xive_cpu_get_state(s->tctx, &s->err);
}

void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
    XiveCpuGetState s = {
        .tctx = tctx,
        .err = NULL,
    };

    /*
     * Kick the vCPU to make sure it is available for the KVM ioctl.
     */
    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
               RUN_ON_CPU_HOST_PTR(&s));

    if (s.err) {
        error_propagate(errp, s.err);
        return;
    }
}

void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    unsigned long vcpu_id;
    int ret;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    /* Check if CPU was hot unplugged and replugged. */
    if (kvm_cpu_is_enabled(tctx->cs)) {
        return;
    }

    vcpu_id = kvm_arch_vcpu_id(tctx->cs);

    ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
                              vcpu_id, 0);
    if (ret < 0) {
        Error *local_err = NULL;

        error_setg(&local_err,
                   "XIVE: unable to connect CPU%ld to KVM device: %s",
                   vcpu_id, strerror(errno));
        if (errno == ENOSPC) {
            error_append_hint(&local_err, "Try -smp maxcpus=N with N < %u\n",
                              MACHINE(qdev_get_machine())->smp.max_cpus);
        }
        error_propagate(errp, local_err);
        return;
    }

    kvm_cpu_enable(tctx->cs);
}

/*
 * XIVE Interrupt Source (KVM)
 */

void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                   Error **errp)
{
    uint32_t end_idx;
    uint32_t end_blk;
    uint8_t priority;
    uint32_t server;
    bool masked;
    uint32_t eisn;
    uint64_t kvm_src;
    Error *local_err = NULL;

    assert(xive_eas_is_valid(eas));

    end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    eisn = xive_get_field64(EAS_END_DATA, eas->w);
    masked = xive_eas_is_masked(eas);

    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
              KVM_XIVE_SOURCE_PRIORITY_MASK;
    kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
               KVM_XIVE_SOURCE_SERVER_MASK;
    kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
               KVM_XIVE_SOURCE_MASKED_MASK;
    kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
               KVM_XIVE_SOURCE_EISN_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
                      &kvm_src, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
                      NULL, true, errp);
}

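/*
 * For reference, the 64-bit source configuration value built by
 * kvmppc_xive_set_source_config() above has the following layout,
 * assuming the KVM_XIVE_SOURCE_* definitions of the KVM UAPI header
 * at the time of writing:
 *
 *   bits 0..2   : priority
 *   bits 3..31  : server (vCPU id)
 *   bit  32     : masked
 *   bits 33..63 : EISN, the event data pushed in the event queue
 */
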
/*
 * At reset, the interrupt sources are simply created and MASKED. We
 * only need to inform the KVM XIVE device about their type: LSI or
 * MSI.
 */
int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    uint64_t state = 0;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return -ENODEV;
    }

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        state |= KVM_XIVE_LEVEL_SENSITIVE;
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            state |= KVM_XIVE_LEVEL_ASSERTED;
        }
    }

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
                             true, errp);
}

static void kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        Error *local_err = NULL;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        kvmppc_xive_source_reset_one(xsrc, i, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/*
 * This is used to perform the magic loads on the ESB pages, described
 * in xive.h.
 *
 * Memory barriers should not be needed for loads (no store for now).
 */
static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
        offset;

    if (write) {
        *addr = cpu_to_be64(data);
        return -1;
    } else {
        /* Prevent the compiler from optimizing away the load */
        volatile uint64_t value = be64_to_cpu(*addr);
        return value;
    }
}

static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
{
    return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3;
}

static void xive_esb_trigger(XiveSource *xsrc, int srcno)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno);

    *addr = 0x0;
}

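/*
 * For reference, the "magic" load offsets on the ESB management page
 * used by the helpers above and below, as defined in xive.h (listed
 * here for illustration, check the header for the authoritative
 * values):
 *
 *   XIVE_ESB_LOAD_EOI   0x000 - load to EOI the source
 *   XIVE_ESB_GET        0x800 - load to return the PQ bits, no side
 *                               effect
 *   XIVE_ESB_SET_PQ_00  0xc00 - load to set PQ to 00, returns old PQ
 *   XIVE_ESB_SET_PQ_01  0xd00 - load to set PQ to 01, returns old PQ
 *   XIVE_ESB_SET_PQ_10  0xe00 - load to set PQ to 10, returns old PQ
 *   XIVE_ESB_SET_PQ_11  0xf00 - load to set PQ to 11, returns old PQ
 *
 * A store at offset 0 of the trigger page, as done by
 * xive_esb_trigger() above, injects the interrupt.
 */
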
uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    if (write) {
        return xive_esb_rw(xsrc, srcno, offset, data, 1);
    }

    /*
     * Special Load EOI handling for LSI sources. Q bit is never set
     * and the interrupt should be re-triggered if the level is still
     * asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        offset == XIVE_ESB_LOAD_EOI) {
        xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            xive_esb_trigger(xsrc, srcno);
        }
        return 0;
    } else {
        return xive_esb_rw(xsrc, srcno, offset, 0, 0);
    }
}

static void kvmppc_xive_source_get_state(XiveSource *xsrc)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /* Perform a load without side effect to retrieve the PQ bits */
        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /* and save PQ locally */
        xive_source_esb_set(xsrc, i, pq);
    }
}

void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = opaque;

    if (!xive_source_irq_is_lsi(xsrc, srcno)) {
        if (!val) {
            return;
        }
    } else {
        if (val) {
            xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    }

    xive_esb_trigger(xsrc, srcno);
}

/*
 * sPAPR XIVE interrupt controller (KVM)
 */
void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    assert(xive_end_is_valid(end));

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * The EQ index and toggle bit are updated by HW. These are the
     * only fields from KVM we want to update QEMU with. The other END
     * fields should already be in the QEMU END table.
     */
    end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);
}

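/*
 * Both kvmppc_xive_get_queue_config() above and
 * kvmppc_xive_set_queue_config() below encode the (server, priority)
 * tuple in a 64-bit KVM EQ index. Assuming the KVM_XIVE_EQ_*
 * definitions of the KVM UAPI header at the time of writing, the
 * layout is:
 *
 *   bits 0..2  : priority
 *   bits 3..31 : server (vCPU id)
 *
 * The remaining bits are unused.
 */
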
void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    /*
     * Build the KVM state from the local END structure.
     */

    kvm_eq.flags = 0;
    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
    }

    /*
     * If the hcall is disabling the EQ, set the size and page address
     * to zero. When migrating, only valid ENDs are taken into
     * account.
     */
    if (xive_end_is_valid(end)) {
        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
        kvm_eq.qaddr = xive_end_qaddr(end);
        /*
         * The EQ toggle bit and index should only be relevant when
         * restoring the EQ state
         */
        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
        kvm_eq.qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        kvm_eq.qshift = 0;
        kvm_eq.qaddr = 0;
    }

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
                      NULL, true, errp);
}

static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
    Error *local_err = NULL;
    int i;

    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

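/*
 * A reminder of the PQ bit semantics relied upon by the VM change
 * state handler below (see the ESB documentation in xive.h):
 *
 *   PQ = 00 (XIVE_ESB_RESET)   : source enabled, the next event
 *                                triggers a notification
 *   PQ = 01 (XIVE_ESB_OFF)     : source masked, events are ignored
 *   PQ = 10 (XIVE_ESB_PENDING) : a notification was sent, a new event
 *                                only sets Q
 *   PQ = 11 (XIVE_ESB_QUEUED)  : an event arrived while one was
 *                                already pending
 *
 * Setting PQ to 10 thus stops the flow of notifications while still
 * recording, through the Q bit, any event triggered in the meantime.
 */
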
/*
 * The primary goal of the XIVE VM change handler is to mark the EQ
 * pages dirty when all XIVE event notifications have stopped.
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
 * triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
 *
 * At this stage, we can mark the EQ page dirty and let a migration
 * sequence transfer the EQ pages to the destination, which is done
 * just after the stop state.
 *
 * The previous configuration of the sources is restored when the VM
 * runs again. If an interrupt was queued while the VM was stopped,
 * simply generate a trigger.
 */
static void kvmppc_xive_change_state_handler(void *opaque, int running,
                                             RunState state)
{
    SpaprXive *xive = opaque;
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    int i;

    /*
     * Restore the sources to their initial state. This is called when
     * the VM resumes after a stop or a migration.
     */
    if (running) {
        for (i = 0; i < xsrc->nr_irqs; i++) {
            uint8_t pq;
            uint8_t old_pq;

            if (!xive_eas_is_valid(&xive->eat[i])) {
                continue;
            }

            pq = xive_source_esb_get(xsrc, i);
            /*
             * (pq << 8) selects the ESB SET_PQ_{00,01,10,11} offset
             * matching the PQ value saved when the VM was stopped.
             */
            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));

            /*
             * An interrupt was queued while the VM was stopped,
             * generate a trigger.
             */
            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
                xive_esb_trigger(xsrc, i);
            }
        }

        return;
    }

    /*
     * Mask the sources, to stop the flow of event notifications, and
     * save the PQs locally in the XiveSource object. The XiveSource
     * state will be collected later on by its vmstate handler if a
     * migration is in progress.
     */
    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /*
         * PQ is set to PENDING to possibly catch a triggered
         * interrupt occurring while the VM is stopped (hotplug event
         * for instance).
         */
        if (pq != XIVE_ESB_OFF) {
            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
        }
        xive_source_esb_set(xsrc, i, pq);
    }

    /*
     * Sync the XIVE controller in KVM, to flush in-flight event
     * notification that should be enqueued in the EQs and mark the
     * XIVE EQ pages dirty to collect all updates.
     */
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return;
    }
}

void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    /*
     * When the VM is stopped, the sources are masked and the previous
     * state is saved in anticipation of a migration. We should not
     * synchronize the source state in that case, otherwise we would
     * override the saved state.
     */
    if (runstate_is_running()) {
        kvmppc_xive_source_get_state(&xive->source);
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, errp);
}

/*
 * The SpaprXive 'pre_save' method is called by the vmstate handler of
 * the SpaprXive model, after the XIVE controller is synced in the VM
 * change handler.
 */
int kvmppc_xive_pre_save(SpaprXive *xive)
{
    Error *local_err = NULL;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return 0;
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -1;
    }

    return 0;
}

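/*
 * To recap, the overall migration sequence implemented by the
 * handlers above and below is roughly:
 *
 *   1. VM stop: the VM change state handler masks the sources (PQ set
 *      to PENDING), saves the PQs locally and issues a
 *      KVM_DEV_XIVE_EQ_SYNC to stabilize and dirty the EQ pages.
 *   2. 'pre_save': the EQ toggle bits and indexes are collected from
 *      KVM into the QEMU ENDT.
 *   3. The XIVE state and the EQ pages are transferred to the
 *      destination.
 *   4. 'post_load': the ENDT, the EAT and the thread interrupt
 *      contexts are pushed back into the KVM XIVE device.
 *   5. VM resume: the VM change state handler restores the source PQs
 *      and re-triggers the interrupts that were queued while stopped.
 */
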
/*
 * The SpaprXive 'post_load' method is not called by a vmstate
 * handler. It is called at the sPAPR machine level at the end of the
 * migration sequence by the sPAPR IRQ backend 'post_load' method,
 * when all XIVE states have been transferred and loaded.
 */
int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
{
    Error *local_err = NULL;
    CPUState *cs;
    int i;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    /* Restore the ENDT first. The targeting depends on it. */
    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the EAT */
    for (i = 0; i < xive->nr_irqs; i++) {
        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /*
         * We can only restore the source config if the source has been
         * previously set in KVM. Since we don't do that for all interrupts
         * at reset time anymore, let's do it now.
         */
        kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }

        kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /*
     * Restore the thread interrupt contexts of initial CPUs.
     *
     * The context of hotplugged CPUs is restored later, by the
     * 'post_load' handler of the XiveTCTX model because they are not
     * available at the time the SpaprXive 'post_load' method is
     * called. We cannot restore the context of all CPUs in the
     * 'post_load' handler of XiveTCTX because the machine is not
     * necessarily connected to the KVM device at that time.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* The source states will be restored when the machine starts running */
    return 0;
}

static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
                              Error **errp)
{
    void *addr;
    uint32_t page_shift = 16; /* TODO: fix page_shift */

    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
                pgoff << page_shift);
    if (addr == MAP_FAILED) {
        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
        return NULL;
    }

    return addr;
}

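/*
 * A note on the sizes mapped by kvmppc_xive_connect() below, stated
 * here as an aid rather than authoritatively: each source is given a
 * (1 << esb_shift) ESB window (two 64K pages in the 2-page model: an
 * even trigger page and an odd management page), hence the esb_len
 * computation, and the TIMA spans four 64K pages (4ull << TM_SHIFT),
 * which the XIVE architecture uses to expose differently privileged
 * views of the thread interrupt context.
 */
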
/*
 * All the XIVE memory regions are now backed by mappings from the KVM
 * XIVE device.
 */
int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
                        Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
    size_t tima_len = 4ull << TM_SHIFT;
    CPUState *cs;

    /*
     * The KVM XIVE device is already in use. This is the case when
     * rebooting under the XIVE-only interrupt mode.
     */
    if (xive->fd != -1) {
        return 0;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return -1;
    }

    /* First, create the KVM XIVE device */
    xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
    if (xive->fd < 0) {
        error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device");
        return -1;
    }

    /* Tell KVM about the # of VCPUs we may have */
    if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                              KVM_DEV_XIVE_NR_SERVERS)) {
        if (kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                              KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true,
                              &local_err)) {
            goto fail;
        }
    }

    /*
     * 1. Source ESB pages - KVM mapping
     */
    xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
                                      &local_err);
    if (local_err) {
        goto fail;
    }

    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
                                      "xive.esb", esb_len, xsrc->esb_mmap);
    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
                                        &xsrc->esb_mmio_kvm, 1);

    /*
     * 2. END ESB pages (No KVM support yet)
     */

    /*
     * 3. TIMA pages - KVM mapping
     */
    xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
                                     &local_err);
    if (local_err) {
        goto fail;
    }
    memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
                                      "xive.tima", tima_len, xive->tm_mmap);
    memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
                                        &xive->tm_mmio_kvm, 1);

    xive->change = qemu_add_vm_change_state_handler(
        kvmppc_xive_change_state_handler, xive);

    /* Connect the presenters to the initial VCPUs of the machine */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            goto fail;
        }
    }

    /* Update the KVM sources */
    kvmppc_xive_source_reset(xsrc, &local_err);
    if (local_err) {
        goto fail;
    }

    kvm_kernel_irqchip = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_direct_mapping = true;
    return 0;

fail:
    error_propagate(errp, local_err);
    kvmppc_xive_disconnect(intc);
    return -1;
}

void kvmppc_xive_disconnect(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc;
    size_t esb_len;

    /* The KVM XIVE device is not in use */
    if (!xive || xive->fd == -1) {
        return;
    }

    /* Clear the KVM mapping */
    xsrc = &xive->source;
    esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    if (xsrc->esb_mmap) {
        memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
        object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
        munmap(xsrc->esb_mmap, esb_len);
        xsrc->esb_mmap = NULL;
    }

    if (xive->tm_mmap) {
        memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
        object_unparent(OBJECT(&xive->tm_mmio_kvm));
        munmap(xive->tm_mmap, 4ull << TM_SHIFT);
        xive->tm_mmap = NULL;
    }

    /*
     * When the KVM device fd is closed, the KVM device is destroyed
     * and removed from the list of devices of the VM. The VCPU
     * presenters are also detached from the device.
     */
    if (xive->fd != -1) {
        close(xive->fd);
        xive->fd = -1;
    }

    kvm_kernel_irqchip = false;
    kvm_msi_via_irqfd_allowed = false;
    kvm_gsi_direct_mapping = false;

    /* Clear the local list of presenters (hotplug) */
    kvm_cpu_disable_all();

    /* The VM change state handler is not needed anymore */
    if (xive->change) {
        qemu_del_vm_change_state_handler(xive->change);
        xive->change = NULL;
    }
}