// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Called with interrupts
 * enabled; this function hard-disables interrupts itself.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_halt(vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
				      last_inst);

		/*
		 * Injecting a Data Storage here is a bit more
		 * accurate since the instruction that caused the
		 * access could still be a valid one.
		 */
		if (!IS_ENABLED(CONFIG_BOOKE)) {
			ulong dsisr = DSISR_BADACCESS;

			if (vcpu->mmio_is_write)
				dsisr |= DSISR_ISSTORE;

			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
		} else {
			/*
			 * BookE does not send a SIGBUS on a bad
			 * fault, so use a Program interrupt instead
			 * to avoid a fault loop.
			 */
			kvmppc_core_queue_program(vcpu, 0);
		}

		r = RESUME_GUEST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	kvm_vcpu_srcu_read_lock(vcpu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	int r;

	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (!try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	r = kvmppc_core_init_vm(kvm);
	if (r)
		module_put(kvm_ops->owner);
	return r;
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_destroy_vcpus(kvm);

	mutex_lock(&kvm->lock);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
		       kvmppc_hv_ops->hash_v3_possible());
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
		else
			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
		       !kvmppc_hv_ops->enable_dawr1(NULL));
		break;
	case KVM_CAP_PPC_RPT_INVALIDATE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_AIL_MODE_3:
		r = 0;
		/*
		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
		 * The POWER9s can support it if the guest runs in hash mode,
		 * but QEMU doesn't necessarily query the capability in time.
		 */
		if (hv_enabled) {
			if (kvmhv_on_pseries()) {
				if (pseries_reloc_on_exception())
					r = 1;
			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
				r = 1;
			}
		}
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

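	/* Wait object the vCPU blocks on when it has nothing to run */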
	rcuwait_init(&vcpu->arch.wait);
	vcpu->arch.waitp = &vcpu->arch.wait;
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
					u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

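	/* A negative offset means the MMIO element index is out of range */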
	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
					     u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
					    u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
				       u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 gpr;

	if (run->mmio.len > sizeof(gpr))
		return;

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
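	/*
	 * QPR holds the second word of a paired-single value; it is only
	 * used by the Book3S PR paired-single (Gekko/Broadway) emulation.
	 */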
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	struct kvm_run *run = vcpu->run;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
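/*
 * VSX MMIO is emulated one element at a time: mmio_vsx_copy_nums counts
 * the elements still to be transferred and mmio_vsx_offset selects the
 * slot within the VSX register on each pass through the generic MMIO path.
 */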
int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif

	/*
	 * We're already returning to userspace, don't pass the
	 * RESUME_HOST flags along.
	 */
	if (r > 0)
		r = 0;

	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled())
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	int ret = 0;

#ifdef CONFIG_KVM_MPIC
	ret = ret || (kvm->arch.mpic != NULL);
#endif
#ifdef CONFIG_KVM_XICS
	ret = ret || (kvm->arch.xics != NULL);
	ret = ret || (kvm->arch.xive != NULL);
#endif
	smp_rmb();
	return ret;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!kvm_arch_irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
			break;
		r = kvm->arch.kvm_ops->enable_svm(kvm);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
			break;
		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}
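
/*
 * For illustration only: on powernv, the firmware features consumed by
 * kvmppc_get_cpu_char() below are expected to show up as child nodes of
 * /ibm,opal/fw-features, each carrying an empty "enabled" or "disabled"
 * property that have_fw_feat() tests with of_property_read_bool(). The
 * fragment below is a sketch of that layout, not a dump from real firmware:
 *
 *	ibm,opal {
 *		fw-features {
 *			inst-spec-barrier-ori31,31,0 {
 *				enabled;
 *			};
 *			fw-count-cache-disabled {
 *				disabled;
 *			};
 *		};
 *	};
 */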

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif
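
/*
 * Illustrative sketch (not part of this translation unit): how a userspace
 * VMM might consume the information assembled above via the
 * KVM_PPC_GET_CPU_CHAR vm ioctl handled below. Only bits set in
 * character_mask/behaviour_mask are meaningful in character/behaviour.
 * vm_fd is assumed to be an open VM file descriptor; error handling is
 * omitted.
 *
 *	struct kvm_ppc_cpu_char cc;
 *	bool count_cache_disabled = false;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0)
 *		count_cache_disabled =
 *			(cc.character_mask & cc.character &
 *			 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS) != 0;
 */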

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;

		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	int lpid;

	/* The host LPID must always be 0 (allocation starts at 1) */
	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
	if (lpid < 0) {
		if (lpid == -ENOMEM)
			pr_err("%s: Out of memory\n", __func__);
		else
			pr_err("%s: No LPIDs free\n", __func__);
		return -ENOMEM;
	}

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_free_lpid(long lpid)
{
	ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);

void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}

int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	if (kvm->arch.kvm_ops->create_vm_debugfs)
		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
	return 0;
}
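
/*
 * Illustrative sketch (not part of this translation unit) of how the LPID
 * helpers above fit together, based only on their comments: nr_lpids is set
 * once, counting the host's LPID 0, and each guest then allocates an ID
 * starting at 1 and frees it again when the guest goes away. The value 64 is
 * just an example.
 *
 *	kvmppc_init_lpid(64);
 *
 *	long lpid = kvmppc_alloc_lpid();
 *	if (lpid >= 0) {
 *		...
 *		kvmppc_free_lpid(lpid);
 *	}
 */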