/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptible */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
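		 * If the offsets differ, the code below copies the shared
		 * struct to the 4k slot inside the host page whose low bits
		 * match the guest's magic page address.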
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		/* fallthrough */
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = hv_enabled &&
		    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
		u64 gpr)
{
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	u32 hi, lo;
	u32 di;

#ifdef __BIG_ENDIAN
	hi = gpr >> 32;
	lo = gpr & 0xffffffff;
#else
	lo = gpr >> 32;
	hi = gpr & 0xffffffff;
#endif

	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
	if (di > 1)
		return;

	if (vcpu->arch.mmio_host_swabbed)
		di = 1 - di;

	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
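		/* plain GPR destination: commit the (possibly extended) value */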
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		kvmppc_set_vmx_dword(vcpu, gpr);
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
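		/* one element done; advance the offset so the next pass
		 * targets the following dword/word of the VSR */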
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_vsx_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
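		/* pull the next element out of the source VSR; a bad offset
		 * aborts the whole emulation */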
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
			struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
/* handle quadword load access in two halves */
int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, int is_default_endian)
{
	enum emulation_result emulated;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
	}

	return emulated;
}

static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	vector128 vrs = VCPU_VSX_VR(vcpu, rs);
	u32 di;
	u64 w0, w1;

	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
	if (di > 1)
		return -1;

	if (vcpu->arch.mmio_host_swabbed)
		di = 1 - di;

	w0 = vrs.u[di * 2];
	w1 = vrs.u[di * 2 + 1];

#ifdef __BIG_ENDIAN
	*val = (w0 << 32) | w1;
#else
	*val = (w1 << 32) | w0;
#endif
	return 0;
}

/* handle quadword store in two halves */
int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, int is_default_endian)
{
	u64 val = 0;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu, val, 8,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
		struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_load128_by2x64(run, vcpu,
				vcpu->arch.io_gpr, 1);
	} else {
		emulated = kvmppc_handle_store128_by2x64(run, vcpu,
				vcpu->arch.io_gpr, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0)
			vcpu->arch.mmio_vmx_copy_nums--;

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

out:
	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xive_enabled())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	vcpu_put(vcpu);
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
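 * The result is handed back to userspace through the
 * KVM_PPC_GET_CPU_CHAR ioctl handled further down in this file.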
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);