/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		/* fallthrough */
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
			struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vsx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vsx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
		struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xive_enabled())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		vcpu_load(vcpu);
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		vcpu_load(vcpu);
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);