/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * enabled; they are hard-disabled here.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
472 */ 473 if (is_kvmppc_hv_enabled(kvm)) 474 kick_all_cpus_sync(); 475 #endif 476 477 kvm_for_each_vcpu(i, vcpu, kvm) 478 kvm_arch_vcpu_free(vcpu); 479 480 mutex_lock(&kvm->lock); 481 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 482 kvm->vcpus[i] = NULL; 483 484 atomic_set(&kvm->online_vcpus, 0); 485 486 kvmppc_core_destroy_vm(kvm); 487 488 mutex_unlock(&kvm->lock); 489 490 /* drop the module reference */ 491 module_put(kvm->arch.kvm_ops->owner); 492 } 493 494 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 495 { 496 int r; 497 /* Assume we're using HV mode when the HV module is loaded */ 498 int hv_enabled = kvmppc_hv_ops ? 1 : 0; 499 500 if (kvm) { 501 /* 502 * Hooray - we know which VM type we're running on. Depend on 503 * that rather than the guess above. 504 */ 505 hv_enabled = is_kvmppc_hv_enabled(kvm); 506 } 507 508 switch (ext) { 509 #ifdef CONFIG_BOOKE 510 case KVM_CAP_PPC_BOOKE_SREGS: 511 case KVM_CAP_PPC_BOOKE_WATCHDOG: 512 case KVM_CAP_PPC_EPR: 513 #else 514 case KVM_CAP_PPC_SEGSTATE: 515 case KVM_CAP_PPC_HIOR: 516 case KVM_CAP_PPC_PAPR: 517 #endif 518 case KVM_CAP_PPC_UNSET_IRQ: 519 case KVM_CAP_PPC_IRQ_LEVEL: 520 case KVM_CAP_ENABLE_CAP: 521 case KVM_CAP_ENABLE_CAP_VM: 522 case KVM_CAP_ONE_REG: 523 case KVM_CAP_IOEVENTFD: 524 case KVM_CAP_DEVICE_CTRL: 525 case KVM_CAP_IMMEDIATE_EXIT: 526 r = 1; 527 break; 528 case KVM_CAP_PPC_PAIRED_SINGLES: 529 case KVM_CAP_PPC_OSI: 530 case KVM_CAP_PPC_GET_PVINFO: 531 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 532 case KVM_CAP_SW_TLB: 533 #endif 534 /* We support this only for PR */ 535 r = !hv_enabled; 536 break; 537 #ifdef CONFIG_KVM_MPIC 538 case KVM_CAP_IRQ_MPIC: 539 r = 1; 540 break; 541 #endif 542 543 #ifdef CONFIG_PPC_BOOK3S_64 544 case KVM_CAP_SPAPR_TCE: 545 case KVM_CAP_SPAPR_TCE_64: 546 /* fallthrough */ 547 case KVM_CAP_SPAPR_TCE_VFIO: 548 case KVM_CAP_PPC_RTAS: 549 case KVM_CAP_PPC_FIXUP_HCALL: 550 case KVM_CAP_PPC_ENABLE_HCALL: 551 #ifdef CONFIG_KVM_XICS 552 case KVM_CAP_IRQ_XICS: 553 #endif 554 case KVM_CAP_PPC_GET_CPU_CHAR: 555 r = 1; 556 break; 557 558 case KVM_CAP_PPC_ALLOC_HTAB: 559 r = hv_enabled; 560 break; 561 #endif /* CONFIG_PPC_BOOK3S_64 */ 562 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 563 case KVM_CAP_PPC_SMT: 564 r = 0; 565 if (kvm) { 566 if (kvm->arch.emul_smt_mode > 1) 567 r = kvm->arch.emul_smt_mode; 568 else 569 r = kvm->arch.smt_mode; 570 } else if (hv_enabled) { 571 if (cpu_has_feature(CPU_FTR_ARCH_300)) 572 r = 1; 573 else 574 r = threads_per_subcore; 575 } 576 break; 577 case KVM_CAP_PPC_SMT_POSSIBLE: 578 r = 1; 579 if (hv_enabled) { 580 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 581 r = ((threads_per_subcore << 1) - 1); 582 else 583 /* P9 can emulate dbells, so allow any mode */ 584 r = 8 | 4 | 2 | 1; 585 } 586 break; 587 case KVM_CAP_PPC_RMA: 588 r = 0; 589 break; 590 case KVM_CAP_PPC_HWRNG: 591 r = kvmppc_hwrng_present(); 592 break; 593 case KVM_CAP_PPC_MMU_RADIX: 594 r = !!(hv_enabled && radix_enabled()); 595 break; 596 case KVM_CAP_PPC_MMU_HASH_V3: 597 r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) && 598 cpu_has_feature(CPU_FTR_HVMODE)); 599 break; 600 case KVM_CAP_PPC_NESTED_HV: 601 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && 602 !kvmppc_hv_ops->enable_nested(NULL)); 603 break; 604 #endif 605 case KVM_CAP_SYNC_MMU: 606 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 607 r = hv_enabled; 608 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) 609 r = 1; 610 #else 611 r = 0; 612 #endif 613 break; 614 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 615 case KVM_CAP_PPC_HTAB_FD: 616 r = 
hv_enabled; 617 break; 618 #endif 619 case KVM_CAP_NR_VCPUS: 620 /* 621 * Recommending a number of CPUs is somewhat arbitrary; we 622 * return the number of present CPUs for -HV (since a host 623 * will have secondary threads "offline"), and for other KVM 624 * implementations just count online CPUs. 625 */ 626 if (hv_enabled) 627 r = num_present_cpus(); 628 else 629 r = num_online_cpus(); 630 break; 631 case KVM_CAP_NR_MEMSLOTS: 632 r = KVM_USER_MEM_SLOTS; 633 break; 634 case KVM_CAP_MAX_VCPUS: 635 r = KVM_MAX_VCPUS; 636 break; 637 #ifdef CONFIG_PPC_BOOK3S_64 638 case KVM_CAP_PPC_GET_SMMU_INFO: 639 r = 1; 640 break; 641 case KVM_CAP_SPAPR_MULTITCE: 642 r = 1; 643 break; 644 case KVM_CAP_SPAPR_RESIZE_HPT: 645 r = !!hv_enabled; 646 break; 647 #endif 648 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 649 case KVM_CAP_PPC_FWNMI: 650 r = hv_enabled; 651 break; 652 #endif 653 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 654 case KVM_CAP_PPC_HTM: 655 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || 656 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); 657 break; 658 #endif 659 default: 660 r = 0; 661 break; 662 } 663 return r; 664 665 } 666 667 long kvm_arch_dev_ioctl(struct file *filp, 668 unsigned int ioctl, unsigned long arg) 669 { 670 return -EINVAL; 671 } 672 673 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 674 struct kvm_memory_slot *dont) 675 { 676 kvmppc_core_free_memslot(kvm, free, dont); 677 } 678 679 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 680 unsigned long npages) 681 { 682 return kvmppc_core_create_memslot(kvm, slot, npages); 683 } 684 685 int kvm_arch_prepare_memory_region(struct kvm *kvm, 686 struct kvm_memory_slot *memslot, 687 const struct kvm_userspace_memory_region *mem, 688 enum kvm_mr_change change) 689 { 690 return kvmppc_core_prepare_memory_region(kvm, memslot, mem); 691 } 692 693 void kvm_arch_commit_memory_region(struct kvm *kvm, 694 const struct kvm_userspace_memory_region *mem, 695 const struct kvm_memory_slot *old, 696 const struct kvm_memory_slot *new, 697 enum kvm_mr_change change) 698 { 699 kvmppc_core_commit_memory_region(kvm, mem, old, new); 700 } 701 702 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 703 struct kvm_memory_slot *slot) 704 { 705 kvmppc_core_flush_memslot(kvm, slot); 706 } 707 708 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 709 { 710 struct kvm_vcpu *vcpu; 711 vcpu = kvmppc_core_vcpu_create(kvm, id); 712 if (!IS_ERR(vcpu)) { 713 vcpu->arch.wqp = &vcpu->wq; 714 kvmppc_create_vcpu_debugfs(vcpu, id); 715 } 716 return vcpu; 717 } 718 719 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 720 { 721 } 722 723 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 724 { 725 /* Make sure we're not using the vcpu anymore */ 726 hrtimer_cancel(&vcpu->arch.dec_timer); 727 728 kvmppc_remove_vcpu_debugfs(vcpu); 729 730 switch (vcpu->arch.irq_type) { 731 case KVMPPC_IRQ_MPIC: 732 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); 733 break; 734 case KVMPPC_IRQ_XICS: 735 if (xive_enabled()) 736 kvmppc_xive_cleanup_vcpu(vcpu); 737 else 738 kvmppc_xics_free_icp(vcpu); 739 break; 740 } 741 742 kvmppc_core_vcpu_free(vcpu); 743 } 744 745 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 746 { 747 kvm_arch_vcpu_free(vcpu); 748 } 749 750 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 751 { 752 return kvmppc_core_pending_dec(vcpu); 753 } 754 755 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) 756 { 757 struct kvm_vcpu *vcpu; 758 759 vcpu = 
container_of(timer, struct kvm_vcpu, arch.dec_timer); 760 kvmppc_decrementer_func(vcpu); 761 762 return HRTIMER_NORESTART; 763 } 764 765 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 766 { 767 int ret; 768 769 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 770 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; 771 vcpu->arch.dec_expires = get_tb(); 772 773 #ifdef CONFIG_KVM_EXIT_TIMING 774 mutex_init(&vcpu->arch.exit_timing_lock); 775 #endif 776 ret = kvmppc_subarch_vcpu_init(vcpu); 777 return ret; 778 } 779 780 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 781 { 782 kvmppc_mmu_destroy(vcpu); 783 kvmppc_subarch_vcpu_uninit(vcpu); 784 } 785 786 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 787 { 788 #ifdef CONFIG_BOOKE 789 /* 790 * vrsave (formerly usprg0) isn't used by Linux, but may 791 * be used by the guest. 792 * 793 * On non-booke this is associated with Altivec and 794 * is handled by code in book3s.c. 795 */ 796 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); 797 #endif 798 kvmppc_core_vcpu_load(vcpu, cpu); 799 } 800 801 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 802 { 803 kvmppc_core_vcpu_put(vcpu); 804 #ifdef CONFIG_BOOKE 805 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); 806 #endif 807 } 808 809 /* 810 * irq_bypass_add_producer and irq_bypass_del_producer are only 811 * useful if the architecture supports PCI passthrough. 812 * irq_bypass_stop and irq_bypass_start are not needed and so 813 * kvm_ops are not defined for them. 814 */ 815 bool kvm_arch_has_irq_bypass(void) 816 { 817 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) || 818 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer)); 819 } 820 821 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, 822 struct irq_bypass_producer *prod) 823 { 824 struct kvm_kernel_irqfd *irqfd = 825 container_of(cons, struct kvm_kernel_irqfd, consumer); 826 struct kvm *kvm = irqfd->kvm; 827 828 if (kvm->arch.kvm_ops->irq_bypass_add_producer) 829 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); 830 831 return 0; 832 } 833 834 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, 835 struct irq_bypass_producer *prod) 836 { 837 struct kvm_kernel_irqfd *irqfd = 838 container_of(cons, struct kvm_kernel_irqfd, consumer); 839 struct kvm *kvm = irqfd->kvm; 840 841 if (kvm->arch.kvm_ops->irq_bypass_del_producer) 842 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); 843 } 844 845 #ifdef CONFIG_VSX 846 static inline int kvmppc_get_vsr_dword_offset(int index) 847 { 848 int offset; 849 850 if ((index != 0) && (index != 1)) 851 return -1; 852 853 #ifdef __BIG_ENDIAN 854 offset = index; 855 #else 856 offset = 1 - index; 857 #endif 858 859 return offset; 860 } 861 862 static inline int kvmppc_get_vsr_word_offset(int index) 863 { 864 int offset; 865 866 if ((index > 3) || (index < 0)) 867 return -1; 868 869 #ifdef __BIG_ENDIAN 870 offset = index; 871 #else 872 offset = 3 - index; 873 #endif 874 return offset; 875 } 876 877 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, 878 u64 gpr) 879 { 880 union kvmppc_one_reg val; 881 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); 882 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 883 884 if (offset == -1) 885 return; 886 887 if (index >= 32) { 888 val.vval = VCPU_VSX_VR(vcpu, index - 32); 889 val.vsxval[offset] = gpr; 890 VCPU_VSX_VR(vcpu, index - 32) = val.vval; 891 } else { 892 VCPU_VSX_FPR(vcpu, index, offset) = gpr; 893 } 894 } 895 896 static inline void 
kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, 897 u64 gpr) 898 { 899 union kvmppc_one_reg val; 900 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 901 902 if (index >= 32) { 903 val.vval = VCPU_VSX_VR(vcpu, index - 32); 904 val.vsxval[0] = gpr; 905 val.vsxval[1] = gpr; 906 VCPU_VSX_VR(vcpu, index - 32) = val.vval; 907 } else { 908 VCPU_VSX_FPR(vcpu, index, 0) = gpr; 909 VCPU_VSX_FPR(vcpu, index, 1) = gpr; 910 } 911 } 912 913 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, 914 u32 gpr) 915 { 916 union kvmppc_one_reg val; 917 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 918 919 if (index >= 32) { 920 val.vsx32val[0] = gpr; 921 val.vsx32val[1] = gpr; 922 val.vsx32val[2] = gpr; 923 val.vsx32val[3] = gpr; 924 VCPU_VSX_VR(vcpu, index - 32) = val.vval; 925 } else { 926 val.vsx32val[0] = gpr; 927 val.vsx32val[1] = gpr; 928 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; 929 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; 930 } 931 } 932 933 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, 934 u32 gpr32) 935 { 936 union kvmppc_one_reg val; 937 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); 938 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 939 int dword_offset, word_offset; 940 941 if (offset == -1) 942 return; 943 944 if (index >= 32) { 945 val.vval = VCPU_VSX_VR(vcpu, index - 32); 946 val.vsx32val[offset] = gpr32; 947 VCPU_VSX_VR(vcpu, index - 32) = val.vval; 948 } else { 949 dword_offset = offset / 2; 950 word_offset = offset % 2; 951 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); 952 val.vsx32val[word_offset] = gpr32; 953 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; 954 } 955 } 956 #endif /* CONFIG_VSX */ 957 958 #ifdef CONFIG_ALTIVEC 959 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, 960 int index, int element_size) 961 { 962 int offset; 963 int elts = sizeof(vector128)/element_size; 964 965 if ((index < 0) || (index >= elts)) 966 return -1; 967 968 if (kvmppc_need_byteswap(vcpu)) 969 offset = elts - index - 1; 970 else 971 offset = index; 972 973 return offset; 974 } 975 976 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, 977 int index) 978 { 979 return kvmppc_get_vmx_offset_generic(vcpu, index, 8); 980 } 981 982 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, 983 int index) 984 { 985 return kvmppc_get_vmx_offset_generic(vcpu, index, 4); 986 } 987 988 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, 989 int index) 990 { 991 return kvmppc_get_vmx_offset_generic(vcpu, index, 2); 992 } 993 994 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, 995 int index) 996 { 997 return kvmppc_get_vmx_offset_generic(vcpu, index, 1); 998 } 999 1000 1001 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, 1002 u64 gpr) 1003 { 1004 union kvmppc_one_reg val; 1005 int offset = kvmppc_get_vmx_dword_offset(vcpu, 1006 vcpu->arch.mmio_vmx_offset); 1007 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 1008 1009 if (offset == -1) 1010 return; 1011 1012 val.vval = VCPU_VSX_VR(vcpu, index); 1013 val.vsxval[offset] = gpr; 1014 VCPU_VSX_VR(vcpu, index) = val.vval; 1015 } 1016 1017 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, 1018 u32 gpr32) 1019 { 1020 union kvmppc_one_reg val; 1021 int offset = kvmppc_get_vmx_word_offset(vcpu, 1022 vcpu->arch.mmio_vmx_offset); 1023 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 1024 1025 if (offset == -1) 1026 return; 1027 1028 val.vval = VCPU_VSX_VR(vcpu, index); 
1029 val.vsx32val[offset] = gpr32; 1030 VCPU_VSX_VR(vcpu, index) = val.vval; 1031 } 1032 1033 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, 1034 u16 gpr16) 1035 { 1036 union kvmppc_one_reg val; 1037 int offset = kvmppc_get_vmx_hword_offset(vcpu, 1038 vcpu->arch.mmio_vmx_offset); 1039 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 1040 1041 if (offset == -1) 1042 return; 1043 1044 val.vval = VCPU_VSX_VR(vcpu, index); 1045 val.vsx16val[offset] = gpr16; 1046 VCPU_VSX_VR(vcpu, index) = val.vval; 1047 } 1048 1049 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, 1050 u8 gpr8) 1051 { 1052 union kvmppc_one_reg val; 1053 int offset = kvmppc_get_vmx_byte_offset(vcpu, 1054 vcpu->arch.mmio_vmx_offset); 1055 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 1056 1057 if (offset == -1) 1058 return; 1059 1060 val.vval = VCPU_VSX_VR(vcpu, index); 1061 val.vsx8val[offset] = gpr8; 1062 VCPU_VSX_VR(vcpu, index) = val.vval; 1063 } 1064 #endif /* CONFIG_ALTIVEC */ 1065 1066 #ifdef CONFIG_PPC_FPU 1067 static inline u64 sp_to_dp(u32 fprs) 1068 { 1069 u64 fprd; 1070 1071 preempt_disable(); 1072 enable_kernel_fp(); 1073 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs) 1074 : "fr0"); 1075 preempt_enable(); 1076 return fprd; 1077 } 1078 1079 static inline u32 dp_to_sp(u64 fprd) 1080 { 1081 u32 fprs; 1082 1083 preempt_disable(); 1084 enable_kernel_fp(); 1085 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd) 1086 : "fr0"); 1087 preempt_enable(); 1088 return fprs; 1089 } 1090 1091 #else 1092 #define sp_to_dp(x) (x) 1093 #define dp_to_sp(x) (x) 1094 #endif /* CONFIG_PPC_FPU */ 1095 1096 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 1097 struct kvm_run *run) 1098 { 1099 u64 uninitialized_var(gpr); 1100 1101 if (run->mmio.len > sizeof(gpr)) { 1102 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); 1103 return; 1104 } 1105 1106 if (!vcpu->arch.mmio_host_swabbed) { 1107 switch (run->mmio.len) { 1108 case 8: gpr = *(u64 *)run->mmio.data; break; 1109 case 4: gpr = *(u32 *)run->mmio.data; break; 1110 case 2: gpr = *(u16 *)run->mmio.data; break; 1111 case 1: gpr = *(u8 *)run->mmio.data; break; 1112 } 1113 } else { 1114 switch (run->mmio.len) { 1115 case 8: gpr = swab64(*(u64 *)run->mmio.data); break; 1116 case 4: gpr = swab32(*(u32 *)run->mmio.data); break; 1117 case 2: gpr = swab16(*(u16 *)run->mmio.data); break; 1118 case 1: gpr = *(u8 *)run->mmio.data; break; 1119 } 1120 } 1121 1122 /* conversion between single and double precision */ 1123 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) 1124 gpr = sp_to_dp(gpr); 1125 1126 if (vcpu->arch.mmio_sign_extend) { 1127 switch (run->mmio.len) { 1128 #ifdef CONFIG_PPC64 1129 case 4: 1130 gpr = (s64)(s32)gpr; 1131 break; 1132 #endif 1133 case 2: 1134 gpr = (s64)(s16)gpr; 1135 break; 1136 case 1: 1137 gpr = (s64)(s8)gpr; 1138 break; 1139 } 1140 } 1141 1142 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { 1143 case KVM_MMIO_REG_GPR: 1144 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 1145 break; 1146 case KVM_MMIO_REG_FPR: 1147 if (vcpu->kvm->arch.kvm_ops->giveup_ext) 1148 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); 1149 1150 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; 1151 break; 1152 #ifdef CONFIG_PPC_BOOK3S 1153 case KVM_MMIO_REG_QPR: 1154 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 1155 break; 1156 case KVM_MMIO_REG_FQPR: 1157 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; 1158 vcpu->arch.qpr[vcpu->arch.io_gpr & 
KVM_MMIO_REG_MASK] = gpr; 1159 break; 1160 #endif 1161 #ifdef CONFIG_VSX 1162 case KVM_MMIO_REG_VSX: 1163 if (vcpu->kvm->arch.kvm_ops->giveup_ext) 1164 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); 1165 1166 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) 1167 kvmppc_set_vsr_dword(vcpu, gpr); 1168 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) 1169 kvmppc_set_vsr_word(vcpu, gpr); 1170 else if (vcpu->arch.mmio_copy_type == 1171 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) 1172 kvmppc_set_vsr_dword_dump(vcpu, gpr); 1173 else if (vcpu->arch.mmio_copy_type == 1174 KVMPPC_VSX_COPY_WORD_LOAD_DUMP) 1175 kvmppc_set_vsr_word_dump(vcpu, gpr); 1176 break; 1177 #endif 1178 #ifdef CONFIG_ALTIVEC 1179 case KVM_MMIO_REG_VMX: 1180 if (vcpu->kvm->arch.kvm_ops->giveup_ext) 1181 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); 1182 1183 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) 1184 kvmppc_set_vmx_dword(vcpu, gpr); 1185 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) 1186 kvmppc_set_vmx_word(vcpu, gpr); 1187 else if (vcpu->arch.mmio_copy_type == 1188 KVMPPC_VMX_COPY_HWORD) 1189 kvmppc_set_vmx_hword(vcpu, gpr); 1190 else if (vcpu->arch.mmio_copy_type == 1191 KVMPPC_VMX_COPY_BYTE) 1192 kvmppc_set_vmx_byte(vcpu, gpr); 1193 break; 1194 #endif 1195 default: 1196 BUG(); 1197 } 1198 } 1199 1200 static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 1201 unsigned int rt, unsigned int bytes, 1202 int is_default_endian, int sign_extend) 1203 { 1204 int idx, ret; 1205 bool host_swabbed; 1206 1207 /* Pity C doesn't have a logical XOR operator */ 1208 if (kvmppc_need_byteswap(vcpu)) { 1209 host_swabbed = is_default_endian; 1210 } else { 1211 host_swabbed = !is_default_endian; 1212 } 1213 1214 if (bytes > sizeof(run->mmio.data)) { 1215 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 1216 run->mmio.len); 1217 } 1218 1219 run->mmio.phys_addr = vcpu->arch.paddr_accessed; 1220 run->mmio.len = bytes; 1221 run->mmio.is_write = 0; 1222 1223 vcpu->arch.io_gpr = rt; 1224 vcpu->arch.mmio_host_swabbed = host_swabbed; 1225 vcpu->mmio_needed = 1; 1226 vcpu->mmio_is_write = 0; 1227 vcpu->arch.mmio_sign_extend = sign_extend; 1228 1229 idx = srcu_read_lock(&vcpu->kvm->srcu); 1230 1231 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, 1232 bytes, &run->mmio.data); 1233 1234 srcu_read_unlock(&vcpu->kvm->srcu, idx); 1235 1236 if (!ret) { 1237 kvmppc_complete_mmio_load(vcpu, run); 1238 vcpu->mmio_needed = 0; 1239 return EMULATE_DONE; 1240 } 1241 1242 return EMULATE_DO_MMIO; 1243 } 1244 1245 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 1246 unsigned int rt, unsigned int bytes, 1247 int is_default_endian) 1248 { 1249 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0); 1250 } 1251 EXPORT_SYMBOL_GPL(kvmppc_handle_load); 1252 1253 /* Same as above, but sign extends */ 1254 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 1255 unsigned int rt, unsigned int bytes, 1256 int is_default_endian) 1257 { 1258 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); 1259 } 1260 1261 #ifdef CONFIG_VSX 1262 int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 1263 unsigned int rt, unsigned int bytes, 1264 int is_default_endian, int mmio_sign_extend) 1265 { 1266 enum emulation_result emulated = EMULATE_DONE; 1267 1268 /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ 1269 if (vcpu->arch.mmio_vsx_copy_nums > 4) 1270 return EMULATE_FAIL; 1271 1272 while 
(vcpu->arch.mmio_vsx_copy_nums) { 1273 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, 1274 is_default_endian, mmio_sign_extend); 1275 1276 if (emulated != EMULATE_DONE) 1277 break; 1278 1279 vcpu->arch.paddr_accessed += run->mmio.len; 1280 1281 vcpu->arch.mmio_vsx_copy_nums--; 1282 vcpu->arch.mmio_vsx_offset++; 1283 } 1284 return emulated; 1285 } 1286 #endif /* CONFIG_VSX */ 1287 1288 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 1289 u64 val, unsigned int bytes, int is_default_endian) 1290 { 1291 void *data = run->mmio.data; 1292 int idx, ret; 1293 bool host_swabbed; 1294 1295 /* Pity C doesn't have a logical XOR operator */ 1296 if (kvmppc_need_byteswap(vcpu)) { 1297 host_swabbed = is_default_endian; 1298 } else { 1299 host_swabbed = !is_default_endian; 1300 } 1301 1302 if (bytes > sizeof(run->mmio.data)) { 1303 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 1304 run->mmio.len); 1305 } 1306 1307 run->mmio.phys_addr = vcpu->arch.paddr_accessed; 1308 run->mmio.len = bytes; 1309 run->mmio.is_write = 1; 1310 vcpu->mmio_needed = 1; 1311 vcpu->mmio_is_write = 1; 1312 1313 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) 1314 val = dp_to_sp(val); 1315 1316 /* Store the value at the lowest bytes in 'data'. */ 1317 if (!host_swabbed) { 1318 switch (bytes) { 1319 case 8: *(u64 *)data = val; break; 1320 case 4: *(u32 *)data = val; break; 1321 case 2: *(u16 *)data = val; break; 1322 case 1: *(u8 *)data = val; break; 1323 } 1324 } else { 1325 switch (bytes) { 1326 case 8: *(u64 *)data = swab64(val); break; 1327 case 4: *(u32 *)data = swab32(val); break; 1328 case 2: *(u16 *)data = swab16(val); break; 1329 case 1: *(u8 *)data = val; break; 1330 } 1331 } 1332 1333 idx = srcu_read_lock(&vcpu->kvm->srcu); 1334 1335 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, 1336 bytes, &run->mmio.data); 1337 1338 srcu_read_unlock(&vcpu->kvm->srcu, idx); 1339 1340 if (!ret) { 1341 vcpu->mmio_needed = 0; 1342 return EMULATE_DONE; 1343 } 1344 1345 return EMULATE_DO_MMIO; 1346 } 1347 EXPORT_SYMBOL_GPL(kvmppc_handle_store); 1348 1349 #ifdef CONFIG_VSX 1350 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) 1351 { 1352 u32 dword_offset, word_offset; 1353 union kvmppc_one_reg reg; 1354 int vsx_offset = 0; 1355 int copy_type = vcpu->arch.mmio_copy_type; 1356 int result = 0; 1357 1358 switch (copy_type) { 1359 case KVMPPC_VSX_COPY_DWORD: 1360 vsx_offset = 1361 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); 1362 1363 if (vsx_offset == -1) { 1364 result = -1; 1365 break; 1366 } 1367 1368 if (rs < 32) { 1369 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); 1370 } else { 1371 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); 1372 *val = reg.vsxval[vsx_offset]; 1373 } 1374 break; 1375 1376 case KVMPPC_VSX_COPY_WORD: 1377 vsx_offset = 1378 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); 1379 1380 if (vsx_offset == -1) { 1381 result = -1; 1382 break; 1383 } 1384 1385 if (rs < 32) { 1386 dword_offset = vsx_offset / 2; 1387 word_offset = vsx_offset % 2; 1388 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); 1389 *val = reg.vsx32val[word_offset]; 1390 } else { 1391 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); 1392 *val = reg.vsx32val[vsx_offset]; 1393 } 1394 break; 1395 1396 default: 1397 result = -1; 1398 break; 1399 } 1400 1401 return result; 1402 } 1403 1404 int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 1405 int rs, unsigned int bytes, int is_default_endian) 1406 { 1407 u64 val; 1408 enum emulation_result emulated = 
EMULATE_DONE; 1409 1410 vcpu->arch.io_gpr = rs; 1411 1412 /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ 1413 if (vcpu->arch.mmio_vsx_copy_nums > 4) 1414 return EMULATE_FAIL; 1415 1416 while (vcpu->arch.mmio_vsx_copy_nums) { 1417 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) 1418 return EMULATE_FAIL; 1419 1420 emulated = kvmppc_handle_store(run, vcpu, 1421 val, bytes, is_default_endian); 1422 1423 if (emulated != EMULATE_DONE) 1424 break; 1425 1426 vcpu->arch.paddr_accessed += run->mmio.len; 1427 1428 vcpu->arch.mmio_vsx_copy_nums--; 1429 vcpu->arch.mmio_vsx_offset++; 1430 } 1431 1432 return emulated; 1433 } 1434 1435 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, 1436 struct kvm_run *run) 1437 { 1438 enum emulation_result emulated = EMULATE_FAIL; 1439 int r; 1440 1441 vcpu->arch.paddr_accessed += run->mmio.len; 1442 1443 if (!vcpu->mmio_is_write) { 1444 emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, 1445 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); 1446 } else { 1447 emulated = kvmppc_handle_vsx_store(run, vcpu, 1448 vcpu->arch.io_gpr, run->mmio.len, 1); 1449 } 1450 1451 switch (emulated) { 1452 case EMULATE_DO_MMIO: 1453 run->exit_reason = KVM_EXIT_MMIO; 1454 r = RESUME_HOST; 1455 break; 1456 case EMULATE_FAIL: 1457 pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); 1458 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1459 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 1460 r = RESUME_HOST; 1461 break; 1462 default: 1463 r = RESUME_GUEST; 1464 break; 1465 } 1466 return r; 1467 } 1468 #endif /* CONFIG_VSX */ 1469 1470 #ifdef CONFIG_ALTIVEC 1471 int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 1472 unsigned int rt, unsigned int bytes, int is_default_endian) 1473 { 1474 enum emulation_result emulated = EMULATE_DONE; 1475 1476 if (vcpu->arch.mmio_vsx_copy_nums > 2) 1477 return EMULATE_FAIL; 1478 1479 while (vcpu->arch.mmio_vmx_copy_nums) { 1480 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, 1481 is_default_endian, 0); 1482 1483 if (emulated != EMULATE_DONE) 1484 break; 1485 1486 vcpu->arch.paddr_accessed += run->mmio.len; 1487 vcpu->arch.mmio_vmx_copy_nums--; 1488 vcpu->arch.mmio_vmx_offset++; 1489 } 1490 1491 return emulated; 1492 } 1493 1494 int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) 1495 { 1496 union kvmppc_one_reg reg; 1497 int vmx_offset = 0; 1498 int result = 0; 1499 1500 vmx_offset = 1501 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); 1502 1503 if (vmx_offset == -1) 1504 return -1; 1505 1506 reg.vval = VCPU_VSX_VR(vcpu, index); 1507 *val = reg.vsxval[vmx_offset]; 1508 1509 return result; 1510 } 1511 1512 int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) 1513 { 1514 union kvmppc_one_reg reg; 1515 int vmx_offset = 0; 1516 int result = 0; 1517 1518 vmx_offset = 1519 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); 1520 1521 if (vmx_offset == -1) 1522 return -1; 1523 1524 reg.vval = VCPU_VSX_VR(vcpu, index); 1525 *val = reg.vsx32val[vmx_offset]; 1526 1527 return result; 1528 } 1529 1530 int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) 1531 { 1532 union kvmppc_one_reg reg; 1533 int vmx_offset = 0; 1534 int result = 0; 1535 1536 vmx_offset = 1537 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); 1538 1539 if (vmx_offset == -1) 1540 return -1; 1541 1542 reg.vval = VCPU_VSX_VR(vcpu, index); 1543 *val = reg.vsx16val[vmx_offset]; 1544 1545 return result; 1546 } 1547 1548 int 
kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) 1549 { 1550 union kvmppc_one_reg reg; 1551 int vmx_offset = 0; 1552 int result = 0; 1553 1554 vmx_offset = 1555 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); 1556 1557 if (vmx_offset == -1) 1558 return -1; 1559 1560 reg.vval = VCPU_VSX_VR(vcpu, index); 1561 *val = reg.vsx8val[vmx_offset]; 1562 1563 return result; 1564 } 1565 1566 int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 1567 unsigned int rs, unsigned int bytes, int is_default_endian) 1568 { 1569 u64 val = 0; 1570 unsigned int index = rs & KVM_MMIO_REG_MASK; 1571 enum emulation_result emulated = EMULATE_DONE; 1572 1573 if (vcpu->arch.mmio_vsx_copy_nums > 2) 1574 return EMULATE_FAIL; 1575 1576 vcpu->arch.io_gpr = rs; 1577 1578 while (vcpu->arch.mmio_vmx_copy_nums) { 1579 switch (vcpu->arch.mmio_copy_type) { 1580 case KVMPPC_VMX_COPY_DWORD: 1581 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) 1582 return EMULATE_FAIL; 1583 1584 break; 1585 case KVMPPC_VMX_COPY_WORD: 1586 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) 1587 return EMULATE_FAIL; 1588 break; 1589 case KVMPPC_VMX_COPY_HWORD: 1590 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) 1591 return EMULATE_FAIL; 1592 break; 1593 case KVMPPC_VMX_COPY_BYTE: 1594 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) 1595 return EMULATE_FAIL; 1596 break; 1597 default: 1598 return EMULATE_FAIL; 1599 } 1600 1601 emulated = kvmppc_handle_store(run, vcpu, val, bytes, 1602 is_default_endian); 1603 if (emulated != EMULATE_DONE) 1604 break; 1605 1606 vcpu->arch.paddr_accessed += run->mmio.len; 1607 vcpu->arch.mmio_vmx_copy_nums--; 1608 vcpu->arch.mmio_vmx_offset++; 1609 } 1610 1611 return emulated; 1612 } 1613 1614 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, 1615 struct kvm_run *run) 1616 { 1617 enum emulation_result emulated = EMULATE_FAIL; 1618 int r; 1619 1620 vcpu->arch.paddr_accessed += run->mmio.len; 1621 1622 if (!vcpu->mmio_is_write) { 1623 emulated = kvmppc_handle_vmx_load(run, vcpu, 1624 vcpu->arch.io_gpr, run->mmio.len, 1); 1625 } else { 1626 emulated = kvmppc_handle_vmx_store(run, vcpu, 1627 vcpu->arch.io_gpr, run->mmio.len, 1); 1628 } 1629 1630 switch (emulated) { 1631 case EMULATE_DO_MMIO: 1632 run->exit_reason = KVM_EXIT_MMIO; 1633 r = RESUME_HOST; 1634 break; 1635 case EMULATE_FAIL: 1636 pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); 1637 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1638 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 1639 r = RESUME_HOST; 1640 break; 1641 default: 1642 r = RESUME_GUEST; 1643 break; 1644 } 1645 return r; 1646 } 1647 #endif /* CONFIG_ALTIVEC */ 1648 1649 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1650 { 1651 int r = 0; 1652 union kvmppc_one_reg val; 1653 int size; 1654 1655 size = one_reg_size(reg->id); 1656 if (size > sizeof(val)) 1657 return -EINVAL; 1658 1659 r = kvmppc_get_one_reg(vcpu, reg->id, &val); 1660 if (r == -EINVAL) { 1661 r = 0; 1662 switch (reg->id) { 1663 #ifdef CONFIG_ALTIVEC 1664 case KVM_REG_PPC_VR0 ... 
KVM_REG_PPC_VR31: 1665 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1666 r = -ENXIO; 1667 break; 1668 } 1669 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; 1670 break; 1671 case KVM_REG_PPC_VSCR: 1672 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1673 r = -ENXIO; 1674 break; 1675 } 1676 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); 1677 break; 1678 case KVM_REG_PPC_VRSAVE: 1679 val = get_reg_val(reg->id, vcpu->arch.vrsave); 1680 break; 1681 #endif /* CONFIG_ALTIVEC */ 1682 default: 1683 r = -EINVAL; 1684 break; 1685 } 1686 } 1687 1688 if (r) 1689 return r; 1690 1691 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) 1692 r = -EFAULT; 1693 1694 return r; 1695 } 1696 1697 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1698 { 1699 int r; 1700 union kvmppc_one_reg val; 1701 int size; 1702 1703 size = one_reg_size(reg->id); 1704 if (size > sizeof(val)) 1705 return -EINVAL; 1706 1707 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) 1708 return -EFAULT; 1709 1710 r = kvmppc_set_one_reg(vcpu, reg->id, &val); 1711 if (r == -EINVAL) { 1712 r = 0; 1713 switch (reg->id) { 1714 #ifdef CONFIG_ALTIVEC 1715 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: 1716 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1717 r = -ENXIO; 1718 break; 1719 } 1720 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; 1721 break; 1722 case KVM_REG_PPC_VSCR: 1723 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1724 r = -ENXIO; 1725 break; 1726 } 1727 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); 1728 break; 1729 case KVM_REG_PPC_VRSAVE: 1730 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1731 r = -ENXIO; 1732 break; 1733 } 1734 vcpu->arch.vrsave = set_reg_val(reg->id, val); 1735 break; 1736 #endif /* CONFIG_ALTIVEC */ 1737 default: 1738 r = -EINVAL; 1739 break; 1740 } 1741 } 1742 1743 return r; 1744 } 1745 1746 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 1747 { 1748 int r; 1749 1750 vcpu_load(vcpu); 1751 1752 if (vcpu->mmio_needed) { 1753 vcpu->mmio_needed = 0; 1754 if (!vcpu->mmio_is_write) 1755 kvmppc_complete_mmio_load(vcpu, run); 1756 #ifdef CONFIG_VSX 1757 if (vcpu->arch.mmio_vsx_copy_nums > 0) { 1758 vcpu->arch.mmio_vsx_copy_nums--; 1759 vcpu->arch.mmio_vsx_offset++; 1760 } 1761 1762 if (vcpu->arch.mmio_vsx_copy_nums > 0) { 1763 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); 1764 if (r == RESUME_HOST) { 1765 vcpu->mmio_needed = 1; 1766 goto out; 1767 } 1768 } 1769 #endif 1770 #ifdef CONFIG_ALTIVEC 1771 if (vcpu->arch.mmio_vmx_copy_nums > 0) { 1772 vcpu->arch.mmio_vmx_copy_nums--; 1773 vcpu->arch.mmio_vmx_offset++; 1774 } 1775 1776 if (vcpu->arch.mmio_vmx_copy_nums > 0) { 1777 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); 1778 if (r == RESUME_HOST) { 1779 vcpu->mmio_needed = 1; 1780 goto out; 1781 } 1782 } 1783 #endif 1784 } else if (vcpu->arch.osi_needed) { 1785 u64 *gprs = run->osi.gprs; 1786 int i; 1787 1788 for (i = 0; i < 32; i++) 1789 kvmppc_set_gpr(vcpu, i, gprs[i]); 1790 vcpu->arch.osi_needed = 0; 1791 } else if (vcpu->arch.hcall_needed) { 1792 int i; 1793 1794 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); 1795 for (i = 0; i < 9; ++i) 1796 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); 1797 vcpu->arch.hcall_needed = 0; 1798 #ifdef CONFIG_BOOKE 1799 } else if (vcpu->arch.epr_needed) { 1800 kvmppc_set_epr(vcpu, run->epr.epr); 1801 vcpu->arch.epr_needed = 0; 1802 #endif 1803 } 1804 1805 kvm_sigset_activate(vcpu); 1806 1807 if (run->immediate_exit) 1808 r = -EINTR; 1809 else 1810 r = kvmppc_vcpu_run(run, 
vcpu); 1811 1812 kvm_sigset_deactivate(vcpu); 1813 1814 #ifdef CONFIG_ALTIVEC 1815 out: 1816 #endif 1817 vcpu_put(vcpu); 1818 return r; 1819 } 1820 1821 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 1822 { 1823 if (irq->irq == KVM_INTERRUPT_UNSET) { 1824 kvmppc_core_dequeue_external(vcpu); 1825 return 0; 1826 } 1827 1828 kvmppc_core_queue_external(vcpu, irq); 1829 1830 kvm_vcpu_kick(vcpu); 1831 1832 return 0; 1833 } 1834 1835 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 1836 struct kvm_enable_cap *cap) 1837 { 1838 int r; 1839 1840 if (cap->flags) 1841 return -EINVAL; 1842 1843 switch (cap->cap) { 1844 case KVM_CAP_PPC_OSI: 1845 r = 0; 1846 vcpu->arch.osi_enabled = true; 1847 break; 1848 case KVM_CAP_PPC_PAPR: 1849 r = 0; 1850 vcpu->arch.papr_enabled = true; 1851 break; 1852 case KVM_CAP_PPC_EPR: 1853 r = 0; 1854 if (cap->args[0]) 1855 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; 1856 else 1857 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; 1858 break; 1859 #ifdef CONFIG_BOOKE 1860 case KVM_CAP_PPC_BOOKE_WATCHDOG: 1861 r = 0; 1862 vcpu->arch.watchdog_enabled = true; 1863 break; 1864 #endif 1865 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 1866 case KVM_CAP_SW_TLB: { 1867 struct kvm_config_tlb cfg; 1868 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; 1869 1870 r = -EFAULT; 1871 if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) 1872 break; 1873 1874 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); 1875 break; 1876 } 1877 #endif 1878 #ifdef CONFIG_KVM_MPIC 1879 case KVM_CAP_IRQ_MPIC: { 1880 struct fd f; 1881 struct kvm_device *dev; 1882 1883 r = -EBADF; 1884 f = fdget(cap->args[0]); 1885 if (!f.file) 1886 break; 1887 1888 r = -EPERM; 1889 dev = kvm_device_from_filp(f.file); 1890 if (dev) 1891 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); 1892 1893 fdput(f); 1894 break; 1895 } 1896 #endif 1897 #ifdef CONFIG_KVM_XICS 1898 case KVM_CAP_IRQ_XICS: { 1899 struct fd f; 1900 struct kvm_device *dev; 1901 1902 r = -EBADF; 1903 f = fdget(cap->args[0]); 1904 if (!f.file) 1905 break; 1906 1907 r = -EPERM; 1908 dev = kvm_device_from_filp(f.file); 1909 if (dev) { 1910 if (xive_enabled()) 1911 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); 1912 else 1913 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); 1914 } 1915 1916 fdput(f); 1917 break; 1918 } 1919 #endif /* CONFIG_KVM_XICS */ 1920 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 1921 case KVM_CAP_PPC_FWNMI: 1922 r = -EINVAL; 1923 if (!is_kvmppc_hv_enabled(vcpu->kvm)) 1924 break; 1925 r = 0; 1926 vcpu->kvm->arch.fwnmi_enabled = true; 1927 break; 1928 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ 1929 default: 1930 r = -EINVAL; 1931 break; 1932 } 1933 1934 if (!r) 1935 r = kvmppc_sanity_check(vcpu); 1936 1937 return r; 1938 } 1939 1940 bool kvm_arch_intc_initialized(struct kvm *kvm) 1941 { 1942 #ifdef CONFIG_KVM_MPIC 1943 if (kvm->arch.mpic) 1944 return true; 1945 #endif 1946 #ifdef CONFIG_KVM_XICS 1947 if (kvm->arch.xics || kvm->arch.xive) 1948 return true; 1949 #endif 1950 return false; 1951 } 1952 1953 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 1954 struct kvm_mp_state *mp_state) 1955 { 1956 return -EINVAL; 1957 } 1958 1959 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 1960 struct kvm_mp_state *mp_state) 1961 { 1962 return -EINVAL; 1963 } 1964 1965 long kvm_arch_vcpu_async_ioctl(struct file *filp, 1966 unsigned int ioctl, unsigned long arg) 1967 { 1968 struct kvm_vcpu *vcpu = filp->private_data; 1969 void __user *argp = (void __user *)arg; 1970 1971 if 
(ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		vcpu_load(vcpu);
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		vcpu_load(vcpu);
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4,
				kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);