/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; this function hard-disables them itself.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

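/*
 * The shared (magic) page is kept in the guest's current endianness.
 * When KVM_HC_PPC_MAP_MAGIC_PAGE below detects that the guest has
 * switched endianness, every field is byte-swapped to match.
 */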
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

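/*
 * KVM paravirtual hypercall handler.  The hypercall number arrives in
 * r11 and up to four arguments in r3-r6 (truncated to 32 bits when the
 * guest is not in 64-bit mode); the return code goes back in r3 via
 * the caller and a second return value in r4.
 */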
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

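/*
 * Report which optional capabilities this host supports.  Several of
 * the answers differ between HV and PR KVM, so they depend on which
 * backend is currently available.
 */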
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;
	/* FIXME!!
	 * Should some of this be vm ioctl ? is it possible now ?
	 */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_core;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = hv_enabled;
		/* PPC970 requires an RMA */
		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		if (hv_enabled)
			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		else
			r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

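/*
 * Completion helpers for accesses that had to go out to userspace:
 * once userspace has satisfied the DCR or MMIO read and called
 * KVM_RUN again, the value from the run struct is copied into the
 * register the guest instruction targeted (vcpu->arch.io_gpr).
 */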
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

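/*
 * Set up an MMIO load on behalf of the guest.  If an in-kernel device
 * registered on KVM_MMIO_BUS claims the address, the access completes
 * right away; otherwise run->mmio is filled in and the caller exits to
 * userspace with KVM_EXIT_MMIO.
 */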
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

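/*
 * Main entry point for the KVM_RUN ioctl.  Before (re)entering the
 * guest, finish whatever the previous exit asked userspace to do:
 * a pending MMIO or DCR load, OSI or PAPR hypercall results, or an
 * EPR value.
 */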
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

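/*
 * Per-vcpu capability toggles.  Userspace uses KVM_ENABLE_CAP on the
 * vcpu fd to switch on optional behaviour (OSI hypercalls, PAPR mode,
 * the booke watchdog, ...); every successful path ends in
 * kvmppc_sanity_check() so impossible combinations are rejected.
 */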
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

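/*
 * Simple bitmap allocator for hardware LPID (logical partition ID)
 * values.  kvmppc_init_lpid() records how many LPIDs the platform
 * provides, kvmppc_alloc_lpid() hands out a free one, and
 * kvmppc_claim_lpid() marks a specific value as taken so it is never
 * allocated to a guest.
 */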
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{

}