/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}
#endif

		kvm_guest_enter();
		break;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

/*
 * Handle a paravirtual hypercall from the guest: the hypercall number
 * arrives in r11 and parameters in r3-r6.  The second return value is
 * passed back to the guest in r4; the primary status is this function's
 * return value.
 */
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified.
		 */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;
	/* FIXME!!
	 * Should some of this be vm ioctl ? is it possible now ?
	 */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
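	/*
	 * The capabilities below fall into three groups: always available,
	 * PR-only (reported as unsupported when the HV backend is loaded),
	 * and Book3S-64/HV-specific extensions.
	 */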

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_core;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = hv_enabled;
		/* PPC970 requires an RMA */
		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		if (hv_enabled)
			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		else
			r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE.
		 */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int idx, ret;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;
	int idx, ret;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'.
		 */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
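	/*
	 * KVM_PPC_GET_PVINFO is handled generically; the remaining cases are
	 * only built on Book3S-64, where unhandled ioctls are forwarded to
	 * the active backend's arch_vm_ioctl().  Elsewhere they fail with
	 * -ENOTTY.
	 */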
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{

}