/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; this function hard-disables them itself.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
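		 * The smp_mb() below orders the store to vcpu->mode before the
		 * load of vcpu->requests, matching the requester, which sets
		 * the request bit before it looks at vcpu->mode.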
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest.
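		 * For now the failing instruction is only logged and the
		 * exit is handed back to the host.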
		 */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;
	/* FIXME!!
	 * Should some of this be vm ioctl ? is it possible now ?
	 */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_core;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = hv_enabled;
		/* PPC970 requires an RMA */
		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		if (hv_enabled)
			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		else
			r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
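		 * This is only a recommendation to userspace; the hard limit
		 * is reported separately by KVM_CAP_MAX_VCPUS below.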
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
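 * The real work is done by kvmppc_decrementer_func(), wired up as the
 * tasklet handler in kvm_arch_vcpu_init() below.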
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE.
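		 * ld_le32()/ld_le16() byte-reverse as they load; note there
		 * is no 8-byte case on this path.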
		 */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'.
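	 * As on the load path, st_le32()/st_le16() byte-reverse on the way
	 * out, and there is no 8-byte little-endian case.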
	 */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq,
					irq_event->level, line_status);
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{

}