/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}
#endif

		kvm_guest_enter();
		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_HTAB_FD:
		r = 1;
		break;
#endif
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(free, dont);
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return kvmppc_core_create_memslot(slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
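 *
 * The tasklet runs kvmppc_decrementer_func(), which queues the guest
 * decrementer interrupt and kicks the vcpu.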
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int idx, ret;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;
	int idx, ret;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm_allocate_rma rma;
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}

	case KVM_PPC_GET_HTAB_FD: {
		struct kvm_get_htab_fd ghf;

		r = -EFAULT;
		if (copy_from_user(&ghf, argp, sizeof(ghf)))
			break;
		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}