/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		smp_mb();
		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}

		trace_hardirqs_on();
#endif

		kvm_guest_enter();

		/* Going into guest context! Yay! */
		vcpu->mode = IN_GUEST_MODE;
		smp_wmb();

		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * Low-level hrtimer wake routine. Because this runs in hardirq context,
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		/* Report the requested length; run->mmio.len isn't set yet. */
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		/* Report the requested length; run->mmio.len isn't set yet. */
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		struct kvm *kvm = filp->private_data;
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
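
/*
 * Illustrative sketch (not part of the original file): one way a userspace
 * VMM might consume the KVM_PPC_GET_PVINFO ioctl implemented above to read
 * back the four-instruction hypercall sequence.  The helper name and the
 * already-open VM file descriptor are assumptions for the example; the block
 * is compiled out with #if 0 so it does not affect the kernel build.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_ppc_pvinfo(int vm_fd)
{
	struct kvm_ppc_pvinfo pvinfo;
	int i;

	memset(&pvinfo, 0, sizeof(pvinfo));
	/* KVM_PPC_GET_PVINFO is a VM ioctl; vm_fd comes from KVM_CREATE_VM. */
	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
		return -1;

	/* The instructions a guest patches in to issue a KVM hypercall. */
	for (i = 0; i < 4; i++)
		printf("hcall[%d] = 0x%08x\n", i, pvinfo.hcall[i]);

	return 0;
}
#endif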