1 /* 2 * This program is free software; you can redistribute it and/or modify 3 * it under the terms of the GNU General Public License, version 2, as 4 * published by the Free Software Foundation. 5 * 6 * This program is distributed in the hope that it will be useful, 7 * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 * GNU General Public License for more details. 10 * 11 * You should have received a copy of the GNU General Public License 12 * along with this program; if not, write to the Free Software 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 * 15 * Copyright IBM Corp. 2007 16 * 17 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> 19 */ 20 21 #include <linux/errno.h> 22 #include <linux/err.h> 23 #include <linux/kvm_host.h> 24 #include <linux/vmalloc.h> 25 #include <linux/hrtimer.h> 26 #include <linux/fs.h> 27 #include <linux/slab.h> 28 #include <asm/cputable.h> 29 #include <asm/uaccess.h> 30 #include <asm/kvm_ppc.h> 31 #include <asm/tlbflush.h> 32 #include <asm/cputhreads.h> 33 #include <asm/irqflags.h> 34 #include "timing.h" 35 #include "../mm/mmu_decl.h" 36 37 #define CREATE_TRACE_POINTS 38 #include "trace.h" 39 40 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 41 { 42 return !!(v->arch.pending_exceptions) || 43 v->requests; 44 } 45 46 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 47 { 48 return 1; 49 } 50 51 #ifndef CONFIG_KVM_BOOK3S_64_HV 52 /* 53 * Common checks before entering the guest world. Call with interrupts 54 * disabled. 55 * 56 * returns: 57 * 58 * == 1 if we're ready to go into guest state 59 * <= 0 if we need to go back to the host with return value 60 */ 61 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) 62 { 63 int r = 1; 64 65 WARN_ON_ONCE(!irqs_disabled()); 66 while (true) { 67 if (need_resched()) { 68 local_irq_enable(); 69 cond_resched(); 70 local_irq_disable(); 71 continue; 72 } 73 74 if (signal_pending(current)) { 75 kvmppc_account_exit(vcpu, SIGNAL_EXITS); 76 vcpu->run->exit_reason = KVM_EXIT_INTR; 77 r = -EINTR; 78 break; 79 } 80 81 vcpu->mode = IN_GUEST_MODE; 82 83 /* 84 * Reading vcpu->requests must happen after setting vcpu->mode, 85 * so we don't miss a request because the requester sees 86 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests 87 * before next entering the guest (and thus doesn't IPI). 88 */ 89 smp_mb(); 90 91 if (vcpu->requests) { 92 /* Make sure we process requests preemptable */ 93 local_irq_enable(); 94 trace_kvm_check_requests(vcpu); 95 r = kvmppc_core_check_requests(vcpu); 96 local_irq_disable(); 97 if (r > 0) 98 continue; 99 break; 100 } 101 102 if (kvmppc_core_prepare_to_enter(vcpu)) { 103 /* interrupts got enabled in between, so we 104 are back at square 1 */ 105 continue; 106 } 107 108 #ifdef CONFIG_PPC64 109 /* lazy EE magic */ 110 hard_irq_disable(); 111 if (lazy_irq_pending()) { 112 /* Got an interrupt in between, try again */ 113 local_irq_enable(); 114 local_irq_disable(); 115 kvm_guest_exit(); 116 continue; 117 } 118 119 trace_hardirqs_on(); 120 #endif 121 122 kvm_guest_enter(); 123 break; 124 } 125 126 return r; 127 } 128 #endif /* CONFIG_KVM_BOOK3S_64_HV */ 129 130 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 131 { 132 int nr = kvmppc_get_gpr(vcpu, 11); 133 int r; 134 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); 135 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); 136 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); 137 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); 138 unsigned long r2 = 0; 139 140 if (!(vcpu->arch.shared->msr & MSR_SF)) { 141 /* 32 bit mode */ 142 param1 &= 0xffffffff; 143 param2 &= 0xffffffff; 144 param3 &= 0xffffffff; 145 param4 &= 0xffffffff; 146 } 147 148 switch (nr) { 149 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): 150 { 151 vcpu->arch.magic_page_pa = param1; 152 vcpu->arch.magic_page_ea = param2; 153 154 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; 155 156 r = EV_SUCCESS; 157 break; 158 } 159 case KVM_HCALL_TOKEN(KVM_HC_FEATURES): 160 r = EV_SUCCESS; 161 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) 162 /* XXX Missing magic page on 44x */ 163 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 164 #endif 165 166 /* Second return value is in r4 */ 167 break; 168 case EV_HCALL_TOKEN(EV_IDLE): 169 r = EV_SUCCESS; 170 kvm_vcpu_block(vcpu); 171 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 172 break; 173 default: 174 r = EV_UNIMPLEMENTED; 175 break; 176 } 177 178 kvmppc_set_gpr(vcpu, 4, r2); 179 180 return r; 181 } 182 183 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) 184 { 185 int r = false; 186 187 /* We have to know what CPU to virtualize */ 188 if (!vcpu->arch.pvr) 189 goto out; 190 191 /* PAPR only works with book3s_64 */ 192 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) 193 goto out; 194 195 #ifdef CONFIG_KVM_BOOK3S_64_HV 196 /* HV KVM can only do PAPR mode for now */ 197 if (!vcpu->arch.papr_enabled) 198 goto out; 199 #endif 200 201 #ifdef CONFIG_KVM_BOOKE_HV 202 if (!cpu_has_feature(CPU_FTR_EMB_HV)) 203 goto out; 204 #endif 205 206 r = true; 207 208 out: 209 vcpu->arch.sane = r; 210 return r ? 0 : -EINVAL; 211 } 212 213 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) 214 { 215 enum emulation_result er; 216 int r; 217 218 er = kvmppc_emulate_instruction(run, vcpu); 219 switch (er) { 220 case EMULATE_DONE: 221 /* Future optimization: only reload non-volatiles if they were 222 * actually modified. */ 223 r = RESUME_GUEST_NV; 224 break; 225 case EMULATE_DO_MMIO: 226 run->exit_reason = KVM_EXIT_MMIO; 227 /* We must reload nonvolatiles because "update" load/store 228 * instructions modify register state. */ 229 /* Future optimization: only reload non-volatiles if they were 230 * actually modified. */ 231 r = RESUME_HOST_NV; 232 break; 233 case EMULATE_FAIL: 234 /* XXX Deliver Program interrupt to guest. */ 235 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, 236 kvmppc_get_last_inst(vcpu)); 237 r = RESUME_HOST; 238 break; 239 default: 240 BUG(); 241 } 242 243 return r; 244 } 245 246 int kvm_arch_hardware_enable(void *garbage) 247 { 248 return 0; 249 } 250 251 void kvm_arch_hardware_disable(void *garbage) 252 { 253 } 254 255 int kvm_arch_hardware_setup(void) 256 { 257 return 0; 258 } 259 260 void kvm_arch_hardware_unsetup(void) 261 { 262 } 263 264 void kvm_arch_check_processor_compat(void *rtn) 265 { 266 *(int *)rtn = kvmppc_core_check_processor_compat(); 267 } 268 269 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 270 { 271 if (type) 272 return -EINVAL; 273 274 return kvmppc_core_init_vm(kvm); 275 } 276 277 void kvm_arch_destroy_vm(struct kvm *kvm) 278 { 279 unsigned int i; 280 struct kvm_vcpu *vcpu; 281 282 kvm_for_each_vcpu(i, vcpu, kvm) 283 kvm_arch_vcpu_free(vcpu); 284 285 mutex_lock(&kvm->lock); 286 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 287 kvm->vcpus[i] = NULL; 288 289 atomic_set(&kvm->online_vcpus, 0); 290 291 kvmppc_core_destroy_vm(kvm); 292 293 mutex_unlock(&kvm->lock); 294 } 295 296 void kvm_arch_sync_events(struct kvm *kvm) 297 { 298 } 299 300 int kvm_dev_ioctl_check_extension(long ext) 301 { 302 int r; 303 304 switch (ext) { 305 #ifdef CONFIG_BOOKE 306 case KVM_CAP_PPC_BOOKE_SREGS: 307 case KVM_CAP_PPC_BOOKE_WATCHDOG: 308 #else 309 case KVM_CAP_PPC_SEGSTATE: 310 case KVM_CAP_PPC_HIOR: 311 case KVM_CAP_PPC_PAPR: 312 #endif 313 case KVM_CAP_PPC_UNSET_IRQ: 314 case KVM_CAP_PPC_IRQ_LEVEL: 315 case KVM_CAP_ENABLE_CAP: 316 case KVM_CAP_ONE_REG: 317 case KVM_CAP_IOEVENTFD: 318 r = 1; 319 break; 320 #ifndef CONFIG_KVM_BOOK3S_64_HV 321 case KVM_CAP_PPC_PAIRED_SINGLES: 322 case KVM_CAP_PPC_OSI: 323 case KVM_CAP_PPC_GET_PVINFO: 324 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 325 case KVM_CAP_SW_TLB: 326 #endif 327 r = 1; 328 break; 329 case KVM_CAP_COALESCED_MMIO: 330 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 331 break; 332 #endif 333 #ifdef CONFIG_PPC_BOOK3S_64 334 case KVM_CAP_SPAPR_TCE: 335 case KVM_CAP_PPC_ALLOC_HTAB: 336 r = 1; 337 break; 338 #endif /* CONFIG_PPC_BOOK3S_64 */ 339 #ifdef CONFIG_KVM_BOOK3S_64_HV 340 case KVM_CAP_PPC_SMT: 341 r = threads_per_core; 342 break; 343 case KVM_CAP_PPC_RMA: 344 r = 1; 345 /* PPC970 requires an RMA */ 346 if (cpu_has_feature(CPU_FTR_ARCH_201)) 347 r = 2; 348 break; 349 #endif 350 case KVM_CAP_SYNC_MMU: 351 #ifdef CONFIG_KVM_BOOK3S_64_HV 352 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; 353 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) 354 r = 1; 355 #else 356 r = 0; 357 break; 358 #endif 359 #ifdef CONFIG_KVM_BOOK3S_64_HV 360 case KVM_CAP_PPC_HTAB_FD: 361 r = 1; 362 break; 363 #endif 364 break; 365 case KVM_CAP_NR_VCPUS: 366 /* 367 * Recommending a number of CPUs is somewhat arbitrary; we 368 * return the number of present CPUs for -HV (since a host 369 * will have secondary threads "offline"), and for other KVM 370 * implementations just count online CPUs. 371 */ 372 #ifdef CONFIG_KVM_BOOK3S_64_HV 373 r = num_present_cpus(); 374 #else 375 r = num_online_cpus(); 376 #endif 377 break; 378 case KVM_CAP_MAX_VCPUS: 379 r = KVM_MAX_VCPUS; 380 break; 381 #ifdef CONFIG_PPC_BOOK3S_64 382 case KVM_CAP_PPC_GET_SMMU_INFO: 383 r = 1; 384 break; 385 #endif 386 default: 387 r = 0; 388 break; 389 } 390 return r; 391 392 } 393 394 long kvm_arch_dev_ioctl(struct file *filp, 395 unsigned int ioctl, unsigned long arg) 396 { 397 return -EINVAL; 398 } 399 400 void kvm_arch_free_memslot(struct kvm_memory_slot *free, 401 struct kvm_memory_slot *dont) 402 { 403 kvmppc_core_free_memslot(free, dont); 404 } 405 406 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) 407 { 408 return kvmppc_core_create_memslot(slot, npages); 409 } 410 411 int kvm_arch_prepare_memory_region(struct kvm *kvm, 412 struct kvm_memory_slot *memslot, 413 struct kvm_memory_slot old, 414 struct kvm_userspace_memory_region *mem, 415 int user_alloc) 416 { 417 return kvmppc_core_prepare_memory_region(kvm, memslot, mem); 418 } 419 420 void kvm_arch_commit_memory_region(struct kvm *kvm, 421 struct kvm_userspace_memory_region *mem, 422 struct kvm_memory_slot old, 423 int user_alloc) 424 { 425 kvmppc_core_commit_memory_region(kvm, mem, old); 426 } 427 428 void kvm_arch_flush_shadow_all(struct kvm *kvm) 429 { 430 } 431 432 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 433 struct kvm_memory_slot *slot) 434 { 435 kvmppc_core_flush_memslot(kvm, slot); 436 } 437 438 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 439 { 440 struct kvm_vcpu *vcpu; 441 vcpu = kvmppc_core_vcpu_create(kvm, id); 442 if (!IS_ERR(vcpu)) { 443 vcpu->arch.wqp = &vcpu->wq; 444 kvmppc_create_vcpu_debugfs(vcpu, id); 445 } 446 return vcpu; 447 } 448 449 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 450 { 451 return 0; 452 } 453 454 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 455 { 456 /* Make sure we're not using the vcpu anymore */ 457 hrtimer_cancel(&vcpu->arch.dec_timer); 458 tasklet_kill(&vcpu->arch.tasklet); 459 460 kvmppc_remove_vcpu_debugfs(vcpu); 461 kvmppc_core_vcpu_free(vcpu); 462 } 463 464 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 465 { 466 kvm_arch_vcpu_free(vcpu); 467 } 468 469 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 470 { 471 return kvmppc_core_pending_dec(vcpu); 472 } 473 474 /* 475 * low level hrtimer wake routine. Because this runs in hardirq context 476 * we schedule a tasklet to do the real work. 477 */ 478 enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) 479 { 480 struct kvm_vcpu *vcpu; 481 482 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); 483 tasklet_schedule(&vcpu->arch.tasklet); 484 485 return HRTIMER_NORESTART; 486 } 487 488 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 489 { 490 int ret; 491 492 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 493 tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); 494 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; 495 vcpu->arch.dec_expires = ~(u64)0; 496 497 #ifdef CONFIG_KVM_EXIT_TIMING 498 mutex_init(&vcpu->arch.exit_timing_lock); 499 #endif 500 ret = kvmppc_subarch_vcpu_init(vcpu); 501 return ret; 502 } 503 504 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 505 { 506 kvmppc_mmu_destroy(vcpu); 507 kvmppc_subarch_vcpu_uninit(vcpu); 508 } 509 510 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 511 { 512 #ifdef CONFIG_BOOKE 513 /* 514 * vrsave (formerly usprg0) isn't used by Linux, but may 515 * be used by the guest. 516 * 517 * On non-booke this is associated with Altivec and 518 * is handled by code in book3s.c. 519 */ 520 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); 521 #endif 522 kvmppc_core_vcpu_load(vcpu, cpu); 523 } 524 525 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 526 { 527 kvmppc_core_vcpu_put(vcpu); 528 #ifdef CONFIG_BOOKE 529 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); 530 #endif 531 } 532 533 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 534 struct kvm_guest_debug *dbg) 535 { 536 return -EINVAL; 537 } 538 539 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, 540 struct kvm_run *run) 541 { 542 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data); 543 } 544 545 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 546 struct kvm_run *run) 547 { 548 u64 uninitialized_var(gpr); 549 550 if (run->mmio.len > sizeof(gpr)) { 551 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); 552 return; 553 } 554 555 if (vcpu->arch.mmio_is_bigendian) { 556 switch (run->mmio.len) { 557 case 8: gpr = *(u64 *)run->mmio.data; break; 558 case 4: gpr = *(u32 *)run->mmio.data; break; 559 case 2: gpr = *(u16 *)run->mmio.data; break; 560 case 1: gpr = *(u8 *)run->mmio.data; break; 561 } 562 } else { 563 /* Convert BE data from userland back to LE. */ 564 switch (run->mmio.len) { 565 case 4: gpr = ld_le32((u32 *)run->mmio.data); break; 566 case 2: gpr = ld_le16((u16 *)run->mmio.data); break; 567 case 1: gpr = *(u8 *)run->mmio.data; break; 568 } 569 } 570 571 if (vcpu->arch.mmio_sign_extend) { 572 switch (run->mmio.len) { 573 #ifdef CONFIG_PPC64 574 case 4: 575 gpr = (s64)(s32)gpr; 576 break; 577 #endif 578 case 2: 579 gpr = (s64)(s16)gpr; 580 break; 581 case 1: 582 gpr = (s64)(s8)gpr; 583 break; 584 } 585 } 586 587 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 588 589 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { 590 case KVM_MMIO_REG_GPR: 591 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 592 break; 593 case KVM_MMIO_REG_FPR: 594 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 595 break; 596 #ifdef CONFIG_PPC_BOOK3S 597 case KVM_MMIO_REG_QPR: 598 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 599 break; 600 case KVM_MMIO_REG_FQPR: 601 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 602 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 603 break; 604 #endif 605 default: 606 BUG(); 607 } 608 } 609 610 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 611 unsigned int rt, unsigned int bytes, int is_bigendian) 612 { 613 if (bytes > sizeof(run->mmio.data)) { 614 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 615 run->mmio.len); 616 } 617 618 run->mmio.phys_addr = vcpu->arch.paddr_accessed; 619 run->mmio.len = bytes; 620 run->mmio.is_write = 0; 621 622 vcpu->arch.io_gpr = rt; 623 vcpu->arch.mmio_is_bigendian = is_bigendian; 624 vcpu->mmio_needed = 1; 625 vcpu->mmio_is_write = 0; 626 vcpu->arch.mmio_sign_extend = 0; 627 628 if (!kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr, 629 bytes, &run->mmio.data)) { 630 kvmppc_complete_mmio_load(vcpu, run); 631 vcpu->mmio_needed = 0; 632 return EMULATE_DONE; 633 } 634 635 return EMULATE_DO_MMIO; 636 } 637 638 /* Same as above, but sign extends */ 639 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 640 unsigned int rt, unsigned int bytes, int is_bigendian) 641 { 642 int r; 643 644 vcpu->arch.mmio_sign_extend = 1; 645 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); 646 647 return r; 648 } 649 650 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 651 u64 val, unsigned int bytes, int is_bigendian) 652 { 653 void *data = run->mmio.data; 654 655 if (bytes > sizeof(run->mmio.data)) { 656 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 657 run->mmio.len); 658 } 659 660 run->mmio.phys_addr = vcpu->arch.paddr_accessed; 661 run->mmio.len = bytes; 662 run->mmio.is_write = 1; 663 vcpu->mmio_needed = 1; 664 vcpu->mmio_is_write = 1; 665 666 /* Store the value at the lowest bytes in 'data'. */ 667 if (is_bigendian) { 668 switch (bytes) { 669 case 8: *(u64 *)data = val; break; 670 case 4: *(u32 *)data = val; break; 671 case 2: *(u16 *)data = val; break; 672 case 1: *(u8 *)data = val; break; 673 } 674 } else { 675 /* Store LE value into 'data'. */ 676 switch (bytes) { 677 case 4: st_le32(data, val); break; 678 case 2: st_le16(data, val); break; 679 case 1: *(u8 *)data = val; break; 680 } 681 } 682 683 if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr, 684 bytes, &run->mmio.data)) { 685 kvmppc_complete_mmio_load(vcpu, run); 686 vcpu->mmio_needed = 0; 687 return EMULATE_DONE; 688 } 689 690 return EMULATE_DO_MMIO; 691 } 692 693 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 694 { 695 int r; 696 sigset_t sigsaved; 697 698 if (vcpu->sigset_active) 699 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 700 701 if (vcpu->mmio_needed) { 702 if (!vcpu->mmio_is_write) 703 kvmppc_complete_mmio_load(vcpu, run); 704 vcpu->mmio_needed = 0; 705 } else if (vcpu->arch.dcr_needed) { 706 if (!vcpu->arch.dcr_is_write) 707 kvmppc_complete_dcr_load(vcpu, run); 708 vcpu->arch.dcr_needed = 0; 709 } else if (vcpu->arch.osi_needed) { 710 u64 *gprs = run->osi.gprs; 711 int i; 712 713 for (i = 0; i < 32; i++) 714 kvmppc_set_gpr(vcpu, i, gprs[i]); 715 vcpu->arch.osi_needed = 0; 716 } else if (vcpu->arch.hcall_needed) { 717 int i; 718 719 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); 720 for (i = 0; i < 9; ++i) 721 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); 722 vcpu->arch.hcall_needed = 0; 723 } 724 725 r = kvmppc_vcpu_run(run, vcpu); 726 727 if (vcpu->sigset_active) 728 sigprocmask(SIG_SETMASK, &sigsaved, NULL); 729 730 return r; 731 } 732 733 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 734 { 735 if (irq->irq == KVM_INTERRUPT_UNSET) { 736 kvmppc_core_dequeue_external(vcpu, irq); 737 return 0; 738 } 739 740 kvmppc_core_queue_external(vcpu, irq); 741 742 kvm_vcpu_kick(vcpu); 743 744 return 0; 745 } 746 747 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 748 struct kvm_enable_cap *cap) 749 { 750 int r; 751 752 if (cap->flags) 753 return -EINVAL; 754 755 switch (cap->cap) { 756 case KVM_CAP_PPC_OSI: 757 r = 0; 758 vcpu->arch.osi_enabled = true; 759 break; 760 case KVM_CAP_PPC_PAPR: 761 r = 0; 762 vcpu->arch.papr_enabled = true; 763 break; 764 #ifdef CONFIG_BOOKE 765 case KVM_CAP_PPC_BOOKE_WATCHDOG: 766 r = 0; 767 vcpu->arch.watchdog_enabled = true; 768 break; 769 #endif 770 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 771 case KVM_CAP_SW_TLB: { 772 struct kvm_config_tlb cfg; 773 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; 774 775 r = -EFAULT; 776 if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) 777 break; 778 779 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); 780 break; 781 } 782 #endif 783 default: 784 r = -EINVAL; 785 break; 786 } 787 788 if (!r) 789 r = kvmppc_sanity_check(vcpu); 790 791 return r; 792 } 793 794 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 795 struct kvm_mp_state *mp_state) 796 { 797 return -EINVAL; 798 } 799 800 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 801 struct kvm_mp_state *mp_state) 802 { 803 return -EINVAL; 804 } 805 806 long kvm_arch_vcpu_ioctl(struct file *filp, 807 unsigned int ioctl, unsigned long arg) 808 { 809 struct kvm_vcpu *vcpu = filp->private_data; 810 void __user *argp = (void __user *)arg; 811 long r; 812 813 switch (ioctl) { 814 case KVM_INTERRUPT: { 815 struct kvm_interrupt irq; 816 r = -EFAULT; 817 if (copy_from_user(&irq, argp, sizeof(irq))) 818 goto out; 819 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 820 goto out; 821 } 822 823 case KVM_ENABLE_CAP: 824 { 825 struct kvm_enable_cap cap; 826 r = -EFAULT; 827 if (copy_from_user(&cap, argp, sizeof(cap))) 828 goto out; 829 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 830 break; 831 } 832 833 case KVM_SET_ONE_REG: 834 case KVM_GET_ONE_REG: 835 { 836 struct kvm_one_reg reg; 837 r = -EFAULT; 838 if (copy_from_user(®, argp, sizeof(reg))) 839 goto out; 840 if (ioctl == KVM_SET_ONE_REG) 841 r = kvm_vcpu_ioctl_set_one_reg(vcpu, ®); 842 else 843 r = kvm_vcpu_ioctl_get_one_reg(vcpu, ®); 844 break; 845 } 846 847 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 848 case KVM_DIRTY_TLB: { 849 struct kvm_dirty_tlb dirty; 850 r = -EFAULT; 851 if (copy_from_user(&dirty, argp, sizeof(dirty))) 852 goto out; 853 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); 854 break; 855 } 856 #endif 857 default: 858 r = -EINVAL; 859 } 860 861 out: 862 return r; 863 } 864 865 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 866 { 867 return VM_FAULT_SIGBUS; 868 } 869 870 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) 871 { 872 u32 inst_nop = 0x60000000; 873 #ifdef CONFIG_KVM_BOOKE_HV 874 u32 inst_sc1 = 0x44000022; 875 pvinfo->hcall[0] = inst_sc1; 876 pvinfo->hcall[1] = inst_nop; 877 pvinfo->hcall[2] = inst_nop; 878 pvinfo->hcall[3] = inst_nop; 879 #else 880 u32 inst_lis = 0x3c000000; 881 u32 inst_ori = 0x60000000; 882 u32 inst_sc = 0x44000002; 883 u32 inst_imm_mask = 0xffff; 884 885 /* 886 * The hypercall to get into KVM from within guest context is as 887 * follows: 888 * 889 * lis r0, r0, KVM_SC_MAGIC_R0@h 890 * ori r0, KVM_SC_MAGIC_R0@l 891 * sc 892 * nop 893 */ 894 pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask); 895 pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); 896 pvinfo->hcall[2] = inst_sc; 897 pvinfo->hcall[3] = inst_nop; 898 #endif 899 900 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; 901 902 return 0; 903 } 904 905 long kvm_arch_vm_ioctl(struct file *filp, 906 unsigned int ioctl, unsigned long arg) 907 { 908 void __user *argp = (void __user *)arg; 909 long r; 910 911 switch (ioctl) { 912 case KVM_PPC_GET_PVINFO: { 913 struct kvm_ppc_pvinfo pvinfo; 914 memset(&pvinfo, 0, sizeof(pvinfo)); 915 r = kvm_vm_ioctl_get_pvinfo(&pvinfo); 916 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { 917 r = -EFAULT; 918 goto out; 919 } 920 921 break; 922 } 923 #ifdef CONFIG_PPC_BOOK3S_64 924 case KVM_CREATE_SPAPR_TCE: { 925 struct kvm_create_spapr_tce create_tce; 926 struct kvm *kvm = filp->private_data; 927 928 r = -EFAULT; 929 if (copy_from_user(&create_tce, argp, sizeof(create_tce))) 930 goto out; 931 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); 932 goto out; 933 } 934 #endif /* CONFIG_PPC_BOOK3S_64 */ 935 936 #ifdef CONFIG_KVM_BOOK3S_64_HV 937 case KVM_ALLOCATE_RMA: { 938 struct kvm *kvm = filp->private_data; 939 struct kvm_allocate_rma rma; 940 941 r = kvm_vm_ioctl_allocate_rma(kvm, &rma); 942 if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) 943 r = -EFAULT; 944 break; 945 } 946 947 case KVM_PPC_ALLOCATE_HTAB: { 948 struct kvm *kvm = filp->private_data; 949 u32 htab_order; 950 951 r = -EFAULT; 952 if (get_user(htab_order, (u32 __user *)argp)) 953 break; 954 r = kvmppc_alloc_reset_hpt(kvm, &htab_order); 955 if (r) 956 break; 957 r = -EFAULT; 958 if (put_user(htab_order, (u32 __user *)argp)) 959 break; 960 r = 0; 961 break; 962 } 963 964 case KVM_PPC_GET_HTAB_FD: { 965 struct kvm *kvm = filp->private_data; 966 struct kvm_get_htab_fd ghf; 967 968 r = -EFAULT; 969 if (copy_from_user(&ghf, argp, sizeof(ghf))) 970 break; 971 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); 972 break; 973 } 974 #endif /* CONFIG_KVM_BOOK3S_64_HV */ 975 976 #ifdef CONFIG_PPC_BOOK3S_64 977 case KVM_PPC_GET_SMMU_INFO: { 978 struct kvm *kvm = filp->private_data; 979 struct kvm_ppc_smmu_info info; 980 981 memset(&info, 0, sizeof(info)); 982 r = kvm_vm_ioctl_get_smmu_info(kvm, &info); 983 if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) 984 r = -EFAULT; 985 break; 986 } 987 #endif /* CONFIG_PPC_BOOK3S_64 */ 988 default: 989 r = -ENOTTY; 990 } 991 992 out: 993 return r; 994 } 995 996 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; 997 static unsigned long nr_lpids; 998 999 long kvmppc_alloc_lpid(void) 1000 { 1001 long lpid; 1002 1003 do { 1004 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); 1005 if (lpid >= nr_lpids) { 1006 pr_err("%s: No LPIDs free\n", __func__); 1007 return -ENOMEM; 1008 } 1009 } while (test_and_set_bit(lpid, lpid_inuse)); 1010 1011 return lpid; 1012 } 1013 1014 void kvmppc_claim_lpid(long lpid) 1015 { 1016 set_bit(lpid, lpid_inuse); 1017 } 1018 1019 void kvmppc_free_lpid(long lpid) 1020 { 1021 clear_bit(lpid, lpid_inuse); 1022 } 1023 1024 void kvmppc_init_lpid(unsigned long nr_lpids_param) 1025 { 1026 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); 1027 memset(lpid_inuse, 0, sizeof(lpid_inuse)); 1028 } 1029 1030 int kvm_arch_init(void *opaque) 1031 { 1032 return 0; 1033 } 1034 1035 void kvm_arch_exit(void) 1036 { 1037 } 1038