1 /* 2 * This program is free software; you can redistribute it and/or modify 3 * it under the terms of the GNU General Public License, version 2, as 4 * published by the Free Software Foundation. 5 * 6 * This program is distributed in the hope that it will be useful, 7 * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 * GNU General Public License for more details. 10 * 11 * You should have received a copy of the GNU General Public License 12 * along with this program; if not, write to the Free Software 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 * 15 * Copyright IBM Corp. 2007 16 * 17 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> 19 */ 20 21 #include <linux/errno.h> 22 #include <linux/err.h> 23 #include <linux/kvm_host.h> 24 #include <linux/vmalloc.h> 25 #include <linux/hrtimer.h> 26 #include <linux/fs.h> 27 #include <linux/slab.h> 28 #include <asm/cputable.h> 29 #include <asm/uaccess.h> 30 #include <asm/kvm_ppc.h> 31 #include <asm/tlbflush.h> 32 #include <asm/cputhreads.h> 33 #include <asm/irqflags.h> 34 #include "timing.h" 35 #include "../mm/mmu_decl.h" 36 37 #define CREATE_TRACE_POINTS 38 #include "trace.h" 39 40 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 41 { 42 return !!(v->arch.pending_exceptions) || 43 v->requests; 44 } 45 46 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 47 { 48 return 1; 49 } 50 51 #ifndef CONFIG_KVM_BOOK3S_64_HV 52 /* 53 * Common checks before entering the guest world. Call with interrupts 54 * disabled. 55 * 56 * returns: 57 * 58 * == 1 if we're ready to go into guest state 59 * <= 0 if we need to go back to the host with return value 60 */ 61 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) 62 { 63 int r = 1; 64 65 WARN_ON_ONCE(!irqs_disabled()); 66 while (true) { 67 if (need_resched()) { 68 local_irq_enable(); 69 cond_resched(); 70 local_irq_disable(); 71 continue; 72 } 73 74 if (signal_pending(current)) { 75 kvmppc_account_exit(vcpu, SIGNAL_EXITS); 76 vcpu->run->exit_reason = KVM_EXIT_INTR; 77 r = -EINTR; 78 break; 79 } 80 81 smp_mb(); 82 if (vcpu->requests) { 83 /* Make sure we process requests preemptable */ 84 local_irq_enable(); 85 trace_kvm_check_requests(vcpu); 86 r = kvmppc_core_check_requests(vcpu); 87 local_irq_disable(); 88 if (r > 0) 89 continue; 90 break; 91 } 92 93 if (kvmppc_core_prepare_to_enter(vcpu)) { 94 /* interrupts got enabled in between, so we 95 are back at square 1 */ 96 continue; 97 } 98 99 #ifdef CONFIG_PPC64 100 /* lazy EE magic */ 101 hard_irq_disable(); 102 if (lazy_irq_pending()) { 103 /* Got an interrupt in between, try again */ 104 local_irq_enable(); 105 local_irq_disable(); 106 kvm_guest_exit(); 107 continue; 108 } 109 110 trace_hardirqs_on(); 111 #endif 112 113 kvm_guest_enter(); 114 115 /* Going into guest context! Yay! */ 116 vcpu->mode = IN_GUEST_MODE; 117 smp_wmb(); 118 119 break; 120 } 121 122 return r; 123 } 124 #endif /* CONFIG_KVM_BOOK3S_64_HV */ 125 126 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 127 { 128 int nr = kvmppc_get_gpr(vcpu, 11); 129 int r; 130 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); 131 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); 132 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); 133 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); 134 unsigned long r2 = 0; 135 136 if (!(vcpu->arch.shared->msr & MSR_SF)) { 137 /* 32 bit mode */ 138 param1 &= 0xffffffff; 139 param2 &= 0xffffffff; 140 param3 &= 0xffffffff; 141 param4 &= 0xffffffff; 142 } 143 144 switch (nr) { 145 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): 146 { 147 vcpu->arch.magic_page_pa = param1; 148 vcpu->arch.magic_page_ea = param2; 149 150 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; 151 152 r = EV_SUCCESS; 153 break; 154 } 155 case KVM_HCALL_TOKEN(KVM_HC_FEATURES): 156 r = EV_SUCCESS; 157 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) 158 /* XXX Missing magic page on 44x */ 159 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 160 #endif 161 162 /* Second return value is in r4 */ 163 break; 164 case EV_HCALL_TOKEN(EV_IDLE): 165 r = EV_SUCCESS; 166 kvm_vcpu_block(vcpu); 167 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 168 break; 169 default: 170 r = EV_UNIMPLEMENTED; 171 break; 172 } 173 174 kvmppc_set_gpr(vcpu, 4, r2); 175 176 return r; 177 } 178 179 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) 180 { 181 int r = false; 182 183 /* We have to know what CPU to virtualize */ 184 if (!vcpu->arch.pvr) 185 goto out; 186 187 /* PAPR only works with book3s_64 */ 188 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) 189 goto out; 190 191 #ifdef CONFIG_KVM_BOOK3S_64_HV 192 /* HV KVM can only do PAPR mode for now */ 193 if (!vcpu->arch.papr_enabled) 194 goto out; 195 #endif 196 197 #ifdef CONFIG_KVM_BOOKE_HV 198 if (!cpu_has_feature(CPU_FTR_EMB_HV)) 199 goto out; 200 #endif 201 202 r = true; 203 204 out: 205 vcpu->arch.sane = r; 206 return r ? 0 : -EINVAL; 207 } 208 209 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) 210 { 211 enum emulation_result er; 212 int r; 213 214 er = kvmppc_emulate_instruction(run, vcpu); 215 switch (er) { 216 case EMULATE_DONE: 217 /* Future optimization: only reload non-volatiles if they were 218 * actually modified. */ 219 r = RESUME_GUEST_NV; 220 break; 221 case EMULATE_DO_MMIO: 222 run->exit_reason = KVM_EXIT_MMIO; 223 /* We must reload nonvolatiles because "update" load/store 224 * instructions modify register state. */ 225 /* Future optimization: only reload non-volatiles if they were 226 * actually modified. */ 227 r = RESUME_HOST_NV; 228 break; 229 case EMULATE_FAIL: 230 /* XXX Deliver Program interrupt to guest. */ 231 printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, 232 kvmppc_get_last_inst(vcpu)); 233 r = RESUME_HOST; 234 break; 235 default: 236 BUG(); 237 } 238 239 return r; 240 } 241 242 int kvm_arch_hardware_enable(void *garbage) 243 { 244 return 0; 245 } 246 247 void kvm_arch_hardware_disable(void *garbage) 248 { 249 } 250 251 int kvm_arch_hardware_setup(void) 252 { 253 return 0; 254 } 255 256 void kvm_arch_hardware_unsetup(void) 257 { 258 } 259 260 void kvm_arch_check_processor_compat(void *rtn) 261 { 262 *(int *)rtn = kvmppc_core_check_processor_compat(); 263 } 264 265 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 266 { 267 if (type) 268 return -EINVAL; 269 270 return kvmppc_core_init_vm(kvm); 271 } 272 273 void kvm_arch_destroy_vm(struct kvm *kvm) 274 { 275 unsigned int i; 276 struct kvm_vcpu *vcpu; 277 278 kvm_for_each_vcpu(i, vcpu, kvm) 279 kvm_arch_vcpu_free(vcpu); 280 281 mutex_lock(&kvm->lock); 282 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 283 kvm->vcpus[i] = NULL; 284 285 atomic_set(&kvm->online_vcpus, 0); 286 287 kvmppc_core_destroy_vm(kvm); 288 289 mutex_unlock(&kvm->lock); 290 } 291 292 void kvm_arch_sync_events(struct kvm *kvm) 293 { 294 } 295 296 int kvm_dev_ioctl_check_extension(long ext) 297 { 298 int r; 299 300 switch (ext) { 301 #ifdef CONFIG_BOOKE 302 case KVM_CAP_PPC_BOOKE_SREGS: 303 #else 304 case KVM_CAP_PPC_SEGSTATE: 305 case KVM_CAP_PPC_HIOR: 306 case KVM_CAP_PPC_PAPR: 307 #endif 308 case KVM_CAP_PPC_UNSET_IRQ: 309 case KVM_CAP_PPC_IRQ_LEVEL: 310 case KVM_CAP_ENABLE_CAP: 311 case KVM_CAP_ONE_REG: 312 r = 1; 313 break; 314 #ifndef CONFIG_KVM_BOOK3S_64_HV 315 case KVM_CAP_PPC_PAIRED_SINGLES: 316 case KVM_CAP_PPC_OSI: 317 case KVM_CAP_PPC_GET_PVINFO: 318 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 319 case KVM_CAP_SW_TLB: 320 #endif 321 r = 1; 322 break; 323 case KVM_CAP_COALESCED_MMIO: 324 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 325 break; 326 #endif 327 #ifdef CONFIG_PPC_BOOK3S_64 328 case KVM_CAP_SPAPR_TCE: 329 case KVM_CAP_PPC_ALLOC_HTAB: 330 r = 1; 331 break; 332 #endif /* CONFIG_PPC_BOOK3S_64 */ 333 #ifdef CONFIG_KVM_BOOK3S_64_HV 334 case KVM_CAP_PPC_SMT: 335 r = threads_per_core; 336 break; 337 case KVM_CAP_PPC_RMA: 338 r = 1; 339 /* PPC970 requires an RMA */ 340 if (cpu_has_feature(CPU_FTR_ARCH_201)) 341 r = 2; 342 break; 343 #endif 344 case KVM_CAP_SYNC_MMU: 345 #ifdef CONFIG_KVM_BOOK3S_64_HV 346 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; 347 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) 348 r = 1; 349 #else 350 r = 0; 351 #endif 352 break; 353 case KVM_CAP_NR_VCPUS: 354 /* 355 * Recommending a number of CPUs is somewhat arbitrary; we 356 * return the number of present CPUs for -HV (since a host 357 * will have secondary threads "offline"), and for other KVM 358 * implementations just count online CPUs. 359 */ 360 #ifdef CONFIG_KVM_BOOK3S_64_HV 361 r = num_present_cpus(); 362 #else 363 r = num_online_cpus(); 364 #endif 365 break; 366 case KVM_CAP_MAX_VCPUS: 367 r = KVM_MAX_VCPUS; 368 break; 369 #ifdef CONFIG_PPC_BOOK3S_64 370 case KVM_CAP_PPC_GET_SMMU_INFO: 371 r = 1; 372 break; 373 #endif 374 default: 375 r = 0; 376 break; 377 } 378 return r; 379 380 } 381 382 long kvm_arch_dev_ioctl(struct file *filp, 383 unsigned int ioctl, unsigned long arg) 384 { 385 return -EINVAL; 386 } 387 388 void kvm_arch_free_memslot(struct kvm_memory_slot *free, 389 struct kvm_memory_slot *dont) 390 { 391 if (!dont || free->arch.rmap != dont->arch.rmap) { 392 vfree(free->arch.rmap); 393 free->arch.rmap = NULL; 394 } 395 } 396 397 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) 398 { 399 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); 400 if (!slot->arch.rmap) 401 return -ENOMEM; 402 403 return 0; 404 } 405 406 int kvm_arch_prepare_memory_region(struct kvm *kvm, 407 struct kvm_memory_slot *memslot, 408 struct kvm_memory_slot old, 409 struct kvm_userspace_memory_region *mem, 410 int user_alloc) 411 { 412 return kvmppc_core_prepare_memory_region(kvm, mem); 413 } 414 415 void kvm_arch_commit_memory_region(struct kvm *kvm, 416 struct kvm_userspace_memory_region *mem, 417 struct kvm_memory_slot old, 418 int user_alloc) 419 { 420 kvmppc_core_commit_memory_region(kvm, mem); 421 } 422 423 void kvm_arch_flush_shadow_all(struct kvm *kvm) 424 { 425 } 426 427 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 428 struct kvm_memory_slot *slot) 429 { 430 } 431 432 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 433 { 434 struct kvm_vcpu *vcpu; 435 vcpu = kvmppc_core_vcpu_create(kvm, id); 436 if (!IS_ERR(vcpu)) { 437 vcpu->arch.wqp = &vcpu->wq; 438 kvmppc_create_vcpu_debugfs(vcpu, id); 439 } 440 return vcpu; 441 } 442 443 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 444 { 445 /* Make sure we're not using the vcpu anymore */ 446 hrtimer_cancel(&vcpu->arch.dec_timer); 447 tasklet_kill(&vcpu->arch.tasklet); 448 449 kvmppc_remove_vcpu_debugfs(vcpu); 450 kvmppc_core_vcpu_free(vcpu); 451 } 452 453 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 454 { 455 kvm_arch_vcpu_free(vcpu); 456 } 457 458 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 459 { 460 return kvmppc_core_pending_dec(vcpu); 461 } 462 463 /* 464 * low level hrtimer wake routine. Because this runs in hardirq context 465 * we schedule a tasklet to do the real work. 466 */ 467 enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) 468 { 469 struct kvm_vcpu *vcpu; 470 471 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); 472 tasklet_schedule(&vcpu->arch.tasklet); 473 474 return HRTIMER_NORESTART; 475 } 476 477 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 478 { 479 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 480 tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); 481 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; 482 vcpu->arch.dec_expires = ~(u64)0; 483 484 #ifdef CONFIG_KVM_EXIT_TIMING 485 mutex_init(&vcpu->arch.exit_timing_lock); 486 #endif 487 488 return 0; 489 } 490 491 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 492 { 493 kvmppc_mmu_destroy(vcpu); 494 } 495 496 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 497 { 498 #ifdef CONFIG_BOOKE 499 /* 500 * vrsave (formerly usprg0) isn't used by Linux, but may 501 * be used by the guest. 502 * 503 * On non-booke this is associated with Altivec and 504 * is handled by code in book3s.c. 505 */ 506 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); 507 #endif 508 kvmppc_core_vcpu_load(vcpu, cpu); 509 vcpu->cpu = smp_processor_id(); 510 } 511 512 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 513 { 514 kvmppc_core_vcpu_put(vcpu); 515 #ifdef CONFIG_BOOKE 516 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); 517 #endif 518 vcpu->cpu = -1; 519 } 520 521 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 522 struct kvm_guest_debug *dbg) 523 { 524 return -EINVAL; 525 } 526 527 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, 528 struct kvm_run *run) 529 { 530 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data); 531 } 532 533 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 534 struct kvm_run *run) 535 { 536 u64 uninitialized_var(gpr); 537 538 if (run->mmio.len > sizeof(gpr)) { 539 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); 540 return; 541 } 542 543 if (vcpu->arch.mmio_is_bigendian) { 544 switch (run->mmio.len) { 545 case 8: gpr = *(u64 *)run->mmio.data; break; 546 case 4: gpr = *(u32 *)run->mmio.data; break; 547 case 2: gpr = *(u16 *)run->mmio.data; break; 548 case 1: gpr = *(u8 *)run->mmio.data; break; 549 } 550 } else { 551 /* Convert BE data from userland back to LE. */ 552 switch (run->mmio.len) { 553 case 4: gpr = ld_le32((u32 *)run->mmio.data); break; 554 case 2: gpr = ld_le16((u16 *)run->mmio.data); break; 555 case 1: gpr = *(u8 *)run->mmio.data; break; 556 } 557 } 558 559 if (vcpu->arch.mmio_sign_extend) { 560 switch (run->mmio.len) { 561 #ifdef CONFIG_PPC64 562 case 4: 563 gpr = (s64)(s32)gpr; 564 break; 565 #endif 566 case 2: 567 gpr = (s64)(s16)gpr; 568 break; 569 case 1: 570 gpr = (s64)(s8)gpr; 571 break; 572 } 573 } 574 575 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 576 577 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { 578 case KVM_MMIO_REG_GPR: 579 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 580 break; 581 case KVM_MMIO_REG_FPR: 582 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 583 break; 584 #ifdef CONFIG_PPC_BOOK3S 585 case KVM_MMIO_REG_QPR: 586 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 587 break; 588 case KVM_MMIO_REG_FQPR: 589 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 590 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 591 break; 592 #endif 593 default: 594 BUG(); 595 } 596 } 597 598 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 599 unsigned int rt, unsigned int bytes, int is_bigendian) 600 { 601 if (bytes > sizeof(run->mmio.data)) { 602 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 603 run->mmio.len); 604 } 605 606 run->mmio.phys_addr = vcpu->arch.paddr_accessed; 607 run->mmio.len = bytes; 608 run->mmio.is_write = 0; 609 610 vcpu->arch.io_gpr = rt; 611 vcpu->arch.mmio_is_bigendian = is_bigendian; 612 vcpu->mmio_needed = 1; 613 vcpu->mmio_is_write = 0; 614 vcpu->arch.mmio_sign_extend = 0; 615 616 return EMULATE_DO_MMIO; 617 } 618 619 /* Same as above, but sign extends */ 620 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 621 unsigned int rt, unsigned int bytes, int is_bigendian) 622 { 623 int r; 624 625 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); 626 vcpu->arch.mmio_sign_extend = 1; 627 628 return r; 629 } 630 631 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 632 u64 val, unsigned int bytes, int is_bigendian) 633 { 634 void *data = run->mmio.data; 635 636 if (bytes > sizeof(run->mmio.data)) { 637 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 638 run->mmio.len); 639 } 640 641 run->mmio.phys_addr = vcpu->arch.paddr_accessed; 642 run->mmio.len = bytes; 643 run->mmio.is_write = 1; 644 vcpu->mmio_needed = 1; 645 vcpu->mmio_is_write = 1; 646 647 /* Store the value at the lowest bytes in 'data'. */ 648 if (is_bigendian) { 649 switch (bytes) { 650 case 8: *(u64 *)data = val; break; 651 case 4: *(u32 *)data = val; break; 652 case 2: *(u16 *)data = val; break; 653 case 1: *(u8 *)data = val; break; 654 } 655 } else { 656 /* Store LE value into 'data'. */ 657 switch (bytes) { 658 case 4: st_le32(data, val); break; 659 case 2: st_le16(data, val); break; 660 case 1: *(u8 *)data = val; break; 661 } 662 } 663 664 return EMULATE_DO_MMIO; 665 } 666 667 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 668 { 669 int r; 670 sigset_t sigsaved; 671 672 if (vcpu->sigset_active) 673 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 674 675 if (vcpu->mmio_needed) { 676 if (!vcpu->mmio_is_write) 677 kvmppc_complete_mmio_load(vcpu, run); 678 vcpu->mmio_needed = 0; 679 } else if (vcpu->arch.dcr_needed) { 680 if (!vcpu->arch.dcr_is_write) 681 kvmppc_complete_dcr_load(vcpu, run); 682 vcpu->arch.dcr_needed = 0; 683 } else if (vcpu->arch.osi_needed) { 684 u64 *gprs = run->osi.gprs; 685 int i; 686 687 for (i = 0; i < 32; i++) 688 kvmppc_set_gpr(vcpu, i, gprs[i]); 689 vcpu->arch.osi_needed = 0; 690 } else if (vcpu->arch.hcall_needed) { 691 int i; 692 693 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); 694 for (i = 0; i < 9; ++i) 695 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); 696 vcpu->arch.hcall_needed = 0; 697 } 698 699 r = kvmppc_vcpu_run(run, vcpu); 700 701 if (vcpu->sigset_active) 702 sigprocmask(SIG_SETMASK, &sigsaved, NULL); 703 704 return r; 705 } 706 707 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 708 { 709 if (irq->irq == KVM_INTERRUPT_UNSET) { 710 kvmppc_core_dequeue_external(vcpu, irq); 711 return 0; 712 } 713 714 kvmppc_core_queue_external(vcpu, irq); 715 716 kvm_vcpu_kick(vcpu); 717 718 return 0; 719 } 720 721 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 722 struct kvm_enable_cap *cap) 723 { 724 int r; 725 726 if (cap->flags) 727 return -EINVAL; 728 729 switch (cap->cap) { 730 case KVM_CAP_PPC_OSI: 731 r = 0; 732 vcpu->arch.osi_enabled = true; 733 break; 734 case KVM_CAP_PPC_PAPR: 735 r = 0; 736 vcpu->arch.papr_enabled = true; 737 break; 738 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 739 case KVM_CAP_SW_TLB: { 740 struct kvm_config_tlb cfg; 741 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; 742 743 r = -EFAULT; 744 if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) 745 break; 746 747 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); 748 break; 749 } 750 #endif 751 default: 752 r = -EINVAL; 753 break; 754 } 755 756 if (!r) 757 r = kvmppc_sanity_check(vcpu); 758 759 return r; 760 } 761 762 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 763 struct kvm_mp_state *mp_state) 764 { 765 return -EINVAL; 766 } 767 768 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 769 struct kvm_mp_state *mp_state) 770 { 771 return -EINVAL; 772 } 773 774 long kvm_arch_vcpu_ioctl(struct file *filp, 775 unsigned int ioctl, unsigned long arg) 776 { 777 struct kvm_vcpu *vcpu = filp->private_data; 778 void __user *argp = (void __user *)arg; 779 long r; 780 781 switch (ioctl) { 782 case KVM_INTERRUPT: { 783 struct kvm_interrupt irq; 784 r = -EFAULT; 785 if (copy_from_user(&irq, argp, sizeof(irq))) 786 goto out; 787 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 788 goto out; 789 } 790 791 case KVM_ENABLE_CAP: 792 { 793 struct kvm_enable_cap cap; 794 r = -EFAULT; 795 if (copy_from_user(&cap, argp, sizeof(cap))) 796 goto out; 797 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 798 break; 799 } 800 801 case KVM_SET_ONE_REG: 802 case KVM_GET_ONE_REG: 803 { 804 struct kvm_one_reg reg; 805 r = -EFAULT; 806 if (copy_from_user(®, argp, sizeof(reg))) 807 goto out; 808 if (ioctl == KVM_SET_ONE_REG) 809 r = kvm_vcpu_ioctl_set_one_reg(vcpu, ®); 810 else 811 r = kvm_vcpu_ioctl_get_one_reg(vcpu, ®); 812 break; 813 } 814 815 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 816 case KVM_DIRTY_TLB: { 817 struct kvm_dirty_tlb dirty; 818 r = -EFAULT; 819 if (copy_from_user(&dirty, argp, sizeof(dirty))) 820 goto out; 821 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); 822 break; 823 } 824 #endif 825 default: 826 r = -EINVAL; 827 } 828 829 out: 830 return r; 831 } 832 833 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 834 { 835 return VM_FAULT_SIGBUS; 836 } 837 838 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) 839 { 840 u32 inst_nop = 0x60000000; 841 #ifdef CONFIG_KVM_BOOKE_HV 842 u32 inst_sc1 = 0x44000022; 843 pvinfo->hcall[0] = inst_sc1; 844 pvinfo->hcall[1] = inst_nop; 845 pvinfo->hcall[2] = inst_nop; 846 pvinfo->hcall[3] = inst_nop; 847 #else 848 u32 inst_lis = 0x3c000000; 849 u32 inst_ori = 0x60000000; 850 u32 inst_sc = 0x44000002; 851 u32 inst_imm_mask = 0xffff; 852 853 /* 854 * The hypercall to get into KVM from within guest context is as 855 * follows: 856 * 857 * lis r0, r0, KVM_SC_MAGIC_R0@h 858 * ori r0, KVM_SC_MAGIC_R0@l 859 * sc 860 * nop 861 */ 862 pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask); 863 pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); 864 pvinfo->hcall[2] = inst_sc; 865 pvinfo->hcall[3] = inst_nop; 866 #endif 867 868 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; 869 870 return 0; 871 } 872 873 long kvm_arch_vm_ioctl(struct file *filp, 874 unsigned int ioctl, unsigned long arg) 875 { 876 void __user *argp = (void __user *)arg; 877 long r; 878 879 switch (ioctl) { 880 case KVM_PPC_GET_PVINFO: { 881 struct kvm_ppc_pvinfo pvinfo; 882 memset(&pvinfo, 0, sizeof(pvinfo)); 883 r = kvm_vm_ioctl_get_pvinfo(&pvinfo); 884 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { 885 r = -EFAULT; 886 goto out; 887 } 888 889 break; 890 } 891 #ifdef CONFIG_PPC_BOOK3S_64 892 case KVM_CREATE_SPAPR_TCE: { 893 struct kvm_create_spapr_tce create_tce; 894 struct kvm *kvm = filp->private_data; 895 896 r = -EFAULT; 897 if (copy_from_user(&create_tce, argp, sizeof(create_tce))) 898 goto out; 899 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); 900 goto out; 901 } 902 #endif /* CONFIG_PPC_BOOK3S_64 */ 903 904 #ifdef CONFIG_KVM_BOOK3S_64_HV 905 case KVM_ALLOCATE_RMA: { 906 struct kvm *kvm = filp->private_data; 907 struct kvm_allocate_rma rma; 908 909 r = kvm_vm_ioctl_allocate_rma(kvm, &rma); 910 if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) 911 r = -EFAULT; 912 break; 913 } 914 915 case KVM_PPC_ALLOCATE_HTAB: { 916 struct kvm *kvm = filp->private_data; 917 u32 htab_order; 918 919 r = -EFAULT; 920 if (get_user(htab_order, (u32 __user *)argp)) 921 break; 922 r = kvmppc_alloc_reset_hpt(kvm, &htab_order); 923 if (r) 924 break; 925 r = -EFAULT; 926 if (put_user(htab_order, (u32 __user *)argp)) 927 break; 928 r = 0; 929 break; 930 } 931 #endif /* CONFIG_KVM_BOOK3S_64_HV */ 932 933 #ifdef CONFIG_PPC_BOOK3S_64 934 case KVM_PPC_GET_SMMU_INFO: { 935 struct kvm *kvm = filp->private_data; 936 struct kvm_ppc_smmu_info info; 937 938 memset(&info, 0, sizeof(info)); 939 r = kvm_vm_ioctl_get_smmu_info(kvm, &info); 940 if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) 941 r = -EFAULT; 942 break; 943 } 944 #endif /* CONFIG_PPC_BOOK3S_64 */ 945 default: 946 r = -ENOTTY; 947 } 948 949 out: 950 return r; 951 } 952 953 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; 954 static unsigned long nr_lpids; 955 956 long kvmppc_alloc_lpid(void) 957 { 958 long lpid; 959 960 do { 961 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); 962 if (lpid >= nr_lpids) { 963 pr_err("%s: No LPIDs free\n", __func__); 964 return -ENOMEM; 965 } 966 } while (test_and_set_bit(lpid, lpid_inuse)); 967 968 return lpid; 969 } 970 971 void kvmppc_claim_lpid(long lpid) 972 { 973 set_bit(lpid, lpid_inuse); 974 } 975 976 void kvmppc_free_lpid(long lpid) 977 { 978 clear_bit(lpid, lpid_inuse); 979 } 980 981 void kvmppc_init_lpid(unsigned long nr_lpids_param) 982 { 983 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); 984 memset(lpid_inuse, 0, sizeof(lpid_inuse)); 985 } 986 987 int kvm_arch_init(void *opaque) 988 { 989 return 0; 990 } 991 992 void kvm_arch_exit(void) 993 { 994 } 995