/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

/*
 * Try to deliver the interrupt of the given priority to the guest.
 * Returns 1 if the interrupt was actually injected, 0 otherwise.
 */
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	if (unlikely(mp_pa) &&
	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);

/*
 * Translate a guest effective address to a guest real address: use the
 * guest MMU when relocation is enabled, otherwise map 1:1 in real mode.
 */
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			bool iswrite, struct kvmppc_pte *pte)
{
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}

static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
		return -ENOENT;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = *eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
		goto nopte;

	*eaddr = pte.raddr;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto mmio;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto mmio;
	}

	return EMULATE_DONE;

nopte:
	return -ENOENT;
mmio:
	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST: {
			u32 opcode = INS_TW;
			r = copy_to_user((u32 __user *)(long)reg->addr,
					 &opcode, sizeof(u32));
			break;
		}
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			val = get_reg_val(reg->id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			val = get_reg_val(reg->id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			val = get_reg_val(reg->id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			val = get_reg_val(reg->id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			val = get_reg_val(reg->id, vcpu->arch.bescr);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}
	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(reg->id, val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(reg->id, val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->age_hva(kvm, hva);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module.
	 */
	return 0;
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif