/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits", VCPU_STAT(sum_exits) },
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "queue_intr", VCPU_STAT(queue_intr) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage", VCPU_STAT(pf_storage) },
	{ "sp_storage", VCPU_STAT(sp_storage) },
	{ "pf_instruc", VCPU_STAT(pf_instruc) },
	{ "sp_instruc", VCPU_STAT(sp_instruc) },
	{ "ld", VCPU_STAT(ld) },
	{ "ld_slow", VCPU_STAT(ld_slow) },
	{ "st", VCPU_STAT(st) },
	{ "st_slow", VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

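/*
 * Deliver an interrupt directly to the guest: save the old PC and MSR
 * into SRR0/SRR1, branch to the vector (offset by HIOR for PR guests)
 * and let the MMU-specific code compute the post-interrupt MSR.
 */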
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
	default: prio = BOOK3S_IRQPRIO_MAX; break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

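/*
 * Queue a data storage interrupt: DAR and DSISR describe the faulting
 * access; the interrupt itself goes through the usual irqprio path.
 */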
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}

static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

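/*
 * Scan pending_exceptions in priority order (lowest bit first) and try
 * to deliver each one; stop as soon as an exception has been delivered
 * and cleared, leaving the rest pending for the next guest entry.
 */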
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

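/*
 * Translate an effective address for an instruction or data access.
 * With relocation enabled (MSR_IR/MSR_DR) the guest MMU performs the
 * translation; in real mode the address maps 1:1 into guest physical
 * space, modulo the split-real hack.
 */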
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

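/*
 * ONE_REG accessors. The HV/PR backend gets the first shot at the
 * register; a -EINVAL return means "not mine", in which case the
 * common Book3S registers below are tried.
 */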
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_VTB:
			*val = get_reg_val(id, vcpu->arch.vtb);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

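/* Mirror image of kvmppc_get_one_reg(): backend first, common regs second. */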
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_VTB:
			vcpu->arch.vtb = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

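/*
 * MMU notifier hooks, forwarded verbatim to the backend (HV or PR),
 * which owns the host-side translations for guest memory.
 */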
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

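/*
 * H_LOGICAL_CI_LOAD and H_LOGICAL_CI_STORE are PAPR hypercalls for
 * cache-inhibited (I/O) accesses, served here from the in-kernel MMIO
 * bus; H_TOO_HARD punts the call back out for the next level to handle.
 */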
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s; compatibility is checked
	 * while loading the HV or PR module instead.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32-bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif