/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}
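/*
 * Deliver an interrupt to the guest immediately: stash the current PC
 * and MSR (plus any error flags) in SRR0/SRR1, redirect the PC to the
 * interrupt vector (offset by HIOR for PR guests), and let the MMU
 * callback put the MSR into the state expected on interrupt entry.
 */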
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}
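/*
 * Drop any pending external interrupt, whether it was queued as an
 * edge (EXTERNAL) or as a level-triggered (EXTERNAL_LEVEL) source.
 */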
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
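/*
 * Try to deliver the interrupt of the given priority. Most exceptions
 * are delivered unconditionally; the decrementer and external
 * interrupts additionally require MSR_EE to be set and the guest not
 * to be inside a critical section. Returns 1 if the interrupt was
 * delivered, 0 otherwise.
 */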
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
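/*
 * Translate an effective address into a kvmppc_pte. With relocation
 * enabled (MSR_IR for instructions, MSR_DR for data) this goes through
 * the guest MMU; otherwise the address maps 1:1 into the real-mode
 * window, modulo the split-real-mode fixup.
 */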
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
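/*
 * ONE_REG accessors. The subarch (HV or PR) handler gets the first
 * shot; if it returns -EINVAL, the register is handled generically
 * below. From userspace the interface looks roughly like this
 * (illustrative sketch only, not part of this file):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_FPSCR,
 *		.addr = (__u64)(unsigned long)&value,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */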
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST: {
			u32 opcode = INS_TW;
			r = copy_to_user((u32 __user *)(long)reg->addr,
					 &opcode, sizeof(u32));
			break;
		}
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			val = get_reg_val(reg->id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			val = get_reg_val(reg->id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			val = get_reg_val(reg->id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			val = get_reg_val(reg->id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			val = get_reg_val(reg->id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_VTB:
			val = get_reg_val(reg->id, vcpu->arch.vtb);
			break;
		case KVM_REG_PPC_IC:
			val = get_reg_val(reg->id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}
	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
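/*
 * Counterpart to the get path above: copy the new value in from
 * userspace first, then let the subarch handler or the generic
 * switch below store it.
 */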
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(reg->id, val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VTB:
			vcpu->arch.vtb = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(reg->id, val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
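/*
 * The entry points below simply dispatch through kvm->arch.kvm_ops,
 * so the HV and PR implementations can be built as separate modules
 * and coexist on the same kernel.
 */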
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->age_hva(kvm, hva);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif