// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("exits", sum_exits),
	VCPU_STAT("mmio", mmio_exits),
	VCPU_STAT("sig", signal_exits),
	VCPU_STAT("sysc", syscall_exits),
	VCPU_STAT("inst_emu", emulated_inst_exits),
	VCPU_STAT("dec", dec_exits),
	VCPU_STAT("ext_intr", ext_intr_exits),
	VCPU_STAT("queue_intr", queue_intr),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VCPU_STAT("halt_wait_ns", halt_wait_ns),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_successful_wait", halt_successful_wait),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("pf_storage", pf_storage),
	VCPU_STAT("sp_storage", sp_storage),
	VCPU_STAT("pf_instruc", pf_instruc),
	VCPU_STAT("sp_instruc", sp_instruc),
	VCPU_STAT("ld", ld),
	VCPU_STAT("ld_slow", ld_slow),
	VCPU_STAT("st", st),
	VCPU_STAT("st_slow", st_slow),
	VCPU_STAT("pthru_all", pthru_all),
	VCPU_STAT("pthru_host", pthru_host),
	VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
	VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
	VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
	{ NULL }
};

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}
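
/*
 * A PR guest can flag a critical section by writing its current r1 into
 * the "critical" field of the shared (magic) page; while that value
 * matches r1 and the guest is in supervisor mode, maskable interrupt
 * delivery is held off.  HV guests never use this protocol, hence the
 * early bail-out below.
 */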
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}
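
/*
 * Map an architected exception vector offset to the internal delivery
 * priority used as a bit number in vcpu->arch.pending_exceptions.
 * Unknown vectors map to BOOK3S_IRQPRIO_MAX, which the delivery loop in
 * kvmppc_core_prepare_to_enter() never reaches, so they are effectively
 * ignored.
 */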
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
	default:    prio = BOOK3S_IRQPRIO_MAX; break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
}
EXPORT_SYMBOL(kvmppc_core_queue_syscall);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
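
/*
 * Attempt delivery of one pending exception priority.  Only the
 * decrementer and external interrupts are maskable: they are held back
 * while MSR_EE is clear or while the guest is inside a critical
 * section.  Every other priority is delivered unconditionally.  Returns
 * 1 if the interrupt was injected, 0 if it has to remain pending.
 */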
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}
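
/*
 * Scan the pending-exception bitmap in priority order (lowest set bit
 * first) and inject what is currently deliverable.  A delivered one-shot
 * exception has its bit cleared and ends the scan; sources such as the
 * decrementer keep their bit until they are dequeued explicitly.  The
 * resulting pending state is then mirrored into the shared page for the
 * guest.
 */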
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
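
/*
 * Translate a guest physical address into a host pfn.  The paravirt
 * magic page is special-cased: it is backed directly by the host page
 * holding vcpu->arch.shared instead of going through the memslots.
 */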
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
			  enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}
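
/*
 * ONE_REG accessors.  The HV/PR backend gets first refusal through
 * kvm_ops->get_one_reg()/set_one_reg(); if it returns -EINVAL we fall
 * back to the generic Book3S registers handled below.
 *
 * For illustration only (this sketch is not part of this file),
 * userspace reads one of these registers roughly like so, using the
 * standard KVM ONE_REG ABI:
 *
 *	__u64 fscr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_FSCR,
 *		.addr = (__u64)&fscr,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */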
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm->arch.kvm_ops->free_memslot(slot);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem,
				      enum kvm_mr_change change)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
							change);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->age_gfn(kvm, range);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->set_spte_gfn(kvm, range);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices which are not directly freed by the
	 * device 'release' method
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
	kfree(kvm->arch.xics_device);
	kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}
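
/*
 * H_LOGICAL_CI_LOAD and H_LOGICAL_CI_STORE implement cache-inhibited
 * (MMIO) accesses to a logical address on behalf of the guest.  The
 * access is routed to in-kernel devices on KVM_MMIO_BUS under SRCU;
 * anything we cannot satisfy here returns H_TOO_HARD so the hypercall
 * can be completed elsewhere (typically by userspace).  The bounce
 * buffer is big-endian, matching MMIO byte order.
 */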
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}
static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */
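
/*
 * Module init/exit.  Beyond the common KVM setup, this registers the
 * in-kernel interrupt-controller device types: on a XIVE-capable host
 * the XICS device type is provided by the XICS-on-XIVE glue (plus the
 * native XIVE device when supported), otherwise by the emulated XICS.
 */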
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported())
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif