1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. 4 * 5 * Authors: 6 * Alexander Graf <agraf@suse.de> 7 * Kevin Wolf <mail@kevin-wolf.de> 8 * Paul Mackerras <paulus@samba.org> 9 * 10 * Description: 11 * Functions relating to running KVM on Book 3S processors where 12 * we don't have access to hypervisor mode, and we run the guest 13 * in problem state (user mode). 14 * 15 * This file is derived from arch/powerpc/kvm/44x.c, 16 * by Hollis Blanchard <hollisb@us.ibm.com>. 17 */ 18 19 #include <linux/kvm_host.h> 20 #include <linux/export.h> 21 #include <linux/err.h> 22 #include <linux/slab.h> 23 24 #include <asm/reg.h> 25 #include <asm/cputable.h> 26 #include <asm/cacheflush.h> 27 #include <linux/uaccess.h> 28 #include <asm/io.h> 29 #include <asm/kvm_ppc.h> 30 #include <asm/kvm_book3s.h> 31 #include <asm/mmu_context.h> 32 #include <asm/switch_to.h> 33 #include <asm/firmware.h> 34 #include <asm/setup.h> 35 #include <linux/gfp.h> 36 #include <linux/sched.h> 37 #include <linux/vmalloc.h> 38 #include <linux/highmem.h> 39 #include <linux/module.h> 40 #include <linux/miscdevice.h> 41 #include <asm/asm-prototypes.h> 42 #include <asm/tm.h> 43 44 #include "book3s.h" 45 46 #define CREATE_TRACE_POINTS 47 #include "trace_pr.h" 48 49 /* #define EXIT_DEBUG */ 50 /* #define DEBUG_EXT */ 51 52 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, 53 ulong msr); 54 #ifdef CONFIG_PPC_BOOK3S_64 55 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac); 56 #endif 57 58 /* Some compatibility defines */ 59 #ifdef CONFIG_PPC_BOOK3S_32 60 #define MSR_USER32 MSR_USER 61 #define MSR_USER64 MSR_USER 62 #define HW_PAGE_SIZE PAGE_SIZE 63 #define HPTE_R_M _PAGE_COHERENT 64 #endif 65 66 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) 67 { 68 ulong msr = kvmppc_get_msr(vcpu); 69 return (msr & (MSR_IR|MSR_DR)) == MSR_DR; 70 } 71 72 static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) 73 { 74 ulong msr = kvmppc_get_msr(vcpu); 75 ulong pc = kvmppc_get_pc(vcpu); 76 77 /* We are in DR only split real mode */ 78 if ((msr & (MSR_IR|MSR_DR)) != MSR_DR) 79 return; 80 81 /* We have not fixed up the guest already */ 82 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) 83 return; 84 85 /* The code is in fixupable address space */ 86 if (pc & SPLIT_HACK_MASK) 87 return; 88 89 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; 90 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); 91 } 92 93 static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) 94 { 95 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { 96 ulong pc = kvmppc_get_pc(vcpu); 97 ulong lr = kvmppc_get_lr(vcpu); 98 if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) 99 kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); 100 if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) 101 kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); 102 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; 103 } 104 } 105 106 static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) 107 { 108 unsigned long msr, pc, new_msr, new_pc; 109 110 kvmppc_unfixup_split_real(vcpu); 111 112 msr = kvmppc_get_msr(vcpu); 113 pc = kvmppc_get_pc(vcpu); 114 new_msr = vcpu->arch.intr_msr; 115 new_pc = to_book3s(vcpu)->hior + vec; 116 117 #ifdef CONFIG_PPC_BOOK3S_64 118 /* If transactional, change to suspend mode on IRQ delivery */ 119 if (MSR_TM_TRANSACTIONAL(msr)) 120 new_msr |= MSR_TS_S; 121 else 122 new_msr |= msr & MSR_TS_MASK; 123 #endif 124 125 kvmppc_set_srr0(vcpu, pc); 126 kvmppc_set_srr1(vcpu, (msr & 
SRR1_MSR_BITS) | srr1_flags); 127 kvmppc_set_pc(vcpu, new_pc); 128 kvmppc_set_msr(vcpu, new_msr); 129 } 130 131 static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) 132 { 133 #ifdef CONFIG_PPC_BOOK3S_64 134 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 135 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); 136 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; 137 svcpu->in_use = 0; 138 svcpu_put(svcpu); 139 #endif 140 141 /* Disable AIL if supported */ 142 if (cpu_has_feature(CPU_FTR_HVMODE) && 143 cpu_has_feature(CPU_FTR_ARCH_207S)) 144 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL); 145 146 vcpu->cpu = smp_processor_id(); 147 #ifdef CONFIG_PPC_BOOK3S_32 148 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; 149 #endif 150 151 if (kvmppc_is_split_real(vcpu)) 152 kvmppc_fixup_split_real(vcpu); 153 154 kvmppc_restore_tm_pr(vcpu); 155 } 156 157 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) 158 { 159 #ifdef CONFIG_PPC_BOOK3S_64 160 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 161 if (svcpu->in_use) { 162 kvmppc_copy_from_svcpu(vcpu); 163 } 164 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); 165 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; 166 svcpu_put(svcpu); 167 #endif 168 169 if (kvmppc_is_split_real(vcpu)) 170 kvmppc_unfixup_split_real(vcpu); 171 172 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); 173 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); 174 kvmppc_save_tm_pr(vcpu); 175 176 /* Enable AIL if supported */ 177 if (cpu_has_feature(CPU_FTR_HVMODE) && 178 cpu_has_feature(CPU_FTR_ARCH_207S)) 179 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3); 180 181 vcpu->cpu = -1; 182 } 183 184 /* Copy data needed by real-mode code from vcpu to shadow vcpu */ 185 void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) 186 { 187 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 188 189 svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; 190 svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; 191 svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; 192 svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; 193 svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; 194 svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; 195 svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; 196 svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; 197 svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; 198 svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; 199 svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; 200 svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; 201 svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; 202 svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; 203 svcpu->cr = vcpu->arch.regs.ccr; 204 svcpu->xer = vcpu->arch.regs.xer; 205 svcpu->ctr = vcpu->arch.regs.ctr; 206 svcpu->lr = vcpu->arch.regs.link; 207 svcpu->pc = vcpu->arch.regs.nip; 208 #ifdef CONFIG_PPC_BOOK3S_64 209 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; 210 #endif 211 /* 212 * Now also save the current time base value. We use this 213 * to find the guest purr and spurr value. 
214 */ 215 vcpu->arch.entry_tb = get_tb(); 216 vcpu->arch.entry_vtb = get_vtb(); 217 if (cpu_has_feature(CPU_FTR_ARCH_207S)) 218 vcpu->arch.entry_ic = mfspr(SPRN_IC); 219 svcpu->in_use = true; 220 221 svcpu_put(svcpu); 222 } 223 224 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) 225 { 226 ulong guest_msr = kvmppc_get_msr(vcpu); 227 ulong smsr = guest_msr; 228 229 /* Guest MSR values */ 230 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 231 smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE | 232 MSR_TM | MSR_TS_MASK; 233 #else 234 smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; 235 #endif 236 /* Process MSR values */ 237 smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; 238 /* External providers the guest reserved */ 239 smsr |= (guest_msr & vcpu->arch.guest_owned_ext); 240 /* 64-bit Process MSR values */ 241 #ifdef CONFIG_PPC_BOOK3S_64 242 smsr |= MSR_HV; 243 #endif 244 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 245 /* 246 * in guest privileged state, we want to fail all TM transactions. 247 * So disable MSR TM bit so that all tbegin. will be able to be 248 * trapped into host. 249 */ 250 if (!(guest_msr & MSR_PR)) 251 smsr &= ~MSR_TM; 252 #endif 253 vcpu->arch.shadow_msr = smsr; 254 } 255 256 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ 257 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) 258 { 259 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 260 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 261 ulong old_msr; 262 #endif 263 264 /* 265 * Maybe we were already preempted and synced the svcpu from 266 * our preempt notifiers. Don't bother touching this svcpu then. 267 */ 268 if (!svcpu->in_use) 269 goto out; 270 271 vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; 272 vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; 273 vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; 274 vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; 275 vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; 276 vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; 277 vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; 278 vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; 279 vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; 280 vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; 281 vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; 282 vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; 283 vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; 284 vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; 285 vcpu->arch.regs.ccr = svcpu->cr; 286 vcpu->arch.regs.xer = svcpu->xer; 287 vcpu->arch.regs.ctr = svcpu->ctr; 288 vcpu->arch.regs.link = svcpu->lr; 289 vcpu->arch.regs.nip = svcpu->pc; 290 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; 291 vcpu->arch.fault_dar = svcpu->fault_dar; 292 vcpu->arch.fault_dsisr = svcpu->fault_dsisr; 293 vcpu->arch.last_inst = svcpu->last_inst; 294 #ifdef CONFIG_PPC_BOOK3S_64 295 vcpu->arch.shadow_fscr = svcpu->shadow_fscr; 296 #endif 297 /* 298 * Update purr and spurr using time base on exit. 299 */ 300 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; 301 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; 302 to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; 303 if (cpu_has_feature(CPU_FTR_ARCH_207S)) 304 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; 305 306 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 307 /* 308 * Unlike other MSR bits, MSR[TS]bits can be changed at guest without 309 * notifying host: 310 * modified by unprivileged instructions like "tbegin"/"tend"/ 311 * "tresume"/"tsuspend" in PR KVM guest. 312 * 313 * It is necessary to sync here to calculate a correct shadow_msr. 314 * 315 * privileged guest's tbegin will be failed at present. 
So we 316 * only take care of problem state guest. 317 */ 318 old_msr = kvmppc_get_msr(vcpu); 319 if (unlikely((old_msr & MSR_PR) && 320 (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != 321 (old_msr & (MSR_TS_MASK)))) { 322 old_msr &= ~(MSR_TS_MASK); 323 old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); 324 kvmppc_set_msr_fast(vcpu, old_msr); 325 kvmppc_recalc_shadow_msr(vcpu); 326 } 327 #endif 328 329 svcpu->in_use = false; 330 331 out: 332 svcpu_put(svcpu); 333 } 334 335 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 336 void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) 337 { 338 tm_enable(); 339 vcpu->arch.tfhar = mfspr(SPRN_TFHAR); 340 vcpu->arch.texasr = mfspr(SPRN_TEXASR); 341 vcpu->arch.tfiar = mfspr(SPRN_TFIAR); 342 tm_disable(); 343 } 344 345 void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) 346 { 347 tm_enable(); 348 mtspr(SPRN_TFHAR, vcpu->arch.tfhar); 349 mtspr(SPRN_TEXASR, vcpu->arch.texasr); 350 mtspr(SPRN_TFIAR, vcpu->arch.tfiar); 351 tm_disable(); 352 } 353 354 /* loadup math bits which is enabled at kvmppc_get_msr() but not enabled at 355 * hardware. 356 */ 357 static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu) 358 { 359 ulong exit_nr; 360 ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & 361 (MSR_FP | MSR_VEC | MSR_VSX); 362 363 if (!ext_diff) 364 return; 365 366 if (ext_diff == MSR_FP) 367 exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL; 368 else if (ext_diff == MSR_VEC) 369 exit_nr = BOOK3S_INTERRUPT_ALTIVEC; 370 else 371 exit_nr = BOOK3S_INTERRUPT_VSX; 372 373 kvmppc_handle_ext(vcpu, exit_nr, ext_diff); 374 } 375 376 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) 377 { 378 if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { 379 kvmppc_save_tm_sprs(vcpu); 380 return; 381 } 382 383 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); 384 kvmppc_giveup_ext(vcpu, MSR_VSX); 385 386 preempt_disable(); 387 _kvmppc_save_tm_pr(vcpu, mfmsr()); 388 preempt_enable(); 389 } 390 391 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) 392 { 393 if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { 394 kvmppc_restore_tm_sprs(vcpu); 395 if (kvmppc_get_msr(vcpu) & MSR_TM) { 396 kvmppc_handle_lost_math_exts(vcpu); 397 if (vcpu->arch.fscr & FSCR_TAR) 398 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); 399 } 400 return; 401 } 402 403 preempt_disable(); 404 _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); 405 preempt_enable(); 406 407 if (kvmppc_get_msr(vcpu) & MSR_TM) { 408 kvmppc_handle_lost_math_exts(vcpu); 409 if (vcpu->arch.fscr & FSCR_TAR) 410 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); 411 } 412 } 413 #endif 414 415 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) 416 { 417 int r = 1; /* Indicate we want to get back into the guest */ 418 419 /* We misuse TLB_FLUSH to indicate that we want to clear 420 all shadow cache entries */ 421 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) 422 kvmppc_mmu_pte_flush(vcpu, 0, 0); 423 424 return r; 425 } 426 427 /************* MMU Notifiers *************/ 428 static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 429 { 430 long i; 431 struct kvm_vcpu *vcpu; 432 433 kvm_for_each_vcpu(i, vcpu, kvm) 434 kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT, 435 range->end << PAGE_SHIFT); 436 437 return false; 438 } 439 440 static bool kvm_unmap_gfn_range_pr(struct kvm *kvm, struct kvm_gfn_range *range) 441 { 442 return do_kvm_unmap_gfn(kvm, range); 443 } 444 445 static bool kvm_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) 446 { 447 /* XXX could be more clever ;) */ 448 return false; 449 } 450 451 static bool kvm_test_age_gfn_pr(struct kvm 
*kvm, struct kvm_gfn_range *range) 452 { 453 /* XXX could be more clever ;) */ 454 return false; 455 } 456 457 static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) 458 { 459 /* The page will get remapped properly on its next fault */ 460 return do_kvm_unmap_gfn(kvm, range); 461 } 462 463 /*****************************************/ 464 465 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) 466 { 467 ulong old_msr; 468 469 /* For PAPR guest, make sure MSR reflects guest mode */ 470 if (vcpu->arch.papr_enabled) 471 msr = (msr & ~MSR_HV) | MSR_ME; 472 473 #ifdef EXIT_DEBUG 474 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); 475 #endif 476 477 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 478 /* We should never target guest MSR to TS=10 && PR=0, 479 * since we always fail transaction for guest privilege 480 * state. 481 */ 482 if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr)) 483 kvmppc_emulate_tabort(vcpu, 484 TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT); 485 #endif 486 487 old_msr = kvmppc_get_msr(vcpu); 488 msr &= to_book3s(vcpu)->msr_mask; 489 kvmppc_set_msr_fast(vcpu, msr); 490 kvmppc_recalc_shadow_msr(vcpu); 491 492 if (msr & MSR_POW) { 493 if (!vcpu->arch.pending_exceptions) { 494 kvm_vcpu_block(vcpu); 495 kvm_clear_request(KVM_REQ_UNHALT, vcpu); 496 vcpu->stat.halt_wakeup++; 497 498 /* Unset POW bit after we woke up */ 499 msr &= ~MSR_POW; 500 kvmppc_set_msr_fast(vcpu, msr); 501 } 502 } 503 504 if (kvmppc_is_split_real(vcpu)) 505 kvmppc_fixup_split_real(vcpu); 506 else 507 kvmppc_unfixup_split_real(vcpu); 508 509 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != 510 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { 511 kvmppc_mmu_flush_segments(vcpu); 512 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 513 514 /* Preload magic page segment when in kernel mode */ 515 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { 516 struct kvm_vcpu_arch *a = &vcpu->arch; 517 518 if (msr & MSR_DR) 519 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); 520 else 521 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); 522 } 523 } 524 525 /* 526 * When switching from 32 to 64-bit, we may have a stale 32-bit 527 * magic page around, we need to flush it. Typically 32-bit magic 528 * page will be instantiated when calling into RTAS. Note: We 529 * assume that such transition only happens while in kernel mode, 530 * ie, we never transition from user 32-bit to kernel 64-bit with 531 * a 32-bit magic page around. 
532 */ 533 if (vcpu->arch.magic_page_pa && 534 !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) { 535 /* going from RTAS to normal kernel code */ 536 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, 537 ~0xFFFUL); 538 } 539 540 /* Preload FPU if it's enabled */ 541 if (kvmppc_get_msr(vcpu) & MSR_FP) 542 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 543 544 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 545 if (kvmppc_get_msr(vcpu) & MSR_TM) 546 kvmppc_handle_lost_math_exts(vcpu); 547 #endif 548 } 549 550 static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) 551 { 552 u32 host_pvr; 553 554 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; 555 vcpu->arch.pvr = pvr; 556 #ifdef CONFIG_PPC_BOOK3S_64 557 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { 558 kvmppc_mmu_book3s_64_init(vcpu); 559 if (!to_book3s(vcpu)->hior_explicit) 560 to_book3s(vcpu)->hior = 0xfff00000; 561 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; 562 vcpu->arch.cpu_type = KVM_CPU_3S_64; 563 } else 564 #endif 565 { 566 kvmppc_mmu_book3s_32_init(vcpu); 567 if (!to_book3s(vcpu)->hior_explicit) 568 to_book3s(vcpu)->hior = 0; 569 to_book3s(vcpu)->msr_mask = 0xffffffffULL; 570 vcpu->arch.cpu_type = KVM_CPU_3S_32; 571 } 572 573 kvmppc_sanity_check(vcpu); 574 575 /* If we are in hypervisor level on 970, we can tell the CPU to 576 * treat DCBZ as 32 bytes store */ 577 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; 578 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && 579 !strcmp(cur_cpu_spec->platform, "ppc970")) 580 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 581 582 /* Cell performs badly if MSR_FEx are set. So let's hope nobody 583 really needs them in a VM on Cell and force disable them. */ 584 if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) 585 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); 586 587 /* 588 * If they're asking for POWER6 or later, set the flag 589 * indicating that we can do multiple large page sizes 590 * and 1TB segments. 591 * Also set the flag that indicates that tlbie has the large 592 * page bit in the RB operand instead of the instruction. 593 */ 594 switch (PVR_VER(pvr)) { 595 case PVR_POWER6: 596 case PVR_POWER7: 597 case PVR_POWER7p: 598 case PVR_POWER8: 599 case PVR_POWER8E: 600 case PVR_POWER8NVL: 601 case PVR_POWER9: 602 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | 603 BOOK3S_HFLAG_NEW_TLBIE; 604 break; 605 } 606 607 #ifdef CONFIG_PPC_BOOK3S_32 608 /* 32 bit Book3S always has 32 byte dcbz */ 609 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 610 #endif 611 612 /* On some CPUs we can execute paired single operations natively */ 613 asm ( "mfpvr %0" : "=r"(host_pvr)); 614 switch (host_pvr) { 615 case 0x00080200: /* lonestar 2.0 */ 616 case 0x00088202: /* lonestar 2.2 */ 617 case 0x70000100: /* gekko 1.0 */ 618 case 0x00080100: /* gekko 2.0 */ 619 case 0x00083203: /* gekko 2.3a */ 620 case 0x00083213: /* gekko 2.3b */ 621 case 0x00083204: /* gekko 2.4 */ 622 case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ 623 case 0x00087200: /* broadway */ 624 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; 625 /* Enable HID2.PSE - in case we need it later */ 626 mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29)); 627 } 628 } 629 630 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To 631 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to 632 * emulate 32 bytes dcbz length. 
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
				   ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		fallthrough;
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	    (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
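		 * Because that shared save area also holds the VSX state,
		 * guest_owned_ext drops MSR_VSX together with the requested
		 * bits at the end of this function.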
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
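 *
 * Any bit that is still set in guest_owned_ext but no longer set in
 * current->thread.regs->msr means that register state was flushed and
 * must be reloaded before we re-enter the guest.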
930 */ 931 static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) 932 { 933 unsigned long lost_ext; 934 935 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; 936 if (!lost_ext) 937 return; 938 939 if (lost_ext & MSR_FP) { 940 preempt_disable(); 941 enable_kernel_fp(); 942 load_fp_state(&vcpu->arch.fp); 943 disable_kernel_fp(); 944 preempt_enable(); 945 } 946 #ifdef CONFIG_ALTIVEC 947 if (lost_ext & MSR_VEC) { 948 preempt_disable(); 949 enable_kernel_altivec(); 950 load_vr_state(&vcpu->arch.vr); 951 disable_kernel_altivec(); 952 preempt_enable(); 953 } 954 #endif 955 current->thread.regs->msr |= lost_ext; 956 } 957 958 #ifdef CONFIG_PPC_BOOK3S_64 959 960 void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) 961 { 962 /* Inject the Interrupt Cause field and trigger a guest interrupt */ 963 vcpu->arch.fscr &= ~(0xffULL << 56); 964 vcpu->arch.fscr |= (fac << 56); 965 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); 966 } 967 968 static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) 969 { 970 enum emulation_result er = EMULATE_FAIL; 971 972 if (!(kvmppc_get_msr(vcpu) & MSR_PR)) 973 er = kvmppc_emulate_instruction(vcpu); 974 975 if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { 976 /* Couldn't emulate, trigger interrupt in guest */ 977 kvmppc_trigger_fac_interrupt(vcpu, fac); 978 } 979 } 980 981 /* Enable facilities (TAR, EBB, DSCR) for the guest */ 982 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) 983 { 984 bool guest_fac_enabled; 985 BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S)); 986 987 /* 988 * Not every facility is enabled by FSCR bits, check whether the 989 * guest has this facility enabled at all. 990 */ 991 switch (fac) { 992 case FSCR_TAR_LG: 993 case FSCR_EBB_LG: 994 guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); 995 break; 996 case FSCR_TM_LG: 997 guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; 998 break; 999 default: 1000 guest_fac_enabled = false; 1001 break; 1002 } 1003 1004 if (!guest_fac_enabled) { 1005 /* Facility not enabled by the guest */ 1006 kvmppc_trigger_fac_interrupt(vcpu, fac); 1007 return RESUME_GUEST; 1008 } 1009 1010 switch (fac) { 1011 case FSCR_TAR_LG: 1012 /* TAR switching isn't lazy in Linux yet */ 1013 current->thread.tar = mfspr(SPRN_TAR); 1014 mtspr(SPRN_TAR, vcpu->arch.tar); 1015 vcpu->arch.shadow_fscr |= FSCR_TAR; 1016 break; 1017 default: 1018 kvmppc_emulate_fac(vcpu, fac); 1019 break; 1020 } 1021 1022 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1023 /* Since we disabled MSR_TM at privilege state, the mfspr instruction 1024 * for TM spr can trigger TM fac unavailable. In this case, the 1025 * emulation is handled by kvmppc_emulate_fac(), which invokes 1026 * kvmppc_emulate_mfspr() finally. But note the mfspr can include 1027 * RT for NV registers. So it need to restore those NV reg to reflect 1028 * the update. 
1029 */ 1030 if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR)) 1031 return RESUME_GUEST_NV; 1032 #endif 1033 1034 return RESUME_GUEST; 1035 } 1036 1037 void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) 1038 { 1039 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { 1040 /* TAR got dropped, drop it in shadow too */ 1041 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); 1042 } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { 1043 vcpu->arch.fscr = fscr; 1044 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); 1045 return; 1046 } 1047 1048 vcpu->arch.fscr = fscr; 1049 } 1050 #endif 1051 1052 static void kvmppc_setup_debug(struct kvm_vcpu *vcpu) 1053 { 1054 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 1055 u64 msr = kvmppc_get_msr(vcpu); 1056 1057 kvmppc_set_msr(vcpu, msr | MSR_SE); 1058 } 1059 } 1060 1061 static void kvmppc_clear_debug(struct kvm_vcpu *vcpu) 1062 { 1063 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 1064 u64 msr = kvmppc_get_msr(vcpu); 1065 1066 kvmppc_set_msr(vcpu, msr & ~MSR_SE); 1067 } 1068 } 1069 1070 static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr) 1071 { 1072 enum emulation_result er; 1073 ulong flags; 1074 u32 last_inst; 1075 int emul, r; 1076 1077 /* 1078 * shadow_srr1 only contains valid flags if we came here via a program 1079 * exception. The other exceptions (emulation assist, FP unavailable, 1080 * etc.) do not provide flags in SRR1, so use an illegal-instruction 1081 * exception when injecting a program interrupt into the guest. 1082 */ 1083 if (exit_nr == BOOK3S_INTERRUPT_PROGRAM) 1084 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; 1085 else 1086 flags = SRR1_PROGILL; 1087 1088 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); 1089 if (emul != EMULATE_DONE) 1090 return RESUME_GUEST; 1091 1092 if (kvmppc_get_msr(vcpu) & MSR_PR) { 1093 #ifdef EXIT_DEBUG 1094 pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n", 1095 kvmppc_get_pc(vcpu), last_inst); 1096 #endif 1097 if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) { 1098 kvmppc_core_queue_program(vcpu, flags); 1099 return RESUME_GUEST; 1100 } 1101 } 1102 1103 vcpu->stat.emulated_inst_exits++; 1104 er = kvmppc_emulate_instruction(vcpu); 1105 switch (er) { 1106 case EMULATE_DONE: 1107 r = RESUME_GUEST_NV; 1108 break; 1109 case EMULATE_AGAIN: 1110 r = RESUME_GUEST; 1111 break; 1112 case EMULATE_FAIL: 1113 pr_crit("%s: emulation at %lx failed (%08x)\n", 1114 __func__, kvmppc_get_pc(vcpu), last_inst); 1115 kvmppc_core_queue_program(vcpu, flags); 1116 r = RESUME_GUEST; 1117 break; 1118 case EMULATE_DO_MMIO: 1119 vcpu->run->exit_reason = KVM_EXIT_MMIO; 1120 r = RESUME_HOST_NV; 1121 break; 1122 case EMULATE_EXIT_USER: 1123 r = RESUME_HOST_NV; 1124 break; 1125 default: 1126 BUG(); 1127 } 1128 1129 return r; 1130 } 1131 1132 int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr) 1133 { 1134 struct kvm_run *run = vcpu->run; 1135 int r = RESUME_HOST; 1136 int s; 1137 1138 vcpu->stat.sum_exits++; 1139 1140 run->exit_reason = KVM_EXIT_UNKNOWN; 1141 run->ready_for_interrupt_injection = 1; 1142 1143 /* We get here with MSR.EE=1 */ 1144 1145 trace_kvm_exit(exit_nr, vcpu); 1146 guest_exit(); 1147 1148 switch (exit_nr) { 1149 case BOOK3S_INTERRUPT_INST_STORAGE: 1150 { 1151 ulong shadow_srr1 = vcpu->arch.shadow_srr1; 1152 vcpu->stat.pf_instruc++; 1153 1154 if (kvmppc_is_split_real(vcpu)) 1155 kvmppc_fixup_split_real(vcpu); 1156 1157 #ifdef CONFIG_PPC_BOOK3S_32 1158 /* We set segments as unused segments when invalidating them. 
So 1159 * treat the respective fault as segment fault. */ 1160 { 1161 struct kvmppc_book3s_shadow_vcpu *svcpu; 1162 u32 sr; 1163 1164 svcpu = svcpu_get(vcpu); 1165 sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; 1166 svcpu_put(svcpu); 1167 if (sr == SR_INVALID) { 1168 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 1169 r = RESUME_GUEST; 1170 break; 1171 } 1172 } 1173 #endif 1174 1175 /* only care about PTEG not found errors, but leave NX alone */ 1176 if (shadow_srr1 & 0x40000000) { 1177 int idx = srcu_read_lock(&vcpu->kvm->srcu); 1178 r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr); 1179 srcu_read_unlock(&vcpu->kvm->srcu, idx); 1180 vcpu->stat.sp_instruc++; 1181 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 1182 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { 1183 /* 1184 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page, 1185 * so we can't use the NX bit inside the guest. Let's cross our fingers, 1186 * that no guest that needs the dcbz hack does NX. 1187 */ 1188 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); 1189 r = RESUME_GUEST; 1190 } else { 1191 kvmppc_core_queue_inst_storage(vcpu, 1192 shadow_srr1 & 0x58000000); 1193 r = RESUME_GUEST; 1194 } 1195 break; 1196 } 1197 case BOOK3S_INTERRUPT_DATA_STORAGE: 1198 { 1199 ulong dar = kvmppc_get_fault_dar(vcpu); 1200 u32 fault_dsisr = vcpu->arch.fault_dsisr; 1201 vcpu->stat.pf_storage++; 1202 1203 #ifdef CONFIG_PPC_BOOK3S_32 1204 /* We set segments as unused segments when invalidating them. So 1205 * treat the respective fault as segment fault. */ 1206 { 1207 struct kvmppc_book3s_shadow_vcpu *svcpu; 1208 u32 sr; 1209 1210 svcpu = svcpu_get(vcpu); 1211 sr = svcpu->sr[dar >> SID_SHIFT]; 1212 svcpu_put(svcpu); 1213 if (sr == SR_INVALID) { 1214 kvmppc_mmu_map_segment(vcpu, dar); 1215 r = RESUME_GUEST; 1216 break; 1217 } 1218 } 1219 #endif 1220 1221 /* 1222 * We need to handle missing shadow PTEs, and 1223 * protection faults due to us mapping a page read-only 1224 * when the guest thinks it is writable. 
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
1343 u32 last_inst; 1344 1345 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { 1346 /* Do paired single instruction emulation */ 1347 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, 1348 &last_inst); 1349 if (emul == EMULATE_DONE) 1350 r = kvmppc_exit_pr_progint(vcpu, exit_nr); 1351 else 1352 r = RESUME_GUEST; 1353 1354 break; 1355 } 1356 1357 /* Enable external provider */ 1358 switch (exit_nr) { 1359 case BOOK3S_INTERRUPT_FP_UNAVAIL: 1360 ext_msr = MSR_FP; 1361 break; 1362 1363 case BOOK3S_INTERRUPT_ALTIVEC: 1364 ext_msr = MSR_VEC; 1365 break; 1366 1367 case BOOK3S_INTERRUPT_VSX: 1368 ext_msr = MSR_VSX; 1369 break; 1370 } 1371 1372 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); 1373 break; 1374 } 1375 case BOOK3S_INTERRUPT_ALIGNMENT: 1376 { 1377 u32 last_inst; 1378 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); 1379 1380 if (emul == EMULATE_DONE) { 1381 u32 dsisr; 1382 u64 dar; 1383 1384 dsisr = kvmppc_alignment_dsisr(vcpu, last_inst); 1385 dar = kvmppc_alignment_dar(vcpu, last_inst); 1386 1387 kvmppc_set_dsisr(vcpu, dsisr); 1388 kvmppc_set_dar(vcpu, dar); 1389 1390 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 1391 } 1392 r = RESUME_GUEST; 1393 break; 1394 } 1395 #ifdef CONFIG_PPC_BOOK3S_64 1396 case BOOK3S_INTERRUPT_FAC_UNAVAIL: 1397 r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); 1398 break; 1399 #endif 1400 case BOOK3S_INTERRUPT_MACHINE_CHECK: 1401 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 1402 r = RESUME_GUEST; 1403 break; 1404 case BOOK3S_INTERRUPT_TRACE: 1405 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 1406 run->exit_reason = KVM_EXIT_DEBUG; 1407 r = RESUME_HOST; 1408 } else { 1409 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 1410 r = RESUME_GUEST; 1411 } 1412 break; 1413 default: 1414 { 1415 ulong shadow_srr1 = vcpu->arch.shadow_srr1; 1416 /* Ugh - bork here! What did we get? */ 1417 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", 1418 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); 1419 r = RESUME_HOST; 1420 BUG(); 1421 break; 1422 } 1423 } 1424 1425 if (!(r & RESUME_HOST)) { 1426 /* To avoid clobbering exit_reason, only check for signals if 1427 * we aren't already exiting to userspace for some other 1428 * reason. */ 1429 1430 /* 1431 * Interrupts could be timers for the guest which we have to 1432 * inject again, so let's postpone them until we're in the guest 1433 * and if we really did time things so badly, then we just exit 1434 * again due to a host external interrupt. 
1435 */ 1436 s = kvmppc_prepare_to_enter(vcpu); 1437 if (s <= 0) 1438 r = s; 1439 else { 1440 /* interrupts now hard-disabled */ 1441 kvmppc_fix_ee_before_entry(); 1442 } 1443 1444 kvmppc_handle_lost_ext(vcpu); 1445 } 1446 1447 trace_kvm_book3s_reenter(r, vcpu); 1448 1449 return r; 1450 } 1451 1452 static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, 1453 struct kvm_sregs *sregs) 1454 { 1455 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 1456 int i; 1457 1458 sregs->pvr = vcpu->arch.pvr; 1459 1460 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; 1461 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { 1462 for (i = 0; i < 64; i++) { 1463 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; 1464 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; 1465 } 1466 } else { 1467 for (i = 0; i < 16; i++) 1468 sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); 1469 1470 for (i = 0; i < 8; i++) { 1471 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; 1472 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; 1473 } 1474 } 1475 1476 return 0; 1477 } 1478 1479 static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, 1480 struct kvm_sregs *sregs) 1481 { 1482 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 1483 int i; 1484 1485 kvmppc_set_pvr_pr(vcpu, sregs->pvr); 1486 1487 vcpu3s->sdr1 = sregs->u.s.sdr1; 1488 #ifdef CONFIG_PPC_BOOK3S_64 1489 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { 1490 /* Flush all SLB entries */ 1491 vcpu->arch.mmu.slbmte(vcpu, 0, 0); 1492 vcpu->arch.mmu.slbia(vcpu); 1493 1494 for (i = 0; i < 64; i++) { 1495 u64 rb = sregs->u.s.ppc64.slb[i].slbe; 1496 u64 rs = sregs->u.s.ppc64.slb[i].slbv; 1497 1498 if (rb & SLB_ESID_V) 1499 vcpu->arch.mmu.slbmte(vcpu, rs, rb); 1500 } 1501 } else 1502 #endif 1503 { 1504 for (i = 0; i < 16; i++) { 1505 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); 1506 } 1507 for (i = 0; i < 8; i++) { 1508 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, 1509 (u32)sregs->u.s.ppc32.ibat[i]); 1510 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, 1511 (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); 1512 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, 1513 (u32)sregs->u.s.ppc32.dbat[i]); 1514 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, 1515 (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); 1516 } 1517 } 1518 1519 /* Flush the MMU after messing with the segments */ 1520 kvmppc_mmu_pte_flush(vcpu, 0, 0); 1521 1522 return 0; 1523 } 1524 1525 static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, 1526 union kvmppc_one_reg *val) 1527 { 1528 int r = 0; 1529 1530 switch (id) { 1531 case KVM_REG_PPC_DEBUG_INST: 1532 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); 1533 break; 1534 case KVM_REG_PPC_HIOR: 1535 *val = get_reg_val(id, to_book3s(vcpu)->hior); 1536 break; 1537 case KVM_REG_PPC_VTB: 1538 *val = get_reg_val(id, to_book3s(vcpu)->vtb); 1539 break; 1540 case KVM_REG_PPC_LPCR: 1541 case KVM_REG_PPC_LPCR_64: 1542 /* 1543 * We are only interested in the LPCR_ILE bit 1544 */ 1545 if (vcpu->arch.intr_msr & MSR_LE) 1546 *val = get_reg_val(id, LPCR_ILE); 1547 else 1548 *val = get_reg_val(id, 0); 1549 break; 1550 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1551 case KVM_REG_PPC_TFHAR: 1552 *val = get_reg_val(id, vcpu->arch.tfhar); 1553 break; 1554 case KVM_REG_PPC_TFIAR: 1555 *val = get_reg_val(id, vcpu->arch.tfiar); 1556 break; 1557 case KVM_REG_PPC_TEXASR: 1558 *val = get_reg_val(id, vcpu->arch.texasr); 1559 break; 1560 case KVM_REG_PPC_TM_GPR0 ... 
KVM_REG_PPC_TM_GPR31: 1561 *val = get_reg_val(id, 1562 vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); 1563 break; 1564 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: 1565 { 1566 int i, j; 1567 1568 i = id - KVM_REG_PPC_TM_VSR0; 1569 if (i < 32) 1570 for (j = 0; j < TS_FPRWIDTH; j++) 1571 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; 1572 else { 1573 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1574 val->vval = vcpu->arch.vr_tm.vr[i-32]; 1575 else 1576 r = -ENXIO; 1577 } 1578 break; 1579 } 1580 case KVM_REG_PPC_TM_CR: 1581 *val = get_reg_val(id, vcpu->arch.cr_tm); 1582 break; 1583 case KVM_REG_PPC_TM_XER: 1584 *val = get_reg_val(id, vcpu->arch.xer_tm); 1585 break; 1586 case KVM_REG_PPC_TM_LR: 1587 *val = get_reg_val(id, vcpu->arch.lr_tm); 1588 break; 1589 case KVM_REG_PPC_TM_CTR: 1590 *val = get_reg_val(id, vcpu->arch.ctr_tm); 1591 break; 1592 case KVM_REG_PPC_TM_FPSCR: 1593 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); 1594 break; 1595 case KVM_REG_PPC_TM_AMR: 1596 *val = get_reg_val(id, vcpu->arch.amr_tm); 1597 break; 1598 case KVM_REG_PPC_TM_PPR: 1599 *val = get_reg_val(id, vcpu->arch.ppr_tm); 1600 break; 1601 case KVM_REG_PPC_TM_VRSAVE: 1602 *val = get_reg_val(id, vcpu->arch.vrsave_tm); 1603 break; 1604 case KVM_REG_PPC_TM_VSCR: 1605 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1606 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); 1607 else 1608 r = -ENXIO; 1609 break; 1610 case KVM_REG_PPC_TM_DSCR: 1611 *val = get_reg_val(id, vcpu->arch.dscr_tm); 1612 break; 1613 case KVM_REG_PPC_TM_TAR: 1614 *val = get_reg_val(id, vcpu->arch.tar_tm); 1615 break; 1616 #endif 1617 default: 1618 r = -EINVAL; 1619 break; 1620 } 1621 1622 return r; 1623 } 1624 1625 static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) 1626 { 1627 if (new_lpcr & LPCR_ILE) 1628 vcpu->arch.intr_msr |= MSR_LE; 1629 else 1630 vcpu->arch.intr_msr &= ~MSR_LE; 1631 } 1632 1633 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, 1634 union kvmppc_one_reg *val) 1635 { 1636 int r = 0; 1637 1638 switch (id) { 1639 case KVM_REG_PPC_HIOR: 1640 to_book3s(vcpu)->hior = set_reg_val(id, *val); 1641 to_book3s(vcpu)->hior_explicit = true; 1642 break; 1643 case KVM_REG_PPC_VTB: 1644 to_book3s(vcpu)->vtb = set_reg_val(id, *val); 1645 break; 1646 case KVM_REG_PPC_LPCR: 1647 case KVM_REG_PPC_LPCR_64: 1648 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); 1649 break; 1650 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1651 case KVM_REG_PPC_TFHAR: 1652 vcpu->arch.tfhar = set_reg_val(id, *val); 1653 break; 1654 case KVM_REG_PPC_TFIAR: 1655 vcpu->arch.tfiar = set_reg_val(id, *val); 1656 break; 1657 case KVM_REG_PPC_TEXASR: 1658 vcpu->arch.texasr = set_reg_val(id, *val); 1659 break; 1660 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: 1661 vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = 1662 set_reg_val(id, *val); 1663 break; 1664 case KVM_REG_PPC_TM_VSR0 ... 
KVM_REG_PPC_TM_VSR63: 1665 { 1666 int i, j; 1667 1668 i = id - KVM_REG_PPC_TM_VSR0; 1669 if (i < 32) 1670 for (j = 0; j < TS_FPRWIDTH; j++) 1671 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; 1672 else 1673 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1674 vcpu->arch.vr_tm.vr[i-32] = val->vval; 1675 else 1676 r = -ENXIO; 1677 break; 1678 } 1679 case KVM_REG_PPC_TM_CR: 1680 vcpu->arch.cr_tm = set_reg_val(id, *val); 1681 break; 1682 case KVM_REG_PPC_TM_XER: 1683 vcpu->arch.xer_tm = set_reg_val(id, *val); 1684 break; 1685 case KVM_REG_PPC_TM_LR: 1686 vcpu->arch.lr_tm = set_reg_val(id, *val); 1687 break; 1688 case KVM_REG_PPC_TM_CTR: 1689 vcpu->arch.ctr_tm = set_reg_val(id, *val); 1690 break; 1691 case KVM_REG_PPC_TM_FPSCR: 1692 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); 1693 break; 1694 case KVM_REG_PPC_TM_AMR: 1695 vcpu->arch.amr_tm = set_reg_val(id, *val); 1696 break; 1697 case KVM_REG_PPC_TM_PPR: 1698 vcpu->arch.ppr_tm = set_reg_val(id, *val); 1699 break; 1700 case KVM_REG_PPC_TM_VRSAVE: 1701 vcpu->arch.vrsave_tm = set_reg_val(id, *val); 1702 break; 1703 case KVM_REG_PPC_TM_VSCR: 1704 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1705 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); 1706 else 1707 r = -ENXIO; 1708 break; 1709 case KVM_REG_PPC_TM_DSCR: 1710 vcpu->arch.dscr_tm = set_reg_val(id, *val); 1711 break; 1712 case KVM_REG_PPC_TM_TAR: 1713 vcpu->arch.tar_tm = set_reg_val(id, *val); 1714 break; 1715 #endif 1716 default: 1717 r = -EINVAL; 1718 break; 1719 } 1720 1721 return r; 1722 } 1723 1724 static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu) 1725 { 1726 struct kvmppc_vcpu_book3s *vcpu_book3s; 1727 unsigned long p; 1728 int err; 1729 1730 err = -ENOMEM; 1731 1732 vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); 1733 if (!vcpu_book3s) 1734 goto out; 1735 vcpu->arch.book3s = vcpu_book3s; 1736 1737 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER 1738 vcpu->arch.shadow_vcpu = 1739 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); 1740 if (!vcpu->arch.shadow_vcpu) 1741 goto free_vcpu3s; 1742 #endif 1743 1744 p = __get_free_page(GFP_KERNEL|__GFP_ZERO); 1745 if (!p) 1746 goto free_shadow_vcpu; 1747 vcpu->arch.shared = (void *)p; 1748 #ifdef CONFIG_PPC_BOOK3S_64 1749 /* Always start the shared struct in native endian mode */ 1750 #ifdef __BIG_ENDIAN__ 1751 vcpu->arch.shared_big_endian = true; 1752 #else 1753 vcpu->arch.shared_big_endian = false; 1754 #endif 1755 1756 /* 1757 * Default to the same as the host if we're on sufficiently 1758 * recent machine that we have 1TB segments; 1759 * otherwise default to PPC970FX. 
1760 */ 1761 vcpu->arch.pvr = 0x3C0301; 1762 if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) 1763 vcpu->arch.pvr = mfspr(SPRN_PVR); 1764 vcpu->arch.intr_msr = MSR_SF; 1765 #else 1766 /* default to book3s_32 (750) */ 1767 vcpu->arch.pvr = 0x84202; 1768 vcpu->arch.intr_msr = 0; 1769 #endif 1770 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); 1771 vcpu->arch.slb_nr = 64; 1772 1773 vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; 1774 1775 err = kvmppc_mmu_init_pr(vcpu); 1776 if (err < 0) 1777 goto free_shared_page; 1778 1779 return 0; 1780 1781 free_shared_page: 1782 free_page((unsigned long)vcpu->arch.shared); 1783 free_shadow_vcpu: 1784 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER 1785 kfree(vcpu->arch.shadow_vcpu); 1786 free_vcpu3s: 1787 #endif 1788 vfree(vcpu_book3s); 1789 out: 1790 return err; 1791 } 1792 1793 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) 1794 { 1795 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 1796 1797 kvmppc_mmu_destroy_pr(vcpu); 1798 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); 1799 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER 1800 kfree(vcpu->arch.shadow_vcpu); 1801 #endif 1802 vfree(vcpu_book3s); 1803 } 1804 1805 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) 1806 { 1807 int ret; 1808 1809 /* Check if we can run the vcpu at all */ 1810 if (!vcpu->arch.sane) { 1811 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1812 ret = -EINVAL; 1813 goto out; 1814 } 1815 1816 kvmppc_setup_debug(vcpu); 1817 1818 /* 1819 * Interrupts could be timers for the guest which we have to inject 1820 * again, so let's postpone them until we're in the guest and if we 1821 * really did time things so badly, then we just exit again due to 1822 * a host external interrupt. 1823 */ 1824 ret = kvmppc_prepare_to_enter(vcpu); 1825 if (ret <= 0) 1826 goto out; 1827 /* interrupts now hard-disabled */ 1828 1829 /* Save FPU, Altivec and VSX state */ 1830 giveup_all(current); 1831 1832 /* Preload FPU if it's enabled */ 1833 if (kvmppc_get_msr(vcpu) & MSR_FP) 1834 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 1835 1836 kvmppc_fix_ee_before_entry(); 1837 1838 ret = __kvmppc_vcpu_run(vcpu); 1839 1840 kvmppc_clear_debug(vcpu); 1841 1842 /* No need for guest_exit. It's done in handle_exit. 1843 We also get here with interrupts enabled. */ 1844 1845 /* Make sure we save the guest FPU/Altivec/VSX state */ 1846 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); 1847 1848 /* Make sure we save the guest TAR/EBB/DSCR state */ 1849 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); 1850 1851 out: 1852 vcpu->mode = OUTSIDE_GUEST_MODE; 1853 return ret; 1854 } 1855 1856 /* 1857 * Get (and clear) the dirty memory log for a memory slot. 1858 */ 1859 static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, 1860 struct kvm_dirty_log *log) 1861 { 1862 struct kvm_memory_slot *memslot; 1863 struct kvm_vcpu *vcpu; 1864 ulong ga, ga_end; 1865 int is_dirty = 0; 1866 int r; 1867 unsigned long n; 1868 1869 mutex_lock(&kvm->slots_lock); 1870 1871 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); 1872 if (r) 1873 goto out; 1874 1875 /* If nothing is dirty, don't bother messing with page tables. 
*/ 1876 if (is_dirty) { 1877 ga = memslot->base_gfn << PAGE_SHIFT; 1878 ga_end = ga + (memslot->npages << PAGE_SHIFT); 1879 1880 kvm_for_each_vcpu(n, vcpu, kvm) 1881 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); 1882 1883 n = kvm_dirty_bitmap_bytes(memslot); 1884 memset(memslot->dirty_bitmap, 0, n); 1885 } 1886 1887 r = 0; 1888 out: 1889 mutex_unlock(&kvm->slots_lock); 1890 return r; 1891 } 1892 1893 static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, 1894 struct kvm_memory_slot *memslot) 1895 { 1896 return; 1897 } 1898 1899 static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, 1900 struct kvm_memory_slot *memslot, 1901 const struct kvm_userspace_memory_region *mem, 1902 enum kvm_mr_change change) 1903 { 1904 return 0; 1905 } 1906 1907 static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, 1908 const struct kvm_userspace_memory_region *mem, 1909 const struct kvm_memory_slot *old, 1910 const struct kvm_memory_slot *new, 1911 enum kvm_mr_change change) 1912 { 1913 return; 1914 } 1915 1916 static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot) 1917 { 1918 return; 1919 } 1920 1921 #ifdef CONFIG_PPC64 1922 static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, 1923 struct kvm_ppc_smmu_info *info) 1924 { 1925 long int i; 1926 struct kvm_vcpu *vcpu; 1927 1928 info->flags = 0; 1929 1930 /* SLB is always 64 entries */ 1931 info->slb_size = 64; 1932 1933 /* Standard 4k base page size segment */ 1934 info->sps[0].page_shift = 12; 1935 info->sps[0].slb_enc = 0; 1936 info->sps[0].enc[0].page_shift = 12; 1937 info->sps[0].enc[0].pte_enc = 0; 1938 1939 /* 1940 * 64k large page size. 1941 * We only want to put this in if the CPUs we're emulating 1942 * support it, but unfortunately we don't have a vcpu easily 1943 * to hand here to test. Just pick the first vcpu, and if 1944 * that doesn't exist yet, report the minimum capability, 1945 * i.e., no 64k pages. 1946 * 1T segment support goes along with 64k pages. 1947 */ 1948 i = 1; 1949 vcpu = kvm_get_vcpu(kvm, 0); 1950 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { 1951 info->flags = KVM_PPC_1T_SEGMENTS; 1952 info->sps[i].page_shift = 16; 1953 info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; 1954 info->sps[i].enc[0].page_shift = 16; 1955 info->sps[i].enc[0].pte_enc = 1; 1956 ++i; 1957 } 1958 1959 /* Standard 16M large page size segment */ 1960 info->sps[i].page_shift = 24; 1961 info->sps[i].slb_enc = SLB_VSID_L; 1962 info->sps[i].enc[0].page_shift = 24; 1963 info->sps[i].enc[0].pte_enc = 0; 1964 1965 return 0; 1966 } 1967 1968 static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) 1969 { 1970 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1971 return -ENODEV; 1972 /* Require flags and process table base and size to all be zero. 
*/ 1973 if (cfg->flags || cfg->process_table) 1974 return -EINVAL; 1975 return 0; 1976 } 1977 1978 #else 1979 static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, 1980 struct kvm_ppc_smmu_info *info) 1981 { 1982 /* We should not get called */ 1983 BUG(); 1984 return 0; 1985 } 1986 #endif /* CONFIG_PPC64 */ 1987 1988 static unsigned int kvm_global_user_count = 0; 1989 static DEFINE_SPINLOCK(kvm_global_user_count_lock); 1990 1991 static int kvmppc_core_init_vm_pr(struct kvm *kvm) 1992 { 1993 mutex_init(&kvm->arch.hpt_mutex); 1994 1995 #ifdef CONFIG_PPC_BOOK3S_64 1996 /* Start out with the default set of hcalls enabled */ 1997 kvmppc_pr_init_default_hcalls(kvm); 1998 #endif 1999 2000 if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 2001 spin_lock(&kvm_global_user_count_lock); 2002 if (++kvm_global_user_count == 1) 2003 pseries_disable_reloc_on_exc(); 2004 spin_unlock(&kvm_global_user_count_lock); 2005 } 2006 return 0; 2007 } 2008 2009 static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) 2010 { 2011 #ifdef CONFIG_PPC64 2012 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); 2013 #endif 2014 2015 if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 2016 spin_lock(&kvm_global_user_count_lock); 2017 BUG_ON(kvm_global_user_count == 0); 2018 if (--kvm_global_user_count == 0) 2019 pseries_enable_reloc_on_exc(); 2020 spin_unlock(&kvm_global_user_count_lock); 2021 } 2022 } 2023 2024 static int kvmppc_core_check_processor_compat_pr(void) 2025 { 2026 /* 2027 * PR KVM can work on POWER9 inside a guest partition 2028 * running in HPT mode. It can't work if we are using 2029 * radix translation (because radix provides no way for 2030 * a process to have unique translations in quadrant 3). 2031 */ 2032 if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) 2033 return -EIO; 2034 return 0; 2035 } 2036 2037 static long kvm_arch_vm_ioctl_pr(struct file *filp, 2038 unsigned int ioctl, unsigned long arg) 2039 { 2040 return -ENOTTY; 2041 } 2042 2043 static struct kvmppc_ops kvm_ops_pr = { 2044 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, 2045 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, 2046 .get_one_reg = kvmppc_get_one_reg_pr, 2047 .set_one_reg = kvmppc_set_one_reg_pr, 2048 .vcpu_load = kvmppc_core_vcpu_load_pr, 2049 .vcpu_put = kvmppc_core_vcpu_put_pr, 2050 .inject_interrupt = kvmppc_inject_interrupt_pr, 2051 .set_msr = kvmppc_set_msr_pr, 2052 .vcpu_run = kvmppc_vcpu_run_pr, 2053 .vcpu_create = kvmppc_core_vcpu_create_pr, 2054 .vcpu_free = kvmppc_core_vcpu_free_pr, 2055 .check_requests = kvmppc_core_check_requests_pr, 2056 .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, 2057 .flush_memslot = kvmppc_core_flush_memslot_pr, 2058 .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, 2059 .commit_memory_region = kvmppc_core_commit_memory_region_pr, 2060 .unmap_gfn_range = kvm_unmap_gfn_range_pr, 2061 .age_gfn = kvm_age_gfn_pr, 2062 .test_age_gfn = kvm_test_age_gfn_pr, 2063 .set_spte_gfn = kvm_set_spte_gfn_pr, 2064 .free_memslot = kvmppc_core_free_memslot_pr, 2065 .init_vm = kvmppc_core_init_vm_pr, 2066 .destroy_vm = kvmppc_core_destroy_vm_pr, 2067 .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, 2068 .emulate_op = kvmppc_core_emulate_op_pr, 2069 .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, 2070 .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, 2071 .fast_vcpu_kick = kvm_vcpu_kick, 2072 .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, 2073 #ifdef CONFIG_PPC_BOOK3S_64 2074 .hcall_implemented = kvmppc_hcall_impl_pr, 2075 .configure_mmu = kvm_configure_mmu_pr, 2076 #endif 2077 .giveup_ext = kvmppc_giveup_ext, 
2078 }; 2079 2080 2081 int kvmppc_book3s_init_pr(void) 2082 { 2083 int r; 2084 2085 r = kvmppc_core_check_processor_compat_pr(); 2086 if (r < 0) 2087 return r; 2088 2089 kvm_ops_pr.owner = THIS_MODULE; 2090 kvmppc_pr_ops = &kvm_ops_pr; 2091 2092 r = kvmppc_mmu_hpte_sysinit(); 2093 return r; 2094 } 2095 2096 void kvmppc_book3s_exit_pr(void) 2097 { 2098 kvmppc_pr_ops = NULL; 2099 kvmppc_mmu_hpte_sysexit(); 2100 } 2101 2102 /* 2103 * We only support separate modules for book3s 64 2104 */ 2105 #ifdef CONFIG_PPC_BOOK3S_64 2106 2107 module_init(kvmppc_book3s_init_pr); 2108 module_exit(kvmppc_book3s_exit_pr); 2109 2110 MODULE_LICENSE("GPL"); 2111 MODULE_ALIAS_MISCDEV(KVM_MINOR); 2112 MODULE_ALIAS("devname:kvm"); 2113 #endif 2114