1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 4 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. 5 * 6 * Authors: 7 * Paul Mackerras <paulus@au1.ibm.com> 8 * Alexander Graf <agraf@suse.de> 9 * Kevin Wolf <mail@kevin-wolf.de> 10 * 11 * Description: KVM functions specific to running on Book 3S 12 * processors in hypervisor mode (specifically POWER7 and later). 13 * 14 * This file is derived from arch/powerpc/kvm/book3s.c, 15 * by Alexander Graf <agraf@suse.de>. 16 */ 17 18 #include <linux/kvm_host.h> 19 #include <linux/kernel.h> 20 #include <linux/err.h> 21 #include <linux/slab.h> 22 #include <linux/preempt.h> 23 #include <linux/sched/signal.h> 24 #include <linux/sched/stat.h> 25 #include <linux/delay.h> 26 #include <linux/export.h> 27 #include <linux/fs.h> 28 #include <linux/anon_inodes.h> 29 #include <linux/cpu.h> 30 #include <linux/cpumask.h> 31 #include <linux/spinlock.h> 32 #include <linux/page-flags.h> 33 #include <linux/srcu.h> 34 #include <linux/miscdevice.h> 35 #include <linux/debugfs.h> 36 #include <linux/gfp.h> 37 #include <linux/vmalloc.h> 38 #include <linux/highmem.h> 39 #include <linux/hugetlb.h> 40 #include <linux/kvm_irqfd.h> 41 #include <linux/irqbypass.h> 42 #include <linux/module.h> 43 #include <linux/compiler.h> 44 #include <linux/of.h> 45 #include <linux/irqdomain.h> 46 47 #include <asm/ftrace.h> 48 #include <asm/reg.h> 49 #include <asm/ppc-opcode.h> 50 #include <asm/asm-prototypes.h> 51 #include <asm/archrandom.h> 52 #include <asm/debug.h> 53 #include <asm/disassemble.h> 54 #include <asm/cputable.h> 55 #include <asm/cacheflush.h> 56 #include <linux/uaccess.h> 57 #include <asm/interrupt.h> 58 #include <asm/io.h> 59 #include <asm/kvm_ppc.h> 60 #include <asm/kvm_book3s.h> 61 #include <asm/mmu_context.h> 62 #include <asm/lppaca.h> 63 #include <asm/pmc.h> 64 #include <asm/processor.h> 65 #include <asm/cputhreads.h> 66 #include <asm/page.h> 67 #include <asm/hvcall.h> 68 #include <asm/switch_to.h> 69 #include <asm/smp.h> 70 #include <asm/dbell.h> 71 #include <asm/hmi.h> 72 #include <asm/pnv-pci.h> 73 #include <asm/mmu.h> 74 #include <asm/opal.h> 75 #include <asm/xics.h> 76 #include <asm/xive.h> 77 #include <asm/hw_breakpoint.h> 78 #include <asm/kvm_book3s_uvmem.h> 79 #include <asm/ultravisor.h> 80 #include <asm/dtl.h> 81 #include <asm/plpar_wrappers.h> 82 83 #include "book3s.h" 84 #include "book3s_hv.h" 85 86 #define CREATE_TRACE_POINTS 87 #include "trace_hv.h" 88 89 /* #define EXIT_DEBUG */ 90 /* #define EXIT_DEBUG_SIMPLE */ 91 /* #define EXIT_DEBUG_INT */ 92 93 /* Used to indicate that a guest page fault needs to be handled */ 94 #define RESUME_PAGE_FAULT (RESUME_GUEST | RESUME_FLAG_ARCH1) 95 /* Used to indicate that a guest passthrough interrupt needs to be handled */ 96 #define RESUME_PASSTHROUGH (RESUME_GUEST | RESUME_FLAG_ARCH2) 97 98 /* Used as a "null" value for timebase values */ 99 #define TB_NIL (~(u64)0) 100 101 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); 102 103 static int dynamic_mt_modes = 6; 104 module_param(dynamic_mt_modes, int, 0644); 105 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)"); 106 static int target_smt_mode; 107 module_param(target_smt_mode, int, 0644); 108 MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)"); 109 110 static bool one_vm_per_core; 111 module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR); 112 MODULE_PARM_DESC(one_vm_per_core, "Only 
run vCPUs from the same VM on a core (requires POWER8 or older)"); 113 114 #ifdef CONFIG_KVM_XICS 115 static const struct kernel_param_ops module_param_ops = { 116 .set = param_set_int, 117 .get = param_get_int, 118 }; 119 120 module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644); 121 MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization"); 122 123 module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644); 124 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core"); 125 #endif 126 127 /* If set, guests are allowed to create and control nested guests */ 128 static bool nested = true; 129 module_param(nested, bool, S_IRUGO | S_IWUSR); 130 MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)"); 131 132 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); 133 134 /* 135 * RWMR values for POWER8. These control the rate at which PURR 136 * and SPURR count and should be set according to the number of 137 * online threads in the vcore being run. 138 */ 139 #define RWMR_RPA_P8_1THREAD 0x164520C62609AECAUL 140 #define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9UL 141 #define RWMR_RPA_P8_3THREAD 0x164520C62609AECAUL 142 #define RWMR_RPA_P8_4THREAD 0x199A421245058DA9UL 143 #define RWMR_RPA_P8_5THREAD 0x164520C62609AECAUL 144 #define RWMR_RPA_P8_6THREAD 0x164520C62609AECAUL 145 #define RWMR_RPA_P8_7THREAD 0x164520C62609AECAUL 146 #define RWMR_RPA_P8_8THREAD 0x164520C62609AECAUL 147 148 static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = { 149 RWMR_RPA_P8_1THREAD, 150 RWMR_RPA_P8_1THREAD, 151 RWMR_RPA_P8_2THREAD, 152 RWMR_RPA_P8_3THREAD, 153 RWMR_RPA_P8_4THREAD, 154 RWMR_RPA_P8_5THREAD, 155 RWMR_RPA_P8_6THREAD, 156 RWMR_RPA_P8_7THREAD, 157 RWMR_RPA_P8_8THREAD, 158 }; 159 160 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, 161 int *ip) 162 { 163 int i = *ip; 164 struct kvm_vcpu *vcpu; 165 166 while (++i < MAX_SMT_THREADS) { 167 vcpu = READ_ONCE(vc->runnable_threads[i]); 168 if (vcpu) { 169 *ip = i; 170 return vcpu; 171 } 172 } 173 return NULL; 174 } 175 176 /* Used to traverse the list of runnable threads for a given vcore */ 177 #define for_each_runnable_thread(i, vcpu, vc) \ 178 for (i = -1; (vcpu = next_runnable_thread(vc, &i)); ) 179 180 static bool kvmppc_ipi_thread(int cpu) 181 { 182 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); 183 184 /* If we're a nested hypervisor, fall back to ordinary IPIs for now */ 185 if (kvmhv_on_pseries()) 186 return false; 187 188 /* On POWER9 we can use msgsnd to IPI any cpu */ 189 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 190 msg |= get_hard_smp_processor_id(cpu); 191 smp_mb(); 192 __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); 193 return true; 194 } 195 196 /* On POWER8 for IPIs to threads in the same core, use msgsnd */ 197 if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 198 preempt_disable(); 199 if (cpu_first_thread_sibling(cpu) == 200 cpu_first_thread_sibling(smp_processor_id())) { 201 msg |= cpu_thread_in_core(cpu); 202 smp_mb(); 203 __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); 204 preempt_enable(); 205 return true; 206 } 207 preempt_enable(); 208 } 209 210 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) 211 if (cpu >= 0 && cpu < nr_cpu_ids) { 212 if (paca_ptrs[cpu]->kvm_hstate.xics_phys) { 213 xics_wake_cpu(cpu); 214 return true; 215 } 216 opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY); 217 return true; 218 } 219 #endif 220 221 return false; 222 } 223 224 static void 
kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) 225 { 226 int cpu; 227 struct rcuwait *waitp; 228 229 /* 230 * rcuwait_wake_up contains smp_mb() which orders prior stores that 231 * create pending work vs below loads of cpu fields. The other side 232 * is the barrier in vcpu run that orders setting the cpu fields vs 233 * testing for pending work. 234 */ 235 236 waitp = kvm_arch_vcpu_get_wait(vcpu); 237 if (rcuwait_wake_up(waitp)) 238 ++vcpu->stat.generic.halt_wakeup; 239 240 cpu = READ_ONCE(vcpu->arch.thread_cpu); 241 if (cpu >= 0 && kvmppc_ipi_thread(cpu)) 242 return; 243 244 /* CPU points to the first thread of the core */ 245 cpu = vcpu->cpu; 246 if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu)) 247 smp_send_reschedule(cpu); 248 } 249 250 /* 251 * We use the vcpu_load/put functions to measure stolen time. 252 * Stolen time is counted as time when either the vcpu is able to 253 * run as part of a virtual core, but the task running the vcore 254 * is preempted or sleeping, or when the vcpu needs something done 255 * in the kernel by the task running the vcpu, but that task is 256 * preempted or sleeping. Those two things have to be counted 257 * separately, since one of the vcpu tasks will take on the job 258 * of running the core, and the other vcpu tasks in the vcore will 259 * sleep waiting for it to do that, but that sleep shouldn't count 260 * as stolen time. 261 * 262 * Hence we accumulate stolen time when the vcpu can run as part of 263 * a vcore using vc->stolen_tb, and the stolen time when the vcpu 264 * needs its task to do other things in the kernel (for example, 265 * service a page fault) in busy_stolen. We don't accumulate 266 * stolen time for a vcore when it is inactive, or for a vcpu 267 * when it is in state RUNNING or NOTREADY. NOTREADY is a bit of 268 * a misnomer; it means that the vcpu task is not executing in 269 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in 270 * the kernel. We don't have any way of dividing up that time 271 * between time that the vcpu is genuinely stopped, time that 272 * the task is actively working on behalf of the vcpu, and time 273 * that the task is preempted, so we don't count any of it as 274 * stolen. 275 * 276 * Updates to busy_stolen are protected by arch.tbacct_lock; 277 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock 278 * lock. The stolen times are measured in units of timebase ticks. 279 * (Note that the != TB_NIL checks below are purely defensive; 280 * they should never fail.) 
281 */ 282 283 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb) 284 { 285 unsigned long flags; 286 287 WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); 288 289 spin_lock_irqsave(&vc->stoltb_lock, flags); 290 vc->preempt_tb = tb; 291 spin_unlock_irqrestore(&vc->stoltb_lock, flags); 292 } 293 294 static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb) 295 { 296 unsigned long flags; 297 298 WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); 299 300 spin_lock_irqsave(&vc->stoltb_lock, flags); 301 if (vc->preempt_tb != TB_NIL) { 302 vc->stolen_tb += tb - vc->preempt_tb; 303 vc->preempt_tb = TB_NIL; 304 } 305 spin_unlock_irqrestore(&vc->stoltb_lock, flags); 306 } 307 308 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) 309 { 310 struct kvmppc_vcore *vc = vcpu->arch.vcore; 311 unsigned long flags; 312 u64 now; 313 314 if (cpu_has_feature(CPU_FTR_ARCH_300)) 315 return; 316 317 now = mftb(); 318 319 /* 320 * We can test vc->runner without taking the vcore lock, 321 * because only this task ever sets vc->runner to this 322 * vcpu, and once it is set to this vcpu, only this task 323 * ever sets it to NULL. 324 */ 325 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) 326 kvmppc_core_end_stolen(vc, now); 327 328 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); 329 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && 330 vcpu->arch.busy_preempt != TB_NIL) { 331 vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt; 332 vcpu->arch.busy_preempt = TB_NIL; 333 } 334 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); 335 } 336 337 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) 338 { 339 struct kvmppc_vcore *vc = vcpu->arch.vcore; 340 unsigned long flags; 341 u64 now; 342 343 if (cpu_has_feature(CPU_FTR_ARCH_300)) 344 return; 345 346 now = mftb(); 347 348 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) 349 kvmppc_core_start_stolen(vc, now); 350 351 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); 352 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) 353 vcpu->arch.busy_preempt = now; 354 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); 355 } 356 357 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) 358 { 359 vcpu->arch.pvr = pvr; 360 } 361 362 /* Dummy value used in computing PCR value below */ 363 #define PCR_ARCH_31 (PCR_ARCH_300 << 1) 364 365 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) 366 { 367 unsigned long host_pcr_bit = 0, guest_pcr_bit = 0; 368 struct kvmppc_vcore *vc = vcpu->arch.vcore; 369 370 /* We can (emulate) our own architecture version and anything older */ 371 if (cpu_has_feature(CPU_FTR_ARCH_31)) 372 host_pcr_bit = PCR_ARCH_31; 373 else if (cpu_has_feature(CPU_FTR_ARCH_300)) 374 host_pcr_bit = PCR_ARCH_300; 375 else if (cpu_has_feature(CPU_FTR_ARCH_207S)) 376 host_pcr_bit = PCR_ARCH_207; 377 else if (cpu_has_feature(CPU_FTR_ARCH_206)) 378 host_pcr_bit = PCR_ARCH_206; 379 else 380 host_pcr_bit = PCR_ARCH_205; 381 382 /* Determine lowest PCR bit needed to run guest in given PVR level */ 383 guest_pcr_bit = host_pcr_bit; 384 if (arch_compat) { 385 switch (arch_compat) { 386 case PVR_ARCH_205: 387 guest_pcr_bit = PCR_ARCH_205; 388 break; 389 case PVR_ARCH_206: 390 case PVR_ARCH_206p: 391 guest_pcr_bit = PCR_ARCH_206; 392 break; 393 case PVR_ARCH_207: 394 guest_pcr_bit = PCR_ARCH_207; 395 break; 396 case PVR_ARCH_300: 397 guest_pcr_bit = PCR_ARCH_300; 398 break; 399 case PVR_ARCH_31: 400 guest_pcr_bit = PCR_ARCH_31; 401 break; 402 default: 403 return 
-EINVAL; 404 } 405 } 406 407 /* Check requested PCR bits don't exceed our capabilities */ 408 if (guest_pcr_bit > host_pcr_bit) 409 return -EINVAL; 410 411 spin_lock(&vc->lock); 412 vc->arch_compat = arch_compat; 413 /* 414 * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit 415 * Also set all reserved PCR bits 416 */ 417 vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK; 418 spin_unlock(&vc->lock); 419 420 return 0; 421 } 422 423 static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) 424 { 425 int r; 426 427 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); 428 pr_err("pc = %.16lx msr = %.16llx trap = %x\n", 429 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); 430 for (r = 0; r < 16; ++r) 431 pr_err("r%2d = %.16lx r%d = %.16lx\n", 432 r, kvmppc_get_gpr(vcpu, r), 433 r+16, kvmppc_get_gpr(vcpu, r+16)); 434 pr_err("ctr = %.16lx lr = %.16lx\n", 435 vcpu->arch.regs.ctr, vcpu->arch.regs.link); 436 pr_err("srr0 = %.16llx srr1 = %.16llx\n", 437 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); 438 pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", 439 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); 440 pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", 441 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); 442 pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n", 443 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); 444 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); 445 pr_err("fault dar = %.16lx dsisr = %.8x\n", 446 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); 447 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); 448 for (r = 0; r < vcpu->arch.slb_max; ++r) 449 pr_err(" ESID = %.16llx VSID = %.16llx\n", 450 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); 451 pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n", 452 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, 453 vcpu->arch.last_inst); 454 } 455 456 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) 457 { 458 return kvm_get_vcpu_by_id(kvm, id); 459 } 460 461 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) 462 { 463 vpa->__old_status |= LPPACA_OLD_SHARED_PROC; 464 vpa->yield_count = cpu_to_be32(1); 465 } 466 467 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, 468 unsigned long addr, unsigned long len) 469 { 470 /* check address is cacheline aligned */ 471 if (addr & (L1_CACHE_BYTES - 1)) 472 return -EINVAL; 473 spin_lock(&vcpu->arch.vpa_update_lock); 474 if (v->next_gpa != addr || v->len != len) { 475 v->next_gpa = addr; 476 v->len = addr ? 
len : 0; 477 v->update_pending = 1; 478 } 479 spin_unlock(&vcpu->arch.vpa_update_lock); 480 return 0; 481 } 482 483 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */ 484 struct reg_vpa { 485 u32 dummy; 486 union { 487 __be16 hword; 488 __be32 word; 489 } length; 490 }; 491 492 static int vpa_is_registered(struct kvmppc_vpa *vpap) 493 { 494 if (vpap->update_pending) 495 return vpap->next_gpa != 0; 496 return vpap->pinned_addr != NULL; 497 } 498 499 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, 500 unsigned long flags, 501 unsigned long vcpuid, unsigned long vpa) 502 { 503 struct kvm *kvm = vcpu->kvm; 504 unsigned long len, nb; 505 void *va; 506 struct kvm_vcpu *tvcpu; 507 int err; 508 int subfunc; 509 struct kvmppc_vpa *vpap; 510 511 tvcpu = kvmppc_find_vcpu(kvm, vcpuid); 512 if (!tvcpu) 513 return H_PARAMETER; 514 515 subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK; 516 if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL || 517 subfunc == H_VPA_REG_SLB) { 518 /* Registering new area - address must be cache-line aligned */ 519 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa) 520 return H_PARAMETER; 521 522 /* convert logical addr to kernel addr and read length */ 523 va = kvmppc_pin_guest_page(kvm, vpa, &nb); 524 if (va == NULL) 525 return H_PARAMETER; 526 if (subfunc == H_VPA_REG_VPA) 527 len = be16_to_cpu(((struct reg_vpa *)va)->length.hword); 528 else 529 len = be32_to_cpu(((struct reg_vpa *)va)->length.word); 530 kvmppc_unpin_guest_page(kvm, va, vpa, false); 531 532 /* Check length */ 533 if (len > nb || len < sizeof(struct reg_vpa)) 534 return H_PARAMETER; 535 } else { 536 vpa = 0; 537 len = 0; 538 } 539 540 err = H_PARAMETER; 541 vpap = NULL; 542 spin_lock(&tvcpu->arch.vpa_update_lock); 543 544 switch (subfunc) { 545 case H_VPA_REG_VPA: /* register VPA */ 546 /* 547 * The size of our lppaca is 1kB because of the way we align 548 * it for the guest to avoid crossing a 4kB boundary. We only 549 * use 640 bytes of the structure though, so we should accept 550 * clients that set a size of 640. 
551 */ 552 BUILD_BUG_ON(sizeof(struct lppaca) != 640); 553 if (len < sizeof(struct lppaca)) 554 break; 555 vpap = &tvcpu->arch.vpa; 556 err = 0; 557 break; 558 559 case H_VPA_REG_DTL: /* register DTL */ 560 if (len < sizeof(struct dtl_entry)) 561 break; 562 len -= len % sizeof(struct dtl_entry); 563 564 /* Check that they have previously registered a VPA */ 565 err = H_RESOURCE; 566 if (!vpa_is_registered(&tvcpu->arch.vpa)) 567 break; 568 569 vpap = &tvcpu->arch.dtl; 570 err = 0; 571 break; 572 573 case H_VPA_REG_SLB: /* register SLB shadow buffer */ 574 /* Check that they have previously registered a VPA */ 575 err = H_RESOURCE; 576 if (!vpa_is_registered(&tvcpu->arch.vpa)) 577 break; 578 579 vpap = &tvcpu->arch.slb_shadow; 580 err = 0; 581 break; 582 583 case H_VPA_DEREG_VPA: /* deregister VPA */ 584 /* Check they don't still have a DTL or SLB buf registered */ 585 err = H_RESOURCE; 586 if (vpa_is_registered(&tvcpu->arch.dtl) || 587 vpa_is_registered(&tvcpu->arch.slb_shadow)) 588 break; 589 590 vpap = &tvcpu->arch.vpa; 591 err = 0; 592 break; 593 594 case H_VPA_DEREG_DTL: /* deregister DTL */ 595 vpap = &tvcpu->arch.dtl; 596 err = 0; 597 break; 598 599 case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */ 600 vpap = &tvcpu->arch.slb_shadow; 601 err = 0; 602 break; 603 } 604 605 if (vpap) { 606 vpap->next_gpa = vpa; 607 vpap->len = len; 608 vpap->update_pending = 1; 609 } 610 611 spin_unlock(&tvcpu->arch.vpa_update_lock); 612 613 return err; 614 } 615 616 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) 617 { 618 struct kvm *kvm = vcpu->kvm; 619 void *va; 620 unsigned long nb; 621 unsigned long gpa; 622 623 /* 624 * We need to pin the page pointed to by vpap->next_gpa, 625 * but we can't call kvmppc_pin_guest_page under the lock 626 * as it does get_user_pages() and down_read(). So we 627 * have to drop the lock, pin the page, then get the lock 628 * again and check that a new area didn't get registered 629 * in the meantime. 630 */ 631 for (;;) { 632 gpa = vpap->next_gpa; 633 spin_unlock(&vcpu->arch.vpa_update_lock); 634 va = NULL; 635 nb = 0; 636 if (gpa) 637 va = kvmppc_pin_guest_page(kvm, gpa, &nb); 638 spin_lock(&vcpu->arch.vpa_update_lock); 639 if (gpa == vpap->next_gpa) 640 break; 641 /* sigh... unpin that one and try again */ 642 if (va) 643 kvmppc_unpin_guest_page(kvm, va, gpa, false); 644 } 645 646 vpap->update_pending = 0; 647 if (va && nb < vpap->len) { 648 /* 649 * If it's now too short, it must be that userspace 650 * has changed the mappings underlying guest memory, 651 * so unregister the region. 
652 */ 653 kvmppc_unpin_guest_page(kvm, va, gpa, false); 654 va = NULL; 655 } 656 if (vpap->pinned_addr) 657 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa, 658 vpap->dirty); 659 vpap->gpa = gpa; 660 vpap->pinned_addr = va; 661 vpap->dirty = false; 662 if (va) 663 vpap->pinned_end = va + vpap->len; 664 } 665 666 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) 667 { 668 if (!(vcpu->arch.vpa.update_pending || 669 vcpu->arch.slb_shadow.update_pending || 670 vcpu->arch.dtl.update_pending)) 671 return; 672 673 spin_lock(&vcpu->arch.vpa_update_lock); 674 if (vcpu->arch.vpa.update_pending) { 675 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); 676 if (vcpu->arch.vpa.pinned_addr) 677 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); 678 } 679 if (vcpu->arch.dtl.update_pending) { 680 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); 681 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; 682 vcpu->arch.dtl_index = 0; 683 } 684 if (vcpu->arch.slb_shadow.update_pending) 685 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); 686 spin_unlock(&vcpu->arch.vpa_update_lock); 687 } 688 689 /* 690 * Return the accumulated stolen time for the vcore up until `now'. 691 * The caller should hold the vcore lock. 692 */ 693 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) 694 { 695 u64 p; 696 unsigned long flags; 697 698 WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); 699 700 spin_lock_irqsave(&vc->stoltb_lock, flags); 701 p = vc->stolen_tb; 702 if (vc->vcore_state != VCORE_INACTIVE && 703 vc->preempt_tb != TB_NIL) 704 p += now - vc->preempt_tb; 705 spin_unlock_irqrestore(&vc->stoltb_lock, flags); 706 return p; 707 } 708 709 static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, 710 unsigned int pcpu, u64 now, 711 unsigned long stolen) 712 { 713 struct dtl_entry *dt; 714 struct lppaca *vpa; 715 716 dt = vcpu->arch.dtl_ptr; 717 vpa = vcpu->arch.vpa.pinned_addr; 718 719 if (!dt || !vpa) 720 return; 721 722 dt->dispatch_reason = 7; 723 dt->preempt_reason = 0; 724 dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid); 725 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); 726 dt->ready_to_enqueue_time = 0; 727 dt->waiting_to_ready_time = 0; 728 dt->timebase = cpu_to_be64(now); 729 dt->fault_addr = 0; 730 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); 731 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); 732 733 ++dt; 734 if (dt == vcpu->arch.dtl.pinned_end) 735 dt = vcpu->arch.dtl.pinned_addr; 736 vcpu->arch.dtl_ptr = dt; 737 /* order writing *dt vs. 
writing vpa->dtl_idx */ 738 smp_wmb(); 739 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); 740 vcpu->arch.dtl.dirty = true; 741 } 742 743 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, 744 struct kvmppc_vcore *vc) 745 { 746 unsigned long stolen; 747 unsigned long core_stolen; 748 u64 now; 749 unsigned long flags; 750 751 now = mftb(); 752 753 core_stolen = vcore_stolen_time(vc, now); 754 stolen = core_stolen - vcpu->arch.stolen_logged; 755 vcpu->arch.stolen_logged = core_stolen; 756 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); 757 stolen += vcpu->arch.busy_stolen; 758 vcpu->arch.busy_stolen = 0; 759 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); 760 761 __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now + vc->tb_offset, stolen); 762 } 763 764 /* See if there is a doorbell interrupt pending for a vcpu */ 765 static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) 766 { 767 int thr; 768 struct kvmppc_vcore *vc; 769 770 if (vcpu->arch.doorbell_request) 771 return true; 772 if (cpu_has_feature(CPU_FTR_ARCH_300)) 773 return false; 774 /* 775 * Ensure that the read of vcore->dpdes comes after the read 776 * of vcpu->doorbell_request. This barrier matches the 777 * smp_wmb() in kvmppc_guest_entry_inject(). 778 */ 779 smp_rmb(); 780 vc = vcpu->arch.vcore; 781 thr = vcpu->vcpu_id - vc->first_vcpuid; 782 return !!(vc->dpdes & (1 << thr)); 783 } 784 785 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) 786 { 787 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) 788 return true; 789 if ((!vcpu->arch.vcore->arch_compat) && 790 cpu_has_feature(CPU_FTR_ARCH_207S)) 791 return true; 792 return false; 793 } 794 795 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, 796 unsigned long resource, unsigned long value1, 797 unsigned long value2) 798 { 799 switch (resource) { 800 case H_SET_MODE_RESOURCE_SET_CIABR: 801 if (!kvmppc_power8_compatible(vcpu)) 802 return H_P2; 803 if (value2) 804 return H_P4; 805 if (mflags) 806 return H_UNSUPPORTED_FLAG_START; 807 /* Guests can't breakpoint the hypervisor */ 808 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER) 809 return H_P3; 810 vcpu->arch.ciabr = value1; 811 return H_SUCCESS; 812 case H_SET_MODE_RESOURCE_SET_DAWR0: 813 if (!kvmppc_power8_compatible(vcpu)) 814 return H_P2; 815 if (!ppc_breakpoint_available()) 816 return H_P2; 817 if (mflags) 818 return H_UNSUPPORTED_FLAG_START; 819 if (value2 & DABRX_HYP) 820 return H_P4; 821 vcpu->arch.dawr0 = value1; 822 vcpu->arch.dawrx0 = value2; 823 return H_SUCCESS; 824 case H_SET_MODE_RESOURCE_SET_DAWR1: 825 if (!kvmppc_power8_compatible(vcpu)) 826 return H_P2; 827 if (!ppc_breakpoint_available()) 828 return H_P2; 829 if (!cpu_has_feature(CPU_FTR_DAWR1)) 830 return H_P2; 831 if (!vcpu->kvm->arch.dawr1_enabled) 832 return H_FUNCTION; 833 if (mflags) 834 return H_UNSUPPORTED_FLAG_START; 835 if (value2 & DABRX_HYP) 836 return H_P4; 837 vcpu->arch.dawr1 = value1; 838 vcpu->arch.dawrx1 = value2; 839 return H_SUCCESS; 840 case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: 841 /* 842 * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved. 843 * Keep this in synch with kvmppc_filter_guest_lpcr_hv. 
844 */ 845 if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) && 846 kvmhv_vcpu_is_radix(vcpu) && mflags == 3) 847 return H_UNSUPPORTED_FLAG_START; 848 return H_TOO_HARD; 849 default: 850 return H_TOO_HARD; 851 } 852 } 853 854 /* Copy guest memory in place - must reside within a single memslot */ 855 static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from, 856 unsigned long len) 857 { 858 struct kvm_memory_slot *to_memslot = NULL; 859 struct kvm_memory_slot *from_memslot = NULL; 860 unsigned long to_addr, from_addr; 861 int r; 862 863 /* Get HPA for from address */ 864 from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT); 865 if (!from_memslot) 866 return -EFAULT; 867 if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) 868 << PAGE_SHIFT)) 869 return -EINVAL; 870 from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT); 871 if (kvm_is_error_hva(from_addr)) 872 return -EFAULT; 873 from_addr |= (from & (PAGE_SIZE - 1)); 874 875 /* Get HPA for to address */ 876 to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT); 877 if (!to_memslot) 878 return -EFAULT; 879 if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) 880 << PAGE_SHIFT)) 881 return -EINVAL; 882 to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT); 883 if (kvm_is_error_hva(to_addr)) 884 return -EFAULT; 885 to_addr |= (to & (PAGE_SIZE - 1)); 886 887 /* Perform copy */ 888 r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr, 889 len); 890 if (r) 891 return -EFAULT; 892 mark_page_dirty(kvm, to >> PAGE_SHIFT); 893 return 0; 894 } 895 896 static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, 897 unsigned long dest, unsigned long src) 898 { 899 u64 pg_sz = SZ_4K; /* 4K page size */ 900 u64 pg_mask = SZ_4K - 1; 901 int ret; 902 903 /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ 904 if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | 905 H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) 906 return H_PARAMETER; 907 908 /* dest (and src if copy_page flag set) must be page aligned */ 909 if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) 910 return H_PARAMETER; 911 912 /* zero and/or copy the page as determined by the flags */ 913 if (flags & H_COPY_PAGE) { 914 ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); 915 if (ret < 0) 916 return H_PARAMETER; 917 } else if (flags & H_ZERO_PAGE) { 918 ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); 919 if (ret < 0) 920 return H_PARAMETER; 921 } 922 923 /* We can ignore the remaining flags */ 924 925 return H_SUCCESS; 926 } 927 928 static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target) 929 { 930 struct kvmppc_vcore *vcore = target->arch.vcore; 931 932 /* 933 * We expect to have been called by the real mode handler 934 * (kvmppc_rm_h_confer()) which would have directly returned 935 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may 936 * have useful work to do and should not confer) so we don't 937 * recheck that here. 938 * 939 * In the case of the P9 single vcpu per vcore case, the real 940 * mode handler is not called but no other threads are in the 941 * source vcore. 
942 */ 943 if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 944 spin_lock(&vcore->lock); 945 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && 946 vcore->vcore_state != VCORE_INACTIVE && 947 vcore->runner) 948 target = vcore->runner; 949 spin_unlock(&vcore->lock); 950 } 951 952 return kvm_vcpu_yield_to(target); 953 } 954 955 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) 956 { 957 int yield_count = 0; 958 struct lppaca *lppaca; 959 960 spin_lock(&vcpu->arch.vpa_update_lock); 961 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; 962 if (lppaca) 963 yield_count = be32_to_cpu(lppaca->yield_count); 964 spin_unlock(&vcpu->arch.vpa_update_lock); 965 return yield_count; 966 } 967 968 /* 969 * H_RPT_INVALIDATE hcall handler for nested guests. 970 * 971 * Handles only nested process-scoped invalidation requests in L0. 972 */ 973 static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu) 974 { 975 unsigned long type = kvmppc_get_gpr(vcpu, 6); 976 unsigned long pid, pg_sizes, start, end; 977 978 /* 979 * The partition-scoped invalidations aren't handled here in L0. 980 */ 981 if (type & H_RPTI_TYPE_NESTED) 982 return RESUME_HOST; 983 984 pid = kvmppc_get_gpr(vcpu, 4); 985 pg_sizes = kvmppc_get_gpr(vcpu, 7); 986 start = kvmppc_get_gpr(vcpu, 8); 987 end = kvmppc_get_gpr(vcpu, 9); 988 989 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, 990 type, pg_sizes, start, end); 991 992 kvmppc_set_gpr(vcpu, 3, H_SUCCESS); 993 return RESUME_GUEST; 994 } 995 996 static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu, 997 unsigned long id, unsigned long target, 998 unsigned long type, unsigned long pg_sizes, 999 unsigned long start, unsigned long end) 1000 { 1001 if (!kvm_is_radix(vcpu->kvm)) 1002 return H_UNSUPPORTED; 1003 1004 if (end < start) 1005 return H_P5; 1006 1007 /* 1008 * Partition-scoped invalidation for nested guests. 1009 */ 1010 if (type & H_RPTI_TYPE_NESTED) { 1011 if (!nesting_enabled(vcpu->kvm)) 1012 return H_FUNCTION; 1013 1014 /* Support only cores as target */ 1015 if (target != H_RPTI_TARGET_CMMU) 1016 return H_P2; 1017 1018 return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes, 1019 start, end); 1020 } 1021 1022 /* 1023 * Process-scoped invalidation for L1 guests. 
1024 */ 1025 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, 1026 type, pg_sizes, start, end); 1027 return H_SUCCESS; 1028 } 1029 1030 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) 1031 { 1032 struct kvm *kvm = vcpu->kvm; 1033 unsigned long req = kvmppc_get_gpr(vcpu, 3); 1034 unsigned long target, ret = H_SUCCESS; 1035 int yield_count; 1036 struct kvm_vcpu *tvcpu; 1037 int idx, rc; 1038 1039 if (req <= MAX_HCALL_OPCODE && 1040 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) 1041 return RESUME_HOST; 1042 1043 switch (req) { 1044 case H_REMOVE: 1045 ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4), 1046 kvmppc_get_gpr(vcpu, 5), 1047 kvmppc_get_gpr(vcpu, 6)); 1048 if (ret == H_TOO_HARD) 1049 return RESUME_HOST; 1050 break; 1051 case H_ENTER: 1052 ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4), 1053 kvmppc_get_gpr(vcpu, 5), 1054 kvmppc_get_gpr(vcpu, 6), 1055 kvmppc_get_gpr(vcpu, 7)); 1056 if (ret == H_TOO_HARD) 1057 return RESUME_HOST; 1058 break; 1059 case H_READ: 1060 ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4), 1061 kvmppc_get_gpr(vcpu, 5)); 1062 if (ret == H_TOO_HARD) 1063 return RESUME_HOST; 1064 break; 1065 case H_CLEAR_MOD: 1066 ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4), 1067 kvmppc_get_gpr(vcpu, 5)); 1068 if (ret == H_TOO_HARD) 1069 return RESUME_HOST; 1070 break; 1071 case H_CLEAR_REF: 1072 ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4), 1073 kvmppc_get_gpr(vcpu, 5)); 1074 if (ret == H_TOO_HARD) 1075 return RESUME_HOST; 1076 break; 1077 case H_PROTECT: 1078 ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4), 1079 kvmppc_get_gpr(vcpu, 5), 1080 kvmppc_get_gpr(vcpu, 6)); 1081 if (ret == H_TOO_HARD) 1082 return RESUME_HOST; 1083 break; 1084 case H_BULK_REMOVE: 1085 ret = kvmppc_h_bulk_remove(vcpu); 1086 if (ret == H_TOO_HARD) 1087 return RESUME_HOST; 1088 break; 1089 1090 case H_CEDE: 1091 break; 1092 case H_PROD: 1093 target = kvmppc_get_gpr(vcpu, 4); 1094 tvcpu = kvmppc_find_vcpu(kvm, target); 1095 if (!tvcpu) { 1096 ret = H_PARAMETER; 1097 break; 1098 } 1099 tvcpu->arch.prodded = 1; 1100 smp_mb(); /* This orders prodded store vs ceded load */ 1101 if (tvcpu->arch.ceded) 1102 kvmppc_fast_vcpu_kick_hv(tvcpu); 1103 break; 1104 case H_CONFER: 1105 target = kvmppc_get_gpr(vcpu, 4); 1106 if (target == -1) 1107 break; 1108 tvcpu = kvmppc_find_vcpu(kvm, target); 1109 if (!tvcpu) { 1110 ret = H_PARAMETER; 1111 break; 1112 } 1113 yield_count = kvmppc_get_gpr(vcpu, 5); 1114 if (kvmppc_get_yield_count(tvcpu) != yield_count) 1115 break; 1116 kvm_arch_vcpu_yield_to(tvcpu); 1117 break; 1118 case H_REGISTER_VPA: 1119 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), 1120 kvmppc_get_gpr(vcpu, 5), 1121 kvmppc_get_gpr(vcpu, 6)); 1122 break; 1123 case H_RTAS: 1124 if (list_empty(&kvm->arch.rtas_tokens)) 1125 return RESUME_HOST; 1126 1127 idx = srcu_read_lock(&kvm->srcu); 1128 rc = kvmppc_rtas_hcall(vcpu); 1129 srcu_read_unlock(&kvm->srcu, idx); 1130 1131 if (rc == -ENOENT) 1132 return RESUME_HOST; 1133 else if (rc == 0) 1134 break; 1135 1136 /* Send the error out to userspace via KVM_RUN */ 1137 return rc; 1138 case H_LOGICAL_CI_LOAD: 1139 ret = kvmppc_h_logical_ci_load(vcpu); 1140 if (ret == H_TOO_HARD) 1141 return RESUME_HOST; 1142 break; 1143 case H_LOGICAL_CI_STORE: 1144 ret = kvmppc_h_logical_ci_store(vcpu); 1145 if (ret == H_TOO_HARD) 1146 return RESUME_HOST; 1147 break; 1148 case H_SET_MODE: 1149 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), 1150 kvmppc_get_gpr(vcpu, 5), 1151 kvmppc_get_gpr(vcpu, 6), 1152 kvmppc_get_gpr(vcpu, 7)); 1153 if 
(ret == H_TOO_HARD) 1154 return RESUME_HOST; 1155 break; 1156 case H_XIRR: 1157 case H_CPPR: 1158 case H_EOI: 1159 case H_IPI: 1160 case H_IPOLL: 1161 case H_XIRR_X: 1162 if (kvmppc_xics_enabled(vcpu)) { 1163 if (xics_on_xive()) { 1164 ret = H_NOT_AVAILABLE; 1165 return RESUME_GUEST; 1166 } 1167 ret = kvmppc_xics_hcall(vcpu, req); 1168 break; 1169 } 1170 return RESUME_HOST; 1171 case H_SET_DABR: 1172 ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4)); 1173 break; 1174 case H_SET_XDABR: 1175 ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4), 1176 kvmppc_get_gpr(vcpu, 5)); 1177 break; 1178 #ifdef CONFIG_SPAPR_TCE_IOMMU 1179 case H_GET_TCE: 1180 ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4), 1181 kvmppc_get_gpr(vcpu, 5)); 1182 if (ret == H_TOO_HARD) 1183 return RESUME_HOST; 1184 break; 1185 case H_PUT_TCE: 1186 ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4), 1187 kvmppc_get_gpr(vcpu, 5), 1188 kvmppc_get_gpr(vcpu, 6)); 1189 if (ret == H_TOO_HARD) 1190 return RESUME_HOST; 1191 break; 1192 case H_PUT_TCE_INDIRECT: 1193 ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4), 1194 kvmppc_get_gpr(vcpu, 5), 1195 kvmppc_get_gpr(vcpu, 6), 1196 kvmppc_get_gpr(vcpu, 7)); 1197 if (ret == H_TOO_HARD) 1198 return RESUME_HOST; 1199 break; 1200 case H_STUFF_TCE: 1201 ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4), 1202 kvmppc_get_gpr(vcpu, 5), 1203 kvmppc_get_gpr(vcpu, 6), 1204 kvmppc_get_gpr(vcpu, 7)); 1205 if (ret == H_TOO_HARD) 1206 return RESUME_HOST; 1207 break; 1208 #endif 1209 case H_RANDOM: 1210 if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1)) 1211 ret = H_HARDWARE; 1212 break; 1213 case H_RPT_INVALIDATE: 1214 ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4), 1215 kvmppc_get_gpr(vcpu, 5), 1216 kvmppc_get_gpr(vcpu, 6), 1217 kvmppc_get_gpr(vcpu, 7), 1218 kvmppc_get_gpr(vcpu, 8), 1219 kvmppc_get_gpr(vcpu, 9)); 1220 break; 1221 1222 case H_SET_PARTITION_TABLE: 1223 ret = H_FUNCTION; 1224 if (nesting_enabled(kvm)) 1225 ret = kvmhv_set_partition_table(vcpu); 1226 break; 1227 case H_ENTER_NESTED: 1228 ret = H_FUNCTION; 1229 if (!nesting_enabled(kvm)) 1230 break; 1231 ret = kvmhv_enter_nested_guest(vcpu); 1232 if (ret == H_INTERRUPT) { 1233 kvmppc_set_gpr(vcpu, 3, 0); 1234 vcpu->arch.hcall_needed = 0; 1235 return -EINTR; 1236 } else if (ret == H_TOO_HARD) { 1237 kvmppc_set_gpr(vcpu, 3, 0); 1238 vcpu->arch.hcall_needed = 0; 1239 return RESUME_HOST; 1240 } 1241 break; 1242 case H_TLB_INVALIDATE: 1243 ret = H_FUNCTION; 1244 if (nesting_enabled(kvm)) 1245 ret = kvmhv_do_nested_tlbie(vcpu); 1246 break; 1247 case H_COPY_TOFROM_GUEST: 1248 ret = H_FUNCTION; 1249 if (nesting_enabled(kvm)) 1250 ret = kvmhv_copy_tofrom_guest_nested(vcpu); 1251 break; 1252 case H_PAGE_INIT: 1253 ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4), 1254 kvmppc_get_gpr(vcpu, 5), 1255 kvmppc_get_gpr(vcpu, 6)); 1256 break; 1257 case H_SVM_PAGE_IN: 1258 ret = H_UNSUPPORTED; 1259 if (kvmppc_get_srr1(vcpu) & MSR_S) 1260 ret = kvmppc_h_svm_page_in(kvm, 1261 kvmppc_get_gpr(vcpu, 4), 1262 kvmppc_get_gpr(vcpu, 5), 1263 kvmppc_get_gpr(vcpu, 6)); 1264 break; 1265 case H_SVM_PAGE_OUT: 1266 ret = H_UNSUPPORTED; 1267 if (kvmppc_get_srr1(vcpu) & MSR_S) 1268 ret = kvmppc_h_svm_page_out(kvm, 1269 kvmppc_get_gpr(vcpu, 4), 1270 kvmppc_get_gpr(vcpu, 5), 1271 kvmppc_get_gpr(vcpu, 6)); 1272 break; 1273 case H_SVM_INIT_START: 1274 ret = H_UNSUPPORTED; 1275 if (kvmppc_get_srr1(vcpu) & MSR_S) 1276 ret = kvmppc_h_svm_init_start(kvm); 1277 break; 1278 case H_SVM_INIT_DONE: 1279 ret = H_UNSUPPORTED; 
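/* As with the other secure-VM (SVM) hcalls above, this is only accepted when the vcpu was running in secure mode, i.e. MSR_S is set in SRR1; otherwise H_UNSUPPORTED is returned. */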
1280 if (kvmppc_get_srr1(vcpu) & MSR_S) 1281 ret = kvmppc_h_svm_init_done(kvm); 1282 break; 1283 case H_SVM_INIT_ABORT: 1284 /* 1285 * Even if that call is made by the Ultravisor, the SSR1 value 1286 * is the guest context one, with the secure bit clear as it has 1287 * not yet been secured. So we can't check it here. 1288 * Instead the kvm->arch.secure_guest flag is checked inside 1289 * kvmppc_h_svm_init_abort(). 1290 */ 1291 ret = kvmppc_h_svm_init_abort(kvm); 1292 break; 1293 1294 default: 1295 return RESUME_HOST; 1296 } 1297 WARN_ON_ONCE(ret == H_TOO_HARD); 1298 kvmppc_set_gpr(vcpu, 3, ret); 1299 vcpu->arch.hcall_needed = 0; 1300 return RESUME_GUEST; 1301 } 1302 1303 /* 1304 * Handle H_CEDE in the P9 path where we don't call the real-mode hcall 1305 * handlers in book3s_hv_rmhandlers.S. 1306 * 1307 * This has to be done early, not in kvmppc_pseries_do_hcall(), so 1308 * that the cede logic in kvmppc_run_single_vcpu() works properly. 1309 */ 1310 static void kvmppc_cede(struct kvm_vcpu *vcpu) 1311 { 1312 vcpu->arch.shregs.msr |= MSR_EE; 1313 vcpu->arch.ceded = 1; 1314 smp_mb(); 1315 if (vcpu->arch.prodded) { 1316 vcpu->arch.prodded = 0; 1317 smp_mb(); 1318 vcpu->arch.ceded = 0; 1319 } 1320 } 1321 1322 static int kvmppc_hcall_impl_hv(unsigned long cmd) 1323 { 1324 switch (cmd) { 1325 case H_CEDE: 1326 case H_PROD: 1327 case H_CONFER: 1328 case H_REGISTER_VPA: 1329 case H_SET_MODE: 1330 #ifdef CONFIG_SPAPR_TCE_IOMMU 1331 case H_GET_TCE: 1332 case H_PUT_TCE: 1333 case H_PUT_TCE_INDIRECT: 1334 case H_STUFF_TCE: 1335 #endif 1336 case H_LOGICAL_CI_LOAD: 1337 case H_LOGICAL_CI_STORE: 1338 #ifdef CONFIG_KVM_XICS 1339 case H_XIRR: 1340 case H_CPPR: 1341 case H_EOI: 1342 case H_IPI: 1343 case H_IPOLL: 1344 case H_XIRR_X: 1345 #endif 1346 case H_PAGE_INIT: 1347 case H_RPT_INVALIDATE: 1348 return 1; 1349 } 1350 1351 /* See if it's in the real-mode table */ 1352 return kvmppc_hcall_impl_hv_realmode(cmd); 1353 } 1354 1355 static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu) 1356 { 1357 u32 last_inst; 1358 1359 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != 1360 EMULATE_DONE) { 1361 /* 1362 * Fetch failed, so return to guest and 1363 * try executing it again. 1364 */ 1365 return RESUME_GUEST; 1366 } 1367 1368 if (last_inst == KVMPPC_INST_SW_BREAKPOINT) { 1369 vcpu->run->exit_reason = KVM_EXIT_DEBUG; 1370 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); 1371 return RESUME_HOST; 1372 } else { 1373 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1374 return RESUME_GUEST; 1375 } 1376 } 1377 1378 static void do_nothing(void *x) 1379 { 1380 } 1381 1382 static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu) 1383 { 1384 int thr, cpu, pcpu, nthreads; 1385 struct kvm_vcpu *v; 1386 unsigned long dpdes; 1387 1388 nthreads = vcpu->kvm->arch.emul_smt_mode; 1389 dpdes = 0; 1390 cpu = vcpu->vcpu_id & ~(nthreads - 1); 1391 for (thr = 0; thr < nthreads; ++thr, ++cpu) { 1392 v = kvmppc_find_vcpu(vcpu->kvm, cpu); 1393 if (!v) 1394 continue; 1395 /* 1396 * If the vcpu is currently running on a physical cpu thread, 1397 * interrupt it in order to pull it out of the guest briefly, 1398 * which will update its vcore->dpdes value. 1399 */ 1400 pcpu = READ_ONCE(v->cpu); 1401 if (pcpu >= 0) 1402 smp_call_function_single(pcpu, do_nothing, NULL, 1); 1403 if (kvmppc_doorbell_pending(v)) 1404 dpdes |= 1 << thr; 1405 } 1406 return dpdes; 1407 } 1408 1409 /* 1410 * On POWER9, emulate doorbell-related instructions in order to 1411 * give the guest the illusion of running on a multi-threaded core. 
1412 * The instructions emulated are msgsndp, msgclrp, mfspr TIR, 1413 * and mfspr DPDES. 1414 */ 1415 static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) 1416 { 1417 u32 inst, rb, thr; 1418 unsigned long arg; 1419 struct kvm *kvm = vcpu->kvm; 1420 struct kvm_vcpu *tvcpu; 1421 1422 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) 1423 return RESUME_GUEST; 1424 if (get_op(inst) != 31) 1425 return EMULATE_FAIL; 1426 rb = get_rb(inst); 1427 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); 1428 switch (get_xop(inst)) { 1429 case OP_31_XOP_MSGSNDP: 1430 arg = kvmppc_get_gpr(vcpu, rb); 1431 if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER) 1432 break; 1433 arg &= 0x7f; 1434 if (arg >= kvm->arch.emul_smt_mode) 1435 break; 1436 tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); 1437 if (!tvcpu) 1438 break; 1439 if (!tvcpu->arch.doorbell_request) { 1440 tvcpu->arch.doorbell_request = 1; 1441 kvmppc_fast_vcpu_kick_hv(tvcpu); 1442 } 1443 break; 1444 case OP_31_XOP_MSGCLRP: 1445 arg = kvmppc_get_gpr(vcpu, rb); 1446 if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER) 1447 break; 1448 vcpu->arch.vcore->dpdes = 0; 1449 vcpu->arch.doorbell_request = 0; 1450 break; 1451 case OP_31_XOP_MFSPR: 1452 switch (get_sprn(inst)) { 1453 case SPRN_TIR: 1454 arg = thr; 1455 break; 1456 case SPRN_DPDES: 1457 arg = kvmppc_read_dpdes(vcpu); 1458 break; 1459 default: 1460 return EMULATE_FAIL; 1461 } 1462 kvmppc_set_gpr(vcpu, get_rt(inst), arg); 1463 break; 1464 default: 1465 return EMULATE_FAIL; 1466 } 1467 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); 1468 return RESUME_GUEST; 1469 } 1470 1471 /* 1472 * If the lppaca had pmcregs_in_use clear when we exited the guest, then 1473 * HFSCR_PM is cleared for next entry. If the guest then tries to access 1474 * the PMU SPRs, we get this facility unavailable interrupt. Putting HFSCR_PM 1475 * back in the guest HFSCR will cause the next entry to load the PMU SPRs and 1476 * allow the guest access to continue. 1477 */ 1478 static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu) 1479 { 1480 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) 1481 return EMULATE_FAIL; 1482 1483 vcpu->arch.hfscr |= HFSCR_PM; 1484 1485 return RESUME_GUEST; 1486 } 1487 1488 static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu) 1489 { 1490 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) 1491 return EMULATE_FAIL; 1492 1493 vcpu->arch.hfscr |= HFSCR_EBB; 1494 1495 return RESUME_GUEST; 1496 } 1497 1498 static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu) 1499 { 1500 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) 1501 return EMULATE_FAIL; 1502 1503 vcpu->arch.hfscr |= HFSCR_TM; 1504 1505 return RESUME_GUEST; 1506 } 1507 1508 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, 1509 struct task_struct *tsk) 1510 { 1511 struct kvm_run *run = vcpu->run; 1512 int r = RESUME_HOST; 1513 1514 vcpu->stat.sum_exits++; 1515 1516 /* 1517 * This can happen if an interrupt occurs in the last stages 1518 * of guest entry or the first stages of guest exit (i.e. after 1519 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV 1520 * and before setting it to KVM_GUEST_MODE_HOST_HV). 1521 * That can happen due to a bug, or due to a machine check 1522 * occurring at just the wrong time. 
1523 */ 1524 if (vcpu->arch.shregs.msr & MSR_HV) { 1525 printk(KERN_EMERG "KVM trap in HV mode!\n"); 1526 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", 1527 vcpu->arch.trap, kvmppc_get_pc(vcpu), 1528 vcpu->arch.shregs.msr); 1529 kvmppc_dump_regs(vcpu); 1530 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1531 run->hw.hardware_exit_reason = vcpu->arch.trap; 1532 return RESUME_HOST; 1533 } 1534 run->exit_reason = KVM_EXIT_UNKNOWN; 1535 run->ready_for_interrupt_injection = 1; 1536 switch (vcpu->arch.trap) { 1537 /* We're good on these - the host merely wanted to get our attention */ 1538 case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER: 1539 WARN_ON_ONCE(1); /* Should never happen */ 1540 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; 1541 fallthrough; 1542 case BOOK3S_INTERRUPT_HV_DECREMENTER: 1543 vcpu->stat.dec_exits++; 1544 r = RESUME_GUEST; 1545 break; 1546 case BOOK3S_INTERRUPT_EXTERNAL: 1547 case BOOK3S_INTERRUPT_H_DOORBELL: 1548 case BOOK3S_INTERRUPT_H_VIRT: 1549 vcpu->stat.ext_intr_exits++; 1550 r = RESUME_GUEST; 1551 break; 1552 /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ 1553 case BOOK3S_INTERRUPT_HMI: 1554 case BOOK3S_INTERRUPT_PERFMON: 1555 case BOOK3S_INTERRUPT_SYSTEM_RESET: 1556 r = RESUME_GUEST; 1557 break; 1558 case BOOK3S_INTERRUPT_MACHINE_CHECK: { 1559 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, 1560 DEFAULT_RATELIMIT_BURST); 1561 /* 1562 * Print the MCE event to host console. Ratelimit so the guest 1563 * can't flood the host log. 1564 */ 1565 if (__ratelimit(&rs)) 1566 machine_check_print_event_info(&vcpu->arch.mce_evt,false, true); 1567 1568 /* 1569 * If the guest can do FWNMI, exit to userspace so it can 1570 * deliver a FWNMI to the guest. 1571 * Otherwise we synthesize a machine check for the guest 1572 * so that it knows that the machine check occurred. 1573 */ 1574 if (!vcpu->kvm->arch.fwnmi_enabled) { 1575 ulong flags = vcpu->arch.shregs.msr & 0x083c0000; 1576 kvmppc_core_queue_machine_check(vcpu, flags); 1577 r = RESUME_GUEST; 1578 break; 1579 } 1580 1581 /* Exit to guest with KVM_EXIT_NMI as exit reason */ 1582 run->exit_reason = KVM_EXIT_NMI; 1583 run->hw.hardware_exit_reason = vcpu->arch.trap; 1584 /* Clear out the old NMI status from run->flags */ 1585 run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK; 1586 /* Now set the NMI status */ 1587 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) 1588 run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV; 1589 else 1590 run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV; 1591 1592 r = RESUME_HOST; 1593 break; 1594 } 1595 case BOOK3S_INTERRUPT_PROGRAM: 1596 { 1597 ulong flags; 1598 /* 1599 * Normally program interrupts are delivered directly 1600 * to the guest by the hardware, but we can get here 1601 * as a result of a hypervisor emulation interrupt 1602 * (e40) getting turned into a 700 by BML RTAS. 1603 */ 1604 flags = vcpu->arch.shregs.msr & 0x1f0000ull; 1605 kvmppc_core_queue_program(vcpu, flags); 1606 r = RESUME_GUEST; 1607 break; 1608 } 1609 case BOOK3S_INTERRUPT_SYSCALL: 1610 { 1611 int i; 1612 1613 if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) { 1614 /* 1615 * Guest userspace executed sc 1. This can only be 1616 * reached by the P9 path because the old path 1617 * handles this case in realmode hcall handlers. 1618 */ 1619 if (!kvmhv_vcpu_is_radix(vcpu)) { 1620 /* 1621 * A guest could be running PR KVM, so this 1622 * may be a PR KVM hcall. It must be reflected 1623 * to the guest kernel as a sc interrupt. 
1624 */ 1625 kvmppc_core_queue_syscall(vcpu); 1626 } else { 1627 /* 1628 * Radix guests can not run PR KVM or nested HV 1629 * hash guests which might run PR KVM, so this 1630 * is always a privilege fault. Send a program 1631 * check to guest kernel. 1632 */ 1633 kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); 1634 } 1635 r = RESUME_GUEST; 1636 break; 1637 } 1638 1639 /* 1640 * hcall - gather args and set exit_reason. This will next be 1641 * handled by kvmppc_pseries_do_hcall which may be able to deal 1642 * with it and resume guest, or may punt to userspace. 1643 */ 1644 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); 1645 for (i = 0; i < 9; ++i) 1646 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); 1647 run->exit_reason = KVM_EXIT_PAPR_HCALL; 1648 vcpu->arch.hcall_needed = 1; 1649 r = RESUME_HOST; 1650 break; 1651 } 1652 /* 1653 * We get these next two if the guest accesses a page which it thinks 1654 * it has mapped but which is not actually present, either because 1655 * it is for an emulated I/O device or because the corresponding 1656 * host page has been paged out. 1657 * 1658 * Any other HDSI/HISI interrupts have been handled already for P7/8 1659 * guests. For POWER9 hash guests not using rmhandlers, basic hash 1660 * fault handling is done here. 1661 */ 1662 case BOOK3S_INTERRUPT_H_DATA_STORAGE: { 1663 unsigned long vsid; 1664 long err; 1665 1666 if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) && 1667 unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) { 1668 r = RESUME_GUEST; /* Just retry if it's the canary */ 1669 break; 1670 } 1671 1672 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { 1673 /* 1674 * Radix doesn't require anything, and pre-ISAv3.0 hash 1675 * already attempted to handle this in rmhandlers. The 1676 * hash fault handling below is v3 only (it uses ASDR 1677 * via fault_gpa). 1678 */ 1679 r = RESUME_PAGE_FAULT; 1680 break; 1681 } 1682 1683 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { 1684 kvmppc_core_queue_data_storage(vcpu, 1685 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); 1686 r = RESUME_GUEST; 1687 break; 1688 } 1689 1690 if (!(vcpu->arch.shregs.msr & MSR_DR)) 1691 vsid = vcpu->kvm->arch.vrma_slb_v; 1692 else 1693 vsid = vcpu->arch.fault_gpa; 1694 1695 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, 1696 vsid, vcpu->arch.fault_dsisr, true); 1697 if (err == 0) { 1698 r = RESUME_GUEST; 1699 } else if (err == -1 || err == -2) { 1700 r = RESUME_PAGE_FAULT; 1701 } else { 1702 kvmppc_core_queue_data_storage(vcpu, 1703 vcpu->arch.fault_dar, err); 1704 r = RESUME_GUEST; 1705 } 1706 break; 1707 } 1708 case BOOK3S_INTERRUPT_H_INST_STORAGE: { 1709 unsigned long vsid; 1710 long err; 1711 1712 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); 1713 vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & 1714 DSISR_SRR1_MATCH_64S; 1715 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { 1716 /* 1717 * Radix doesn't require anything, and pre-ISAv3.0 hash 1718 * already attempted to handle this in rmhandlers. The 1719 * hash fault handling below is v3 only (it uses ASDR 1720 * via fault_gpa).
1721 */ 1722 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) 1723 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; 1724 r = RESUME_PAGE_FAULT; 1725 break; 1726 } 1727 1728 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { 1729 kvmppc_core_queue_inst_storage(vcpu, 1730 vcpu->arch.fault_dsisr); 1731 r = RESUME_GUEST; 1732 break; 1733 } 1734 1735 if (!(vcpu->arch.shregs.msr & MSR_IR)) 1736 vsid = vcpu->kvm->arch.vrma_slb_v; 1737 else 1738 vsid = vcpu->arch.fault_gpa; 1739 1740 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, 1741 vsid, vcpu->arch.fault_dsisr, false); 1742 if (err == 0) { 1743 r = RESUME_GUEST; 1744 } else if (err == -1) { 1745 r = RESUME_PAGE_FAULT; 1746 } else { 1747 kvmppc_core_queue_inst_storage(vcpu, err); 1748 r = RESUME_GUEST; 1749 } 1750 break; 1751 } 1752 1753 /* 1754 * This occurs if the guest executes an illegal instruction. 1755 * If the guest debug is disabled, generate a program interrupt 1756 * to the guest. If guest debug is enabled, we need to check 1757 * whether the instruction is a software breakpoint instruction. 1758 * Accordingly return to Guest or Host. 1759 */ 1760 case BOOK3S_INTERRUPT_H_EMUL_ASSIST: 1761 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) 1762 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? 1763 swab32(vcpu->arch.emul_inst) : 1764 vcpu->arch.emul_inst; 1765 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { 1766 r = kvmppc_emulate_debug_inst(vcpu); 1767 } else { 1768 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1769 r = RESUME_GUEST; 1770 } 1771 break; 1772 1773 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1774 case BOOK3S_INTERRUPT_HV_SOFTPATCH: 1775 /* 1776 * This occurs for various TM-related instructions that 1777 * we need to emulate on POWER9 DD2.2. We have already 1778 * handled the cases where the guest was in real-suspend 1779 * mode and was transitioning to transactional state. 1780 */ 1781 r = kvmhv_p9_tm_emulation(vcpu); 1782 if (r != -1) 1783 break; 1784 fallthrough; /* go to facility unavailable handler */ 1785 #endif 1786 1787 /* 1788 * This occurs if the guest (kernel or userspace), does something that 1789 * is prohibited by HFSCR. 1790 * On POWER9, this could be a doorbell instruction that we need 1791 * to emulate. 1792 * Otherwise, we just generate a program interrupt to the guest. 1793 */ 1794 case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: { 1795 u64 cause = vcpu->arch.hfscr >> 56; 1796 1797 r = EMULATE_FAIL; 1798 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 1799 if (cause == FSCR_MSGP_LG) 1800 r = kvmppc_emulate_doorbell_instr(vcpu); 1801 if (cause == FSCR_PM_LG) 1802 r = kvmppc_pmu_unavailable(vcpu); 1803 if (cause == FSCR_EBB_LG) 1804 r = kvmppc_ebb_unavailable(vcpu); 1805 if (cause == FSCR_TM_LG) 1806 r = kvmppc_tm_unavailable(vcpu); 1807 } 1808 if (r == EMULATE_FAIL) { 1809 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1810 r = RESUME_GUEST; 1811 } 1812 break; 1813 } 1814 1815 case BOOK3S_INTERRUPT_HV_RM_HARD: 1816 r = RESUME_PASSTHROUGH; 1817 break; 1818 default: 1819 kvmppc_dump_regs(vcpu); 1820 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", 1821 vcpu->arch.trap, kvmppc_get_pc(vcpu), 1822 vcpu->arch.shregs.msr); 1823 run->hw.hardware_exit_reason = vcpu->arch.trap; 1824 r = RESUME_HOST; 1825 break; 1826 } 1827 1828 return r; 1829 } 1830 1831 static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) 1832 { 1833 int r; 1834 int srcu_idx; 1835 1836 vcpu->stat.sum_exits++; 1837 1838 /* 1839 * This can happen if an interrupt occurs in the last stages 1840 * of guest entry or the first stages of guest exit (i.e. 
after 1841 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV 1842 * and before setting it to KVM_GUEST_MODE_HOST_HV). 1843 * That can happen due to a bug, or due to a machine check 1844 * occurring at just the wrong time. 1845 */ 1846 if (vcpu->arch.shregs.msr & MSR_HV) { 1847 pr_emerg("KVM trap in HV mode while nested!\n"); 1848 pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n", 1849 vcpu->arch.trap, kvmppc_get_pc(vcpu), 1850 vcpu->arch.shregs.msr); 1851 kvmppc_dump_regs(vcpu); 1852 return RESUME_HOST; 1853 } 1854 switch (vcpu->arch.trap) { 1855 /* We're good on these - the host merely wanted to get our attention */ 1856 case BOOK3S_INTERRUPT_HV_DECREMENTER: 1857 vcpu->stat.dec_exits++; 1858 r = RESUME_GUEST; 1859 break; 1860 case BOOK3S_INTERRUPT_EXTERNAL: 1861 vcpu->stat.ext_intr_exits++; 1862 r = RESUME_HOST; 1863 break; 1864 case BOOK3S_INTERRUPT_H_DOORBELL: 1865 case BOOK3S_INTERRUPT_H_VIRT: 1866 vcpu->stat.ext_intr_exits++; 1867 r = RESUME_GUEST; 1868 break; 1869 /* These need to go to the nested HV */ 1870 case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER: 1871 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; 1872 vcpu->stat.dec_exits++; 1873 r = RESUME_HOST; 1874 break; 1875 /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ 1876 case BOOK3S_INTERRUPT_HMI: 1877 case BOOK3S_INTERRUPT_PERFMON: 1878 case BOOK3S_INTERRUPT_SYSTEM_RESET: 1879 r = RESUME_GUEST; 1880 break; 1881 case BOOK3S_INTERRUPT_MACHINE_CHECK: 1882 { 1883 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, 1884 DEFAULT_RATELIMIT_BURST); 1885 /* Pass the machine check to the L1 guest */ 1886 r = RESUME_HOST; 1887 /* Print the MCE event to host console. */ 1888 if (__ratelimit(&rs)) 1889 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); 1890 break; 1891 } 1892 /* 1893 * We get these next two if the guest accesses a page which it thinks 1894 * it has mapped but which is not actually present, either because 1895 * it is for an emulated I/O device or because the corresonding 1896 * host page has been paged out. 1897 */ 1898 case BOOK3S_INTERRUPT_H_DATA_STORAGE: 1899 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1900 r = kvmhv_nested_page_fault(vcpu); 1901 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); 1902 break; 1903 case BOOK3S_INTERRUPT_H_INST_STORAGE: 1904 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); 1905 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & 1906 DSISR_SRR1_MATCH_64S; 1907 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) 1908 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; 1909 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1910 r = kvmhv_nested_page_fault(vcpu); 1911 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); 1912 break; 1913 1914 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1915 case BOOK3S_INTERRUPT_HV_SOFTPATCH: 1916 /* 1917 * This occurs for various TM-related instructions that 1918 * we need to emulate on POWER9 DD2.2. We have already 1919 * handled the cases where the guest was in real-suspend 1920 * mode and was transitioning to transactional state. 1921 */ 1922 r = kvmhv_p9_tm_emulation(vcpu); 1923 if (r != -1) 1924 break; 1925 fallthrough; /* go to facility unavailable handler */ 1926 #endif 1927 1928 case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: { 1929 u64 cause = vcpu->arch.hfscr >> 56; 1930 1931 /* 1932 * Only pass HFU interrupts to the L1 if the facility is 1933 * permitted but disabled by the L1's HFSCR, otherwise 1934 * the interrupt does not make sense to the L1 so turn 1935 * it into a HEAI. 
1936 */ 1937 if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || 1938 (vcpu->arch.nested_hfscr & (1UL << cause))) { 1939 vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; 1940 1941 /* 1942 * If the fetch failed, return to guest and 1943 * try executing it again. 1944 */ 1945 r = kvmppc_get_last_inst(vcpu, INST_GENERIC, 1946 &vcpu->arch.emul_inst); 1947 if (r != EMULATE_DONE) 1948 r = RESUME_GUEST; 1949 else 1950 r = RESUME_HOST; 1951 } else { 1952 r = RESUME_HOST; 1953 } 1954 1955 break; 1956 } 1957 1958 case BOOK3S_INTERRUPT_HV_RM_HARD: 1959 vcpu->arch.trap = 0; 1960 r = RESUME_GUEST; 1961 if (!xics_on_xive()) 1962 kvmppc_xics_rm_complete(vcpu, 0); 1963 break; 1964 case BOOK3S_INTERRUPT_SYSCALL: 1965 { 1966 unsigned long req = kvmppc_get_gpr(vcpu, 3); 1967 1968 /* 1969 * The H_RPT_INVALIDATE hcalls issued by nested 1970 * guests for process-scoped invalidations when 1971 * GTSE=0, are handled here in L0. 1972 */ 1973 if (req == H_RPT_INVALIDATE) { 1974 r = kvmppc_nested_h_rpt_invalidate(vcpu); 1975 break; 1976 } 1977 1978 r = RESUME_HOST; 1979 break; 1980 } 1981 default: 1982 r = RESUME_HOST; 1983 break; 1984 } 1985 1986 return r; 1987 } 1988 1989 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, 1990 struct kvm_sregs *sregs) 1991 { 1992 int i; 1993 1994 memset(sregs, 0, sizeof(struct kvm_sregs)); 1995 sregs->pvr = vcpu->arch.pvr; 1996 for (i = 0; i < vcpu->arch.slb_max; i++) { 1997 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; 1998 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; 1999 } 2000 2001 return 0; 2002 } 2003 2004 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, 2005 struct kvm_sregs *sregs) 2006 { 2007 int i, j; 2008 2009 /* Only accept the same PVR as the host's, since we can't spoof it */ 2010 if (sregs->pvr != vcpu->arch.pvr) 2011 return -EINVAL; 2012 2013 j = 0; 2014 for (i = 0; i < vcpu->arch.slb_nr; i++) { 2015 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { 2016 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; 2017 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; 2018 ++j; 2019 } 2020 } 2021 vcpu->arch.slb_max = j; 2022 2023 return 0; 2024 } 2025 2026 /* 2027 * Enforce limits on guest LPCR values based on hardware availability, 2028 * guest configuration, and possibly hypervisor support and security 2029 * concerns. 2030 */ 2031 unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr) 2032 { 2033 /* LPCR_TC only applies to HPT guests */ 2034 if (kvm_is_radix(kvm)) 2035 lpcr &= ~LPCR_TC; 2036 2037 /* On POWER8 and above, userspace can modify AIL */ 2038 if (!cpu_has_feature(CPU_FTR_ARCH_207S)) 2039 lpcr &= ~LPCR_AIL; 2040 if ((lpcr & LPCR_AIL) != LPCR_AIL_3) 2041 lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ 2042 /* 2043 * On some POWER9s we force AIL off for radix guests to prevent 2044 * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to 2045 * guest, which can result in Q0 translations with LPID=0 PID=PIDR to 2046 * be cached, which the host TLB management does not expect. 2047 */ 2048 if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) 2049 lpcr &= ~LPCR_AIL; 2050 2051 /* 2052 * On POWER9, allow userspace to enable large decrementer for the 2053 * guest, whether or not the host has it enabled. 
2054 */ 2055 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2056 lpcr &= ~LPCR_LD; 2057 2058 return lpcr; 2059 } 2060 2061 static void verify_lpcr(struct kvm *kvm, unsigned long lpcr) 2062 { 2063 if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) { 2064 WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n", 2065 lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr)); 2066 } 2067 } 2068 2069 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, 2070 bool preserve_top32) 2071 { 2072 struct kvm *kvm = vcpu->kvm; 2073 struct kvmppc_vcore *vc = vcpu->arch.vcore; 2074 u64 mask; 2075 2076 spin_lock(&vc->lock); 2077 2078 /* 2079 * Userspace can only modify 2080 * DPFD (default prefetch depth), ILE (interrupt little-endian), 2081 * TC (translation control), AIL (alternate interrupt location), 2082 * LD (large decrementer). 2083 * These are subject to restrictions from kvmppc_filter_lcpr_hv(). 2084 */ 2085 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD; 2086 2087 /* Broken 32-bit version of LPCR must not clear top bits */ 2088 if (preserve_top32) 2089 mask &= 0xFFFFFFFF; 2090 2091 new_lpcr = kvmppc_filter_lpcr_hv(kvm, 2092 (vc->lpcr & ~mask) | (new_lpcr & mask)); 2093 2094 /* 2095 * If ILE (interrupt little-endian) has changed, update the 2096 * MSR_LE bit in the intr_msr for each vcpu in this vcore. 2097 */ 2098 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { 2099 struct kvm_vcpu *vcpu; 2100 unsigned long i; 2101 2102 kvm_for_each_vcpu(i, vcpu, kvm) { 2103 if (vcpu->arch.vcore != vc) 2104 continue; 2105 if (new_lpcr & LPCR_ILE) 2106 vcpu->arch.intr_msr |= MSR_LE; 2107 else 2108 vcpu->arch.intr_msr &= ~MSR_LE; 2109 } 2110 } 2111 2112 vc->lpcr = new_lpcr; 2113 2114 spin_unlock(&vc->lock); 2115 } 2116 2117 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, 2118 union kvmppc_one_reg *val) 2119 { 2120 int r = 0; 2121 long int i; 2122 2123 switch (id) { 2124 case KVM_REG_PPC_DEBUG_INST: 2125 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); 2126 break; 2127 case KVM_REG_PPC_HIOR: 2128 *val = get_reg_val(id, 0); 2129 break; 2130 case KVM_REG_PPC_DABR: 2131 *val = get_reg_val(id, vcpu->arch.dabr); 2132 break; 2133 case KVM_REG_PPC_DABRX: 2134 *val = get_reg_val(id, vcpu->arch.dabrx); 2135 break; 2136 case KVM_REG_PPC_DSCR: 2137 *val = get_reg_val(id, vcpu->arch.dscr); 2138 break; 2139 case KVM_REG_PPC_PURR: 2140 *val = get_reg_val(id, vcpu->arch.purr); 2141 break; 2142 case KVM_REG_PPC_SPURR: 2143 *val = get_reg_val(id, vcpu->arch.spurr); 2144 break; 2145 case KVM_REG_PPC_AMR: 2146 *val = get_reg_val(id, vcpu->arch.amr); 2147 break; 2148 case KVM_REG_PPC_UAMOR: 2149 *val = get_reg_val(id, vcpu->arch.uamor); 2150 break; 2151 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1: 2152 i = id - KVM_REG_PPC_MMCR0; 2153 *val = get_reg_val(id, vcpu->arch.mmcr[i]); 2154 break; 2155 case KVM_REG_PPC_MMCR2: 2156 *val = get_reg_val(id, vcpu->arch.mmcr[2]); 2157 break; 2158 case KVM_REG_PPC_MMCRA: 2159 *val = get_reg_val(id, vcpu->arch.mmcra); 2160 break; 2161 case KVM_REG_PPC_MMCRS: 2162 *val = get_reg_val(id, vcpu->arch.mmcrs); 2163 break; 2164 case KVM_REG_PPC_MMCR3: 2165 *val = get_reg_val(id, vcpu->arch.mmcr[3]); 2166 break; 2167 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: 2168 i = id - KVM_REG_PPC_PMC1; 2169 *val = get_reg_val(id, vcpu->arch.pmc[i]); 2170 break; 2171 case KVM_REG_PPC_SPMC1 ... 
KVM_REG_PPC_SPMC2: 2172 i = id - KVM_REG_PPC_SPMC1; 2173 *val = get_reg_val(id, vcpu->arch.spmc[i]); 2174 break; 2175 case KVM_REG_PPC_SIAR: 2176 *val = get_reg_val(id, vcpu->arch.siar); 2177 break; 2178 case KVM_REG_PPC_SDAR: 2179 *val = get_reg_val(id, vcpu->arch.sdar); 2180 break; 2181 case KVM_REG_PPC_SIER: 2182 *val = get_reg_val(id, vcpu->arch.sier[0]); 2183 break; 2184 case KVM_REG_PPC_SIER2: 2185 *val = get_reg_val(id, vcpu->arch.sier[1]); 2186 break; 2187 case KVM_REG_PPC_SIER3: 2188 *val = get_reg_val(id, vcpu->arch.sier[2]); 2189 break; 2190 case KVM_REG_PPC_IAMR: 2191 *val = get_reg_val(id, vcpu->arch.iamr); 2192 break; 2193 case KVM_REG_PPC_PSPB: 2194 *val = get_reg_val(id, vcpu->arch.pspb); 2195 break; 2196 case KVM_REG_PPC_DPDES: 2197 /* 2198 * On POWER9, where we are emulating msgsndp etc., 2199 * we return 1 bit for each vcpu, which can come from 2200 * either vcore->dpdes or doorbell_request. 2201 * On POWER8, doorbell_request is 0. 2202 */ 2203 if (cpu_has_feature(CPU_FTR_ARCH_300)) 2204 *val = get_reg_val(id, vcpu->arch.doorbell_request); 2205 else 2206 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); 2207 break; 2208 case KVM_REG_PPC_VTB: 2209 *val = get_reg_val(id, vcpu->arch.vcore->vtb); 2210 break; 2211 case KVM_REG_PPC_DAWR: 2212 *val = get_reg_val(id, vcpu->arch.dawr0); 2213 break; 2214 case KVM_REG_PPC_DAWRX: 2215 *val = get_reg_val(id, vcpu->arch.dawrx0); 2216 break; 2217 case KVM_REG_PPC_DAWR1: 2218 *val = get_reg_val(id, vcpu->arch.dawr1); 2219 break; 2220 case KVM_REG_PPC_DAWRX1: 2221 *val = get_reg_val(id, vcpu->arch.dawrx1); 2222 break; 2223 case KVM_REG_PPC_CIABR: 2224 *val = get_reg_val(id, vcpu->arch.ciabr); 2225 break; 2226 case KVM_REG_PPC_CSIGR: 2227 *val = get_reg_val(id, vcpu->arch.csigr); 2228 break; 2229 case KVM_REG_PPC_TACR: 2230 *val = get_reg_val(id, vcpu->arch.tacr); 2231 break; 2232 case KVM_REG_PPC_TCSCR: 2233 *val = get_reg_val(id, vcpu->arch.tcscr); 2234 break; 2235 case KVM_REG_PPC_PID: 2236 *val = get_reg_val(id, vcpu->arch.pid); 2237 break; 2238 case KVM_REG_PPC_ACOP: 2239 *val = get_reg_val(id, vcpu->arch.acop); 2240 break; 2241 case KVM_REG_PPC_WORT: 2242 *val = get_reg_val(id, vcpu->arch.wort); 2243 break; 2244 case KVM_REG_PPC_TIDR: 2245 *val = get_reg_val(id, vcpu->arch.tid); 2246 break; 2247 case KVM_REG_PPC_PSSCR: 2248 *val = get_reg_val(id, vcpu->arch.psscr); 2249 break; 2250 case KVM_REG_PPC_VPA_ADDR: 2251 spin_lock(&vcpu->arch.vpa_update_lock); 2252 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); 2253 spin_unlock(&vcpu->arch.vpa_update_lock); 2254 break; 2255 case KVM_REG_PPC_VPA_SLB: 2256 spin_lock(&vcpu->arch.vpa_update_lock); 2257 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; 2258 val->vpaval.length = vcpu->arch.slb_shadow.len; 2259 spin_unlock(&vcpu->arch.vpa_update_lock); 2260 break; 2261 case KVM_REG_PPC_VPA_DTL: 2262 spin_lock(&vcpu->arch.vpa_update_lock); 2263 val->vpaval.addr = vcpu->arch.dtl.next_gpa; 2264 val->vpaval.length = vcpu->arch.dtl.len; 2265 spin_unlock(&vcpu->arch.vpa_update_lock); 2266 break; 2267 case KVM_REG_PPC_TB_OFFSET: 2268 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); 2269 break; 2270 case KVM_REG_PPC_LPCR: 2271 case KVM_REG_PPC_LPCR_64: 2272 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); 2273 break; 2274 case KVM_REG_PPC_PPR: 2275 *val = get_reg_val(id, vcpu->arch.ppr); 2276 break; 2277 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2278 case KVM_REG_PPC_TFHAR: 2279 *val = get_reg_val(id, vcpu->arch.tfhar); 2280 break; 2281 case KVM_REG_PPC_TFIAR: 2282 *val = get_reg_val(id, vcpu->arch.tfiar); 
2283 break; 2284 case KVM_REG_PPC_TEXASR: 2285 *val = get_reg_val(id, vcpu->arch.texasr); 2286 break; 2287 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: 2288 i = id - KVM_REG_PPC_TM_GPR0; 2289 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); 2290 break; 2291 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: 2292 { 2293 int j; 2294 i = id - KVM_REG_PPC_TM_VSR0; 2295 if (i < 32) 2296 for (j = 0; j < TS_FPRWIDTH; j++) 2297 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; 2298 else { 2299 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 2300 val->vval = vcpu->arch.vr_tm.vr[i-32]; 2301 else 2302 r = -ENXIO; 2303 } 2304 break; 2305 } 2306 case KVM_REG_PPC_TM_CR: 2307 *val = get_reg_val(id, vcpu->arch.cr_tm); 2308 break; 2309 case KVM_REG_PPC_TM_XER: 2310 *val = get_reg_val(id, vcpu->arch.xer_tm); 2311 break; 2312 case KVM_REG_PPC_TM_LR: 2313 *val = get_reg_val(id, vcpu->arch.lr_tm); 2314 break; 2315 case KVM_REG_PPC_TM_CTR: 2316 *val = get_reg_val(id, vcpu->arch.ctr_tm); 2317 break; 2318 case KVM_REG_PPC_TM_FPSCR: 2319 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); 2320 break; 2321 case KVM_REG_PPC_TM_AMR: 2322 *val = get_reg_val(id, vcpu->arch.amr_tm); 2323 break; 2324 case KVM_REG_PPC_TM_PPR: 2325 *val = get_reg_val(id, vcpu->arch.ppr_tm); 2326 break; 2327 case KVM_REG_PPC_TM_VRSAVE: 2328 *val = get_reg_val(id, vcpu->arch.vrsave_tm); 2329 break; 2330 case KVM_REG_PPC_TM_VSCR: 2331 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 2332 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); 2333 else 2334 r = -ENXIO; 2335 break; 2336 case KVM_REG_PPC_TM_DSCR: 2337 *val = get_reg_val(id, vcpu->arch.dscr_tm); 2338 break; 2339 case KVM_REG_PPC_TM_TAR: 2340 *val = get_reg_val(id, vcpu->arch.tar_tm); 2341 break; 2342 #endif 2343 case KVM_REG_PPC_ARCH_COMPAT: 2344 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); 2345 break; 2346 case KVM_REG_PPC_DEC_EXPIRY: 2347 *val = get_reg_val(id, vcpu->arch.dec_expires); 2348 break; 2349 case KVM_REG_PPC_ONLINE: 2350 *val = get_reg_val(id, vcpu->arch.online); 2351 break; 2352 case KVM_REG_PPC_PTCR: 2353 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); 2354 break; 2355 default: 2356 r = -EINVAL; 2357 break; 2358 } 2359 2360 return r; 2361 } 2362 2363 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, 2364 union kvmppc_one_reg *val) 2365 { 2366 int r = 0; 2367 long int i; 2368 unsigned long addr, len; 2369 2370 switch (id) { 2371 case KVM_REG_PPC_HIOR: 2372 /* Only allow this to be set to zero */ 2373 if (set_reg_val(id, *val)) 2374 r = -EINVAL; 2375 break; 2376 case KVM_REG_PPC_DABR: 2377 vcpu->arch.dabr = set_reg_val(id, *val); 2378 break; 2379 case KVM_REG_PPC_DABRX: 2380 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; 2381 break; 2382 case KVM_REG_PPC_DSCR: 2383 vcpu->arch.dscr = set_reg_val(id, *val); 2384 break; 2385 case KVM_REG_PPC_PURR: 2386 vcpu->arch.purr = set_reg_val(id, *val); 2387 break; 2388 case KVM_REG_PPC_SPURR: 2389 vcpu->arch.spurr = set_reg_val(id, *val); 2390 break; 2391 case KVM_REG_PPC_AMR: 2392 vcpu->arch.amr = set_reg_val(id, *val); 2393 break; 2394 case KVM_REG_PPC_UAMOR: 2395 vcpu->arch.uamor = set_reg_val(id, *val); 2396 break; 2397 case KVM_REG_PPC_MMCR0 ... 
KVM_REG_PPC_MMCR1: 2398 i = id - KVM_REG_PPC_MMCR0; 2399 vcpu->arch.mmcr[i] = set_reg_val(id, *val); 2400 break; 2401 case KVM_REG_PPC_MMCR2: 2402 vcpu->arch.mmcr[2] = set_reg_val(id, *val); 2403 break; 2404 case KVM_REG_PPC_MMCRA: 2405 vcpu->arch.mmcra = set_reg_val(id, *val); 2406 break; 2407 case KVM_REG_PPC_MMCRS: 2408 vcpu->arch.mmcrs = set_reg_val(id, *val); 2409 break; 2410 case KVM_REG_PPC_MMCR3: 2411 vcpu->arch.mmcr[3] = set_reg_val(id, *val); 2412 break; 2413 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: 2414 i = id - KVM_REG_PPC_PMC1; 2415 vcpu->arch.pmc[i] = set_reg_val(id, *val); 2416 break; 2417 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: 2418 i = id - KVM_REG_PPC_SPMC1; 2419 vcpu->arch.spmc[i] = set_reg_val(id, *val); 2420 break; 2421 case KVM_REG_PPC_SIAR: 2422 vcpu->arch.siar = set_reg_val(id, *val); 2423 break; 2424 case KVM_REG_PPC_SDAR: 2425 vcpu->arch.sdar = set_reg_val(id, *val); 2426 break; 2427 case KVM_REG_PPC_SIER: 2428 vcpu->arch.sier[0] = set_reg_val(id, *val); 2429 break; 2430 case KVM_REG_PPC_SIER2: 2431 vcpu->arch.sier[1] = set_reg_val(id, *val); 2432 break; 2433 case KVM_REG_PPC_SIER3: 2434 vcpu->arch.sier[2] = set_reg_val(id, *val); 2435 break; 2436 case KVM_REG_PPC_IAMR: 2437 vcpu->arch.iamr = set_reg_val(id, *val); 2438 break; 2439 case KVM_REG_PPC_PSPB: 2440 vcpu->arch.pspb = set_reg_val(id, *val); 2441 break; 2442 case KVM_REG_PPC_DPDES: 2443 if (cpu_has_feature(CPU_FTR_ARCH_300)) 2444 vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1; 2445 else 2446 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); 2447 break; 2448 case KVM_REG_PPC_VTB: 2449 vcpu->arch.vcore->vtb = set_reg_val(id, *val); 2450 break; 2451 case KVM_REG_PPC_DAWR: 2452 vcpu->arch.dawr0 = set_reg_val(id, *val); 2453 break; 2454 case KVM_REG_PPC_DAWRX: 2455 vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; 2456 break; 2457 case KVM_REG_PPC_DAWR1: 2458 vcpu->arch.dawr1 = set_reg_val(id, *val); 2459 break; 2460 case KVM_REG_PPC_DAWRX1: 2461 vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; 2462 break; 2463 case KVM_REG_PPC_CIABR: 2464 vcpu->arch.ciabr = set_reg_val(id, *val); 2465 /* Don't allow setting breakpoints in hypervisor code */ 2466 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) 2467 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ 2468 break; 2469 case KVM_REG_PPC_CSIGR: 2470 vcpu->arch.csigr = set_reg_val(id, *val); 2471 break; 2472 case KVM_REG_PPC_TACR: 2473 vcpu->arch.tacr = set_reg_val(id, *val); 2474 break; 2475 case KVM_REG_PPC_TCSCR: 2476 vcpu->arch.tcscr = set_reg_val(id, *val); 2477 break; 2478 case KVM_REG_PPC_PID: 2479 vcpu->arch.pid = set_reg_val(id, *val); 2480 break; 2481 case KVM_REG_PPC_ACOP: 2482 vcpu->arch.acop = set_reg_val(id, *val); 2483 break; 2484 case KVM_REG_PPC_WORT: 2485 vcpu->arch.wort = set_reg_val(id, *val); 2486 break; 2487 case KVM_REG_PPC_TIDR: 2488 vcpu->arch.tid = set_reg_val(id, *val); 2489 break; 2490 case KVM_REG_PPC_PSSCR: 2491 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; 2492 break; 2493 case KVM_REG_PPC_VPA_ADDR: 2494 addr = set_reg_val(id, *val); 2495 r = -EINVAL; 2496 if (!addr && (vcpu->arch.slb_shadow.next_gpa || 2497 vcpu->arch.dtl.next_gpa)) 2498 break; 2499 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); 2500 break; 2501 case KVM_REG_PPC_VPA_SLB: 2502 addr = val->vpaval.addr; 2503 len = val->vpaval.length; 2504 r = -EINVAL; 2505 if (addr && !vcpu->arch.vpa.next_gpa) 2506 break; 2507 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); 2508 break; 2509 case KVM_REG_PPC_VPA_DTL:
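/*
 * The DTL can only be registered once the VPA is, and must have room
 * for at least one dtl_entry; the length is trimmed down to a whole
 * number of entries.
 */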
2510 addr = val->vpaval.addr; 2511 len = val->vpaval.length; 2512 r = -EINVAL; 2513 if (addr && (len < sizeof(struct dtl_entry) || 2514 !vcpu->arch.vpa.next_gpa)) 2515 break; 2516 len -= len % sizeof(struct dtl_entry); 2517 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); 2518 break; 2519 case KVM_REG_PPC_TB_OFFSET: 2520 { 2521 /* round up to multiple of 2^24 */ 2522 u64 tb_offset = ALIGN(set_reg_val(id, *val), 1UL << 24); 2523 2524 /* 2525 * Now that we know the timebase offset, update the 2526 * decrementer expiry with a guest timebase value. If 2527 * the userspace does not set DEC_EXPIRY, this ensures 2528 * a migrated vcpu at least starts with an expired 2529 * decrementer, which is better than a large one that 2530 * causes a hang. 2531 */ 2532 if (!vcpu->arch.dec_expires && tb_offset) 2533 vcpu->arch.dec_expires = get_tb() + tb_offset; 2534 2535 vcpu->arch.vcore->tb_offset = tb_offset; 2536 break; 2537 } 2538 case KVM_REG_PPC_LPCR: 2539 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); 2540 break; 2541 case KVM_REG_PPC_LPCR_64: 2542 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); 2543 break; 2544 case KVM_REG_PPC_PPR: 2545 vcpu->arch.ppr = set_reg_val(id, *val); 2546 break; 2547 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2548 case KVM_REG_PPC_TFHAR: 2549 vcpu->arch.tfhar = set_reg_val(id, *val); 2550 break; 2551 case KVM_REG_PPC_TFIAR: 2552 vcpu->arch.tfiar = set_reg_val(id, *val); 2553 break; 2554 case KVM_REG_PPC_TEXASR: 2555 vcpu->arch.texasr = set_reg_val(id, *val); 2556 break; 2557 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: 2558 i = id - KVM_REG_PPC_TM_GPR0; 2559 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); 2560 break; 2561 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: 2562 { 2563 int j; 2564 i = id - KVM_REG_PPC_TM_VSR0; 2565 if (i < 32) 2566 for (j = 0; j < TS_FPRWIDTH; j++) 2567 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; 2568 else 2569 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 2570 vcpu->arch.vr_tm.vr[i-32] = val->vval; 2571 else 2572 r = -ENXIO; 2573 break; 2574 } 2575 case KVM_REG_PPC_TM_CR: 2576 vcpu->arch.cr_tm = set_reg_val(id, *val); 2577 break; 2578 case KVM_REG_PPC_TM_XER: 2579 vcpu->arch.xer_tm = set_reg_val(id, *val); 2580 break; 2581 case KVM_REG_PPC_TM_LR: 2582 vcpu->arch.lr_tm = set_reg_val(id, *val); 2583 break; 2584 case KVM_REG_PPC_TM_CTR: 2585 vcpu->arch.ctr_tm = set_reg_val(id, *val); 2586 break; 2587 case KVM_REG_PPC_TM_FPSCR: 2588 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); 2589 break; 2590 case KVM_REG_PPC_TM_AMR: 2591 vcpu->arch.amr_tm = set_reg_val(id, *val); 2592 break; 2593 case KVM_REG_PPC_TM_PPR: 2594 vcpu->arch.ppr_tm = set_reg_val(id, *val); 2595 break; 2596 case KVM_REG_PPC_TM_VRSAVE: 2597 vcpu->arch.vrsave_tm = set_reg_val(id, *val); 2598 break; 2599 case KVM_REG_PPC_TM_VSCR: 2600 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 2601 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); 2602 else 2603 r = - ENXIO; 2604 break; 2605 case KVM_REG_PPC_TM_DSCR: 2606 vcpu->arch.dscr_tm = set_reg_val(id, *val); 2607 break; 2608 case KVM_REG_PPC_TM_TAR: 2609 vcpu->arch.tar_tm = set_reg_val(id, *val); 2610 break; 2611 #endif 2612 case KVM_REG_PPC_ARCH_COMPAT: 2613 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); 2614 break; 2615 case KVM_REG_PPC_DEC_EXPIRY: 2616 vcpu->arch.dec_expires = set_reg_val(id, *val); 2617 break; 2618 case KVM_REG_PPC_ONLINE: 2619 i = set_reg_val(id, *val); 2620 if (i && !vcpu->arch.online) 2621 atomic_inc(&vcpu->arch.vcore->online_count); 2622 else if (!i && vcpu->arch.online) 2623 
atomic_dec(&vcpu->arch.vcore->online_count); 2624 vcpu->arch.online = i; 2625 break; 2626 case KVM_REG_PPC_PTCR: 2627 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); 2628 break; 2629 default: 2630 r = -EINVAL; 2631 break; 2632 } 2633 2634 return r; 2635 } 2636 2637 /* 2638 * On POWER9, threads are independent and can be in different partitions. 2639 * Therefore we consider each thread to be a subcore. 2640 * There is a restriction that all threads have to be in the same 2641 * MMU mode (radix or HPT), unfortunately, but since we only support 2642 * HPT guests on a HPT host so far, that isn't an impediment yet. 2643 */ 2644 static int threads_per_vcore(struct kvm *kvm) 2645 { 2646 if (cpu_has_feature(CPU_FTR_ARCH_300)) 2647 return 1; 2648 return threads_per_subcore; 2649 } 2650 2651 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id) 2652 { 2653 struct kvmppc_vcore *vcore; 2654 2655 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); 2656 2657 if (vcore == NULL) 2658 return NULL; 2659 2660 spin_lock_init(&vcore->lock); 2661 spin_lock_init(&vcore->stoltb_lock); 2662 rcuwait_init(&vcore->wait); 2663 vcore->preempt_tb = TB_NIL; 2664 vcore->lpcr = kvm->arch.lpcr; 2665 vcore->first_vcpuid = id; 2666 vcore->kvm = kvm; 2667 INIT_LIST_HEAD(&vcore->preempt_list); 2668 2669 return vcore; 2670 } 2671 2672 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2673 static struct debugfs_timings_element { 2674 const char *name; 2675 size_t offset; 2676 } timings[] = { 2677 #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING 2678 {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)}, 2679 {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)}, 2680 {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)}, 2681 {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)}, 2682 {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)}, 2683 {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)}, 2684 {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)}, 2685 #else 2686 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)}, 2687 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)}, 2688 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)}, 2689 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)}, 2690 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)}, 2691 #endif 2692 }; 2693 2694 #define N_TIMINGS (ARRAY_SIZE(timings)) 2695 2696 struct debugfs_timings_state { 2697 struct kvm_vcpu *vcpu; 2698 unsigned int buflen; 2699 char buf[N_TIMINGS * 100]; 2700 }; 2701 2702 static int debugfs_timings_open(struct inode *inode, struct file *file) 2703 { 2704 struct kvm_vcpu *vcpu = inode->i_private; 2705 struct debugfs_timings_state *p; 2706 2707 p = kzalloc(sizeof(*p), GFP_KERNEL); 2708 if (!p) 2709 return -ENOMEM; 2710 2711 kvm_get_kvm(vcpu->kvm); 2712 p->vcpu = vcpu; 2713 file->private_data = p; 2714 2715 return nonseekable_open(inode, file); 2716 } 2717 2718 static int debugfs_timings_release(struct inode *inode, struct file *file) 2719 { 2720 struct debugfs_timings_state *p = file->private_data; 2721 2722 kvm_put_kvm(p->vcpu->kvm); 2723 kfree(p); 2724 return 0; 2725 } 2726 2727 static ssize_t debugfs_timings_read(struct file *file, char __user *buf, 2728 size_t len, loff_t *ppos) 2729 { 2730 struct debugfs_timings_state *p = file->private_data; 2731 struct kvm_vcpu *vcpu = p->vcpu; 2732 char *s, *buf_end; 2733 struct kvmhv_tb_accumulator tb; 2734 u64 count; 2735 loff_t pos; 2736 ssize_t n; 2737 int i, loops; 2738 bool ok; 2739 2740 if (!p->buflen) { 2741 s = p->buf; 2742 buf_end = s + sizeof(p->buf); 
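/*
 * Take a lockless snapshot of each accumulator: sample ->seqcount,
 * copy the accumulator, and accept the copy only if the count was
 * even (no update in flight) and unchanged after the copy.  Retry a
 * bounded number of times rather than spinning on a stuck counter.
 */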
2743 for (i = 0; i < N_TIMINGS; ++i) { 2744 struct kvmhv_tb_accumulator *acc; 2745 2746 acc = (struct kvmhv_tb_accumulator *) 2747 ((unsigned long)vcpu + timings[i].offset); 2748 ok = false; 2749 for (loops = 0; loops < 1000; ++loops) { 2750 count = acc->seqcount; 2751 if (!(count & 1)) { 2752 smp_rmb(); 2753 tb = *acc; 2754 smp_rmb(); 2755 if (count == acc->seqcount) { 2756 ok = true; 2757 break; 2758 } 2759 } 2760 udelay(1); 2761 } 2762 if (!ok) 2763 snprintf(s, buf_end - s, "%s: stuck\n", 2764 timings[i].name); 2765 else 2766 snprintf(s, buf_end - s, 2767 "%s: %llu %llu %llu %llu\n", 2768 timings[i].name, count / 2, 2769 tb_to_ns(tb.tb_total), 2770 tb_to_ns(tb.tb_min), 2771 tb_to_ns(tb.tb_max)); 2772 s += strlen(s); 2773 } 2774 p->buflen = s - p->buf; 2775 } 2776 2777 pos = *ppos; 2778 if (pos >= p->buflen) 2779 return 0; 2780 if (len > p->buflen - pos) 2781 len = p->buflen - pos; 2782 n = copy_to_user(buf, p->buf + pos, len); 2783 if (n) { 2784 if (n == len) 2785 return -EFAULT; 2786 len -= n; 2787 } 2788 *ppos = pos + len; 2789 return len; 2790 } 2791 2792 static ssize_t debugfs_timings_write(struct file *file, const char __user *buf, 2793 size_t len, loff_t *ppos) 2794 { 2795 return -EACCES; 2796 } 2797 2798 static const struct file_operations debugfs_timings_ops = { 2799 .owner = THIS_MODULE, 2800 .open = debugfs_timings_open, 2801 .release = debugfs_timings_release, 2802 .read = debugfs_timings_read, 2803 .write = debugfs_timings_write, 2804 .llseek = generic_file_llseek, 2805 }; 2806 2807 /* Create a debugfs directory for the vcpu */ 2808 static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) 2809 { 2810 if (cpu_has_feature(CPU_FTR_ARCH_300) == IS_ENABLED(CONFIG_KVM_BOOK3S_HV_P9_TIMING)) 2811 debugfs_create_file("timings", 0444, debugfs_dentry, vcpu, 2812 &debugfs_timings_ops); 2813 return 0; 2814 } 2815 2816 #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ 2817 static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) 2818 { 2819 return 0; 2820 } 2821 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ 2822 2823 static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) 2824 { 2825 int err; 2826 int core; 2827 struct kvmppc_vcore *vcore; 2828 struct kvm *kvm; 2829 unsigned int id; 2830 2831 kvm = vcpu->kvm; 2832 id = vcpu->vcpu_id; 2833 2834 vcpu->arch.shared = &vcpu->arch.shregs; 2835 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2836 /* 2837 * The shared struct is never shared on HV, 2838 * so we can always use host endianness 2839 */ 2840 #ifdef __BIG_ENDIAN__ 2841 vcpu->arch.shared_big_endian = true; 2842 #else 2843 vcpu->arch.shared_big_endian = false; 2844 #endif 2845 #endif 2846 vcpu->arch.mmcr[0] = MMCR0_FC; 2847 if (cpu_has_feature(CPU_FTR_ARCH_31)) { 2848 vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT; 2849 vcpu->arch.mmcra = MMCRA_BHRB_DISABLE; 2850 } 2851 2852 vcpu->arch.ctrl = CTRL_RUNLATCH; 2853 /* default to host PVR, since we can't spoof it */ 2854 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); 2855 spin_lock_init(&vcpu->arch.vpa_update_lock); 2856 spin_lock_init(&vcpu->arch.tbacct_lock); 2857 vcpu->arch.busy_preempt = TB_NIL; 2858 vcpu->arch.shregs.msr = MSR_ME; 2859 vcpu->arch.intr_msr = MSR_SF | MSR_ME; 2860 2861 /* 2862 * Set the default HFSCR for the guest from the host value. 2863 * This value is only used on POWER9. 2864 * On POWER9, we want to virtualize the doorbell facility, so we 2865 * don't set the HFSCR_MSGP bit, and that causes those instructions 2866 * to trap and then we emulate them. 
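 * Because HFSCR_MSGP stays clear, msgsndp and related doorbell
 * instructions come back as H_FAC_UNAVAIL interrupts with cause
 * FSCR_MSGP_LG, which the exit handler routes to
 * kvmppc_emulate_doorbell_instr().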
2867 */ 2868 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | 2869 HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP; 2870 if (cpu_has_feature(CPU_FTR_HVMODE)) { 2871 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); 2872 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2873 if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) 2874 vcpu->arch.hfscr |= HFSCR_TM; 2875 #endif 2876 } 2877 if (cpu_has_feature(CPU_FTR_TM_COMP)) 2878 vcpu->arch.hfscr |= HFSCR_TM; 2879 2880 vcpu->arch.hfscr_permitted = vcpu->arch.hfscr; 2881 2882 /* 2883 * PM, EBB, TM are demand-faulted so start with it clear. 2884 */ 2885 vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM); 2886 2887 kvmppc_mmu_book3s_hv_init(vcpu); 2888 2889 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; 2890 2891 init_waitqueue_head(&vcpu->arch.cpu_run); 2892 2893 mutex_lock(&kvm->lock); 2894 vcore = NULL; 2895 err = -EINVAL; 2896 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 2897 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { 2898 pr_devel("KVM: VCPU ID too high\n"); 2899 core = KVM_MAX_VCORES; 2900 } else { 2901 BUG_ON(kvm->arch.smt_mode != 1); 2902 core = kvmppc_pack_vcpu_id(kvm, id); 2903 } 2904 } else { 2905 core = id / kvm->arch.smt_mode; 2906 } 2907 if (core < KVM_MAX_VCORES) { 2908 vcore = kvm->arch.vcores[core]; 2909 if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) { 2910 pr_devel("KVM: collision on id %u", id); 2911 vcore = NULL; 2912 } else if (!vcore) { 2913 /* 2914 * Take mmu_setup_lock for mutual exclusion 2915 * with kvmppc_update_lpcr(). 2916 */ 2917 err = -ENOMEM; 2918 vcore = kvmppc_vcore_create(kvm, 2919 id & ~(kvm->arch.smt_mode - 1)); 2920 mutex_lock(&kvm->arch.mmu_setup_lock); 2921 kvm->arch.vcores[core] = vcore; 2922 kvm->arch.online_vcores++; 2923 mutex_unlock(&kvm->arch.mmu_setup_lock); 2924 } 2925 } 2926 mutex_unlock(&kvm->lock); 2927 2928 if (!vcore) 2929 return err; 2930 2931 spin_lock(&vcore->lock); 2932 ++vcore->num_threads; 2933 spin_unlock(&vcore->lock); 2934 vcpu->arch.vcore = vcore; 2935 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; 2936 vcpu->arch.thread_cpu = -1; 2937 vcpu->arch.prev_cpu = -1; 2938 2939 vcpu->arch.cpu_type = KVM_CPU_3S_64; 2940 kvmppc_sanity_check(vcpu); 2941 2942 return 0; 2943 } 2944 2945 static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode, 2946 unsigned long flags) 2947 { 2948 int err; 2949 int esmt = 0; 2950 2951 if (flags) 2952 return -EINVAL; 2953 if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode)) 2954 return -EINVAL; 2955 if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 2956 /* 2957 * On POWER8 (or POWER7), the threading mode is "strict", 2958 * so we pack smt_mode vcpus per vcore. 2959 */ 2960 if (smt_mode > threads_per_subcore) 2961 return -EINVAL; 2962 } else { 2963 /* 2964 * On POWER9, the threading mode is "loose", 2965 * so each vcpu gets its own vcore. 
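 * The requested value is remembered as emul_smt_mode so the guest's
 * notion of threads per core is still available, e.g. for the VCPU ID
 * range check and packing done at vcpu creation.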
2966 */ 2967 esmt = smt_mode; 2968 smt_mode = 1; 2969 } 2970 mutex_lock(&kvm->lock); 2971 err = -EBUSY; 2972 if (!kvm->arch.online_vcores) { 2973 kvm->arch.smt_mode = smt_mode; 2974 kvm->arch.emul_smt_mode = esmt; 2975 err = 0; 2976 } 2977 mutex_unlock(&kvm->lock); 2978 2979 return err; 2980 } 2981 2982 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) 2983 { 2984 if (vpa->pinned_addr) 2985 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, 2986 vpa->dirty); 2987 } 2988 2989 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) 2990 { 2991 spin_lock(&vcpu->arch.vpa_update_lock); 2992 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); 2993 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); 2994 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); 2995 spin_unlock(&vcpu->arch.vpa_update_lock); 2996 } 2997 2998 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) 2999 { 3000 /* Indicate we want to get back into the guest */ 3001 return 1; 3002 } 3003 3004 static void kvmppc_set_timer(struct kvm_vcpu *vcpu) 3005 { 3006 unsigned long dec_nsec, now; 3007 3008 now = get_tb(); 3009 if (now > kvmppc_dec_expires_host_tb(vcpu)) { 3010 /* decrementer has already gone negative */ 3011 kvmppc_core_queue_dec(vcpu); 3012 kvmppc_core_prepare_to_enter(vcpu); 3013 return; 3014 } 3015 dec_nsec = tb_to_ns(kvmppc_dec_expires_host_tb(vcpu) - now); 3016 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); 3017 vcpu->arch.timer_running = 1; 3018 } 3019 3020 extern int __kvmppc_vcore_entry(void); 3021 3022 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, 3023 struct kvm_vcpu *vcpu, u64 tb) 3024 { 3025 u64 now; 3026 3027 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) 3028 return; 3029 spin_lock_irq(&vcpu->arch.tbacct_lock); 3030 now = tb; 3031 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - 3032 vcpu->arch.stolen_logged; 3033 vcpu->arch.busy_preempt = now; 3034 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; 3035 spin_unlock_irq(&vcpu->arch.tbacct_lock); 3036 --vc->n_runnable; 3037 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); 3038 } 3039 3040 static int kvmppc_grab_hwthread(int cpu) 3041 { 3042 struct paca_struct *tpaca; 3043 long timeout = 10000; 3044 3045 tpaca = paca_ptrs[cpu]; 3046 3047 /* Ensure the thread won't go into the kernel if it wakes */ 3048 tpaca->kvm_hstate.kvm_vcpu = NULL; 3049 tpaca->kvm_hstate.kvm_vcore = NULL; 3050 tpaca->kvm_hstate.napping = 0; 3051 smp_wmb(); 3052 tpaca->kvm_hstate.hwthread_req = 1; 3053 3054 /* 3055 * If the thread is already executing in the kernel (e.g. handling 3056 * a stray interrupt), wait for it to get back to nap mode. 3057 * The smp_mb() is to ensure that our setting of hwthread_req 3058 * is visible before we look at hwthread_state, so if this 3059 * races with the code at system_reset_pSeries and the thread 3060 * misses our setting of hwthread_req, we are sure to see its 3061 * setting of hwthread_state, and vice versa. 
3062 */ 3063 smp_mb(); 3064 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { 3065 if (--timeout <= 0) { 3066 pr_err("KVM: couldn't grab cpu %d\n", cpu); 3067 return -EBUSY; 3068 } 3069 udelay(1); 3070 } 3071 return 0; 3072 } 3073 3074 static void kvmppc_release_hwthread(int cpu) 3075 { 3076 struct paca_struct *tpaca; 3077 3078 tpaca = paca_ptrs[cpu]; 3079 tpaca->kvm_hstate.hwthread_req = 0; 3080 tpaca->kvm_hstate.kvm_vcpu = NULL; 3081 tpaca->kvm_hstate.kvm_vcore = NULL; 3082 tpaca->kvm_hstate.kvm_split_mode = NULL; 3083 } 3084 3085 static DEFINE_PER_CPU(struct kvm *, cpu_in_guest); 3086 3087 static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) 3088 { 3089 struct kvm_nested_guest *nested = vcpu->arch.nested; 3090 cpumask_t *need_tlb_flush; 3091 int i; 3092 3093 if (nested) 3094 need_tlb_flush = &nested->need_tlb_flush; 3095 else 3096 need_tlb_flush = &kvm->arch.need_tlb_flush; 3097 3098 cpu = cpu_first_tlb_thread_sibling(cpu); 3099 for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu); 3100 i += cpu_tlb_thread_sibling_step()) 3101 cpumask_set_cpu(i, need_tlb_flush); 3102 3103 /* 3104 * Make sure setting of bit in need_tlb_flush precedes testing of 3105 * cpu_in_guest. The matching barrier on the other side is hwsync 3106 * when switching to guest MMU mode, which happens between 3107 * cpu_in_guest being set to the guest kvm, and need_tlb_flush bit 3108 * being tested. 3109 */ 3110 smp_mb(); 3111 3112 for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu); 3113 i += cpu_tlb_thread_sibling_step()) { 3114 struct kvm *running = *per_cpu_ptr(&cpu_in_guest, i); 3115 3116 if (running == kvm) 3117 smp_call_function_single(i, do_nothing, NULL, 1); 3118 } 3119 } 3120 3121 static void do_migrate_away_vcpu(void *arg) 3122 { 3123 struct kvm_vcpu *vcpu = arg; 3124 struct kvm *kvm = vcpu->kvm; 3125 3126 /* 3127 * If the guest has GTSE, it may execute tlbie, so do a eieio; tlbsync; 3128 * ptesync sequence on the old CPU before migrating to a new one, in 3129 * case we interrupted the guest between a tlbie ; eieio ; 3130 * tlbsync; ptesync sequence. 3131 * 3132 * Otherwise, ptesync is sufficient for ordering tlbiel sequences. 3133 */ 3134 if (kvm->arch.lpcr & LPCR_GTSE) 3135 asm volatile("eieio; tlbsync; ptesync"); 3136 else 3137 asm volatile("ptesync"); 3138 } 3139 3140 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) 3141 { 3142 struct kvm_nested_guest *nested = vcpu->arch.nested; 3143 struct kvm *kvm = vcpu->kvm; 3144 int prev_cpu; 3145 3146 if (!cpu_has_feature(CPU_FTR_HVMODE)) 3147 return; 3148 3149 if (nested) 3150 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; 3151 else 3152 prev_cpu = vcpu->arch.prev_cpu; 3153 3154 /* 3155 * With radix, the guest can do TLB invalidations itself, 3156 * and it could choose to use the local form (tlbiel) if 3157 * it is invalidating a translation that has only ever been 3158 * used on one vcpu. However, that doesn't mean it has 3159 * only ever been used on one physical cpu, since vcpus 3160 * can move around between pcpus. To cope with this, when 3161 * a vcpu moves from one pcpu to another, we need to tell 3162 * any vcpus running on the same core as this vcpu previously 3163 * ran to flush the TLB. 
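 * The flush itself is deferred: radix_flush_cpu() sets bits in the
 * appropriate need_tlb_flush mask and IPIs any sibling thread that is
 * currently running this guest, so the flush is done before the next
 * switch into the guest MMU context.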
3164 */ 3165 if (prev_cpu != pcpu) { 3166 if (prev_cpu >= 0) { 3167 if (cpu_first_tlb_thread_sibling(prev_cpu) != 3168 cpu_first_tlb_thread_sibling(pcpu)) 3169 radix_flush_cpu(kvm, prev_cpu, vcpu); 3170 3171 smp_call_function_single(prev_cpu, 3172 do_migrate_away_vcpu, vcpu, 1); 3173 } 3174 if (nested) 3175 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; 3176 else 3177 vcpu->arch.prev_cpu = pcpu; 3178 } 3179 } 3180 3181 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) 3182 { 3183 int cpu; 3184 struct paca_struct *tpaca; 3185 3186 cpu = vc->pcpu; 3187 if (vcpu) { 3188 if (vcpu->arch.timer_running) { 3189 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 3190 vcpu->arch.timer_running = 0; 3191 } 3192 cpu += vcpu->arch.ptid; 3193 vcpu->cpu = vc->pcpu; 3194 vcpu->arch.thread_cpu = cpu; 3195 } 3196 tpaca = paca_ptrs[cpu]; 3197 tpaca->kvm_hstate.kvm_vcpu = vcpu; 3198 tpaca->kvm_hstate.ptid = cpu - vc->pcpu; 3199 tpaca->kvm_hstate.fake_suspend = 0; 3200 /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */ 3201 smp_wmb(); 3202 tpaca->kvm_hstate.kvm_vcore = vc; 3203 if (cpu != smp_processor_id()) 3204 kvmppc_ipi_thread(cpu); 3205 } 3206 3207 static void kvmppc_wait_for_nap(int n_threads) 3208 { 3209 int cpu = smp_processor_id(); 3210 int i, loops; 3211 3212 if (n_threads <= 1) 3213 return; 3214 for (loops = 0; loops < 1000000; ++loops) { 3215 /* 3216 * Check if all threads are finished. 3217 * We set the vcore pointer when starting a thread 3218 * and the thread clears it when finished, so we look 3219 * for any threads that still have a non-NULL vcore ptr. 3220 */ 3221 for (i = 1; i < n_threads; ++i) 3222 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) 3223 break; 3224 if (i == n_threads) { 3225 HMT_medium(); 3226 return; 3227 } 3228 HMT_low(); 3229 } 3230 HMT_medium(); 3231 for (i = 1; i < n_threads; ++i) 3232 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) 3233 pr_err("KVM: CPU %d seems to be stuck\n", cpu + i); 3234 } 3235 3236 /* 3237 * Check that we are on thread 0 and that any other threads in 3238 * this core are off-line. Then grab the threads so they can't 3239 * enter the kernel. 3240 */ 3241 static int on_primary_thread(void) 3242 { 3243 int cpu = smp_processor_id(); 3244 int thr; 3245 3246 /* Are we on a primary subcore? */ 3247 if (cpu_thread_in_subcore(cpu)) 3248 return 0; 3249 3250 thr = 0; 3251 while (++thr < threads_per_subcore) 3252 if (cpu_online(cpu + thr)) 3253 return 0; 3254 3255 /* Grab all hw threads so they can't go into the kernel */ 3256 for (thr = 1; thr < threads_per_subcore; ++thr) { 3257 if (kvmppc_grab_hwthread(cpu + thr)) { 3258 /* Couldn't grab one; let the others go */ 3259 do { 3260 kvmppc_release_hwthread(cpu + thr); 3261 } while (--thr > 0); 3262 return 0; 3263 } 3264 } 3265 return 1; 3266 } 3267 3268 /* 3269 * A list of virtual cores for each physical CPU. 3270 * These are vcores that could run but their runner VCPU tasks are 3271 * (or may be) preempted. 
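 * collect_piggybacks() scans this per-CPU list for preempted vcores
 * that can be run alongside the vcore that is about to enter the
 * guest.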
3272 */ 3273 struct preempted_vcore_list { 3274 struct list_head list; 3275 spinlock_t lock; 3276 }; 3277 3278 static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores); 3279 3280 static void init_vcore_lists(void) 3281 { 3282 int cpu; 3283 3284 for_each_possible_cpu(cpu) { 3285 struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu); 3286 spin_lock_init(&lp->lock); 3287 INIT_LIST_HEAD(&lp->list); 3288 } 3289 } 3290 3291 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc) 3292 { 3293 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); 3294 3295 WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); 3296 3297 vc->vcore_state = VCORE_PREEMPT; 3298 vc->pcpu = smp_processor_id(); 3299 if (vc->num_threads < threads_per_vcore(vc->kvm)) { 3300 spin_lock(&lp->lock); 3301 list_add_tail(&vc->preempt_list, &lp->list); 3302 spin_unlock(&lp->lock); 3303 } 3304 3305 /* Start accumulating stolen time */ 3306 kvmppc_core_start_stolen(vc, mftb()); 3307 } 3308 3309 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc) 3310 { 3311 struct preempted_vcore_list *lp; 3312 3313 WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); 3314 3315 kvmppc_core_end_stolen(vc, mftb()); 3316 if (!list_empty(&vc->preempt_list)) { 3317 lp = &per_cpu(preempted_vcores, vc->pcpu); 3318 spin_lock(&lp->lock); 3319 list_del_init(&vc->preempt_list); 3320 spin_unlock(&lp->lock); 3321 } 3322 vc->vcore_state = VCORE_INACTIVE; 3323 } 3324 3325 /* 3326 * This stores information about the virtual cores currently 3327 * assigned to a physical core. 3328 */ 3329 struct core_info { 3330 int n_subcores; 3331 int max_subcore_threads; 3332 int total_threads; 3333 int subcore_threads[MAX_SUBCORES]; 3334 struct kvmppc_vcore *vc[MAX_SUBCORES]; 3335 }; 3336 3337 /* 3338 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7 3339 * respectively in 2-way micro-threading (split-core) mode on POWER8. 3340 */ 3341 static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 }; 3342 3343 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) 3344 { 3345 memset(cip, 0, sizeof(*cip)); 3346 cip->n_subcores = 1; 3347 cip->max_subcore_threads = vc->num_threads; 3348 cip->total_threads = vc->num_threads; 3349 cip->subcore_threads[0] = vc->num_threads; 3350 cip->vc[0] = vc; 3351 } 3352 3353 static bool subcore_config_ok(int n_subcores, int n_threads) 3354 { 3355 /* 3356 * POWER9 "SMT4" cores are permanently in what is effectively a 4-way 3357 * split-core mode, with one thread per subcore. 
3358 */ 3359 if (cpu_has_feature(CPU_FTR_ARCH_300)) 3360 return n_subcores <= 4 && n_threads == 1; 3361 3362 /* On POWER8, can only dynamically split if unsplit to begin with */ 3363 if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS) 3364 return false; 3365 if (n_subcores > MAX_SUBCORES) 3366 return false; 3367 if (n_subcores > 1) { 3368 if (!(dynamic_mt_modes & 2)) 3369 n_subcores = 4; 3370 if (n_subcores > 2 && !(dynamic_mt_modes & 4)) 3371 return false; 3372 } 3373 3374 return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS; 3375 } 3376 3377 static void init_vcore_to_run(struct kvmppc_vcore *vc) 3378 { 3379 vc->entry_exit_map = 0; 3380 vc->in_guest = 0; 3381 vc->napping_threads = 0; 3382 vc->conferring_threads = 0; 3383 vc->tb_offset_applied = 0; 3384 } 3385 3386 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) 3387 { 3388 int n_threads = vc->num_threads; 3389 int sub; 3390 3391 if (!cpu_has_feature(CPU_FTR_ARCH_207S)) 3392 return false; 3393 3394 /* In one_vm_per_core mode, require all vcores to be from the same vm */ 3395 if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) 3396 return false; 3397 3398 if (n_threads < cip->max_subcore_threads) 3399 n_threads = cip->max_subcore_threads; 3400 if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) 3401 return false; 3402 cip->max_subcore_threads = n_threads; 3403 3404 sub = cip->n_subcores; 3405 ++cip->n_subcores; 3406 cip->total_threads += vc->num_threads; 3407 cip->subcore_threads[sub] = vc->num_threads; 3408 cip->vc[sub] = vc; 3409 init_vcore_to_run(vc); 3410 list_del_init(&vc->preempt_list); 3411 3412 return true; 3413 } 3414 3415 /* 3416 * Work out whether it is possible to piggyback the execution of 3417 * vcore *pvc onto the execution of the other vcores described in *cip. 
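 * This is allowed only if the combined thread count stays within
 * target_threads and the resulting dynamic split-core configuration
 * is still valid.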
3418 */ 3419 static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip, 3420 int target_threads) 3421 { 3422 if (cip->total_threads + pvc->num_threads > target_threads) 3423 return false; 3424 3425 return can_dynamic_split(pvc, cip); 3426 } 3427 3428 static void prepare_threads(struct kvmppc_vcore *vc) 3429 { 3430 int i; 3431 struct kvm_vcpu *vcpu; 3432 3433 for_each_runnable_thread(i, vcpu, vc) { 3434 if (signal_pending(vcpu->arch.run_task)) 3435 vcpu->arch.ret = -EINTR; 3436 else if (vcpu->arch.vpa.update_pending || 3437 vcpu->arch.slb_shadow.update_pending || 3438 vcpu->arch.dtl.update_pending) 3439 vcpu->arch.ret = RESUME_GUEST; 3440 else 3441 continue; 3442 kvmppc_remove_runnable(vc, vcpu, mftb()); 3443 wake_up(&vcpu->arch.cpu_run); 3444 } 3445 } 3446 3447 static void collect_piggybacks(struct core_info *cip, int target_threads) 3448 { 3449 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); 3450 struct kvmppc_vcore *pvc, *vcnext; 3451 3452 spin_lock(&lp->lock); 3453 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { 3454 if (!spin_trylock(&pvc->lock)) 3455 continue; 3456 prepare_threads(pvc); 3457 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { 3458 list_del_init(&pvc->preempt_list); 3459 if (pvc->runner == NULL) { 3460 pvc->vcore_state = VCORE_INACTIVE; 3461 kvmppc_core_end_stolen(pvc, mftb()); 3462 } 3463 spin_unlock(&pvc->lock); 3464 continue; 3465 } 3466 if (!can_piggyback(pvc, cip, target_threads)) { 3467 spin_unlock(&pvc->lock); 3468 continue; 3469 } 3470 kvmppc_core_end_stolen(pvc, mftb()); 3471 pvc->vcore_state = VCORE_PIGGYBACK; 3472 if (cip->total_threads >= target_threads) 3473 break; 3474 } 3475 spin_unlock(&lp->lock); 3476 } 3477 3478 static bool recheck_signals_and_mmu(struct core_info *cip) 3479 { 3480 int sub, i; 3481 struct kvm_vcpu *vcpu; 3482 struct kvmppc_vcore *vc; 3483 3484 for (sub = 0; sub < cip->n_subcores; ++sub) { 3485 vc = cip->vc[sub]; 3486 if (!vc->kvm->arch.mmu_ready) 3487 return true; 3488 for_each_runnable_thread(i, vcpu, vc) 3489 if (signal_pending(vcpu->arch.run_task)) 3490 return true; 3491 } 3492 return false; 3493 } 3494 3495 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) 3496 { 3497 int still_running = 0, i; 3498 u64 now; 3499 long ret; 3500 struct kvm_vcpu *vcpu; 3501 3502 spin_lock(&vc->lock); 3503 now = get_tb(); 3504 for_each_runnable_thread(i, vcpu, vc) { 3505 /* 3506 * It's safe to unlock the vcore in the loop here, because 3507 * for_each_runnable_thread() is safe against removal of 3508 * the vcpu, and the vcore state is VCORE_EXITING here, 3509 * so any vcpus becoming runnable will have their arch.trap 3510 * set to zero and can't actually run in the guest. 
3511 */ 3512 spin_unlock(&vc->lock); 3513 /* cancel pending dec exception if dec is positive */ 3514 if (now < kvmppc_dec_expires_host_tb(vcpu) && 3515 kvmppc_core_pending_dec(vcpu)) 3516 kvmppc_core_dequeue_dec(vcpu); 3517 3518 trace_kvm_guest_exit(vcpu); 3519 3520 ret = RESUME_GUEST; 3521 if (vcpu->arch.trap) 3522 ret = kvmppc_handle_exit_hv(vcpu, 3523 vcpu->arch.run_task); 3524 3525 vcpu->arch.ret = ret; 3526 vcpu->arch.trap = 0; 3527 3528 spin_lock(&vc->lock); 3529 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { 3530 if (vcpu->arch.pending_exceptions) 3531 kvmppc_core_prepare_to_enter(vcpu); 3532 if (vcpu->arch.ceded) 3533 kvmppc_set_timer(vcpu); 3534 else 3535 ++still_running; 3536 } else { 3537 kvmppc_remove_runnable(vc, vcpu, mftb()); 3538 wake_up(&vcpu->arch.cpu_run); 3539 } 3540 } 3541 if (!is_master) { 3542 if (still_running > 0) { 3543 kvmppc_vcore_preempt(vc); 3544 } else if (vc->runner) { 3545 vc->vcore_state = VCORE_PREEMPT; 3546 kvmppc_core_start_stolen(vc, mftb()); 3547 } else { 3548 vc->vcore_state = VCORE_INACTIVE; 3549 } 3550 if (vc->n_runnable > 0 && vc->runner == NULL) { 3551 /* make sure there's a candidate runner awake */ 3552 i = -1; 3553 vcpu = next_runnable_thread(vc, &i); 3554 wake_up(&vcpu->arch.cpu_run); 3555 } 3556 } 3557 spin_unlock(&vc->lock); 3558 } 3559 3560 /* 3561 * Clear core from the list of active host cores as we are about to 3562 * enter the guest. Only do this if it is the primary thread of the 3563 * core (not if a subcore) that is entering the guest. 3564 */ 3565 static inline int kvmppc_clear_host_core(unsigned int cpu) 3566 { 3567 int core; 3568 3569 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) 3570 return 0; 3571 /* 3572 * Memory barrier can be omitted here as we will do a smp_wmb() 3573 * later in kvmppc_start_thread and we need ensure that state is 3574 * visible to other CPUs only after we enter guest. 3575 */ 3576 core = cpu >> threads_shift; 3577 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; 3578 return 0; 3579 } 3580 3581 /* 3582 * Advertise this core as an active host core since we exited the guest 3583 * Only need to do this if it is the primary thread of the core that is 3584 * exiting. 3585 */ 3586 static inline int kvmppc_set_host_core(unsigned int cpu) 3587 { 3588 int core; 3589 3590 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) 3591 return 0; 3592 3593 /* 3594 * Memory barrier can be omitted here because we do a spin_unlock 3595 * immediately after this which provides the memory barrier. 3596 */ 3597 core = cpu >> threads_shift; 3598 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; 3599 return 0; 3600 } 3601 3602 static void set_irq_happened(int trap) 3603 { 3604 switch (trap) { 3605 case BOOK3S_INTERRUPT_EXTERNAL: 3606 local_paca->irq_happened |= PACA_IRQ_EE; 3607 break; 3608 case BOOK3S_INTERRUPT_H_DOORBELL: 3609 local_paca->irq_happened |= PACA_IRQ_DBELL; 3610 break; 3611 case BOOK3S_INTERRUPT_HMI: 3612 local_paca->irq_happened |= PACA_IRQ_HMI; 3613 break; 3614 case BOOK3S_INTERRUPT_SYSTEM_RESET: 3615 replay_system_reset(); 3616 break; 3617 } 3618 } 3619 3620 /* 3621 * Run a set of guest threads on a physical core. 3622 * Called with vc->lock held. 
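 * Roughly: drop threads that cannot run, pull in piggyback vcores,
 * set up split-core mode if needed, start all the hardware threads,
 * enter the guest via __kvmppc_vcore_entry(), wait for the secondary
 * threads to nap, undo the split, and post-process each vcore's exits.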
3623 */ 3624 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) 3625 { 3626 struct kvm_vcpu *vcpu; 3627 int i; 3628 int srcu_idx; 3629 struct core_info core_info; 3630 struct kvmppc_vcore *pvc; 3631 struct kvm_split_mode split_info, *sip; 3632 int split, subcore_size, active; 3633 int sub; 3634 bool thr0_done; 3635 unsigned long cmd_bit, stat_bit; 3636 int pcpu, thr; 3637 int target_threads; 3638 int controlled_threads; 3639 int trap; 3640 bool is_power8; 3641 3642 if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300))) 3643 return; 3644 3645 /* 3646 * Remove from the list any threads that have a signal pending 3647 * or need a VPA update done 3648 */ 3649 prepare_threads(vc); 3650 3651 /* if the runner is no longer runnable, let the caller pick a new one */ 3652 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) 3653 return; 3654 3655 /* 3656 * Initialize *vc. 3657 */ 3658 init_vcore_to_run(vc); 3659 vc->preempt_tb = TB_NIL; 3660 3661 /* 3662 * Number of threads that we will be controlling: the same as 3663 * the number of threads per subcore, except on POWER9, 3664 * where it's 1 because the threads are (mostly) independent. 3665 */ 3666 controlled_threads = threads_per_vcore(vc->kvm); 3667 3668 /* 3669 * Make sure we are running on primary threads, and that secondary 3670 * threads are offline. Also check if the number of threads in this 3671 * guest are greater than the current system threads per guest. 3672 */ 3673 if ((controlled_threads > 1) && 3674 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { 3675 for_each_runnable_thread(i, vcpu, vc) { 3676 vcpu->arch.ret = -EBUSY; 3677 kvmppc_remove_runnable(vc, vcpu, mftb()); 3678 wake_up(&vcpu->arch.cpu_run); 3679 } 3680 goto out; 3681 } 3682 3683 /* 3684 * See if we could run any other vcores on the physical core 3685 * along with this one. 3686 */ 3687 init_core_info(&core_info, vc); 3688 pcpu = smp_processor_id(); 3689 target_threads = controlled_threads; 3690 if (target_smt_mode && target_smt_mode < target_threads) 3691 target_threads = target_smt_mode; 3692 if (vc->num_threads < target_threads) 3693 collect_piggybacks(&core_info, target_threads); 3694 3695 /* 3696 * Hard-disable interrupts, and check resched flag and signals. 3697 * If we need to reschedule or deliver a signal, clean up 3698 * and return without going into the guest(s). 3699 * If the mmu_ready flag has been cleared, don't go into the 3700 * guest because that means a HPT resize operation is in progress. 
3701 */ 3702 local_irq_disable(); 3703 hard_irq_disable(); 3704 if (lazy_irq_pending() || need_resched() || 3705 recheck_signals_and_mmu(&core_info)) { 3706 local_irq_enable(); 3707 vc->vcore_state = VCORE_INACTIVE; 3708 /* Unlock all except the primary vcore */ 3709 for (sub = 1; sub < core_info.n_subcores; ++sub) { 3710 pvc = core_info.vc[sub]; 3711 /* Put back on to the preempted vcores list */ 3712 kvmppc_vcore_preempt(pvc); 3713 spin_unlock(&pvc->lock); 3714 } 3715 for (i = 0; i < controlled_threads; ++i) 3716 kvmppc_release_hwthread(pcpu + i); 3717 return; 3718 } 3719 3720 kvmppc_clear_host_core(pcpu); 3721 3722 /* Decide on micro-threading (split-core) mode */ 3723 subcore_size = threads_per_subcore; 3724 cmd_bit = stat_bit = 0; 3725 split = core_info.n_subcores; 3726 sip = NULL; 3727 is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S); 3728 3729 if (split > 1) { 3730 sip = &split_info; 3731 memset(&split_info, 0, sizeof(split_info)); 3732 for (sub = 0; sub < core_info.n_subcores; ++sub) 3733 split_info.vc[sub] = core_info.vc[sub]; 3734 3735 if (is_power8) { 3736 if (split == 2 && (dynamic_mt_modes & 2)) { 3737 cmd_bit = HID0_POWER8_1TO2LPAR; 3738 stat_bit = HID0_POWER8_2LPARMODE; 3739 } else { 3740 split = 4; 3741 cmd_bit = HID0_POWER8_1TO4LPAR; 3742 stat_bit = HID0_POWER8_4LPARMODE; 3743 } 3744 subcore_size = MAX_SMT_THREADS / split; 3745 split_info.rpr = mfspr(SPRN_RPR); 3746 split_info.pmmar = mfspr(SPRN_PMMAR); 3747 split_info.ldbar = mfspr(SPRN_LDBAR); 3748 split_info.subcore_size = subcore_size; 3749 } else { 3750 split_info.subcore_size = 1; 3751 } 3752 3753 /* order writes to split_info before kvm_split_mode pointer */ 3754 smp_wmb(); 3755 } 3756 3757 for (thr = 0; thr < controlled_threads; ++thr) { 3758 struct paca_struct *paca = paca_ptrs[pcpu + thr]; 3759 3760 paca->kvm_hstate.napping = 0; 3761 paca->kvm_hstate.kvm_split_mode = sip; 3762 } 3763 3764 /* Initiate micro-threading (split-core) on POWER8 if required */ 3765 if (cmd_bit) { 3766 unsigned long hid0 = mfspr(SPRN_HID0); 3767 3768 hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS; 3769 mb(); 3770 mtspr(SPRN_HID0, hid0); 3771 isync(); 3772 for (;;) { 3773 hid0 = mfspr(SPRN_HID0); 3774 if (hid0 & stat_bit) 3775 break; 3776 cpu_relax(); 3777 } 3778 } 3779 3780 /* 3781 * On POWER8, set RWMR register. 3782 * Since it only affects PURR and SPURR, it doesn't affect 3783 * the host, so we don't save/restore the host value. 3784 */ 3785 if (is_power8) { 3786 unsigned long rwmr_val = RWMR_RPA_P8_8THREAD; 3787 int n_online = atomic_read(&vc->online_count); 3788 3789 /* 3790 * Use the 8-thread value if we're doing split-core 3791 * or if the vcore's online count looks bogus. 3792 */ 3793 if (split == 1 && threads_per_subcore == MAX_SMT_THREADS && 3794 n_online >= 1 && n_online <= MAX_SMT_THREADS) 3795 rwmr_val = p8_rwmr_values[n_online]; 3796 mtspr(SPRN_RWMR, rwmr_val); 3797 } 3798 3799 /* Start all the threads */ 3800 active = 0; 3801 for (sub = 0; sub < core_info.n_subcores; ++sub) { 3802 thr = is_power8 ? subcore_thread_map[sub] : sub; 3803 thr0_done = false; 3804 active |= 1 << thr; 3805 pvc = core_info.vc[sub]; 3806 pvc->pcpu = pcpu + thr; 3807 for_each_runnable_thread(i, vcpu, pvc) { 3808 /* 3809 * XXX: is kvmppc_start_thread called too late here? 3810 * It updates vcpu->cpu and vcpu->arch.thread_cpu 3811 * which are used by kvmppc_fast_vcpu_kick_hv(), but 3812 * kick is called after new exceptions become available 3813 * and exceptions are checked earlier than here, by 3814 * kvmppc_core_prepare_to_enter. 
3815 */ 3816 kvmppc_start_thread(vcpu, pvc); 3817 kvmppc_create_dtl_entry(vcpu, pvc); 3818 trace_kvm_guest_enter(vcpu); 3819 if (!vcpu->arch.ptid) 3820 thr0_done = true; 3821 active |= 1 << (thr + vcpu->arch.ptid); 3822 } 3823 /* 3824 * We need to start the first thread of each subcore 3825 * even if it doesn't have a vcpu. 3826 */ 3827 if (!thr0_done) 3828 kvmppc_start_thread(NULL, pvc); 3829 } 3830 3831 /* 3832 * Ensure that split_info.do_nap is set after setting 3833 * the vcore pointer in the PACA of the secondaries. 3834 */ 3835 smp_mb(); 3836 3837 /* 3838 * When doing micro-threading, poke the inactive threads as well. 3839 * This gets them to the nap instruction after kvm_do_nap, 3840 * which reduces the time taken to unsplit later. 3841 */ 3842 if (cmd_bit) { 3843 split_info.do_nap = 1; /* ask secondaries to nap when done */ 3844 for (thr = 1; thr < threads_per_subcore; ++thr) 3845 if (!(active & (1 << thr))) 3846 kvmppc_ipi_thread(pcpu + thr); 3847 } 3848 3849 vc->vcore_state = VCORE_RUNNING; 3850 preempt_disable(); 3851 3852 trace_kvmppc_run_core(vc, 0); 3853 3854 for (sub = 0; sub < core_info.n_subcores; ++sub) 3855 spin_unlock(&core_info.vc[sub]->lock); 3856 3857 guest_enter_irqoff(); 3858 3859 srcu_idx = srcu_read_lock(&vc->kvm->srcu); 3860 3861 this_cpu_disable_ftrace(); 3862 3863 /* 3864 * Interrupts will be enabled once we get into the guest, 3865 * so tell lockdep that we're about to enable interrupts. 3866 */ 3867 trace_hardirqs_on(); 3868 3869 trap = __kvmppc_vcore_entry(); 3870 3871 trace_hardirqs_off(); 3872 3873 this_cpu_enable_ftrace(); 3874 3875 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); 3876 3877 set_irq_happened(trap); 3878 3879 spin_lock(&vc->lock); 3880 /* prevent other vcpu threads from doing kvmppc_start_thread() now */ 3881 vc->vcore_state = VCORE_EXITING; 3882 3883 /* wait for secondary threads to finish writing their state to memory */ 3884 kvmppc_wait_for_nap(controlled_threads); 3885 3886 /* Return to whole-core mode if we split the core earlier */ 3887 if (cmd_bit) { 3888 unsigned long hid0 = mfspr(SPRN_HID0); 3889 unsigned long loops = 0; 3890 3891 hid0 &= ~HID0_POWER8_DYNLPARDIS; 3892 stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; 3893 mb(); 3894 mtspr(SPRN_HID0, hid0); 3895 isync(); 3896 for (;;) { 3897 hid0 = mfspr(SPRN_HID0); 3898 if (!(hid0 & stat_bit)) 3899 break; 3900 cpu_relax(); 3901 ++loops; 3902 } 3903 split_info.do_nap = 0; 3904 } 3905 3906 kvmppc_set_host_core(pcpu); 3907 3908 context_tracking_guest_exit(); 3909 if (!vtime_accounting_enabled_this_cpu()) { 3910 local_irq_enable(); 3911 /* 3912 * Service IRQs here before vtime_account_guest_exit() so any 3913 * ticks that occurred while running the guest are accounted to 3914 * the guest. If vtime accounting is enabled, accounting uses 3915 * TB rather than ticks, so it can be done without enabling 3916 * interrupts here, which has the problem that it accounts 3917 * interrupt processing overhead to the host. 
3918 */ 3919 local_irq_disable(); 3920 } 3921 vtime_account_guest_exit(); 3922 3923 local_irq_enable(); 3924 3925 /* Let secondaries go back to the offline loop */ 3926 for (i = 0; i < controlled_threads; ++i) { 3927 kvmppc_release_hwthread(pcpu + i); 3928 if (sip && sip->napped[i]) 3929 kvmppc_ipi_thread(pcpu + i); 3930 } 3931 3932 spin_unlock(&vc->lock); 3933 3934 /* make sure updates to secondary vcpu structs are visible now */ 3935 smp_mb(); 3936 3937 preempt_enable(); 3938 3939 for (sub = 0; sub < core_info.n_subcores; ++sub) { 3940 pvc = core_info.vc[sub]; 3941 post_guest_process(pvc, pvc == vc); 3942 } 3943 3944 spin_lock(&vc->lock); 3945 3946 out: 3947 vc->vcore_state = VCORE_INACTIVE; 3948 trace_kvmppc_run_core(vc, 1); 3949 } 3950 3951 static inline bool hcall_is_xics(unsigned long req) 3952 { 3953 return req == H_EOI || req == H_CPPR || req == H_IPI || 3954 req == H_IPOLL || req == H_XIRR || req == H_XIRR_X; 3955 } 3956 3957 static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu) 3958 { 3959 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; 3960 if (lp) { 3961 u32 yield_count = be32_to_cpu(lp->yield_count) + 1; 3962 lp->yield_count = cpu_to_be32(yield_count); 3963 vcpu->arch.vpa.dirty = 1; 3964 } 3965 } 3966 3967 /* call our hypervisor to load up HV regs and go */ 3968 static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb) 3969 { 3970 struct kvmppc_vcore *vc = vcpu->arch.vcore; 3971 unsigned long host_psscr; 3972 unsigned long msr; 3973 struct hv_guest_state hvregs; 3974 struct p9_host_os_sprs host_os_sprs; 3975 s64 dec; 3976 int trap; 3977 3978 msr = mfmsr(); 3979 3980 save_p9_host_os_sprs(&host_os_sprs); 3981 3982 /* 3983 * We need to save and restore the guest visible part of the 3984 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor 3985 * doesn't do this for us. Note only required if pseries since 3986 * this is done in kvmhv_vcpu_entry_p9() below otherwise. 3987 */ 3988 host_psscr = mfspr(SPRN_PSSCR_PR); 3989 3990 kvmppc_msr_hard_disable_set_facilities(vcpu, msr); 3991 if (lazy_irq_pending()) 3992 return 0; 3993 3994 if (unlikely(load_vcpu_state(vcpu, &host_os_sprs))) 3995 msr = mfmsr(); /* TM restore can update msr */ 3996 3997 if (vcpu->arch.psscr != host_psscr) 3998 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); 3999 4000 kvmhv_save_hv_regs(vcpu, &hvregs); 4001 hvregs.lpcr = lpcr; 4002 hvregs.amor = ~0; 4003 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; 4004 hvregs.version = HV_GUEST_STATE_VERSION; 4005 if (vcpu->arch.nested) { 4006 hvregs.lpid = vcpu->arch.nested->shadow_lpid; 4007 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; 4008 } else { 4009 hvregs.lpid = vcpu->kvm->arch.lpid; 4010 hvregs.vcpu_token = vcpu->vcpu_id; 4011 } 4012 hvregs.hdec_expiry = time_limit; 4013 4014 /* 4015 * When setting DEC, we must always deal with irq_work_raise 4016 * via NMI vs setting DEC. The problem occurs right as we 4017 * switch into guest mode if a NMI hits and sets pending work 4018 * and sets DEC, then that will apply to the guest and not 4019 * bring us back to the host. 4020 * 4021 * irq_work_raise could check a flag (or possibly LPCR[HDICE] 4022 * for example) and set HDEC to 1? That wouldn't solve the 4023 * nested hv case which needs to abort the hcall or zero the 4024 * time limit. 4025 * 4026 * XXX: Another day's problem. 
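	 *
	 * For now, the mtspr below simply programs the guest decrementer
	 * with the time remaining until the guest's expiry, converted to
	 * the host timebase:
	 *
	 *	DEC = dec_expires (in host TB) - current TB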
4027 */ 4028 mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb); 4029 4030 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); 4031 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); 4032 switch_pmu_to_guest(vcpu, &host_os_sprs); 4033 accumulate_time(vcpu, &vcpu->arch.in_guest); 4034 trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs), 4035 __pa(&vcpu->arch.regs)); 4036 accumulate_time(vcpu, &vcpu->arch.guest_exit); 4037 kvmhv_restore_hv_return_state(vcpu, &hvregs); 4038 switch_pmu_to_host(vcpu, &host_os_sprs); 4039 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; 4040 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); 4041 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); 4042 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); 4043 4044 store_vcpu_state(vcpu); 4045 4046 dec = mfspr(SPRN_DEC); 4047 if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ 4048 dec = (s32) dec; 4049 *tb = mftb(); 4050 vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset); 4051 4052 timer_rearm_host_dec(*tb); 4053 4054 restore_p9_host_os_sprs(vcpu, &host_os_sprs); 4055 if (vcpu->arch.psscr != host_psscr) 4056 mtspr(SPRN_PSSCR_PR, host_psscr); 4057 4058 return trap; 4059 } 4060 4061 /* 4062 * Guest entry for POWER9 and later CPUs. 4063 */ 4064 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, 4065 unsigned long lpcr, u64 *tb) 4066 { 4067 struct kvm *kvm = vcpu->kvm; 4068 struct kvm_nested_guest *nested = vcpu->arch.nested; 4069 u64 next_timer; 4070 int trap; 4071 4072 next_timer = timer_get_next_tb(); 4073 if (*tb >= next_timer) 4074 return BOOK3S_INTERRUPT_HV_DECREMENTER; 4075 if (next_timer < time_limit) 4076 time_limit = next_timer; 4077 else if (*tb >= time_limit) /* nested time limit */ 4078 return BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER; 4079 4080 vcpu->arch.ceded = 0; 4081 4082 vcpu_vpa_increment_dispatch(vcpu); 4083 4084 if (kvmhv_on_pseries()) { 4085 trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); 4086 4087 /* H_CEDE has to be handled now, not later */ 4088 if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested && 4089 kvmppc_get_gpr(vcpu, 3) == H_CEDE) { 4090 kvmppc_cede(vcpu); 4091 kvmppc_set_gpr(vcpu, 3, 0); 4092 trap = 0; 4093 } 4094 4095 } else if (nested) { 4096 __this_cpu_write(cpu_in_guest, kvm); 4097 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); 4098 __this_cpu_write(cpu_in_guest, NULL); 4099 4100 } else { 4101 kvmppc_xive_push_vcpu(vcpu); 4102 4103 __this_cpu_write(cpu_in_guest, kvm); 4104 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); 4105 __this_cpu_write(cpu_in_guest, NULL); 4106 4107 if (trap == BOOK3S_INTERRUPT_SYSCALL && 4108 !(vcpu->arch.shregs.msr & MSR_PR)) { 4109 unsigned long req = kvmppc_get_gpr(vcpu, 3); 4110 4111 /* 4112 * XIVE rearm and XICS hcalls must be handled 4113 * before xive context is pulled (is this 4114 * true?) 4115 */ 4116 if (req == H_CEDE) { 4117 /* H_CEDE has to be handled now */ 4118 kvmppc_cede(vcpu); 4119 if (!kvmppc_xive_rearm_escalation(vcpu)) { 4120 /* 4121 * Pending escalation so abort 4122 * the cede. 4123 */ 4124 vcpu->arch.ceded = 0; 4125 } 4126 kvmppc_set_gpr(vcpu, 3, 0); 4127 trap = 0; 4128 4129 } else if (req == H_ENTER_NESTED) { 4130 /* 4131 * L2 should not run with the L1 4132 * context so rearm and pull it. 4133 */ 4134 if (!kvmppc_xive_rearm_escalation(vcpu)) { 4135 /* 4136 * Pending escalation so abort 4137 * H_ENTER_NESTED. 
4138 */ 4139 kvmppc_set_gpr(vcpu, 3, 0); 4140 trap = 0; 4141 } 4142 4143 } else if (hcall_is_xics(req)) { 4144 int ret; 4145 4146 ret = kvmppc_xive_xics_hcall(vcpu, req); 4147 if (ret != H_TOO_HARD) { 4148 kvmppc_set_gpr(vcpu, 3, ret); 4149 trap = 0; 4150 } 4151 } 4152 } 4153 kvmppc_xive_pull_vcpu(vcpu); 4154 4155 if (kvm_is_radix(kvm)) 4156 vcpu->arch.slb_max = 0; 4157 } 4158 4159 vcpu_vpa_increment_dispatch(vcpu); 4160 4161 return trap; 4162 } 4163 4164 /* 4165 * Wait for some other vcpu thread to execute us, and 4166 * wake us up when we need to handle something in the host. 4167 */ 4168 static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc, 4169 struct kvm_vcpu *vcpu, int wait_state) 4170 { 4171 DEFINE_WAIT(wait); 4172 4173 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); 4174 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { 4175 spin_unlock(&vc->lock); 4176 schedule(); 4177 spin_lock(&vc->lock); 4178 } 4179 finish_wait(&vcpu->arch.cpu_run, &wait); 4180 } 4181 4182 static void grow_halt_poll_ns(struct kvmppc_vcore *vc) 4183 { 4184 if (!halt_poll_ns_grow) 4185 return; 4186 4187 vc->halt_poll_ns *= halt_poll_ns_grow; 4188 if (vc->halt_poll_ns < halt_poll_ns_grow_start) 4189 vc->halt_poll_ns = halt_poll_ns_grow_start; 4190 } 4191 4192 static void shrink_halt_poll_ns(struct kvmppc_vcore *vc) 4193 { 4194 if (halt_poll_ns_shrink == 0) 4195 vc->halt_poll_ns = 0; 4196 else 4197 vc->halt_poll_ns /= halt_poll_ns_shrink; 4198 } 4199 4200 #ifdef CONFIG_KVM_XICS 4201 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) 4202 { 4203 if (!xics_on_xive()) 4204 return false; 4205 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < 4206 vcpu->arch.xive_saved_state.cppr; 4207 } 4208 #else 4209 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) 4210 { 4211 return false; 4212 } 4213 #endif /* CONFIG_KVM_XICS */ 4214 4215 static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu) 4216 { 4217 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || 4218 kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu)) 4219 return true; 4220 4221 return false; 4222 } 4223 4224 static bool kvmppc_vcpu_check_block(struct kvm_vcpu *vcpu) 4225 { 4226 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) 4227 return true; 4228 return false; 4229 } 4230 4231 /* 4232 * Check to see if any of the runnable vcpus on the vcore have pending 4233 * exceptions or are no longer ceded 4234 */ 4235 static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc) 4236 { 4237 struct kvm_vcpu *vcpu; 4238 int i; 4239 4240 for_each_runnable_thread(i, vcpu, vc) { 4241 if (kvmppc_vcpu_check_block(vcpu)) 4242 return 1; 4243 } 4244 4245 return 0; 4246 } 4247 4248 /* 4249 * All the vcpus in this vcore are idle, so wait for a decrementer 4250 * or external interrupt to one of the vcpus. vc->lock is held. 
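 *
 * The function below first busy-polls for up to vc->halt_poll_ns
 * (VCORE_POLLING), re-checking kvmppc_vcore_check_block(), and only
 * then actually sleeps (VCORE_SLEEPING).  Afterwards the poll window
 * is grown or shrunk depending on how long the vcore ended up blocked.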
4251 */ 4252 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) 4253 { 4254 ktime_t cur, start_poll, start_wait; 4255 int do_sleep = 1; 4256 u64 block_ns; 4257 4258 WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); 4259 4260 /* Poll for pending exceptions and ceded state */ 4261 cur = start_poll = ktime_get(); 4262 if (vc->halt_poll_ns) { 4263 ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); 4264 ++vc->runner->stat.generic.halt_attempted_poll; 4265 4266 vc->vcore_state = VCORE_POLLING; 4267 spin_unlock(&vc->lock); 4268 4269 do { 4270 if (kvmppc_vcore_check_block(vc)) { 4271 do_sleep = 0; 4272 break; 4273 } 4274 cur = ktime_get(); 4275 } while (kvm_vcpu_can_poll(cur, stop)); 4276 4277 spin_lock(&vc->lock); 4278 vc->vcore_state = VCORE_INACTIVE; 4279 4280 if (!do_sleep) { 4281 ++vc->runner->stat.generic.halt_successful_poll; 4282 goto out; 4283 } 4284 } 4285 4286 prepare_to_rcuwait(&vc->wait); 4287 set_current_state(TASK_INTERRUPTIBLE); 4288 if (kvmppc_vcore_check_block(vc)) { 4289 finish_rcuwait(&vc->wait); 4290 do_sleep = 0; 4291 /* If we polled, count this as a successful poll */ 4292 if (vc->halt_poll_ns) 4293 ++vc->runner->stat.generic.halt_successful_poll; 4294 goto out; 4295 } 4296 4297 start_wait = ktime_get(); 4298 4299 vc->vcore_state = VCORE_SLEEPING; 4300 trace_kvmppc_vcore_blocked(vc->runner, 0); 4301 spin_unlock(&vc->lock); 4302 schedule(); 4303 finish_rcuwait(&vc->wait); 4304 spin_lock(&vc->lock); 4305 vc->vcore_state = VCORE_INACTIVE; 4306 trace_kvmppc_vcore_blocked(vc->runner, 1); 4307 ++vc->runner->stat.halt_successful_wait; 4308 4309 cur = ktime_get(); 4310 4311 out: 4312 block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll); 4313 4314 /* Attribute wait time */ 4315 if (do_sleep) { 4316 vc->runner->stat.generic.halt_wait_ns += 4317 ktime_to_ns(cur) - ktime_to_ns(start_wait); 4318 KVM_STATS_LOG_HIST_UPDATE( 4319 vc->runner->stat.generic.halt_wait_hist, 4320 ktime_to_ns(cur) - ktime_to_ns(start_wait)); 4321 /* Attribute failed poll time */ 4322 if (vc->halt_poll_ns) { 4323 vc->runner->stat.generic.halt_poll_fail_ns += 4324 ktime_to_ns(start_wait) - 4325 ktime_to_ns(start_poll); 4326 KVM_STATS_LOG_HIST_UPDATE( 4327 vc->runner->stat.generic.halt_poll_fail_hist, 4328 ktime_to_ns(start_wait) - 4329 ktime_to_ns(start_poll)); 4330 } 4331 } else { 4332 /* Attribute successful poll time */ 4333 if (vc->halt_poll_ns) { 4334 vc->runner->stat.generic.halt_poll_success_ns += 4335 ktime_to_ns(cur) - 4336 ktime_to_ns(start_poll); 4337 KVM_STATS_LOG_HIST_UPDATE( 4338 vc->runner->stat.generic.halt_poll_success_hist, 4339 ktime_to_ns(cur) - ktime_to_ns(start_poll)); 4340 } 4341 } 4342 4343 /* Adjust poll time */ 4344 if (halt_poll_ns) { 4345 if (block_ns <= vc->halt_poll_ns) 4346 ; 4347 /* We slept and blocked for longer than the max halt time */ 4348 else if (vc->halt_poll_ns && block_ns > halt_poll_ns) 4349 shrink_halt_poll_ns(vc); 4350 /* We slept and our poll time is too small */ 4351 else if (vc->halt_poll_ns < halt_poll_ns && 4352 block_ns < halt_poll_ns) 4353 grow_halt_poll_ns(vc); 4354 if (vc->halt_poll_ns > halt_poll_ns) 4355 vc->halt_poll_ns = halt_poll_ns; 4356 } else 4357 vc->halt_poll_ns = 0; 4358 4359 trace_kvmppc_vcore_wakeup(do_sleep, block_ns); 4360 } 4361 4362 /* 4363 * This never fails for a radix guest, as none of the operations it does 4364 * for a radix guest can fail or have a way to report failure. 
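 *
 * For an HPT guest it can fail, because kvmppc_hv_setup_htab_rma()
 * below may fail to allocate a hashed page table or to find usable
 * memory at guest physical address 0.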
4365 */ 4366 static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) 4367 { 4368 int r = 0; 4369 struct kvm *kvm = vcpu->kvm; 4370 4371 mutex_lock(&kvm->arch.mmu_setup_lock); 4372 if (!kvm->arch.mmu_ready) { 4373 if (!kvm_is_radix(kvm)) 4374 r = kvmppc_hv_setup_htab_rma(vcpu); 4375 if (!r) { 4376 if (cpu_has_feature(CPU_FTR_ARCH_300)) 4377 kvmppc_setup_partition_table(kvm); 4378 kvm->arch.mmu_ready = 1; 4379 } 4380 } 4381 mutex_unlock(&kvm->arch.mmu_setup_lock); 4382 return r; 4383 } 4384 4385 static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu) 4386 { 4387 struct kvm_run *run = vcpu->run; 4388 int n_ceded, i, r; 4389 struct kvmppc_vcore *vc; 4390 struct kvm_vcpu *v; 4391 4392 trace_kvmppc_run_vcpu_enter(vcpu); 4393 4394 run->exit_reason = 0; 4395 vcpu->arch.ret = RESUME_GUEST; 4396 vcpu->arch.trap = 0; 4397 kvmppc_update_vpas(vcpu); 4398 4399 /* 4400 * Synchronize with other threads in this virtual core 4401 */ 4402 vc = vcpu->arch.vcore; 4403 spin_lock(&vc->lock); 4404 vcpu->arch.ceded = 0; 4405 vcpu->arch.run_task = current; 4406 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); 4407 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; 4408 vcpu->arch.busy_preempt = TB_NIL; 4409 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); 4410 ++vc->n_runnable; 4411 4412 /* 4413 * This happens the first time this is called for a vcpu. 4414 * If the vcore is already running, we may be able to start 4415 * this thread straight away and have it join in. 4416 */ 4417 if (!signal_pending(current)) { 4418 if ((vc->vcore_state == VCORE_PIGGYBACK || 4419 vc->vcore_state == VCORE_RUNNING) && 4420 !VCORE_IS_EXITING(vc)) { 4421 kvmppc_create_dtl_entry(vcpu, vc); 4422 kvmppc_start_thread(vcpu, vc); 4423 trace_kvm_guest_enter(vcpu); 4424 } else if (vc->vcore_state == VCORE_SLEEPING) { 4425 rcuwait_wake_up(&vc->wait); 4426 } 4427 4428 } 4429 4430 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && 4431 !signal_pending(current)) { 4432 /* See if the MMU is ready to go */ 4433 if (!vcpu->kvm->arch.mmu_ready) { 4434 spin_unlock(&vc->lock); 4435 r = kvmhv_setup_mmu(vcpu); 4436 spin_lock(&vc->lock); 4437 if (r) { 4438 run->exit_reason = KVM_EXIT_FAIL_ENTRY; 4439 run->fail_entry. 
4440 hardware_entry_failure_reason = 0; 4441 vcpu->arch.ret = r; 4442 break; 4443 } 4444 } 4445 4446 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) 4447 kvmppc_vcore_end_preempt(vc); 4448 4449 if (vc->vcore_state != VCORE_INACTIVE) { 4450 kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); 4451 continue; 4452 } 4453 for_each_runnable_thread(i, v, vc) { 4454 kvmppc_core_prepare_to_enter(v); 4455 if (signal_pending(v->arch.run_task)) { 4456 kvmppc_remove_runnable(vc, v, mftb()); 4457 v->stat.signal_exits++; 4458 v->run->exit_reason = KVM_EXIT_INTR; 4459 v->arch.ret = -EINTR; 4460 wake_up(&v->arch.cpu_run); 4461 } 4462 } 4463 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) 4464 break; 4465 n_ceded = 0; 4466 for_each_runnable_thread(i, v, vc) { 4467 if (!kvmppc_vcpu_woken(v)) 4468 n_ceded += v->arch.ceded; 4469 else 4470 v->arch.ceded = 0; 4471 } 4472 vc->runner = vcpu; 4473 if (n_ceded == vc->n_runnable) { 4474 kvmppc_vcore_blocked(vc); 4475 } else if (need_resched()) { 4476 kvmppc_vcore_preempt(vc); 4477 /* Let something else run */ 4478 cond_resched_lock(&vc->lock); 4479 if (vc->vcore_state == VCORE_PREEMPT) 4480 kvmppc_vcore_end_preempt(vc); 4481 } else { 4482 kvmppc_run_core(vc); 4483 } 4484 vc->runner = NULL; 4485 } 4486 4487 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && 4488 (vc->vcore_state == VCORE_RUNNING || 4489 vc->vcore_state == VCORE_EXITING || 4490 vc->vcore_state == VCORE_PIGGYBACK)) 4491 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); 4492 4493 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) 4494 kvmppc_vcore_end_preempt(vc); 4495 4496 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { 4497 kvmppc_remove_runnable(vc, vcpu, mftb()); 4498 vcpu->stat.signal_exits++; 4499 run->exit_reason = KVM_EXIT_INTR; 4500 vcpu->arch.ret = -EINTR; 4501 } 4502 4503 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { 4504 /* Wake up some vcpu to run the core */ 4505 i = -1; 4506 v = next_runnable_thread(vc, &i); 4507 wake_up(&v->arch.cpu_run); 4508 } 4509 4510 trace_kvmppc_run_vcpu_exit(vcpu); 4511 spin_unlock(&vc->lock); 4512 return vcpu->arch.ret; 4513 } 4514 4515 int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, 4516 unsigned long lpcr) 4517 { 4518 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); 4519 struct kvm_run *run = vcpu->run; 4520 int trap, r, pcpu; 4521 int srcu_idx; 4522 struct kvmppc_vcore *vc; 4523 struct kvm *kvm = vcpu->kvm; 4524 struct kvm_nested_guest *nested = vcpu->arch.nested; 4525 unsigned long flags; 4526 u64 tb; 4527 4528 trace_kvmppc_run_vcpu_enter(vcpu); 4529 4530 run->exit_reason = 0; 4531 vcpu->arch.ret = RESUME_GUEST; 4532 vcpu->arch.trap = 0; 4533 4534 vc = vcpu->arch.vcore; 4535 vcpu->arch.ceded = 0; 4536 vcpu->arch.run_task = current; 4537 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; 4538 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; 4539 4540 /* See if the MMU is ready to go */ 4541 if (unlikely(!kvm->arch.mmu_ready)) { 4542 r = kvmhv_setup_mmu(vcpu); 4543 if (r) { 4544 run->exit_reason = KVM_EXIT_FAIL_ENTRY; 4545 run->fail_entry.hardware_entry_failure_reason = 0; 4546 vcpu->arch.ret = r; 4547 return r; 4548 } 4549 } 4550 4551 if (need_resched()) 4552 cond_resched(); 4553 4554 kvmppc_update_vpas(vcpu); 4555 4556 preempt_disable(); 4557 pcpu = smp_processor_id(); 4558 if (kvm_is_radix(kvm)) 4559 kvmppc_prepare_radix_vcpu(vcpu, pcpu); 4560 4561 /* flags save not required, but irq_pmu has no disable/enable API */ 4562 powerpc_local_irq_pmu_save(flags); 4563 4564 if (signal_pending(current)) 
4565 goto sigpend; 4566 if (need_resched() || !kvm->arch.mmu_ready) 4567 goto out; 4568 4569 vcpu->cpu = pcpu; 4570 vcpu->arch.thread_cpu = pcpu; 4571 vc->pcpu = pcpu; 4572 local_paca->kvm_hstate.kvm_vcpu = vcpu; 4573 local_paca->kvm_hstate.ptid = 0; 4574 local_paca->kvm_hstate.fake_suspend = 0; 4575 4576 /* 4577 * Orders set cpu/thread_cpu vs testing for pending interrupts and 4578 * doorbells below. The other side is when these fields are set vs 4579 * kvmppc_fast_vcpu_kick_hv reading the cpu/thread_cpu fields to 4580 * kick a vCPU to notice the pending interrupt. 4581 */ 4582 smp_mb(); 4583 4584 if (!nested) { 4585 kvmppc_core_prepare_to_enter(vcpu); 4586 if (vcpu->arch.shregs.msr & MSR_EE) { 4587 if (xive_interrupt_pending(vcpu)) 4588 kvmppc_inject_interrupt_hv(vcpu, 4589 BOOK3S_INTERRUPT_EXTERNAL, 0); 4590 } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, 4591 &vcpu->arch.pending_exceptions)) { 4592 lpcr |= LPCR_MER; 4593 } 4594 } else if (vcpu->arch.pending_exceptions || 4595 vcpu->arch.doorbell_request || 4596 xive_interrupt_pending(vcpu)) { 4597 vcpu->arch.ret = RESUME_HOST; 4598 goto out; 4599 } 4600 4601 if (vcpu->arch.timer_running) { 4602 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 4603 vcpu->arch.timer_running = 0; 4604 } 4605 4606 tb = mftb(); 4607 4608 __kvmppc_create_dtl_entry(vcpu, pcpu, tb + vc->tb_offset, 0); 4609 4610 trace_kvm_guest_enter(vcpu); 4611 4612 guest_enter_irqoff(); 4613 4614 srcu_idx = srcu_read_lock(&kvm->srcu); 4615 4616 this_cpu_disable_ftrace(); 4617 4618 /* Tell lockdep that we're about to enable interrupts */ 4619 trace_hardirqs_on(); 4620 4621 trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb); 4622 vcpu->arch.trap = trap; 4623 4624 trace_hardirqs_off(); 4625 4626 this_cpu_enable_ftrace(); 4627 4628 srcu_read_unlock(&kvm->srcu, srcu_idx); 4629 4630 set_irq_happened(trap); 4631 4632 vcpu->cpu = -1; 4633 vcpu->arch.thread_cpu = -1; 4634 4635 context_tracking_guest_exit(); 4636 if (!vtime_accounting_enabled_this_cpu()) { 4637 powerpc_local_irq_pmu_restore(flags); 4638 /* 4639 * Service IRQs here before vtime_account_guest_exit() so any 4640 * ticks that occurred while running the guest are accounted to 4641 * the guest. If vtime accounting is enabled, accounting uses 4642 * TB rather than ticks, so it can be done without enabling 4643 * interrupts here, which has the problem that it accounts 4644 * interrupt processing overhead to the host. 
4645 */ 4646 powerpc_local_irq_pmu_save(flags); 4647 } 4648 vtime_account_guest_exit(); 4649 4650 powerpc_local_irq_pmu_restore(flags); 4651 4652 preempt_enable(); 4653 4654 /* 4655 * cancel pending decrementer exception if DEC is now positive, or if 4656 * entering a nested guest in which case the decrementer is now owned 4657 * by L2 and the L1 decrementer is provided in hdec_expires 4658 */ 4659 if (kvmppc_core_pending_dec(vcpu) && 4660 ((tb < kvmppc_dec_expires_host_tb(vcpu)) || 4661 (trap == BOOK3S_INTERRUPT_SYSCALL && 4662 kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED))) 4663 kvmppc_core_dequeue_dec(vcpu); 4664 4665 trace_kvm_guest_exit(vcpu); 4666 r = RESUME_GUEST; 4667 if (trap) { 4668 if (!nested) 4669 r = kvmppc_handle_exit_hv(vcpu, current); 4670 else 4671 r = kvmppc_handle_nested_exit(vcpu); 4672 } 4673 vcpu->arch.ret = r; 4674 4675 if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) { 4676 kvmppc_set_timer(vcpu); 4677 4678 prepare_to_rcuwait(wait); 4679 for (;;) { 4680 set_current_state(TASK_INTERRUPTIBLE); 4681 if (signal_pending(current)) { 4682 vcpu->stat.signal_exits++; 4683 run->exit_reason = KVM_EXIT_INTR; 4684 vcpu->arch.ret = -EINTR; 4685 break; 4686 } 4687 4688 if (kvmppc_vcpu_check_block(vcpu)) 4689 break; 4690 4691 trace_kvmppc_vcore_blocked(vcpu, 0); 4692 schedule(); 4693 trace_kvmppc_vcore_blocked(vcpu, 1); 4694 } 4695 finish_rcuwait(wait); 4696 } 4697 vcpu->arch.ceded = 0; 4698 4699 done: 4700 trace_kvmppc_run_vcpu_exit(vcpu); 4701 4702 return vcpu->arch.ret; 4703 4704 sigpend: 4705 vcpu->stat.signal_exits++; 4706 run->exit_reason = KVM_EXIT_INTR; 4707 vcpu->arch.ret = -EINTR; 4708 out: 4709 vcpu->cpu = -1; 4710 vcpu->arch.thread_cpu = -1; 4711 powerpc_local_irq_pmu_restore(flags); 4712 preempt_enable(); 4713 goto done; 4714 } 4715 4716 static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) 4717 { 4718 struct kvm_run *run = vcpu->run; 4719 int r; 4720 int srcu_idx; 4721 struct kvm *kvm; 4722 unsigned long msr; 4723 4724 start_timing(vcpu, &vcpu->arch.vcpu_entry); 4725 4726 if (!vcpu->arch.sane) { 4727 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 4728 return -EINVAL; 4729 } 4730 4731 /* No need to go into the guest when all we'll do is come back out */ 4732 if (signal_pending(current)) { 4733 run->exit_reason = KVM_EXIT_INTR; 4734 return -EINTR; 4735 } 4736 4737 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 4738 /* 4739 * Don't allow entry with a suspended transaction, because 4740 * the guest entry/exit code will lose it. 4741 */ 4742 if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && 4743 (current->thread.regs->msr & MSR_TM)) { 4744 if (MSR_TM_ACTIVE(current->thread.regs->msr)) { 4745 run->exit_reason = KVM_EXIT_FAIL_ENTRY; 4746 run->fail_entry.hardware_entry_failure_reason = 0; 4747 return -EINVAL; 4748 } 4749 } 4750 #endif 4751 4752 /* 4753 * Force online to 1 for the sake of old userspace which doesn't 4754 * set it. 4755 */ 4756 if (!vcpu->arch.online) { 4757 atomic_inc(&vcpu->arch.vcore->online_count); 4758 vcpu->arch.online = 1; 4759 } 4760 4761 kvmppc_core_prepare_to_enter(vcpu); 4762 4763 kvm = vcpu->kvm; 4764 atomic_inc(&kvm->arch.vcpus_running); 4765 /* Order vcpus_running vs. 
mmu_ready, see kvmppc_alloc_reset_hpt */ 4766 smp_mb(); 4767 4768 msr = 0; 4769 if (IS_ENABLED(CONFIG_PPC_FPU)) 4770 msr |= MSR_FP; 4771 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 4772 msr |= MSR_VEC; 4773 if (cpu_has_feature(CPU_FTR_VSX)) 4774 msr |= MSR_VSX; 4775 if ((cpu_has_feature(CPU_FTR_TM) || 4776 cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) && 4777 (vcpu->arch.hfscr & HFSCR_TM)) 4778 msr |= MSR_TM; 4779 msr = msr_check_and_set(msr); 4780 4781 kvmppc_save_user_regs(); 4782 4783 kvmppc_save_current_sprs(); 4784 4785 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 4786 vcpu->arch.waitp = &vcpu->arch.vcore->wait; 4787 vcpu->arch.pgdir = kvm->mm->pgd; 4788 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; 4789 4790 do { 4791 accumulate_time(vcpu, &vcpu->arch.guest_entry); 4792 if (cpu_has_feature(CPU_FTR_ARCH_300)) 4793 r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, 4794 vcpu->arch.vcore->lpcr); 4795 else 4796 r = kvmppc_run_vcpu(vcpu); 4797 4798 if (run->exit_reason == KVM_EXIT_PAPR_HCALL) { 4799 accumulate_time(vcpu, &vcpu->arch.hcall); 4800 4801 if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) { 4802 /* 4803 * These should have been caught reflected 4804 * into the guest by now. Final sanity check: 4805 * don't allow userspace to execute hcalls in 4806 * the hypervisor. 4807 */ 4808 r = RESUME_GUEST; 4809 continue; 4810 } 4811 trace_kvm_hcall_enter(vcpu); 4812 r = kvmppc_pseries_do_hcall(vcpu); 4813 trace_kvm_hcall_exit(vcpu, r); 4814 kvmppc_core_prepare_to_enter(vcpu); 4815 } else if (r == RESUME_PAGE_FAULT) { 4816 accumulate_time(vcpu, &vcpu->arch.pg_fault); 4817 srcu_idx = srcu_read_lock(&kvm->srcu); 4818 r = kvmppc_book3s_hv_page_fault(vcpu, 4819 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); 4820 srcu_read_unlock(&kvm->srcu, srcu_idx); 4821 } else if (r == RESUME_PASSTHROUGH) { 4822 if (WARN_ON(xics_on_xive())) 4823 r = H_SUCCESS; 4824 else 4825 r = kvmppc_xics_rm_complete(vcpu, 0); 4826 } 4827 } while (is_kvmppc_resume_guest(r)); 4828 accumulate_time(vcpu, &vcpu->arch.vcpu_exit); 4829 4830 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; 4831 atomic_dec(&kvm->arch.vcpus_running); 4832 4833 srr_regs_clobbered(); 4834 4835 end_timing(vcpu); 4836 4837 return r; 4838 } 4839 4840 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, 4841 int shift, int sllp) 4842 { 4843 (*sps)->page_shift = shift; 4844 (*sps)->slb_enc = sllp; 4845 (*sps)->enc[0].page_shift = shift; 4846 (*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift); 4847 /* 4848 * Add 16MB MPSS support (may get filtered out by userspace) 4849 */ 4850 if (shift != 24) { 4851 int penc = kvmppc_pgsize_lp_encoding(shift, 24); 4852 if (penc != -1) { 4853 (*sps)->enc[1].page_shift = 24; 4854 (*sps)->enc[1].pte_enc = penc; 4855 } 4856 } 4857 (*sps)++; 4858 } 4859 4860 static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, 4861 struct kvm_ppc_smmu_info *info) 4862 { 4863 struct kvm_ppc_one_seg_page_size *sps; 4864 4865 /* 4866 * POWER7, POWER8 and POWER9 all support 32 storage keys for data. 4867 * POWER7 doesn't support keys for instruction accesses, 4868 * POWER8 and POWER9 do. 4869 */ 4870 info->data_keys = 32; 4871 info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 
32 : 0; 4872 4873 /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */ 4874 info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS; 4875 info->slb_size = 32; 4876 4877 /* We only support these sizes for now, and no multi-size segments */ 4878 sps = &info->sps[0]; 4879 kvmppc_add_seg_page_size(&sps, 12, 0); 4880 kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01); 4881 kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L); 4882 4883 /* If running as a nested hypervisor, we don't support HPT guests */ 4884 if (kvmhv_on_pseries()) 4885 info->flags |= KVM_PPC_NO_HASH; 4886 4887 return 0; 4888 } 4889 4890 /* 4891 * Get (and clear) the dirty memory log for a memory slot. 4892 */ 4893 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, 4894 struct kvm_dirty_log *log) 4895 { 4896 struct kvm_memslots *slots; 4897 struct kvm_memory_slot *memslot; 4898 int r; 4899 unsigned long n, i; 4900 unsigned long *buf, *p; 4901 struct kvm_vcpu *vcpu; 4902 4903 mutex_lock(&kvm->slots_lock); 4904 4905 r = -EINVAL; 4906 if (log->slot >= KVM_USER_MEM_SLOTS) 4907 goto out; 4908 4909 slots = kvm_memslots(kvm); 4910 memslot = id_to_memslot(slots, log->slot); 4911 r = -ENOENT; 4912 if (!memslot || !memslot->dirty_bitmap) 4913 goto out; 4914 4915 /* 4916 * Use second half of bitmap area because both HPT and radix 4917 * accumulate bits in the first half. 4918 */ 4919 n = kvm_dirty_bitmap_bytes(memslot); 4920 buf = memslot->dirty_bitmap + n / sizeof(long); 4921 memset(buf, 0, n); 4922 4923 if (kvm_is_radix(kvm)) 4924 r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf); 4925 else 4926 r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf); 4927 if (r) 4928 goto out; 4929 4930 /* 4931 * We accumulate dirty bits in the first half of the 4932 * memslot's dirty_bitmap area, for when pages are paged 4933 * out or modified by the host directly. Pick up these 4934 * bits and add them to the map.
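	 *
	 * Illustrative layout of memslot->dirty_bitmap as used here:
	 *
	 *	first half  : bits set by the host (paging, direct writes)
	 *	second half : buf, bits harvested above and reported to
	 *	              userspace
	 *
	 * The loop below moves the first-half bits into buf, clearing
	 * them via xchg() as it goes.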
4935 */ 4936 p = memslot->dirty_bitmap; 4937 for (i = 0; i < n / sizeof(long); ++i) 4938 buf[i] |= xchg(&p[i], 0); 4939 4940 /* Harvest dirty bits from VPA and DTL updates */ 4941 /* Note: we never modify the SLB shadow buffer areas */ 4942 kvm_for_each_vcpu(i, vcpu, kvm) { 4943 spin_lock(&vcpu->arch.vpa_update_lock); 4944 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); 4945 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); 4946 spin_unlock(&vcpu->arch.vpa_update_lock); 4947 } 4948 4949 r = -EFAULT; 4950 if (copy_to_user(log->dirty_bitmap, buf, n)) 4951 goto out; 4952 4953 r = 0; 4954 out: 4955 mutex_unlock(&kvm->slots_lock); 4956 return r; 4957 } 4958 4959 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot) 4960 { 4961 vfree(slot->arch.rmap); 4962 slot->arch.rmap = NULL; 4963 } 4964 4965 static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, 4966 const struct kvm_memory_slot *old, 4967 struct kvm_memory_slot *new, 4968 enum kvm_mr_change change) 4969 { 4970 if (change == KVM_MR_CREATE) { 4971 unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap)); 4972 4973 if ((size >> PAGE_SHIFT) > totalram_pages()) 4974 return -ENOMEM; 4975 4976 new->arch.rmap = vzalloc(size); 4977 if (!new->arch.rmap) 4978 return -ENOMEM; 4979 } else if (change != KVM_MR_DELETE) { 4980 new->arch.rmap = old->arch.rmap; 4981 } 4982 4983 return 0; 4984 } 4985 4986 static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, 4987 struct kvm_memory_slot *old, 4988 const struct kvm_memory_slot *new, 4989 enum kvm_mr_change change) 4990 { 4991 /* 4992 * If we are creating or modifying a memslot, it might make 4993 * some address that was previously cached as emulated 4994 * MMIO be no longer emulated MMIO, so invalidate 4995 * all the caches of emulated MMIO translations. 4996 */ 4997 if (change != KVM_MR_DELETE) 4998 atomic64_inc(&kvm->arch.mmio_update); 4999 5000 /* 5001 * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels 5002 * have already called kvm_arch_flush_shadow_memslot() to 5003 * flush shadow mappings. For KVM_MR_CREATE we have no 5004 * previous mappings. So the only case to handle is 5005 * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit 5006 * has been changed. 5007 * For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES 5008 * to get rid of any THP PTEs in the partition-scoped page tables 5009 * so we can track dirtiness at the page level; we flush when 5010 * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to 5011 * using THP PTEs. 5012 */ 5013 if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) && 5014 ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) 5015 kvmppc_radix_flush_memslot(kvm, old); 5016 /* 5017 * If UV hasn't yet called H_SVM_INIT_START, don't register memslots. 5018 */ 5019 if (!kvm->arch.secure_guest) 5020 return; 5021 5022 switch (change) { 5023 case KVM_MR_CREATE: 5024 /* 5025 * @TODO kvmppc_uvmem_memslot_create() can fail and 5026 * return error. Fix this. 5027 */ 5028 kvmppc_uvmem_memslot_create(kvm, new); 5029 break; 5030 case KVM_MR_DELETE: 5031 kvmppc_uvmem_memslot_delete(kvm, old); 5032 break; 5033 default: 5034 /* TODO: Handle KVM_MR_MOVE */ 5035 break; 5036 } 5037 } 5038 5039 /* 5040 * Update LPCR values in kvm->arch and in vcores. 5041 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion 5042 * of kvm->arch.lpcr update). 
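 *
 * Only the bits set in 'mask' are modified; they are replaced by the
 * corresponding bits of 'lpcr'.  For example, as done further below:
 *
 *	kvmppc_update_lpcr(kvm, LPCR_GTSE, LPCR_GTSE);	- set GTSE
 *	kvmppc_update_lpcr(kvm, 0, LPCR_GTSE);		- clear GTSE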
5043 */ 5044 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) 5045 { 5046 long int i; 5047 u32 cores_done = 0; 5048 5049 if ((kvm->arch.lpcr & mask) == lpcr) 5050 return; 5051 5052 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; 5053 5054 for (i = 0; i < KVM_MAX_VCORES; ++i) { 5055 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; 5056 if (!vc) 5057 continue; 5058 5059 spin_lock(&vc->lock); 5060 vc->lpcr = (vc->lpcr & ~mask) | lpcr; 5061 verify_lpcr(kvm, vc->lpcr); 5062 spin_unlock(&vc->lock); 5063 if (++cores_done >= kvm->arch.online_vcores) 5064 break; 5065 } 5066 } 5067 5068 void kvmppc_setup_partition_table(struct kvm *kvm) 5069 { 5070 unsigned long dw0, dw1; 5071 5072 if (!kvm_is_radix(kvm)) { 5073 /* PS field - page size for VRMA */ 5074 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | 5075 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); 5076 /* HTABSIZE and HTABORG fields */ 5077 dw0 |= kvm->arch.sdr1; 5078 5079 /* Second dword as set by userspace */ 5080 dw1 = kvm->arch.process_table; 5081 } else { 5082 dw0 = PATB_HR | radix__get_tree_size() | 5083 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; 5084 dw1 = PATB_GR | kvm->arch.process_table; 5085 } 5086 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); 5087 } 5088 5089 /* 5090 * Set up HPT (hashed page table) and RMA (real-mode area). 5091 * Must be called with kvm->arch.mmu_setup_lock held. 5092 */ 5093 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) 5094 { 5095 int err = 0; 5096 struct kvm *kvm = vcpu->kvm; 5097 unsigned long hva; 5098 struct kvm_memory_slot *memslot; 5099 struct vm_area_struct *vma; 5100 unsigned long lpcr = 0, senc; 5101 unsigned long psize, porder; 5102 int srcu_idx; 5103 5104 /* Allocate hashed page table (if not done already) and reset it */ 5105 if (!kvm->arch.hpt.virt) { 5106 int order = KVM_DEFAULT_HPT_ORDER; 5107 struct kvm_hpt_info info; 5108 5109 err = kvmppc_allocate_hpt(&info, order); 5110 /* If we get here, it means userspace didn't specify a 5111 * size explicitly. So, try successively smaller 5112 * sizes if the default failed. 
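	 *
	 * For instance, if the order KVM_DEFAULT_HPT_ORDER allocation
	 * fails with -ENOMEM, the loop below retries with order - 1,
	 * order - 2, and so on, down to PPC_MIN_HPT_ORDER.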
*/ 5113 while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER) 5114 err = kvmppc_allocate_hpt(&info, order); 5115 5116 if (err < 0) { 5117 pr_err("KVM: Couldn't alloc HPT\n"); 5118 goto out; 5119 } 5120 5121 kvmppc_set_hpt(kvm, &info); 5122 } 5123 5124 /* Look up the memslot for guest physical address 0 */ 5125 srcu_idx = srcu_read_lock(&kvm->srcu); 5126 memslot = gfn_to_memslot(kvm, 0); 5127 5128 /* We must have some memory at 0 by now */ 5129 err = -EINVAL; 5130 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) 5131 goto out_srcu; 5132 5133 /* Look up the VMA for the start of this memory slot */ 5134 hva = memslot->userspace_addr; 5135 mmap_read_lock(kvm->mm); 5136 vma = vma_lookup(kvm->mm, hva); 5137 if (!vma || (vma->vm_flags & VM_IO)) 5138 goto up_out; 5139 5140 psize = vma_kernel_pagesize(vma); 5141 5142 mmap_read_unlock(kvm->mm); 5143 5144 /* We can handle 4k, 64k or 16M pages in the VRMA */ 5145 if (psize >= 0x1000000) 5146 psize = 0x1000000; 5147 else if (psize >= 0x10000) 5148 psize = 0x10000; 5149 else 5150 psize = 0x1000; 5151 porder = __ilog2(psize); 5152 5153 senc = slb_pgsize_encoding(psize); 5154 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | 5155 (VRMA_VSID << SLB_VSID_SHIFT_1T); 5156 /* Create HPTEs in the hash page table for the VRMA */ 5157 kvmppc_map_vrma(vcpu, memslot, porder); 5158 5159 /* Update VRMASD field in the LPCR */ 5160 if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 5161 /* the -4 is to account for senc values starting at 0x10 */ 5162 lpcr = senc << (LPCR_VRMASD_SH - 4); 5163 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); 5164 } 5165 5166 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ 5167 smp_wmb(); 5168 err = 0; 5169 out_srcu: 5170 srcu_read_unlock(&kvm->srcu, srcu_idx); 5171 out: 5172 return err; 5173 5174 up_out: 5175 mmap_read_unlock(kvm->mm); 5176 goto out_srcu; 5177 } 5178 5179 /* 5180 * Must be called with kvm->arch.mmu_setup_lock held and 5181 * mmu_ready = 0 and no vcpus running. 5182 */ 5183 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) 5184 { 5185 unsigned long lpcr, lpcr_mask; 5186 5187 if (nesting_enabled(kvm)) 5188 kvmhv_release_all_nested(kvm); 5189 kvmppc_rmap_reset(kvm); 5190 kvm->arch.process_table = 0; 5191 /* Mutual exclusion with kvm_unmap_gfn_range etc. */ 5192 spin_lock(&kvm->mmu_lock); 5193 kvm->arch.radix = 0; 5194 spin_unlock(&kvm->mmu_lock); 5195 kvmppc_free_radix(kvm); 5196 5197 lpcr = LPCR_VPM1; 5198 lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR; 5199 if (cpu_has_feature(CPU_FTR_ARCH_31)) 5200 lpcr_mask |= LPCR_HAIL; 5201 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); 5202 5203 return 0; 5204 } 5205 5206 /* 5207 * Must be called with kvm->arch.mmu_setup_lock held and 5208 * mmu_ready = 0 and no vcpus running. 5209 */ 5210 int kvmppc_switch_mmu_to_radix(struct kvm *kvm) 5211 { 5212 unsigned long lpcr, lpcr_mask; 5213 int err; 5214 5215 err = kvmppc_init_vm_radix(kvm); 5216 if (err) 5217 return err; 5218 kvmppc_rmap_reset(kvm); 5219 /* Mutual exclusion with kvm_unmap_gfn_range etc. 
*/ 5220 spin_lock(&kvm->mmu_lock); 5221 kvm->arch.radix = 1; 5222 spin_unlock(&kvm->mmu_lock); 5223 kvmppc_free_hpt(&kvm->arch.hpt); 5224 5225 lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR; 5226 lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR; 5227 if (cpu_has_feature(CPU_FTR_ARCH_31)) { 5228 lpcr_mask |= LPCR_HAIL; 5229 if (cpu_has_feature(CPU_FTR_HVMODE) && 5230 (kvm->arch.host_lpcr & LPCR_HAIL)) 5231 lpcr |= LPCR_HAIL; 5232 } 5233 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); 5234 5235 return 0; 5236 } 5237 5238 #ifdef CONFIG_KVM_XICS 5239 /* 5240 * Allocate a per-core structure for managing state about which cores are 5241 * running in the host versus the guest and for exchanging data between 5242 * real mode KVM and CPU running in the host. 5243 * This is only done for the first VM. 5244 * The allocated structure stays even if all VMs have stopped. 5245 * It is only freed when the kvm-hv module is unloaded. 5246 * It's OK for this routine to fail, we just don't support host 5247 * core operations like redirecting H_IPI wakeups. 5248 */ 5249 void kvmppc_alloc_host_rm_ops(void) 5250 { 5251 struct kvmppc_host_rm_ops *ops; 5252 unsigned long l_ops; 5253 int cpu, core; 5254 int size; 5255 5256 if (cpu_has_feature(CPU_FTR_ARCH_300)) 5257 return; 5258 5259 /* Not the first time here ? */ 5260 if (kvmppc_host_rm_ops_hv != NULL) 5261 return; 5262 5263 ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL); 5264 if (!ops) 5265 return; 5266 5267 size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core); 5268 ops->rm_core = kzalloc(size, GFP_KERNEL); 5269 5270 if (!ops->rm_core) { 5271 kfree(ops); 5272 return; 5273 } 5274 5275 cpus_read_lock(); 5276 5277 for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) { 5278 if (!cpu_online(cpu)) 5279 continue; 5280 5281 core = cpu >> threads_shift; 5282 ops->rm_core[core].rm_state.in_host = 1; 5283 } 5284 5285 ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; 5286 5287 /* 5288 * Make the contents of the kvmppc_host_rm_ops structure visible 5289 * to other CPUs before we assign it to the global variable. 5290 * Do an atomic assignment (no locks used here), but if someone 5291 * beats us to it, just free our copy and return. 
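	 *
	 * Roughly, the publication pattern below is:
	 *
	 *	smp_wmb();               - order the ops/rm_core stores above
	 *	if (cmpxchg64(&kvmppc_host_rm_ops_hv, 0, ops) != 0)
	 *		our copy lost the race: free it and return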
5292 */ 5293 smp_wmb(); 5294 l_ops = (unsigned long) ops; 5295 5296 if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) { 5297 cpus_read_unlock(); 5298 kfree(ops->rm_core); 5299 kfree(ops); 5300 return; 5301 } 5302 5303 cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE, 5304 "ppc/kvm_book3s:prepare", 5305 kvmppc_set_host_core, 5306 kvmppc_clear_host_core); 5307 cpus_read_unlock(); 5308 } 5309 5310 void kvmppc_free_host_rm_ops(void) 5311 { 5312 if (kvmppc_host_rm_ops_hv) { 5313 cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE); 5314 kfree(kvmppc_host_rm_ops_hv->rm_core); 5315 kfree(kvmppc_host_rm_ops_hv); 5316 kvmppc_host_rm_ops_hv = NULL; 5317 } 5318 } 5319 #endif 5320 5321 static int kvmppc_core_init_vm_hv(struct kvm *kvm) 5322 { 5323 unsigned long lpcr, lpid; 5324 int ret; 5325 5326 mutex_init(&kvm->arch.uvmem_lock); 5327 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); 5328 mutex_init(&kvm->arch.mmu_setup_lock); 5329 5330 /* Allocate the guest's logical partition ID */ 5331 5332 lpid = kvmppc_alloc_lpid(); 5333 if ((long)lpid < 0) 5334 return -ENOMEM; 5335 kvm->arch.lpid = lpid; 5336 5337 kvmppc_alloc_host_rm_ops(); 5338 5339 kvmhv_vm_nested_init(kvm); 5340 5341 /* 5342 * Since we don't flush the TLB when tearing down a VM, 5343 * and this lpid might have previously been used, 5344 * make sure we flush on each core before running the new VM. 5345 * On POWER9, the tlbie in mmu_partition_table_set_entry() 5346 * does this flush for us. 5347 */ 5348 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 5349 cpumask_setall(&kvm->arch.need_tlb_flush); 5350 5351 /* Start out with the default set of hcalls enabled */ 5352 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, 5353 sizeof(kvm->arch.enabled_hcalls)); 5354 5355 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 5356 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); 5357 5358 /* Init LPCR for virtual RMA mode */ 5359 if (cpu_has_feature(CPU_FTR_HVMODE)) { 5360 kvm->arch.host_lpid = mfspr(SPRN_LPID); 5361 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); 5362 lpcr &= LPCR_PECE | LPCR_LPES; 5363 } else { 5364 /* 5365 * The L2 LPES mode will be set by the L0 according to whether 5366 * or not it needs to take external interrupts in HV mode. 5367 */ 5368 lpcr = 0; 5369 } 5370 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | 5371 LPCR_VPM0 | LPCR_VPM1; 5372 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | 5373 (VRMA_VSID << SLB_VSID_SHIFT_1T); 5374 /* On POWER8 turn on online bit to enable PURR/SPURR */ 5375 if (cpu_has_feature(CPU_FTR_ARCH_207S)) 5376 lpcr |= LPCR_ONL; 5377 /* 5378 * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed) 5379 * Set HVICE bit to enable hypervisor virtualization interrupts. 5380 * Set HEIC to prevent OS interrupts to go to hypervisor (should 5381 * be unnecessary but better safe than sorry in case we re-enable 5382 * EE in HV mode with this LPCR still set) 5383 */ 5384 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 5385 lpcr &= ~LPCR_VPM0; 5386 lpcr |= LPCR_HVICE | LPCR_HEIC; 5387 5388 /* 5389 * If xive is enabled, we route 0x500 interrupts directly 5390 * to the guest. 5391 */ 5392 if (xics_on_xive()) 5393 lpcr |= LPCR_LPES; 5394 } 5395 5396 /* 5397 * If the host uses radix, the guest starts out as radix. 
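	 * Userspace can still switch the guest between radix and HPT
	 * later through the mmuv3 configuration ioctl handled by
	 * kvmhv_configure_mmu() below, subject to the checks made there.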
5398 */ 5399 if (radix_enabled()) { 5400 kvm->arch.radix = 1; 5401 kvm->arch.mmu_ready = 1; 5402 lpcr &= ~LPCR_VPM1; 5403 lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; 5404 if (cpu_has_feature(CPU_FTR_HVMODE) && 5405 cpu_has_feature(CPU_FTR_ARCH_31) && 5406 (kvm->arch.host_lpcr & LPCR_HAIL)) 5407 lpcr |= LPCR_HAIL; 5408 ret = kvmppc_init_vm_radix(kvm); 5409 if (ret) { 5410 kvmppc_free_lpid(kvm->arch.lpid); 5411 return ret; 5412 } 5413 kvmppc_setup_partition_table(kvm); 5414 } 5415 5416 verify_lpcr(kvm, lpcr); 5417 kvm->arch.lpcr = lpcr; 5418 5419 /* Initialization for future HPT resizes */ 5420 kvm->arch.resize_hpt = NULL; 5421 5422 /* 5423 * Work out how many sets the TLB has, for the use of 5424 * the TLB invalidation loop in book3s_hv_rmhandlers.S. 5425 */ 5426 if (cpu_has_feature(CPU_FTR_ARCH_31)) { 5427 /* 5428 * P10 will flush all the congruence class with a single tlbiel 5429 */ 5430 kvm->arch.tlb_sets = 1; 5431 } else if (radix_enabled()) 5432 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ 5433 else if (cpu_has_feature(CPU_FTR_ARCH_300)) 5434 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ 5435 else if (cpu_has_feature(CPU_FTR_ARCH_207S)) 5436 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ 5437 else 5438 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ 5439 5440 /* 5441 * Track that we now have a HV mode VM active. This blocks secondary 5442 * CPU threads from coming online. 5443 */ 5444 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 5445 kvm_hv_vm_activated(); 5446 5447 /* 5448 * Initialize smt_mode depending on processor. 5449 * POWER8 and earlier have to use "strict" threading, where 5450 * all vCPUs in a vcore have to run on the same (sub)core, 5451 * whereas on POWER9 the threads can each run a different 5452 * guest. 5453 */ 5454 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 5455 kvm->arch.smt_mode = threads_per_subcore; 5456 else 5457 kvm->arch.smt_mode = 1; 5458 kvm->arch.emul_smt_mode = 1; 5459 5460 return 0; 5461 } 5462 5463 static int kvmppc_arch_create_vm_debugfs_hv(struct kvm *kvm) 5464 { 5465 kvmppc_mmu_debugfs_init(kvm); 5466 if (radix_enabled()) 5467 kvmhv_radix_debugfs_init(kvm); 5468 return 0; 5469 } 5470 5471 static void kvmppc_free_vcores(struct kvm *kvm) 5472 { 5473 long int i; 5474 5475 for (i = 0; i < KVM_MAX_VCORES; ++i) 5476 kfree(kvm->arch.vcores[i]); 5477 kvm->arch.online_vcores = 0; 5478 } 5479 5480 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) 5481 { 5482 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 5483 kvm_hv_vm_deactivated(); 5484 5485 kvmppc_free_vcores(kvm); 5486 5487 5488 if (kvm_is_radix(kvm)) 5489 kvmppc_free_radix(kvm); 5490 else 5491 kvmppc_free_hpt(&kvm->arch.hpt); 5492 5493 /* Perform global invalidation and return lpid to the pool */ 5494 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 5495 if (nesting_enabled(kvm)) 5496 kvmhv_release_all_nested(kvm); 5497 kvm->arch.process_table = 0; 5498 if (kvm->arch.secure_guest) 5499 uv_svm_terminate(kvm->arch.lpid); 5500 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); 5501 } 5502 5503 kvmppc_free_lpid(kvm->arch.lpid); 5504 5505 kvmppc_free_pimap(kvm); 5506 } 5507 5508 /* We don't need to emulate any privileged instructions or dcbz */ 5509 static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu, 5510 unsigned int inst, int *advance) 5511 { 5512 return EMULATE_FAIL; 5513 } 5514 5515 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, 5516 ulong spr_val) 5517 { 5518 return EMULATE_FAIL; 5519 } 5520 5521 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, 5522 ulong 
*spr_val) 5523 { 5524 return EMULATE_FAIL; 5525 } 5526 5527 static int kvmppc_core_check_processor_compat_hv(void) 5528 { 5529 if (cpu_has_feature(CPU_FTR_HVMODE) && 5530 cpu_has_feature(CPU_FTR_ARCH_206)) 5531 return 0; 5532 5533 /* POWER9 in radix mode is capable of being a nested hypervisor. */ 5534 if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) 5535 return 0; 5536 5537 return -EIO; 5538 } 5539 5540 #ifdef CONFIG_KVM_XICS 5541 5542 void kvmppc_free_pimap(struct kvm *kvm) 5543 { 5544 kfree(kvm->arch.pimap); 5545 } 5546 5547 static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void) 5548 { 5549 return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL); 5550 } 5551 5552 static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) 5553 { 5554 struct irq_desc *desc; 5555 struct kvmppc_irq_map *irq_map; 5556 struct kvmppc_passthru_irqmap *pimap; 5557 struct irq_chip *chip; 5558 int i, rc = 0; 5559 struct irq_data *host_data; 5560 5561 if (!kvm_irq_bypass) 5562 return 1; 5563 5564 desc = irq_to_desc(host_irq); 5565 if (!desc) 5566 return -EIO; 5567 5568 mutex_lock(&kvm->lock); 5569 5570 pimap = kvm->arch.pimap; 5571 if (pimap == NULL) { 5572 /* First call, allocate structure to hold IRQ map */ 5573 pimap = kvmppc_alloc_pimap(); 5574 if (pimap == NULL) { 5575 mutex_unlock(&kvm->lock); 5576 return -ENOMEM; 5577 } 5578 kvm->arch.pimap = pimap; 5579 } 5580 5581 /* 5582 * For now, we only support interrupts for which the EOI operation 5583 * is an OPAL call followed by a write to XIRR, since that's 5584 * what our real-mode EOI code does, or a XIVE interrupt 5585 */ 5586 chip = irq_data_get_irq_chip(&desc->irq_data); 5587 if (!chip || !is_pnv_opal_msi(chip)) { 5588 pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n", 5589 host_irq, guest_gsi); 5590 mutex_unlock(&kvm->lock); 5591 return -ENOENT; 5592 } 5593 5594 /* 5595 * See if we already have an entry for this guest IRQ number. 5596 * If it's mapped to a hardware IRQ number, that's an error, 5597 * otherwise re-use this entry. 5598 */ 5599 for (i = 0; i < pimap->n_mapped; i++) { 5600 if (guest_gsi == pimap->mapped[i].v_hwirq) { 5601 if (pimap->mapped[i].r_hwirq) { 5602 mutex_unlock(&kvm->lock); 5603 return -EINVAL; 5604 } 5605 break; 5606 } 5607 } 5608 5609 if (i == KVMPPC_PIRQ_MAPPED) { 5610 mutex_unlock(&kvm->lock); 5611 return -EAGAIN; /* table is full */ 5612 } 5613 5614 irq_map = &pimap->mapped[i]; 5615 5616 irq_map->v_hwirq = guest_gsi; 5617 irq_map->desc = desc; 5618 5619 /* 5620 * Order the above two stores before the next to serialize with 5621 * the KVM real mode handler. 5622 */ 5623 smp_wmb(); 5624 5625 /* 5626 * The 'host_irq' number is mapped in the PCI-MSI domain but 5627 * the underlying calls, which will EOI the interrupt in real 5628 * mode, need an HW IRQ number mapped in the XICS IRQ domain. 
5629 */ 5630 host_data = irq_domain_get_irq_data(irq_get_default_host(), host_irq); 5631 irq_map->r_hwirq = (unsigned int)irqd_to_hwirq(host_data); 5632 5633 if (i == pimap->n_mapped) 5634 pimap->n_mapped++; 5635 5636 if (xics_on_xive()) 5637 rc = kvmppc_xive_set_mapped(kvm, guest_gsi, host_irq); 5638 else 5639 kvmppc_xics_set_mapped(kvm, guest_gsi, irq_map->r_hwirq); 5640 if (rc) 5641 irq_map->r_hwirq = 0; 5642 5643 mutex_unlock(&kvm->lock); 5644 5645 return 0; 5646 } 5647 5648 static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) 5649 { 5650 struct irq_desc *desc; 5651 struct kvmppc_passthru_irqmap *pimap; 5652 int i, rc = 0; 5653 5654 if (!kvm_irq_bypass) 5655 return 0; 5656 5657 desc = irq_to_desc(host_irq); 5658 if (!desc) 5659 return -EIO; 5660 5661 mutex_lock(&kvm->lock); 5662 if (!kvm->arch.pimap) 5663 goto unlock; 5664 5665 pimap = kvm->arch.pimap; 5666 5667 for (i = 0; i < pimap->n_mapped; i++) { 5668 if (guest_gsi == pimap->mapped[i].v_hwirq) 5669 break; 5670 } 5671 5672 if (i == pimap->n_mapped) { 5673 mutex_unlock(&kvm->lock); 5674 return -ENODEV; 5675 } 5676 5677 if (xics_on_xive()) 5678 rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, host_irq); 5679 else 5680 kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); 5681 5682 /* invalidate the entry (what to do on error from the above ?) */ 5683 pimap->mapped[i].r_hwirq = 0; 5684 5685 /* 5686 * We don't free this structure even when the count goes to 5687 * zero. The structure is freed when we destroy the VM. 5688 */ 5689 unlock: 5690 mutex_unlock(&kvm->lock); 5691 return rc; 5692 } 5693 5694 static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons, 5695 struct irq_bypass_producer *prod) 5696 { 5697 int ret = 0; 5698 struct kvm_kernel_irqfd *irqfd = 5699 container_of(cons, struct kvm_kernel_irqfd, consumer); 5700 5701 irqfd->producer = prod; 5702 5703 ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); 5704 if (ret) 5705 pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n", 5706 prod->irq, irqfd->gsi, ret); 5707 5708 return ret; 5709 } 5710 5711 static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons, 5712 struct irq_bypass_producer *prod) 5713 { 5714 int ret; 5715 struct kvm_kernel_irqfd *irqfd = 5716 container_of(cons, struct kvm_kernel_irqfd, consumer); 5717 5718 irqfd->producer = NULL; 5719 5720 /* 5721 * When producer of consumer is unregistered, we change back to 5722 * default external interrupt handling mode - KVM real mode 5723 * will switch back to host. 
5724 */ 5725 ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); 5726 if (ret) 5727 pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n", 5728 prod->irq, irqfd->gsi, ret); 5729 } 5730 #endif 5731 5732 static long kvm_arch_vm_ioctl_hv(struct file *filp, 5733 unsigned int ioctl, unsigned long arg) 5734 { 5735 struct kvm *kvm __maybe_unused = filp->private_data; 5736 void __user *argp = (void __user *)arg; 5737 long r; 5738 5739 switch (ioctl) { 5740 5741 case KVM_PPC_ALLOCATE_HTAB: { 5742 u32 htab_order; 5743 5744 /* If we're a nested hypervisor, we currently only support radix */ 5745 if (kvmhv_on_pseries()) { 5746 r = -EOPNOTSUPP; 5747 break; 5748 } 5749 5750 r = -EFAULT; 5751 if (get_user(htab_order, (u32 __user *)argp)) 5752 break; 5753 r = kvmppc_alloc_reset_hpt(kvm, htab_order); 5754 if (r) 5755 break; 5756 r = 0; 5757 break; 5758 } 5759 5760 case KVM_PPC_GET_HTAB_FD: { 5761 struct kvm_get_htab_fd ghf; 5762 5763 r = -EFAULT; 5764 if (copy_from_user(&ghf, argp, sizeof(ghf))) 5765 break; 5766 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); 5767 break; 5768 } 5769 5770 case KVM_PPC_RESIZE_HPT_PREPARE: { 5771 struct kvm_ppc_resize_hpt rhpt; 5772 5773 r = -EFAULT; 5774 if (copy_from_user(&rhpt, argp, sizeof(rhpt))) 5775 break; 5776 5777 r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt); 5778 break; 5779 } 5780 5781 case KVM_PPC_RESIZE_HPT_COMMIT: { 5782 struct kvm_ppc_resize_hpt rhpt; 5783 5784 r = -EFAULT; 5785 if (copy_from_user(&rhpt, argp, sizeof(rhpt))) 5786 break; 5787 5788 r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt); 5789 break; 5790 } 5791 5792 default: 5793 r = -ENOTTY; 5794 } 5795 5796 return r; 5797 } 5798 5799 /* 5800 * List of hcall numbers to enable by default. 5801 * For compatibility with old userspace, we enable by default 5802 * all hcalls that were implemented before the hcall-enabling 5803 * facility was added. Note this list should not include H_RTAS. 5804 */ 5805 static unsigned int default_hcall_list[] = { 5806 H_REMOVE, 5807 H_ENTER, 5808 H_READ, 5809 H_PROTECT, 5810 H_BULK_REMOVE, 5811 #ifdef CONFIG_SPAPR_TCE_IOMMU 5812 H_GET_TCE, 5813 H_PUT_TCE, 5814 #endif 5815 H_SET_DABR, 5816 H_SET_XDABR, 5817 H_CEDE, 5818 H_PROD, 5819 H_CONFER, 5820 H_REGISTER_VPA, 5821 #ifdef CONFIG_KVM_XICS 5822 H_EOI, 5823 H_CPPR, 5824 H_IPI, 5825 H_IPOLL, 5826 H_XIRR, 5827 H_XIRR_X, 5828 #endif 5829 0 5830 }; 5831 5832 static void init_default_hcalls(void) 5833 { 5834 int i; 5835 unsigned int hcall; 5836 5837 for (i = 0; default_hcall_list[i]; ++i) { 5838 hcall = default_hcall_list[i]; 5839 WARN_ON(!kvmppc_hcall_impl_hv(hcall)); 5840 __set_bit(hcall / 4, default_enabled_hcalls); 5841 } 5842 } 5843 5844 static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) 5845 { 5846 unsigned long lpcr; 5847 int radix; 5848 int err; 5849 5850 /* If not on a POWER9, reject it */ 5851 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 5852 return -ENODEV; 5853 5854 /* If any unknown flags set, reject it */ 5855 if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE)) 5856 return -EINVAL; 5857 5858 /* GR (guest radix) bit in process_table field must match */ 5859 radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX); 5860 if (!!(cfg->process_table & PATB_GR) != radix) 5861 return -EINVAL; 5862 5863 /* Process table size field must be reasonable, i.e. 
<= 24 */ 5864 if ((cfg->process_table & PRTS_MASK) > 24) 5865 return -EINVAL; 5866 5867 /* We can change a guest to/from radix now, if the host is radix */ 5868 if (radix && !radix_enabled()) 5869 return -EINVAL; 5870 5871 /* If we're a nested hypervisor, we currently only support radix */ 5872 if (kvmhv_on_pseries() && !radix) 5873 return -EINVAL; 5874 5875 mutex_lock(&kvm->arch.mmu_setup_lock); 5876 if (radix != kvm_is_radix(kvm)) { 5877 if (kvm->arch.mmu_ready) { 5878 kvm->arch.mmu_ready = 0; 5879 /* order mmu_ready vs. vcpus_running */ 5880 smp_mb(); 5881 if (atomic_read(&kvm->arch.vcpus_running)) { 5882 kvm->arch.mmu_ready = 1; 5883 err = -EBUSY; 5884 goto out_unlock; 5885 } 5886 } 5887 if (radix) 5888 err = kvmppc_switch_mmu_to_radix(kvm); 5889 else 5890 err = kvmppc_switch_mmu_to_hpt(kvm); 5891 if (err) 5892 goto out_unlock; 5893 } 5894 5895 kvm->arch.process_table = cfg->process_table; 5896 kvmppc_setup_partition_table(kvm); 5897 5898 lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; 5899 kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE); 5900 err = 0; 5901 5902 out_unlock: 5903 mutex_unlock(&kvm->arch.mmu_setup_lock); 5904 return err; 5905 } 5906 5907 static int kvmhv_enable_nested(struct kvm *kvm) 5908 { 5909 if (!nested) 5910 return -EPERM; 5911 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 5912 return -ENODEV; 5913 if (!radix_enabled()) 5914 return -ENODEV; 5915 5916 /* kvm == NULL means the caller is testing if the capability exists */ 5917 if (kvm) 5918 kvm->arch.nested_enable = true; 5919 return 0; 5920 } 5921 5922 static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, 5923 int size) 5924 { 5925 int rc = -EINVAL; 5926 5927 if (kvmhv_vcpu_is_radix(vcpu)) { 5928 rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size); 5929 5930 if (rc > 0) 5931 rc = -EINVAL; 5932 } 5933 5934 /* For now quadrants are the only way to access nested guest memory */ 5935 if (rc && vcpu->arch.nested) 5936 rc = -EAGAIN; 5937 5938 return rc; 5939 } 5940 5941 static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, 5942 int size) 5943 { 5944 int rc = -EINVAL; 5945 5946 if (kvmhv_vcpu_is_radix(vcpu)) { 5947 rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size); 5948 5949 if (rc > 0) 5950 rc = -EINVAL; 5951 } 5952 5953 /* For now quadrants are the only way to access nested guest memory */ 5954 if (rc && vcpu->arch.nested) 5955 rc = -EAGAIN; 5956 5957 return rc; 5958 } 5959 5960 static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa) 5961 { 5962 unpin_vpa(kvm, vpa); 5963 vpa->gpa = 0; 5964 vpa->pinned_addr = NULL; 5965 vpa->dirty = false; 5966 vpa->update_pending = 0; 5967 } 5968 5969 /* 5970 * Enable a guest to become a secure VM, or test whether 5971 * that could be enabled. 5972 * Called when the KVM_CAP_PPC_SECURE_GUEST capability is 5973 * tested (kvm == NULL) or enabled (kvm != NULL). 5974 */ 5975 static int kvmhv_enable_svm(struct kvm *kvm) 5976 { 5977 if (!kvmppc_uvmem_available()) 5978 return -EINVAL; 5979 if (kvm) 5980 kvm->arch.svm_enabled = 1; 5981 return 0; 5982 } 5983 5984 /* 5985 * IOCTL handler to turn off secure mode of guest 5986 * 5987 * - Release all device pages 5988 * - Issue ucall to terminate the guest on the UV side 5989 * - Unpin the VPA pages. 

static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
				 int size)
{
	int rc = -EINVAL;

	if (kvmhv_vcpu_is_radix(vcpu)) {
		rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size);

		if (rc > 0)
			rc = -EINVAL;
	}

	/* For now quadrants are the only way to access nested guest memory */
	if (rc && vcpu->arch.nested)
		rc = -EAGAIN;

	return rc;
}

static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
				int size)
{
	int rc = -EINVAL;

	if (kvmhv_vcpu_is_radix(vcpu)) {
		rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size);

		if (rc > 0)
			rc = -EINVAL;
	}

	/* For now quadrants are the only way to access nested guest memory */
	if (rc && vcpu->arch.nested)
		rc = -EAGAIN;

	return rc;
}

static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	unpin_vpa(kvm, vpa);
	vpa->gpa = 0;
	vpa->pinned_addr = NULL;
	vpa->dirty = false;
	vpa->update_pending = 0;
}

/*
 * Enable a guest to become a secure VM, or test whether
 * that could be enabled.
 * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
 * tested (kvm == NULL) or enabled (kvm != NULL).
 */
static int kvmhv_enable_svm(struct kvm *kvm)
{
	if (!kvmppc_uvmem_available())
		return -EINVAL;
	if (kvm)
		kvm->arch.svm_enabled = 1;
	return 0;
}

/*
 * IOCTL handler to turn off secure mode of guest
 *
 * - Release all device pages
 * - Issue ucall to terminate the guest on the UV side
 * - Unpin the VPA pages
 * - Reinit the partition scoped page tables
 */
static int kvmhv_svm_off(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int mmu_was_ready;
	int srcu_idx;
	int ret = 0;
	unsigned long i;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return ret;

	mutex_lock(&kvm->arch.mmu_setup_lock);
	mmu_was_ready = kvm->arch.mmu_ready;
	if (kvm->arch.mmu_ready) {
		kvm->arch.mmu_ready = 0;
		/* order mmu_ready vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.mmu_ready = 1;
			ret = -EBUSY;
			goto out;
		}
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memory_slot *memslot;
		struct kvm_memslots *slots = __kvm_memslots(kvm, i);
		int bkt;

		if (!slots)
			continue;

		kvm_for_each_memslot(memslot, bkt, slots) {
			kvmppc_uvmem_drop_pages(memslot, kvm, true);
			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
		}
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	ret = uv_svm_terminate(kvm->arch.lpid);
	if (ret != U_SUCCESS) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * When secure guest is reset, all the guest pages are sent
	 * to UV via UV_PAGE_IN before the non-boot vcpus get a
	 * chance to run and unpin their VPA pages. Unpinning of all
	 * VPA pages is done here explicitly so that VPA pages
	 * can be migrated to the secure side.
	 *
	 * This is required for the secure SMP guest to reboot
	 * correctly.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		unpin_vpa_reset(kvm, &vcpu->arch.dtl);
		unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow);
		unpin_vpa_reset(kvm, &vcpu->arch.vpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}

	kvmppc_setup_partition_table(kvm);
	kvm->arch.secure_guest = 0;
	kvm->arch.mmu_ready = mmu_was_ready;
out:
	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
}
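
/*
 * DAWR1/DAWRX1 are the second data address watchpoint register pair
 * (introduced with ISA v3.1, hence the CPU_FTR_DAWR1 gate below).
 * kvmhv_enable_dawr1() backs the KVM_CAP_PPC_DAWR1 capability; as with the
 * other enable hooks, a NULL kvm means userspace is only probing for the
 * capability rather than enabling it for a VM.
 */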

static int kvmhv_enable_dawr1(struct kvm *kvm)
{
	if (!cpu_has_feature(CPU_FTR_DAWR1))
		return -ENODEV;

	/* kvm == NULL means the caller is testing if the capability exists */
	if (kvm)
		kvm->arch.dawr1_enabled = true;
	return 0;
}

static bool kvmppc_hash_v3_possible(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return false;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return false;

	/*
	 * POWER9 chips before version 2.02 can't have some threads in
	 * HPT mode and some in radix mode on the same core.
	 */
	if (radix_enabled()) {
		unsigned int pvr = mfspr(SPRN_PVR);
		if ((pvr >> 16) == PVR_POWER9 &&
		    (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
		     ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
			return false;
	}

	return true;
}

static struct kvmppc_ops kvm_ops_hv = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
	.get_one_reg = kvmppc_get_one_reg_hv,
	.set_one_reg = kvmppc_set_one_reg_hv,
	.vcpu_load = kvmppc_core_vcpu_load_hv,
	.vcpu_put = kvmppc_core_vcpu_put_hv,
	.inject_interrupt = kvmppc_inject_interrupt_hv,
	.set_msr = kvmppc_set_msr_hv,
	.vcpu_run = kvmppc_vcpu_run_hv,
	.vcpu_create = kvmppc_core_vcpu_create_hv,
	.vcpu_free = kvmppc_core_vcpu_free_hv,
	.check_requests = kvmppc_core_check_requests_hv,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
	.flush_memslot = kvmppc_core_flush_memslot_hv,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
	.commit_memory_region = kvmppc_core_commit_memory_region_hv,
	.unmap_gfn_range = kvm_unmap_gfn_range_hv,
	.age_gfn = kvm_age_gfn_hv,
	.test_age_gfn = kvm_test_age_gfn_hv,
	.set_spte_gfn = kvm_set_spte_gfn_hv,
	.free_memslot = kvmppc_core_free_memslot_hv,
	.init_vm = kvmppc_core_init_vm_hv,
	.destroy_vm = kvmppc_core_destroy_vm_hv,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
	.emulate_op = kvmppc_core_emulate_op_hv,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
	.hcall_implemented = kvmppc_hcall_impl_hv,
#ifdef CONFIG_KVM_XICS
	.irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
	.irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
#endif
	.configure_mmu = kvmhv_configure_mmu,
	.get_rmmu_info = kvmhv_get_rmmu_info,
	.set_smt_mode = kvmhv_set_smt_mode,
	.enable_nested = kvmhv_enable_nested,
	.load_from_eaddr = kvmhv_load_from_eaddr,
	.store_to_eaddr = kvmhv_store_to_eaddr,
	.enable_svm = kvmhv_enable_svm,
	.svm_off = kvmhv_svm_off,
	.enable_dawr1 = kvmhv_enable_dawr1,
	.hash_v3_possible = kvmppc_hash_v3_possible,
	.create_vcpu_debugfs = kvmppc_arch_create_vcpu_debugfs_hv,
	.create_vm_debugfs = kvmppc_arch_create_vm_debugfs_hv,
};
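
/*
 * On pre-ISA v3.0 hosts (POWER8 and earlier), all hardware threads of a
 * (sub)core enter and leave a guest together, so the threads of each core
 * share a single sibling_subcore_state for coordination.  The helper below
 * allocates that state once per core and installs the same pointer in every
 * sibling thread's paca; kvmppc_book3s_init_hv() only calls it on hosts
 * without CPU_FTR_ARCH_300.
 */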

static int kvm_init_subcore_bitmap(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	struct sibling_subcore_state *sibling_subcore_state;

	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);

		/* Ignore if it is already allocated. */
		if (paca_ptrs[first_cpu]->sibling_subcore_state)
			continue;

		sibling_subcore_state =
			kzalloc_node(sizeof(struct sibling_subcore_state),
				     GFP_KERNEL, node);
		if (!sibling_subcore_state)
			return -ENOMEM;

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca_ptrs[cpu]->sibling_subcore_state =
						sibling_subcore_state;
		}
	}
	return 0;
}

static int kvmppc_radix_possible(void)
{
	return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled();
}

static int kvmppc_book3s_init_hv(void)
{
	int r;

	if (!tlbie_capable) {
		pr_err("KVM-HV: Host does not support TLBIE\n");
		return -ENODEV;
	}

	/*
	 * FIXME!! Do we need to check on all cpus ?
	 */
	r = kvmppc_core_check_processor_compat_hv();
	if (r < 0)
		return -ENODEV;

	r = kvmhv_nested_init();
	if (r)
		return r;

	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		r = kvm_init_subcore_bitmap();
		if (r)
			goto err;
	}

	/*
	 * We need a way of accessing the XICS interrupt controller,
	 * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or
	 * indirectly, via OPAL.
	 */
#ifdef CONFIG_SMP
	if (!xics_on_xive() && !kvmhv_on_pseries() &&
	    !local_paca->kvm_hstate.xics_phys) {
		struct device_node *np;

		np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
		if (!np) {
			pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
			r = -ENODEV;
			goto err;
		}
		/* presence of intc confirmed - node can be dropped again */
		of_node_put(np);
	}
#endif

	init_default_hcalls();

	init_vcore_lists();

	r = kvmppc_mmu_hv_init();
	if (r)
		goto err;

	if (kvmppc_radix_possible()) {
		r = kvmppc_radix_init();
		if (r)
			goto err;
	}

	r = kvmppc_uvmem_init();
	if (r < 0) {
		pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
		return r;
	}

	kvm_ops_hv.owner = THIS_MODULE;
	kvmppc_hv_ops = &kvm_ops_hv;

	return 0;

err:
	kvmhv_nested_exit();
	kvmppc_radix_exit();

	return r;
}

static void kvmppc_book3s_exit_hv(void)
{
	kvmppc_uvmem_free();
	kvmppc_free_host_rm_ops();
	if (kvmppc_radix_possible())
		kvmppc_radix_exit();
	kvmppc_hv_ops = NULL;
	kvmhv_nested_exit();
}

module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");