/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
#include <asm/debug.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_hv.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
/* Used to indicate that a guest passthrough interrupt needs to be handled */
#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, 0644);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");

static bool indep_threads_mode = true;
module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");

#ifdef CONFIG_KVM_XICS
static struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif

/* If set, the threads on each CPU core have to be in the same MMU mode */
static bool no_mixing_hpt_and_radix;

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
						    int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )

static bool kvmppc_ipi_thread(int cpu)
{
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd to IPI any cpu */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		smp_mb();
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return true;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids) {
		if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
			xics_wake_cpu(cpu);
			return true;
		}
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
		return true;
	}
#endif

	return false;
}

static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct swait_queue_head *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (swq_has_sleeper(wqp)) {
		swake_up(wqp);
		++vcpu->stat.halt_wakeup;
	}

	cpu = READ_ONCE(vcpu->arch.thread_cpu);
	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
		return;

	/* CPU points to the first thread of the core */
	cpu = vcpu->cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
 * lock.  The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */

static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	vc->preempt_tb = mftb();
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */
	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_end_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_start_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

/* Dummy value used in computing PCR value below */
#define PCR_ARCH_300	(PCR_ARCH_207 << 1)

static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* We can (emulate) our own architecture version and anything older */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		host_pcr_bit = PCR_ARCH_300;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		host_pcr_bit = PCR_ARCH_207;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		host_pcr_bit = PCR_ARCH_206;
	else
		host_pcr_bit = PCR_ARCH_205;

	/* Determine lowest PCR bit needed to run guest in given PVR level */
	guest_pcr_bit = host_pcr_bit;
	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			guest_pcr_bit = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			guest_pcr_bit = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			guest_pcr_bit = PCR_ARCH_207;
			break;
		case PVR_ARCH_300:
			guest_pcr_bit = PCR_ARCH_300;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Check requested PCR bits don't exceed our capabilities */
	if (guest_pcr_bit > host_pcr_bit)
		return -EINVAL;

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	/* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
	vc->pcr = host_pcr_bit - guest_pcr_bit;
	spin_unlock(&vc->lock);

	return 0;
}

static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err(" ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	struct kvm_vcpu *ret;

	mutex_lock(&kvm->lock);
	ret = kvm_get_vcpu_by_id(kvm, id);
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}
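
/*
 * Illustrative sketch (an editor's note, not part of the original source):
 * the VPA registration path below is driven by the H_REGISTER_VPA hypercall.
 * kvmppc_pseries_do_hcall() passes guest r4/r5/r6 straight through to
 * do_h_register_vpa() as (flags, vcpuid, vpa), so a pseries guest would
 * typically register its lppaca with something like the hypothetical call:
 *
 *	unsigned long flags = H_VPA_REG_VPA << H_VPA_FUNC_SHIFT;
 *	long rc = plpar_hcall_norets(H_REGISTER_VPA, flags,
 *				     hard_smp_processor_id(),
 *				     __pa(&lppaca_of(cpu)));
 *
 * The subfunction (register/deregister VPA, DTL or SLB shadow buffer) is
 * encoded in the flags argument, and the buffer length is read back from
 * offset 4 of the guest buffer (see struct reg_vpa below).
 */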

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		/*
		 * The size of our lppaca is 1kB because of the way we align
		 * it for the guest to avoid crossing a 4kB boundary. We only
		 * use 640 bytes of the structure though, so we should accept
		 * clients that set a size of 640.
		 */
		BUILD_BUG_ON(sizeof(struct lppaca) != 640);
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
602 */ 603 kvmppc_unpin_guest_page(kvm, va, gpa, false); 604 va = NULL; 605 } 606 if (vpap->pinned_addr) 607 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa, 608 vpap->dirty); 609 vpap->gpa = gpa; 610 vpap->pinned_addr = va; 611 vpap->dirty = false; 612 if (va) 613 vpap->pinned_end = va + vpap->len; 614 } 615 616 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) 617 { 618 if (!(vcpu->arch.vpa.update_pending || 619 vcpu->arch.slb_shadow.update_pending || 620 vcpu->arch.dtl.update_pending)) 621 return; 622 623 spin_lock(&vcpu->arch.vpa_update_lock); 624 if (vcpu->arch.vpa.update_pending) { 625 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); 626 if (vcpu->arch.vpa.pinned_addr) 627 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); 628 } 629 if (vcpu->arch.dtl.update_pending) { 630 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); 631 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; 632 vcpu->arch.dtl_index = 0; 633 } 634 if (vcpu->arch.slb_shadow.update_pending) 635 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); 636 spin_unlock(&vcpu->arch.vpa_update_lock); 637 } 638 639 /* 640 * Return the accumulated stolen time for the vcore up until `now'. 641 * The caller should hold the vcore lock. 642 */ 643 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) 644 { 645 u64 p; 646 unsigned long flags; 647 648 spin_lock_irqsave(&vc->stoltb_lock, flags); 649 p = vc->stolen_tb; 650 if (vc->vcore_state != VCORE_INACTIVE && 651 vc->preempt_tb != TB_NIL) 652 p += now - vc->preempt_tb; 653 spin_unlock_irqrestore(&vc->stoltb_lock, flags); 654 return p; 655 } 656 657 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, 658 struct kvmppc_vcore *vc) 659 { 660 struct dtl_entry *dt; 661 struct lppaca *vpa; 662 unsigned long stolen; 663 unsigned long core_stolen; 664 u64 now; 665 unsigned long flags; 666 667 dt = vcpu->arch.dtl_ptr; 668 vpa = vcpu->arch.vpa.pinned_addr; 669 now = mftb(); 670 core_stolen = vcore_stolen_time(vc, now); 671 stolen = core_stolen - vcpu->arch.stolen_logged; 672 vcpu->arch.stolen_logged = core_stolen; 673 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); 674 stolen += vcpu->arch.busy_stolen; 675 vcpu->arch.busy_stolen = 0; 676 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); 677 if (!dt || !vpa) 678 return; 679 memset(dt, 0, sizeof(struct dtl_entry)); 680 dt->dispatch_reason = 7; 681 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); 682 dt->timebase = cpu_to_be64(now + vc->tb_offset); 683 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); 684 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); 685 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); 686 ++dt; 687 if (dt == vcpu->arch.dtl.pinned_end) 688 dt = vcpu->arch.dtl.pinned_addr; 689 vcpu->arch.dtl_ptr = dt; 690 /* order writing *dt vs. writing vpa->dtl_idx */ 691 smp_wmb(); 692 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); 693 vcpu->arch.dtl.dirty = true; 694 } 695 696 /* See if there is a doorbell interrupt pending for a vcpu */ 697 static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) 698 { 699 int thr; 700 struct kvmppc_vcore *vc; 701 702 if (vcpu->arch.doorbell_request) 703 return true; 704 /* 705 * Ensure that the read of vcore->dpdes comes after the read 706 * of vcpu->doorbell_request. This barrier matches the 707 * lwsync in book3s_hv_rmhandlers.S just before the 708 * fast_guest_return label. 
709 */ 710 smp_rmb(); 711 vc = vcpu->arch.vcore; 712 thr = vcpu->vcpu_id - vc->first_vcpuid; 713 return !!(vc->dpdes & (1 << thr)); 714 } 715 716 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) 717 { 718 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) 719 return true; 720 if ((!vcpu->arch.vcore->arch_compat) && 721 cpu_has_feature(CPU_FTR_ARCH_207S)) 722 return true; 723 return false; 724 } 725 726 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, 727 unsigned long resource, unsigned long value1, 728 unsigned long value2) 729 { 730 switch (resource) { 731 case H_SET_MODE_RESOURCE_SET_CIABR: 732 if (!kvmppc_power8_compatible(vcpu)) 733 return H_P2; 734 if (value2) 735 return H_P4; 736 if (mflags) 737 return H_UNSUPPORTED_FLAG_START; 738 /* Guests can't breakpoint the hypervisor */ 739 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER) 740 return H_P3; 741 vcpu->arch.ciabr = value1; 742 return H_SUCCESS; 743 case H_SET_MODE_RESOURCE_SET_DAWR: 744 if (!kvmppc_power8_compatible(vcpu)) 745 return H_P2; 746 if (!ppc_breakpoint_available()) 747 return H_P2; 748 if (mflags) 749 return H_UNSUPPORTED_FLAG_START; 750 if (value2 & DABRX_HYP) 751 return H_P4; 752 vcpu->arch.dawr = value1; 753 vcpu->arch.dawrx = value2; 754 return H_SUCCESS; 755 default: 756 return H_TOO_HARD; 757 } 758 } 759 760 static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target) 761 { 762 struct kvmppc_vcore *vcore = target->arch.vcore; 763 764 /* 765 * We expect to have been called by the real mode handler 766 * (kvmppc_rm_h_confer()) which would have directly returned 767 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may 768 * have useful work to do and should not confer) so we don't 769 * recheck that here. 770 */ 771 772 spin_lock(&vcore->lock); 773 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && 774 vcore->vcore_state != VCORE_INACTIVE && 775 vcore->runner) 776 target = vcore->runner; 777 spin_unlock(&vcore->lock); 778 779 return kvm_vcpu_yield_to(target); 780 } 781 782 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) 783 { 784 int yield_count = 0; 785 struct lppaca *lppaca; 786 787 spin_lock(&vcpu->arch.vpa_update_lock); 788 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; 789 if (lppaca) 790 yield_count = be32_to_cpu(lppaca->yield_count); 791 spin_unlock(&vcpu->arch.vpa_update_lock); 792 return yield_count; 793 } 794 795 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) 796 { 797 unsigned long req = kvmppc_get_gpr(vcpu, 3); 798 unsigned long target, ret = H_SUCCESS; 799 int yield_count; 800 struct kvm_vcpu *tvcpu; 801 int idx, rc; 802 803 if (req <= MAX_HCALL_OPCODE && 804 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) 805 return RESUME_HOST; 806 807 switch (req) { 808 case H_CEDE: 809 break; 810 case H_PROD: 811 target = kvmppc_get_gpr(vcpu, 4); 812 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); 813 if (!tvcpu) { 814 ret = H_PARAMETER; 815 break; 816 } 817 tvcpu->arch.prodded = 1; 818 smp_mb(); 819 if (tvcpu->arch.ceded) 820 kvmppc_fast_vcpu_kick_hv(tvcpu); 821 break; 822 case H_CONFER: 823 target = kvmppc_get_gpr(vcpu, 4); 824 if (target == -1) 825 break; 826 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); 827 if (!tvcpu) { 828 ret = H_PARAMETER; 829 break; 830 } 831 yield_count = kvmppc_get_gpr(vcpu, 5); 832 if (kvmppc_get_yield_count(tvcpu) != yield_count) 833 break; 834 kvm_arch_vcpu_yield_to(tvcpu); 835 break; 836 case H_REGISTER_VPA: 837 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), 838 kvmppc_get_gpr(vcpu, 5), 839 kvmppc_get_gpr(vcpu, 6)); 840 
break; 841 case H_RTAS: 842 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) 843 return RESUME_HOST; 844 845 idx = srcu_read_lock(&vcpu->kvm->srcu); 846 rc = kvmppc_rtas_hcall(vcpu); 847 srcu_read_unlock(&vcpu->kvm->srcu, idx); 848 849 if (rc == -ENOENT) 850 return RESUME_HOST; 851 else if (rc == 0) 852 break; 853 854 /* Send the error out to userspace via KVM_RUN */ 855 return rc; 856 case H_LOGICAL_CI_LOAD: 857 ret = kvmppc_h_logical_ci_load(vcpu); 858 if (ret == H_TOO_HARD) 859 return RESUME_HOST; 860 break; 861 case H_LOGICAL_CI_STORE: 862 ret = kvmppc_h_logical_ci_store(vcpu); 863 if (ret == H_TOO_HARD) 864 return RESUME_HOST; 865 break; 866 case H_SET_MODE: 867 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), 868 kvmppc_get_gpr(vcpu, 5), 869 kvmppc_get_gpr(vcpu, 6), 870 kvmppc_get_gpr(vcpu, 7)); 871 if (ret == H_TOO_HARD) 872 return RESUME_HOST; 873 break; 874 case H_XIRR: 875 case H_CPPR: 876 case H_EOI: 877 case H_IPI: 878 case H_IPOLL: 879 case H_XIRR_X: 880 if (kvmppc_xics_enabled(vcpu)) { 881 if (xive_enabled()) { 882 ret = H_NOT_AVAILABLE; 883 return RESUME_GUEST; 884 } 885 ret = kvmppc_xics_hcall(vcpu, req); 886 break; 887 } 888 return RESUME_HOST; 889 case H_PUT_TCE: 890 ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4), 891 kvmppc_get_gpr(vcpu, 5), 892 kvmppc_get_gpr(vcpu, 6)); 893 if (ret == H_TOO_HARD) 894 return RESUME_HOST; 895 break; 896 case H_PUT_TCE_INDIRECT: 897 ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4), 898 kvmppc_get_gpr(vcpu, 5), 899 kvmppc_get_gpr(vcpu, 6), 900 kvmppc_get_gpr(vcpu, 7)); 901 if (ret == H_TOO_HARD) 902 return RESUME_HOST; 903 break; 904 case H_STUFF_TCE: 905 ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4), 906 kvmppc_get_gpr(vcpu, 5), 907 kvmppc_get_gpr(vcpu, 6), 908 kvmppc_get_gpr(vcpu, 7)); 909 if (ret == H_TOO_HARD) 910 return RESUME_HOST; 911 break; 912 default: 913 return RESUME_HOST; 914 } 915 kvmppc_set_gpr(vcpu, 3, ret); 916 vcpu->arch.hcall_needed = 0; 917 return RESUME_GUEST; 918 } 919 920 static int kvmppc_hcall_impl_hv(unsigned long cmd) 921 { 922 switch (cmd) { 923 case H_CEDE: 924 case H_PROD: 925 case H_CONFER: 926 case H_REGISTER_VPA: 927 case H_SET_MODE: 928 case H_LOGICAL_CI_LOAD: 929 case H_LOGICAL_CI_STORE: 930 #ifdef CONFIG_KVM_XICS 931 case H_XIRR: 932 case H_CPPR: 933 case H_EOI: 934 case H_IPI: 935 case H_IPOLL: 936 case H_XIRR_X: 937 #endif 938 return 1; 939 } 940 941 /* See if it's in the real-mode table */ 942 return kvmppc_hcall_impl_hv_realmode(cmd); 943 } 944 945 static int kvmppc_emulate_debug_inst(struct kvm_run *run, 946 struct kvm_vcpu *vcpu) 947 { 948 u32 last_inst; 949 950 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != 951 EMULATE_DONE) { 952 /* 953 * Fetch failed, so return to guest and 954 * try executing it again. 
955 */ 956 return RESUME_GUEST; 957 } 958 959 if (last_inst == KVMPPC_INST_SW_BREAKPOINT) { 960 run->exit_reason = KVM_EXIT_DEBUG; 961 run->debug.arch.address = kvmppc_get_pc(vcpu); 962 return RESUME_HOST; 963 } else { 964 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 965 return RESUME_GUEST; 966 } 967 } 968 969 static void do_nothing(void *x) 970 { 971 } 972 973 static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu) 974 { 975 int thr, cpu, pcpu, nthreads; 976 struct kvm_vcpu *v; 977 unsigned long dpdes; 978 979 nthreads = vcpu->kvm->arch.emul_smt_mode; 980 dpdes = 0; 981 cpu = vcpu->vcpu_id & ~(nthreads - 1); 982 for (thr = 0; thr < nthreads; ++thr, ++cpu) { 983 v = kvmppc_find_vcpu(vcpu->kvm, cpu); 984 if (!v) 985 continue; 986 /* 987 * If the vcpu is currently running on a physical cpu thread, 988 * interrupt it in order to pull it out of the guest briefly, 989 * which will update its vcore->dpdes value. 990 */ 991 pcpu = READ_ONCE(v->cpu); 992 if (pcpu >= 0) 993 smp_call_function_single(pcpu, do_nothing, NULL, 1); 994 if (kvmppc_doorbell_pending(v)) 995 dpdes |= 1 << thr; 996 } 997 return dpdes; 998 } 999 1000 /* 1001 * On POWER9, emulate doorbell-related instructions in order to 1002 * give the guest the illusion of running on a multi-threaded core. 1003 * The instructions emulated are msgsndp, msgclrp, mfspr TIR, 1004 * and mfspr DPDES. 1005 */ 1006 static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) 1007 { 1008 u32 inst, rb, thr; 1009 unsigned long arg; 1010 struct kvm *kvm = vcpu->kvm; 1011 struct kvm_vcpu *tvcpu; 1012 1013 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) 1014 return RESUME_GUEST; 1015 if (get_op(inst) != 31) 1016 return EMULATE_FAIL; 1017 rb = get_rb(inst); 1018 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); 1019 switch (get_xop(inst)) { 1020 case OP_31_XOP_MSGSNDP: 1021 arg = kvmppc_get_gpr(vcpu, rb); 1022 if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER) 1023 break; 1024 arg &= 0x3f; 1025 if (arg >= kvm->arch.emul_smt_mode) 1026 break; 1027 tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); 1028 if (!tvcpu) 1029 break; 1030 if (!tvcpu->arch.doorbell_request) { 1031 tvcpu->arch.doorbell_request = 1; 1032 kvmppc_fast_vcpu_kick_hv(tvcpu); 1033 } 1034 break; 1035 case OP_31_XOP_MSGCLRP: 1036 arg = kvmppc_get_gpr(vcpu, rb); 1037 if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER) 1038 break; 1039 vcpu->arch.vcore->dpdes = 0; 1040 vcpu->arch.doorbell_request = 0; 1041 break; 1042 case OP_31_XOP_MFSPR: 1043 switch (get_sprn(inst)) { 1044 case SPRN_TIR: 1045 arg = thr; 1046 break; 1047 case SPRN_DPDES: 1048 arg = kvmppc_read_dpdes(vcpu); 1049 break; 1050 default: 1051 return EMULATE_FAIL; 1052 } 1053 kvmppc_set_gpr(vcpu, get_rt(inst), arg); 1054 break; 1055 default: 1056 return EMULATE_FAIL; 1057 } 1058 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); 1059 return RESUME_GUEST; 1060 } 1061 1062 /* Called with vcpu->arch.vcore->lock held */ 1063 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, 1064 struct task_struct *tsk) 1065 { 1066 int r = RESUME_HOST; 1067 1068 vcpu->stat.sum_exits++; 1069 1070 /* 1071 * This can happen if an interrupt occurs in the last stages 1072 * of guest entry or the first stages of guest exit (i.e. after 1073 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV 1074 * and before setting it to KVM_GUEST_MODE_HOST_HV). 1075 * That can happen due to a bug, or due to a machine check 1076 * occurring at just the wrong time. 
	 */
	if (vcpu->arch.shregs.msr & MSR_HV) {
		printk(KERN_EMERG "KVM trap in HV mode!\n");
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		kvmppc_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		return RESUME_HOST;
	}
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/* Exit to guest with KVM_EXIT_NMI as exit reason */
		run->exit_reason = KVM_EXIT_NMI;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		/* Clear out the old NMI status from run->flags */
		run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
		/* Now set the NMI status */
		if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
			run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
		else
			run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;

		r = RESUME_HOST;
		/* Print the MCE event to host console. */
		machine_check_print_event_info(&vcpu->arch.mce_evt, false);
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		/* hypercall with MSR_PR has already been handled in rmode,
		 * and never reaches here.
		 */

		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If the guest debug is disabled, generate a program interrupt
	 * to the guest. If guest debug is enabled, we need to check
	 * whether the instruction is a software breakpoint instruction.
1175 * Accordingly return to Guest or Host. 1176 */ 1177 case BOOK3S_INTERRUPT_H_EMUL_ASSIST: 1178 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) 1179 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? 1180 swab32(vcpu->arch.emul_inst) : 1181 vcpu->arch.emul_inst; 1182 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { 1183 /* Need vcore unlocked to call kvmppc_get_last_inst */ 1184 spin_unlock(&vcpu->arch.vcore->lock); 1185 r = kvmppc_emulate_debug_inst(run, vcpu); 1186 spin_lock(&vcpu->arch.vcore->lock); 1187 } else { 1188 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1189 r = RESUME_GUEST; 1190 } 1191 break; 1192 /* 1193 * This occurs if the guest (kernel or userspace), does something that 1194 * is prohibited by HFSCR. 1195 * On POWER9, this could be a doorbell instruction that we need 1196 * to emulate. 1197 * Otherwise, we just generate a program interrupt to the guest. 1198 */ 1199 case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: 1200 r = EMULATE_FAIL; 1201 if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && 1202 cpu_has_feature(CPU_FTR_ARCH_300)) { 1203 /* Need vcore unlocked to call kvmppc_get_last_inst */ 1204 spin_unlock(&vcpu->arch.vcore->lock); 1205 r = kvmppc_emulate_doorbell_instr(vcpu); 1206 spin_lock(&vcpu->arch.vcore->lock); 1207 } 1208 if (r == EMULATE_FAIL) { 1209 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1210 r = RESUME_GUEST; 1211 } 1212 break; 1213 1214 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1215 case BOOK3S_INTERRUPT_HV_SOFTPATCH: 1216 /* 1217 * This occurs for various TM-related instructions that 1218 * we need to emulate on POWER9 DD2.2. We have already 1219 * handled the cases where the guest was in real-suspend 1220 * mode and was transitioning to transactional state. 1221 */ 1222 r = kvmhv_p9_tm_emulation(vcpu); 1223 break; 1224 #endif 1225 1226 case BOOK3S_INTERRUPT_HV_RM_HARD: 1227 r = RESUME_PASSTHROUGH; 1228 break; 1229 default: 1230 kvmppc_dump_regs(vcpu); 1231 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", 1232 vcpu->arch.trap, kvmppc_get_pc(vcpu), 1233 vcpu->arch.shregs.msr); 1234 run->hw.hardware_exit_reason = vcpu->arch.trap; 1235 r = RESUME_HOST; 1236 break; 1237 } 1238 1239 return r; 1240 } 1241 1242 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, 1243 struct kvm_sregs *sregs) 1244 { 1245 int i; 1246 1247 memset(sregs, 0, sizeof(struct kvm_sregs)); 1248 sregs->pvr = vcpu->arch.pvr; 1249 for (i = 0; i < vcpu->arch.slb_max; i++) { 1250 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; 1251 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; 1252 } 1253 1254 return 0; 1255 } 1256 1257 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, 1258 struct kvm_sregs *sregs) 1259 { 1260 int i, j; 1261 1262 /* Only accept the same PVR as the host's, since we can't spoof it */ 1263 if (sregs->pvr != vcpu->arch.pvr) 1264 return -EINVAL; 1265 1266 j = 0; 1267 for (i = 0; i < vcpu->arch.slb_nr; i++) { 1268 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { 1269 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; 1270 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; 1271 ++j; 1272 } 1273 } 1274 vcpu->arch.slb_max = j; 1275 1276 return 0; 1277 } 1278 1279 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, 1280 bool preserve_top32) 1281 { 1282 struct kvm *kvm = vcpu->kvm; 1283 struct kvmppc_vcore *vc = vcpu->arch.vcore; 1284 u64 mask; 1285 1286 mutex_lock(&kvm->lock); 1287 spin_lock(&vc->lock); 1288 /* 1289 * If ILE (interrupt little-endian) has changed, update the 1290 * MSR_LE bit in the intr_msr for 
each vcpu in this vcore. 1291 */ 1292 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { 1293 struct kvm_vcpu *vcpu; 1294 int i; 1295 1296 kvm_for_each_vcpu(i, vcpu, kvm) { 1297 if (vcpu->arch.vcore != vc) 1298 continue; 1299 if (new_lpcr & LPCR_ILE) 1300 vcpu->arch.intr_msr |= MSR_LE; 1301 else 1302 vcpu->arch.intr_msr &= ~MSR_LE; 1303 } 1304 } 1305 1306 /* 1307 * Userspace can only modify DPFD (default prefetch depth), 1308 * ILE (interrupt little-endian) and TC (translation control). 1309 * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.). 1310 */ 1311 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; 1312 if (cpu_has_feature(CPU_FTR_ARCH_207S)) 1313 mask |= LPCR_AIL; 1314 /* 1315 * On POWER9, allow userspace to enable large decrementer for the 1316 * guest, whether or not the host has it enabled. 1317 */ 1318 if (cpu_has_feature(CPU_FTR_ARCH_300)) 1319 mask |= LPCR_LD; 1320 1321 /* Broken 32-bit version of LPCR must not clear top bits */ 1322 if (preserve_top32) 1323 mask &= 0xFFFFFFFF; 1324 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); 1325 spin_unlock(&vc->lock); 1326 mutex_unlock(&kvm->lock); 1327 } 1328 1329 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, 1330 union kvmppc_one_reg *val) 1331 { 1332 int r = 0; 1333 long int i; 1334 1335 switch (id) { 1336 case KVM_REG_PPC_DEBUG_INST: 1337 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); 1338 break; 1339 case KVM_REG_PPC_HIOR: 1340 *val = get_reg_val(id, 0); 1341 break; 1342 case KVM_REG_PPC_DABR: 1343 *val = get_reg_val(id, vcpu->arch.dabr); 1344 break; 1345 case KVM_REG_PPC_DABRX: 1346 *val = get_reg_val(id, vcpu->arch.dabrx); 1347 break; 1348 case KVM_REG_PPC_DSCR: 1349 *val = get_reg_val(id, vcpu->arch.dscr); 1350 break; 1351 case KVM_REG_PPC_PURR: 1352 *val = get_reg_val(id, vcpu->arch.purr); 1353 break; 1354 case KVM_REG_PPC_SPURR: 1355 *val = get_reg_val(id, vcpu->arch.spurr); 1356 break; 1357 case KVM_REG_PPC_AMR: 1358 *val = get_reg_val(id, vcpu->arch.amr); 1359 break; 1360 case KVM_REG_PPC_UAMOR: 1361 *val = get_reg_val(id, vcpu->arch.uamor); 1362 break; 1363 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: 1364 i = id - KVM_REG_PPC_MMCR0; 1365 *val = get_reg_val(id, vcpu->arch.mmcr[i]); 1366 break; 1367 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: 1368 i = id - KVM_REG_PPC_PMC1; 1369 *val = get_reg_val(id, vcpu->arch.pmc[i]); 1370 break; 1371 case KVM_REG_PPC_SPMC1 ... 
KVM_REG_PPC_SPMC2: 1372 i = id - KVM_REG_PPC_SPMC1; 1373 *val = get_reg_val(id, vcpu->arch.spmc[i]); 1374 break; 1375 case KVM_REG_PPC_SIAR: 1376 *val = get_reg_val(id, vcpu->arch.siar); 1377 break; 1378 case KVM_REG_PPC_SDAR: 1379 *val = get_reg_val(id, vcpu->arch.sdar); 1380 break; 1381 case KVM_REG_PPC_SIER: 1382 *val = get_reg_val(id, vcpu->arch.sier); 1383 break; 1384 case KVM_REG_PPC_IAMR: 1385 *val = get_reg_val(id, vcpu->arch.iamr); 1386 break; 1387 case KVM_REG_PPC_PSPB: 1388 *val = get_reg_val(id, vcpu->arch.pspb); 1389 break; 1390 case KVM_REG_PPC_DPDES: 1391 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); 1392 break; 1393 case KVM_REG_PPC_VTB: 1394 *val = get_reg_val(id, vcpu->arch.vcore->vtb); 1395 break; 1396 case KVM_REG_PPC_DAWR: 1397 *val = get_reg_val(id, vcpu->arch.dawr); 1398 break; 1399 case KVM_REG_PPC_DAWRX: 1400 *val = get_reg_val(id, vcpu->arch.dawrx); 1401 break; 1402 case KVM_REG_PPC_CIABR: 1403 *val = get_reg_val(id, vcpu->arch.ciabr); 1404 break; 1405 case KVM_REG_PPC_CSIGR: 1406 *val = get_reg_val(id, vcpu->arch.csigr); 1407 break; 1408 case KVM_REG_PPC_TACR: 1409 *val = get_reg_val(id, vcpu->arch.tacr); 1410 break; 1411 case KVM_REG_PPC_TCSCR: 1412 *val = get_reg_val(id, vcpu->arch.tcscr); 1413 break; 1414 case KVM_REG_PPC_PID: 1415 *val = get_reg_val(id, vcpu->arch.pid); 1416 break; 1417 case KVM_REG_PPC_ACOP: 1418 *val = get_reg_val(id, vcpu->arch.acop); 1419 break; 1420 case KVM_REG_PPC_WORT: 1421 *val = get_reg_val(id, vcpu->arch.wort); 1422 break; 1423 case KVM_REG_PPC_TIDR: 1424 *val = get_reg_val(id, vcpu->arch.tid); 1425 break; 1426 case KVM_REG_PPC_PSSCR: 1427 *val = get_reg_val(id, vcpu->arch.psscr); 1428 break; 1429 case KVM_REG_PPC_VPA_ADDR: 1430 spin_lock(&vcpu->arch.vpa_update_lock); 1431 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); 1432 spin_unlock(&vcpu->arch.vpa_update_lock); 1433 break; 1434 case KVM_REG_PPC_VPA_SLB: 1435 spin_lock(&vcpu->arch.vpa_update_lock); 1436 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; 1437 val->vpaval.length = vcpu->arch.slb_shadow.len; 1438 spin_unlock(&vcpu->arch.vpa_update_lock); 1439 break; 1440 case KVM_REG_PPC_VPA_DTL: 1441 spin_lock(&vcpu->arch.vpa_update_lock); 1442 val->vpaval.addr = vcpu->arch.dtl.next_gpa; 1443 val->vpaval.length = vcpu->arch.dtl.len; 1444 spin_unlock(&vcpu->arch.vpa_update_lock); 1445 break; 1446 case KVM_REG_PPC_TB_OFFSET: 1447 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); 1448 break; 1449 case KVM_REG_PPC_LPCR: 1450 case KVM_REG_PPC_LPCR_64: 1451 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); 1452 break; 1453 case KVM_REG_PPC_PPR: 1454 *val = get_reg_val(id, vcpu->arch.ppr); 1455 break; 1456 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1457 case KVM_REG_PPC_TFHAR: 1458 *val = get_reg_val(id, vcpu->arch.tfhar); 1459 break; 1460 case KVM_REG_PPC_TFIAR: 1461 *val = get_reg_val(id, vcpu->arch.tfiar); 1462 break; 1463 case KVM_REG_PPC_TEXASR: 1464 *val = get_reg_val(id, vcpu->arch.texasr); 1465 break; 1466 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: 1467 i = id - KVM_REG_PPC_TM_GPR0; 1468 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); 1469 break; 1470 case KVM_REG_PPC_TM_VSR0 ... 
KVM_REG_PPC_TM_VSR63: 1471 { 1472 int j; 1473 i = id - KVM_REG_PPC_TM_VSR0; 1474 if (i < 32) 1475 for (j = 0; j < TS_FPRWIDTH; j++) 1476 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; 1477 else { 1478 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1479 val->vval = vcpu->arch.vr_tm.vr[i-32]; 1480 else 1481 r = -ENXIO; 1482 } 1483 break; 1484 } 1485 case KVM_REG_PPC_TM_CR: 1486 *val = get_reg_val(id, vcpu->arch.cr_tm); 1487 break; 1488 case KVM_REG_PPC_TM_XER: 1489 *val = get_reg_val(id, vcpu->arch.xer_tm); 1490 break; 1491 case KVM_REG_PPC_TM_LR: 1492 *val = get_reg_val(id, vcpu->arch.lr_tm); 1493 break; 1494 case KVM_REG_PPC_TM_CTR: 1495 *val = get_reg_val(id, vcpu->arch.ctr_tm); 1496 break; 1497 case KVM_REG_PPC_TM_FPSCR: 1498 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); 1499 break; 1500 case KVM_REG_PPC_TM_AMR: 1501 *val = get_reg_val(id, vcpu->arch.amr_tm); 1502 break; 1503 case KVM_REG_PPC_TM_PPR: 1504 *val = get_reg_val(id, vcpu->arch.ppr_tm); 1505 break; 1506 case KVM_REG_PPC_TM_VRSAVE: 1507 *val = get_reg_val(id, vcpu->arch.vrsave_tm); 1508 break; 1509 case KVM_REG_PPC_TM_VSCR: 1510 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1511 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); 1512 else 1513 r = -ENXIO; 1514 break; 1515 case KVM_REG_PPC_TM_DSCR: 1516 *val = get_reg_val(id, vcpu->arch.dscr_tm); 1517 break; 1518 case KVM_REG_PPC_TM_TAR: 1519 *val = get_reg_val(id, vcpu->arch.tar_tm); 1520 break; 1521 #endif 1522 case KVM_REG_PPC_ARCH_COMPAT: 1523 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); 1524 break; 1525 case KVM_REG_PPC_DEC_EXPIRY: 1526 *val = get_reg_val(id, vcpu->arch.dec_expires + 1527 vcpu->arch.vcore->tb_offset); 1528 break; 1529 default: 1530 r = -EINVAL; 1531 break; 1532 } 1533 1534 return r; 1535 } 1536 1537 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, 1538 union kvmppc_one_reg *val) 1539 { 1540 int r = 0; 1541 long int i; 1542 unsigned long addr, len; 1543 1544 switch (id) { 1545 case KVM_REG_PPC_HIOR: 1546 /* Only allow this to be set to zero */ 1547 if (set_reg_val(id, *val)) 1548 r = -EINVAL; 1549 break; 1550 case KVM_REG_PPC_DABR: 1551 vcpu->arch.dabr = set_reg_val(id, *val); 1552 break; 1553 case KVM_REG_PPC_DABRX: 1554 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; 1555 break; 1556 case KVM_REG_PPC_DSCR: 1557 vcpu->arch.dscr = set_reg_val(id, *val); 1558 break; 1559 case KVM_REG_PPC_PURR: 1560 vcpu->arch.purr = set_reg_val(id, *val); 1561 break; 1562 case KVM_REG_PPC_SPURR: 1563 vcpu->arch.spurr = set_reg_val(id, *val); 1564 break; 1565 case KVM_REG_PPC_AMR: 1566 vcpu->arch.amr = set_reg_val(id, *val); 1567 break; 1568 case KVM_REG_PPC_UAMOR: 1569 vcpu->arch.uamor = set_reg_val(id, *val); 1570 break; 1571 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: 1572 i = id - KVM_REG_PPC_MMCR0; 1573 vcpu->arch.mmcr[i] = set_reg_val(id, *val); 1574 break; 1575 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: 1576 i = id - KVM_REG_PPC_PMC1; 1577 vcpu->arch.pmc[i] = set_reg_val(id, *val); 1578 break; 1579 case KVM_REG_PPC_SPMC1 ... 
KVM_REG_PPC_SPMC2: 1580 i = id - KVM_REG_PPC_SPMC1; 1581 vcpu->arch.spmc[i] = set_reg_val(id, *val); 1582 break; 1583 case KVM_REG_PPC_SIAR: 1584 vcpu->arch.siar = set_reg_val(id, *val); 1585 break; 1586 case KVM_REG_PPC_SDAR: 1587 vcpu->arch.sdar = set_reg_val(id, *val); 1588 break; 1589 case KVM_REG_PPC_SIER: 1590 vcpu->arch.sier = set_reg_val(id, *val); 1591 break; 1592 case KVM_REG_PPC_IAMR: 1593 vcpu->arch.iamr = set_reg_val(id, *val); 1594 break; 1595 case KVM_REG_PPC_PSPB: 1596 vcpu->arch.pspb = set_reg_val(id, *val); 1597 break; 1598 case KVM_REG_PPC_DPDES: 1599 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); 1600 break; 1601 case KVM_REG_PPC_VTB: 1602 vcpu->arch.vcore->vtb = set_reg_val(id, *val); 1603 break; 1604 case KVM_REG_PPC_DAWR: 1605 vcpu->arch.dawr = set_reg_val(id, *val); 1606 break; 1607 case KVM_REG_PPC_DAWRX: 1608 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; 1609 break; 1610 case KVM_REG_PPC_CIABR: 1611 vcpu->arch.ciabr = set_reg_val(id, *val); 1612 /* Don't allow setting breakpoints in hypervisor code */ 1613 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) 1614 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ 1615 break; 1616 case KVM_REG_PPC_CSIGR: 1617 vcpu->arch.csigr = set_reg_val(id, *val); 1618 break; 1619 case KVM_REG_PPC_TACR: 1620 vcpu->arch.tacr = set_reg_val(id, *val); 1621 break; 1622 case KVM_REG_PPC_TCSCR: 1623 vcpu->arch.tcscr = set_reg_val(id, *val); 1624 break; 1625 case KVM_REG_PPC_PID: 1626 vcpu->arch.pid = set_reg_val(id, *val); 1627 break; 1628 case KVM_REG_PPC_ACOP: 1629 vcpu->arch.acop = set_reg_val(id, *val); 1630 break; 1631 case KVM_REG_PPC_WORT: 1632 vcpu->arch.wort = set_reg_val(id, *val); 1633 break; 1634 case KVM_REG_PPC_TIDR: 1635 vcpu->arch.tid = set_reg_val(id, *val); 1636 break; 1637 case KVM_REG_PPC_PSSCR: 1638 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; 1639 break; 1640 case KVM_REG_PPC_VPA_ADDR: 1641 addr = set_reg_val(id, *val); 1642 r = -EINVAL; 1643 if (!addr && (vcpu->arch.slb_shadow.next_gpa || 1644 vcpu->arch.dtl.next_gpa)) 1645 break; 1646 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); 1647 break; 1648 case KVM_REG_PPC_VPA_SLB: 1649 addr = val->vpaval.addr; 1650 len = val->vpaval.length; 1651 r = -EINVAL; 1652 if (addr && !vcpu->arch.vpa.next_gpa) 1653 break; 1654 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); 1655 break; 1656 case KVM_REG_PPC_VPA_DTL: 1657 addr = val->vpaval.addr; 1658 len = val->vpaval.length; 1659 r = -EINVAL; 1660 if (addr && (len < sizeof(struct dtl_entry) || 1661 !vcpu->arch.vpa.next_gpa)) 1662 break; 1663 len -= len % sizeof(struct dtl_entry); 1664 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); 1665 break; 1666 case KVM_REG_PPC_TB_OFFSET: 1667 /* 1668 * POWER9 DD1 has an erratum where writing TBU40 causes 1669 * the timebase to lose ticks. So we don't let the 1670 * timebase offset be changed on P9 DD1. (It is 1671 * initialized to zero.) 
1672 */ 1673 if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 1674 break; 1675 /* round up to multiple of 2^24 */ 1676 vcpu->arch.vcore->tb_offset = 1677 ALIGN(set_reg_val(id, *val), 1UL << 24); 1678 break; 1679 case KVM_REG_PPC_LPCR: 1680 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); 1681 break; 1682 case KVM_REG_PPC_LPCR_64: 1683 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); 1684 break; 1685 case KVM_REG_PPC_PPR: 1686 vcpu->arch.ppr = set_reg_val(id, *val); 1687 break; 1688 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1689 case KVM_REG_PPC_TFHAR: 1690 vcpu->arch.tfhar = set_reg_val(id, *val); 1691 break; 1692 case KVM_REG_PPC_TFIAR: 1693 vcpu->arch.tfiar = set_reg_val(id, *val); 1694 break; 1695 case KVM_REG_PPC_TEXASR: 1696 vcpu->arch.texasr = set_reg_val(id, *val); 1697 break; 1698 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: 1699 i = id - KVM_REG_PPC_TM_GPR0; 1700 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); 1701 break; 1702 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: 1703 { 1704 int j; 1705 i = id - KVM_REG_PPC_TM_VSR0; 1706 if (i < 32) 1707 for (j = 0; j < TS_FPRWIDTH; j++) 1708 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; 1709 else 1710 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1711 vcpu->arch.vr_tm.vr[i-32] = val->vval; 1712 else 1713 r = -ENXIO; 1714 break; 1715 } 1716 case KVM_REG_PPC_TM_CR: 1717 vcpu->arch.cr_tm = set_reg_val(id, *val); 1718 break; 1719 case KVM_REG_PPC_TM_XER: 1720 vcpu->arch.xer_tm = set_reg_val(id, *val); 1721 break; 1722 case KVM_REG_PPC_TM_LR: 1723 vcpu->arch.lr_tm = set_reg_val(id, *val); 1724 break; 1725 case KVM_REG_PPC_TM_CTR: 1726 vcpu->arch.ctr_tm = set_reg_val(id, *val); 1727 break; 1728 case KVM_REG_PPC_TM_FPSCR: 1729 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); 1730 break; 1731 case KVM_REG_PPC_TM_AMR: 1732 vcpu->arch.amr_tm = set_reg_val(id, *val); 1733 break; 1734 case KVM_REG_PPC_TM_PPR: 1735 vcpu->arch.ppr_tm = set_reg_val(id, *val); 1736 break; 1737 case KVM_REG_PPC_TM_VRSAVE: 1738 vcpu->arch.vrsave_tm = set_reg_val(id, *val); 1739 break; 1740 case KVM_REG_PPC_TM_VSCR: 1741 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 1742 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); 1743 else 1744 r = - ENXIO; 1745 break; 1746 case KVM_REG_PPC_TM_DSCR: 1747 vcpu->arch.dscr_tm = set_reg_val(id, *val); 1748 break; 1749 case KVM_REG_PPC_TM_TAR: 1750 vcpu->arch.tar_tm = set_reg_val(id, *val); 1751 break; 1752 #endif 1753 case KVM_REG_PPC_ARCH_COMPAT: 1754 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); 1755 break; 1756 case KVM_REG_PPC_DEC_EXPIRY: 1757 vcpu->arch.dec_expires = set_reg_val(id, *val) - 1758 vcpu->arch.vcore->tb_offset; 1759 break; 1760 default: 1761 r = -EINVAL; 1762 break; 1763 } 1764 1765 return r; 1766 } 1767 1768 /* 1769 * On POWER9, threads are independent and can be in different partitions. 1770 * Therefore we consider each thread to be a subcore. 1771 * There is a restriction that all threads have to be in the same 1772 * MMU mode (radix or HPT), unfortunately, but since we only support 1773 * HPT guests on a HPT host so far, that isn't an impediment yet. 
1774 */ 1775 static int threads_per_vcore(struct kvm *kvm) 1776 { 1777 if (kvm->arch.threads_indep) 1778 return 1; 1779 return threads_per_subcore; 1780 } 1781 1782 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core) 1783 { 1784 struct kvmppc_vcore *vcore; 1785 1786 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); 1787 1788 if (vcore == NULL) 1789 return NULL; 1790 1791 spin_lock_init(&vcore->lock); 1792 spin_lock_init(&vcore->stoltb_lock); 1793 init_swait_queue_head(&vcore->wq); 1794 vcore->preempt_tb = TB_NIL; 1795 vcore->lpcr = kvm->arch.lpcr; 1796 vcore->first_vcpuid = core * kvm->arch.smt_mode; 1797 vcore->kvm = kvm; 1798 INIT_LIST_HEAD(&vcore->preempt_list); 1799 1800 return vcore; 1801 } 1802 1803 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1804 static struct debugfs_timings_element { 1805 const char *name; 1806 size_t offset; 1807 } timings[] = { 1808 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)}, 1809 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)}, 1810 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)}, 1811 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)}, 1812 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)}, 1813 }; 1814 1815 #define N_TIMINGS (ARRAY_SIZE(timings)) 1816 1817 struct debugfs_timings_state { 1818 struct kvm_vcpu *vcpu; 1819 unsigned int buflen; 1820 char buf[N_TIMINGS * 100]; 1821 }; 1822 1823 static int debugfs_timings_open(struct inode *inode, struct file *file) 1824 { 1825 struct kvm_vcpu *vcpu = inode->i_private; 1826 struct debugfs_timings_state *p; 1827 1828 p = kzalloc(sizeof(*p), GFP_KERNEL); 1829 if (!p) 1830 return -ENOMEM; 1831 1832 kvm_get_kvm(vcpu->kvm); 1833 p->vcpu = vcpu; 1834 file->private_data = p; 1835 1836 return nonseekable_open(inode, file); 1837 } 1838 1839 static int debugfs_timings_release(struct inode *inode, struct file *file) 1840 { 1841 struct debugfs_timings_state *p = file->private_data; 1842 1843 kvm_put_kvm(p->vcpu->kvm); 1844 kfree(p); 1845 return 0; 1846 } 1847 1848 static ssize_t debugfs_timings_read(struct file *file, char __user *buf, 1849 size_t len, loff_t *ppos) 1850 { 1851 struct debugfs_timings_state *p = file->private_data; 1852 struct kvm_vcpu *vcpu = p->vcpu; 1853 char *s, *buf_end; 1854 struct kvmhv_tb_accumulator tb; 1855 u64 count; 1856 loff_t pos; 1857 ssize_t n; 1858 int i, loops; 1859 bool ok; 1860 1861 if (!p->buflen) { 1862 s = p->buf; 1863 buf_end = s + sizeof(p->buf); 1864 for (i = 0; i < N_TIMINGS; ++i) { 1865 struct kvmhv_tb_accumulator *acc; 1866 1867 acc = (struct kvmhv_tb_accumulator *) 1868 ((unsigned long)vcpu + timings[i].offset); 1869 ok = false; 1870 for (loops = 0; loops < 1000; ++loops) { 1871 count = acc->seqcount; 1872 if (!(count & 1)) { 1873 smp_rmb(); 1874 tb = *acc; 1875 smp_rmb(); 1876 if (count == acc->seqcount) { 1877 ok = true; 1878 break; 1879 } 1880 } 1881 udelay(1); 1882 } 1883 if (!ok) 1884 snprintf(s, buf_end - s, "%s: stuck\n", 1885 timings[i].name); 1886 else 1887 snprintf(s, buf_end - s, 1888 "%s: %llu %llu %llu %llu\n", 1889 timings[i].name, count / 2, 1890 tb_to_ns(tb.tb_total), 1891 tb_to_ns(tb.tb_min), 1892 tb_to_ns(tb.tb_max)); 1893 s += strlen(s); 1894 } 1895 p->buflen = s - p->buf; 1896 } 1897 1898 pos = *ppos; 1899 if (pos >= p->buflen) 1900 return 0; 1901 if (len > p->buflen - pos) 1902 len = p->buflen - pos; 1903 n = copy_to_user(buf, p->buf + pos, len); 1904 if (n) { 1905 if (n == len) 1906 return -EFAULT; 1907 len -= n; 1908 } 1909 *ppos = pos + len; 1910 return len; 1911 } 1912 1913 static ssize_t 
debugfs_timings_write(struct file *file, const char __user *buf, 1914 size_t len, loff_t *ppos) 1915 { 1916 return -EACCES; 1917 } 1918 1919 static const struct file_operations debugfs_timings_ops = { 1920 .owner = THIS_MODULE, 1921 .open = debugfs_timings_open, 1922 .release = debugfs_timings_release, 1923 .read = debugfs_timings_read, 1924 .write = debugfs_timings_write, 1925 .llseek = generic_file_llseek, 1926 }; 1927 1928 /* Create a debugfs directory for the vcpu */ 1929 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) 1930 { 1931 char buf[16]; 1932 struct kvm *kvm = vcpu->kvm; 1933 1934 snprintf(buf, sizeof(buf), "vcpu%u", id); 1935 if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) 1936 return; 1937 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); 1938 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) 1939 return; 1940 vcpu->arch.debugfs_timings = 1941 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, 1942 vcpu, &debugfs_timings_ops); 1943 } 1944 1945 #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ 1946 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) 1947 { 1948 } 1949 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ 1950 1951 static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, 1952 unsigned int id) 1953 { 1954 struct kvm_vcpu *vcpu; 1955 int err; 1956 int core; 1957 struct kvmppc_vcore *vcore; 1958 1959 err = -ENOMEM; 1960 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 1961 if (!vcpu) 1962 goto out; 1963 1964 err = kvm_vcpu_init(vcpu, kvm, id); 1965 if (err) 1966 goto free_vcpu; 1967 1968 vcpu->arch.shared = &vcpu->arch.shregs; 1969 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1970 /* 1971 * The shared struct is never shared on HV, 1972 * so we can always use host endianness 1973 */ 1974 #ifdef __BIG_ENDIAN__ 1975 vcpu->arch.shared_big_endian = true; 1976 #else 1977 vcpu->arch.shared_big_endian = false; 1978 #endif 1979 #endif 1980 vcpu->arch.mmcr[0] = MMCR0_FC; 1981 vcpu->arch.ctrl = CTRL_RUNLATCH; 1982 /* default to host PVR, since we can't spoof it */ 1983 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); 1984 spin_lock_init(&vcpu->arch.vpa_update_lock); 1985 spin_lock_init(&vcpu->arch.tbacct_lock); 1986 vcpu->arch.busy_preempt = TB_NIL; 1987 vcpu->arch.intr_msr = MSR_SF | MSR_ME; 1988 1989 /* 1990 * Set the default HFSCR for the guest from the host value. 1991 * This value is only used on POWER9. 1992 * On POWER9 DD1, TM doesn't work, so we make sure to 1993 * prevent the guest from using it. 1994 * On POWER9, we want to virtualize the doorbell facility, so we 1995 * turn off the HFSCR bit, which causes those instructions to trap. 
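 * (With HFSCR_MSGP clear, a guest msgsndp/msgclrp traps to the hypervisor as a
 * hypervisor facility unavailable interrupt, and KVM emulates the doorbell in
 * software rather than letting the hardware deliver it directly.)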
1996 */ 1997 vcpu->arch.hfscr = mfspr(SPRN_HFSCR); 1998 if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) 1999 vcpu->arch.hfscr |= HFSCR_TM; 2000 else if (!cpu_has_feature(CPU_FTR_TM_COMP)) 2001 vcpu->arch.hfscr &= ~HFSCR_TM; 2002 if (cpu_has_feature(CPU_FTR_ARCH_300)) 2003 vcpu->arch.hfscr &= ~HFSCR_MSGP; 2004 2005 kvmppc_mmu_book3s_hv_init(vcpu); 2006 2007 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; 2008 2009 init_waitqueue_head(&vcpu->arch.cpu_run); 2010 2011 mutex_lock(&kvm->lock); 2012 vcore = NULL; 2013 err = -EINVAL; 2014 core = id / kvm->arch.smt_mode; 2015 if (core < KVM_MAX_VCORES) { 2016 vcore = kvm->arch.vcores[core]; 2017 if (!vcore) { 2018 err = -ENOMEM; 2019 vcore = kvmppc_vcore_create(kvm, core); 2020 kvm->arch.vcores[core] = vcore; 2021 kvm->arch.online_vcores++; 2022 } 2023 } 2024 mutex_unlock(&kvm->lock); 2025 2026 if (!vcore) 2027 goto free_vcpu; 2028 2029 spin_lock(&vcore->lock); 2030 ++vcore->num_threads; 2031 spin_unlock(&vcore->lock); 2032 vcpu->arch.vcore = vcore; 2033 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; 2034 vcpu->arch.thread_cpu = -1; 2035 vcpu->arch.prev_cpu = -1; 2036 2037 vcpu->arch.cpu_type = KVM_CPU_3S_64; 2038 kvmppc_sanity_check(vcpu); 2039 2040 debugfs_vcpu_init(vcpu, id); 2041 2042 return vcpu; 2043 2044 free_vcpu: 2045 kmem_cache_free(kvm_vcpu_cache, vcpu); 2046 out: 2047 return ERR_PTR(err); 2048 } 2049 2050 static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode, 2051 unsigned long flags) 2052 { 2053 int err; 2054 int esmt = 0; 2055 2056 if (flags) 2057 return -EINVAL; 2058 if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode)) 2059 return -EINVAL; 2060 if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 2061 /* 2062 * On POWER8 (or POWER7), the threading mode is "strict", 2063 * so we pack smt_mode vcpus per vcore. 2064 */ 2065 if (smt_mode > threads_per_subcore) 2066 return -EINVAL; 2067 } else { 2068 /* 2069 * On POWER9, the threading mode is "loose", 2070 * so each vcpu gets its own vcore. 
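 * (For example, a request for smt_mode = 4 on POWER9 is recorded below as
 * emul_smt_mode = 4 while kvm->arch.smt_mode itself stays 1, so each vcpu
 * still gets its own vcore.)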
2071 */ 2072 esmt = smt_mode; 2073 smt_mode = 1; 2074 } 2075 mutex_lock(&kvm->lock); 2076 err = -EBUSY; 2077 if (!kvm->arch.online_vcores) { 2078 kvm->arch.smt_mode = smt_mode; 2079 kvm->arch.emul_smt_mode = esmt; 2080 err = 0; 2081 } 2082 mutex_unlock(&kvm->lock); 2083 2084 return err; 2085 } 2086 2087 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) 2088 { 2089 if (vpa->pinned_addr) 2090 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, 2091 vpa->dirty); 2092 } 2093 2094 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) 2095 { 2096 spin_lock(&vcpu->arch.vpa_update_lock); 2097 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); 2098 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); 2099 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); 2100 spin_unlock(&vcpu->arch.vpa_update_lock); 2101 kvm_vcpu_uninit(vcpu); 2102 kmem_cache_free(kvm_vcpu_cache, vcpu); 2103 } 2104 2105 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) 2106 { 2107 /* Indicate we want to get back into the guest */ 2108 return 1; 2109 } 2110 2111 static void kvmppc_set_timer(struct kvm_vcpu *vcpu) 2112 { 2113 unsigned long dec_nsec, now; 2114 2115 now = get_tb(); 2116 if (now > vcpu->arch.dec_expires) { 2117 /* decrementer has already gone negative */ 2118 kvmppc_core_queue_dec(vcpu); 2119 kvmppc_core_prepare_to_enter(vcpu); 2120 return; 2121 } 2122 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC 2123 / tb_ticks_per_sec; 2124 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); 2125 vcpu->arch.timer_running = 1; 2126 } 2127 2128 static void kvmppc_end_cede(struct kvm_vcpu *vcpu) 2129 { 2130 vcpu->arch.ceded = 0; 2131 if (vcpu->arch.timer_running) { 2132 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 2133 vcpu->arch.timer_running = 0; 2134 } 2135 } 2136 2137 extern int __kvmppc_vcore_entry(void); 2138 2139 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, 2140 struct kvm_vcpu *vcpu) 2141 { 2142 u64 now; 2143 2144 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) 2145 return; 2146 spin_lock_irq(&vcpu->arch.tbacct_lock); 2147 now = mftb(); 2148 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - 2149 vcpu->arch.stolen_logged; 2150 vcpu->arch.busy_preempt = now; 2151 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; 2152 spin_unlock_irq(&vcpu->arch.tbacct_lock); 2153 --vc->n_runnable; 2154 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); 2155 } 2156 2157 static int kvmppc_grab_hwthread(int cpu) 2158 { 2159 struct paca_struct *tpaca; 2160 long timeout = 10000; 2161 2162 tpaca = paca_ptrs[cpu]; 2163 2164 /* Ensure the thread won't go into the kernel if it wakes */ 2165 tpaca->kvm_hstate.kvm_vcpu = NULL; 2166 tpaca->kvm_hstate.kvm_vcore = NULL; 2167 tpaca->kvm_hstate.napping = 0; 2168 smp_wmb(); 2169 tpaca->kvm_hstate.hwthread_req = 1; 2170 2171 /* 2172 * If the thread is already executing in the kernel (e.g. handling 2173 * a stray interrupt), wait for it to get back to nap mode. 2174 * The smp_mb() is to ensure that our setting of hwthread_req 2175 * is visible before we look at hwthread_state, so if this 2176 * races with the code at system_reset_pSeries and the thread 2177 * misses our setting of hwthread_req, we are sure to see its 2178 * setting of hwthread_state, and vice versa. 
2179 */ 2180 smp_mb(); 2181 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { 2182 if (--timeout <= 0) { 2183 pr_err("KVM: couldn't grab cpu %d\n", cpu); 2184 return -EBUSY; 2185 } 2186 udelay(1); 2187 } 2188 return 0; 2189 } 2190 2191 static void kvmppc_release_hwthread(int cpu) 2192 { 2193 struct paca_struct *tpaca; 2194 2195 tpaca = paca_ptrs[cpu]; 2196 tpaca->kvm_hstate.hwthread_req = 0; 2197 tpaca->kvm_hstate.kvm_vcpu = NULL; 2198 tpaca->kvm_hstate.kvm_vcore = NULL; 2199 tpaca->kvm_hstate.kvm_split_mode = NULL; 2200 } 2201 2202 static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) 2203 { 2204 int i; 2205 2206 cpu = cpu_first_thread_sibling(cpu); 2207 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); 2208 /* 2209 * Make sure setting of bit in need_tlb_flush precedes 2210 * testing of cpu_in_guest bits. The matching barrier on 2211 * the other side is the first smp_mb() in kvmppc_run_core(). 2212 */ 2213 smp_mb(); 2214 for (i = 0; i < threads_per_core; ++i) 2215 if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest)) 2216 smp_call_function_single(cpu + i, do_nothing, NULL, 1); 2217 } 2218 2219 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) 2220 { 2221 struct kvm *kvm = vcpu->kvm; 2222 2223 /* 2224 * With radix, the guest can do TLB invalidations itself, 2225 * and it could choose to use the local form (tlbiel) if 2226 * it is invalidating a translation that has only ever been 2227 * used on one vcpu. However, that doesn't mean it has 2228 * only ever been used on one physical cpu, since vcpus 2229 * can move around between pcpus. To cope with this, when 2230 * a vcpu moves from one pcpu to another, we need to tell 2231 * any vcpus running on the same core as this vcpu previously 2232 * ran to flush the TLB. The TLB is shared between threads, 2233 * so we use a single bit in .need_tlb_flush for all 4 threads. 2234 */ 2235 if (vcpu->arch.prev_cpu != pcpu) { 2236 if (vcpu->arch.prev_cpu >= 0 && 2237 cpu_first_thread_sibling(vcpu->arch.prev_cpu) != 2238 cpu_first_thread_sibling(pcpu)) 2239 radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu); 2240 vcpu->arch.prev_cpu = pcpu; 2241 } 2242 } 2243 2244 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) 2245 { 2246 int cpu; 2247 struct paca_struct *tpaca; 2248 struct kvm *kvm = vc->kvm; 2249 2250 cpu = vc->pcpu; 2251 if (vcpu) { 2252 if (vcpu->arch.timer_running) { 2253 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 2254 vcpu->arch.timer_running = 0; 2255 } 2256 cpu += vcpu->arch.ptid; 2257 vcpu->cpu = vc->pcpu; 2258 vcpu->arch.thread_cpu = cpu; 2259 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest); 2260 } 2261 tpaca = paca_ptrs[cpu]; 2262 tpaca->kvm_hstate.kvm_vcpu = vcpu; 2263 tpaca->kvm_hstate.ptid = cpu - vc->pcpu; 2264 tpaca->kvm_hstate.fake_suspend = 0; 2265 /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */ 2266 smp_wmb(); 2267 tpaca->kvm_hstate.kvm_vcore = vc; 2268 if (cpu != smp_processor_id()) 2269 kvmppc_ipi_thread(cpu); 2270 } 2271 2272 static void kvmppc_wait_for_nap(int n_threads) 2273 { 2274 int cpu = smp_processor_id(); 2275 int i, loops; 2276 2277 if (n_threads <= 1) 2278 return; 2279 for (loops = 0; loops < 1000000; ++loops) { 2280 /* 2281 * Check if all threads are finished. 2282 * We set the vcore pointer when starting a thread 2283 * and the thread clears it when finished, so we look 2284 * for any threads that still have a non-NULL vcore ptr. 
2285 */ 2286 for (i = 1; i < n_threads; ++i) 2287 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) 2288 break; 2289 if (i == n_threads) { 2290 HMT_medium(); 2291 return; 2292 } 2293 HMT_low(); 2294 } 2295 HMT_medium(); 2296 for (i = 1; i < n_threads; ++i) 2297 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) 2298 pr_err("KVM: CPU %d seems to be stuck\n", cpu + i); 2299 } 2300 2301 /* 2302 * Check that we are on thread 0 and that any other threads in 2303 * this core are off-line. Then grab the threads so they can't 2304 * enter the kernel. 2305 */ 2306 static int on_primary_thread(void) 2307 { 2308 int cpu = smp_processor_id(); 2309 int thr; 2310 2311 /* Are we on a primary subcore? */ 2312 if (cpu_thread_in_subcore(cpu)) 2313 return 0; 2314 2315 thr = 0; 2316 while (++thr < threads_per_subcore) 2317 if (cpu_online(cpu + thr)) 2318 return 0; 2319 2320 /* Grab all hw threads so they can't go into the kernel */ 2321 for (thr = 1; thr < threads_per_subcore; ++thr) { 2322 if (kvmppc_grab_hwthread(cpu + thr)) { 2323 /* Couldn't grab one; let the others go */ 2324 do { 2325 kvmppc_release_hwthread(cpu + thr); 2326 } while (--thr > 0); 2327 return 0; 2328 } 2329 } 2330 return 1; 2331 } 2332 2333 /* 2334 * A list of virtual cores for each physical CPU. 2335 * These are vcores that could run but their runner VCPU tasks are 2336 * (or may be) preempted. 2337 */ 2338 struct preempted_vcore_list { 2339 struct list_head list; 2340 spinlock_t lock; 2341 }; 2342 2343 static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores); 2344 2345 static void init_vcore_lists(void) 2346 { 2347 int cpu; 2348 2349 for_each_possible_cpu(cpu) { 2350 struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu); 2351 spin_lock_init(&lp->lock); 2352 INIT_LIST_HEAD(&lp->list); 2353 } 2354 } 2355 2356 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc) 2357 { 2358 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); 2359 2360 vc->vcore_state = VCORE_PREEMPT; 2361 vc->pcpu = smp_processor_id(); 2362 if (vc->num_threads < threads_per_vcore(vc->kvm)) { 2363 spin_lock(&lp->lock); 2364 list_add_tail(&vc->preempt_list, &lp->list); 2365 spin_unlock(&lp->lock); 2366 } 2367 2368 /* Start accumulating stolen time */ 2369 kvmppc_core_start_stolen(vc); 2370 } 2371 2372 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc) 2373 { 2374 struct preempted_vcore_list *lp; 2375 2376 kvmppc_core_end_stolen(vc); 2377 if (!list_empty(&vc->preempt_list)) { 2378 lp = &per_cpu(preempted_vcores, vc->pcpu); 2379 spin_lock(&lp->lock); 2380 list_del_init(&vc->preempt_list); 2381 spin_unlock(&lp->lock); 2382 } 2383 vc->vcore_state = VCORE_INACTIVE; 2384 } 2385 2386 /* 2387 * This stores information about the virtual cores currently 2388 * assigned to a physical core. 2389 */ 2390 struct core_info { 2391 int n_subcores; 2392 int max_subcore_threads; 2393 int total_threads; 2394 int subcore_threads[MAX_SUBCORES]; 2395 struct kvmppc_vcore *vc[MAX_SUBCORES]; 2396 }; 2397 2398 /* 2399 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7 2400 * respectively in 2-way micro-threading (split-core) mode on POWER8. 
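 * In 4-way mode the same table gives each of subcores 0-3 a pair of threads,
 * namely 0-1, 4-5, 2-3 and 6-7, so subcore 1 starts at thread 4 whichever
 * split depth is in use.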
2401 */ 2402 static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 }; 2403 2404 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) 2405 { 2406 memset(cip, 0, sizeof(*cip)); 2407 cip->n_subcores = 1; 2408 cip->max_subcore_threads = vc->num_threads; 2409 cip->total_threads = vc->num_threads; 2410 cip->subcore_threads[0] = vc->num_threads; 2411 cip->vc[0] = vc; 2412 } 2413 2414 static bool subcore_config_ok(int n_subcores, int n_threads) 2415 { 2416 /* 2417 * POWER9 "SMT4" cores are permanently in what is effectively a 4-way 2418 * split-core mode, with one thread per subcore. 2419 */ 2420 if (cpu_has_feature(CPU_FTR_ARCH_300)) 2421 return n_subcores <= 4 && n_threads == 1; 2422 2423 /* On POWER8, can only dynamically split if unsplit to begin with */ 2424 if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS) 2425 return false; 2426 if (n_subcores > MAX_SUBCORES) 2427 return false; 2428 if (n_subcores > 1) { 2429 if (!(dynamic_mt_modes & 2)) 2430 n_subcores = 4; 2431 if (n_subcores > 2 && !(dynamic_mt_modes & 4)) 2432 return false; 2433 } 2434 2435 return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS; 2436 } 2437 2438 static void init_vcore_to_run(struct kvmppc_vcore *vc) 2439 { 2440 vc->entry_exit_map = 0; 2441 vc->in_guest = 0; 2442 vc->napping_threads = 0; 2443 vc->conferring_threads = 0; 2444 } 2445 2446 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) 2447 { 2448 int n_threads = vc->num_threads; 2449 int sub; 2450 2451 if (!cpu_has_feature(CPU_FTR_ARCH_207S)) 2452 return false; 2453 2454 /* Some POWER9 chips require all threads to be in the same MMU mode */ 2455 if (no_mixing_hpt_and_radix && 2456 kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm)) 2457 return false; 2458 2459 if (n_threads < cip->max_subcore_threads) 2460 n_threads = cip->max_subcore_threads; 2461 if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) 2462 return false; 2463 cip->max_subcore_threads = n_threads; 2464 2465 sub = cip->n_subcores; 2466 ++cip->n_subcores; 2467 cip->total_threads += vc->num_threads; 2468 cip->subcore_threads[sub] = vc->num_threads; 2469 cip->vc[sub] = vc; 2470 init_vcore_to_run(vc); 2471 list_del_init(&vc->preempt_list); 2472 2473 return true; 2474 } 2475 2476 /* 2477 * Work out whether it is possible to piggyback the execution of 2478 * vcore *pvc onto the execution of the other vcores described in *cip. 
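 * Callers (see collect_piggybacks() below) try this for each preempted vcore
 * on the local CPU until roughly target_threads worth of threads have been
 * gathered onto the core.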
2479 */ 2480 static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip, 2481 int target_threads) 2482 { 2483 if (cip->total_threads + pvc->num_threads > target_threads) 2484 return false; 2485 2486 return can_dynamic_split(pvc, cip); 2487 } 2488 2489 static void prepare_threads(struct kvmppc_vcore *vc) 2490 { 2491 int i; 2492 struct kvm_vcpu *vcpu; 2493 2494 for_each_runnable_thread(i, vcpu, vc) { 2495 if (signal_pending(vcpu->arch.run_task)) 2496 vcpu->arch.ret = -EINTR; 2497 else if (vcpu->arch.vpa.update_pending || 2498 vcpu->arch.slb_shadow.update_pending || 2499 vcpu->arch.dtl.update_pending) 2500 vcpu->arch.ret = RESUME_GUEST; 2501 else 2502 continue; 2503 kvmppc_remove_runnable(vc, vcpu); 2504 wake_up(&vcpu->arch.cpu_run); 2505 } 2506 } 2507 2508 static void collect_piggybacks(struct core_info *cip, int target_threads) 2509 { 2510 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); 2511 struct kvmppc_vcore *pvc, *vcnext; 2512 2513 spin_lock(&lp->lock); 2514 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { 2515 if (!spin_trylock(&pvc->lock)) 2516 continue; 2517 prepare_threads(pvc); 2518 if (!pvc->n_runnable) { 2519 list_del_init(&pvc->preempt_list); 2520 if (pvc->runner == NULL) { 2521 pvc->vcore_state = VCORE_INACTIVE; 2522 kvmppc_core_end_stolen(pvc); 2523 } 2524 spin_unlock(&pvc->lock); 2525 continue; 2526 } 2527 if (!can_piggyback(pvc, cip, target_threads)) { 2528 spin_unlock(&pvc->lock); 2529 continue; 2530 } 2531 kvmppc_core_end_stolen(pvc); 2532 pvc->vcore_state = VCORE_PIGGYBACK; 2533 if (cip->total_threads >= target_threads) 2534 break; 2535 } 2536 spin_unlock(&lp->lock); 2537 } 2538 2539 static bool recheck_signals(struct core_info *cip) 2540 { 2541 int sub, i; 2542 struct kvm_vcpu *vcpu; 2543 2544 for (sub = 0; sub < cip->n_subcores; ++sub) 2545 for_each_runnable_thread(i, vcpu, cip->vc[sub]) 2546 if (signal_pending(vcpu->arch.run_task)) 2547 return true; 2548 return false; 2549 } 2550 2551 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) 2552 { 2553 int still_running = 0, i; 2554 u64 now; 2555 long ret; 2556 struct kvm_vcpu *vcpu; 2557 2558 spin_lock(&vc->lock); 2559 now = get_tb(); 2560 for_each_runnable_thread(i, vcpu, vc) { 2561 /* cancel pending dec exception if dec is positive */ 2562 if (now < vcpu->arch.dec_expires && 2563 kvmppc_core_pending_dec(vcpu)) 2564 kvmppc_core_dequeue_dec(vcpu); 2565 2566 trace_kvm_guest_exit(vcpu); 2567 2568 ret = RESUME_GUEST; 2569 if (vcpu->arch.trap) 2570 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, 2571 vcpu->arch.run_task); 2572 2573 vcpu->arch.ret = ret; 2574 vcpu->arch.trap = 0; 2575 2576 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { 2577 if (vcpu->arch.pending_exceptions) 2578 kvmppc_core_prepare_to_enter(vcpu); 2579 if (vcpu->arch.ceded) 2580 kvmppc_set_timer(vcpu); 2581 else 2582 ++still_running; 2583 } else { 2584 kvmppc_remove_runnable(vc, vcpu); 2585 wake_up(&vcpu->arch.cpu_run); 2586 } 2587 } 2588 if (!is_master) { 2589 if (still_running > 0) { 2590 kvmppc_vcore_preempt(vc); 2591 } else if (vc->runner) { 2592 vc->vcore_state = VCORE_PREEMPT; 2593 kvmppc_core_start_stolen(vc); 2594 } else { 2595 vc->vcore_state = VCORE_INACTIVE; 2596 } 2597 if (vc->n_runnable > 0 && vc->runner == NULL) { 2598 /* make sure there's a candidate runner awake */ 2599 i = -1; 2600 vcpu = next_runnable_thread(vc, &i); 2601 wake_up(&vcpu->arch.cpu_run); 2602 } 2603 } 2604 spin_unlock(&vc->lock); 2605 } 2606 2607 /* 2608 * Clear core from the list of active host cores as 
we are about to 2609 * enter the guest. Only do this if it is the primary thread of the 2610 * core (not if a subcore) that is entering the guest. 2611 */ 2612 static inline int kvmppc_clear_host_core(unsigned int cpu) 2613 { 2614 int core; 2615 2616 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) 2617 return 0; 2618 /* 2619 * Memory barrier can be omitted here as we will do a smp_wmb() 2620 * later in kvmppc_start_thread and we need ensure that state is 2621 * visible to other CPUs only after we enter guest. 2622 */ 2623 core = cpu >> threads_shift; 2624 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; 2625 return 0; 2626 } 2627 2628 /* 2629 * Advertise this core as an active host core since we exited the guest 2630 * Only need to do this if it is the primary thread of the core that is 2631 * exiting. 2632 */ 2633 static inline int kvmppc_set_host_core(unsigned int cpu) 2634 { 2635 int core; 2636 2637 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) 2638 return 0; 2639 2640 /* 2641 * Memory barrier can be omitted here because we do a spin_unlock 2642 * immediately after this which provides the memory barrier. 2643 */ 2644 core = cpu >> threads_shift; 2645 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; 2646 return 0; 2647 } 2648 2649 static void set_irq_happened(int trap) 2650 { 2651 switch (trap) { 2652 case BOOK3S_INTERRUPT_EXTERNAL: 2653 local_paca->irq_happened |= PACA_IRQ_EE; 2654 break; 2655 case BOOK3S_INTERRUPT_H_DOORBELL: 2656 local_paca->irq_happened |= PACA_IRQ_DBELL; 2657 break; 2658 case BOOK3S_INTERRUPT_HMI: 2659 local_paca->irq_happened |= PACA_IRQ_HMI; 2660 break; 2661 case BOOK3S_INTERRUPT_SYSTEM_RESET: 2662 replay_system_reset(); 2663 break; 2664 } 2665 } 2666 2667 /* 2668 * Run a set of guest threads on a physical core. 2669 * Called with vc->lock held. 2670 */ 2671 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) 2672 { 2673 struct kvm_vcpu *vcpu; 2674 int i; 2675 int srcu_idx; 2676 struct core_info core_info; 2677 struct kvmppc_vcore *pvc; 2678 struct kvm_split_mode split_info, *sip; 2679 int split, subcore_size, active; 2680 int sub; 2681 bool thr0_done; 2682 unsigned long cmd_bit, stat_bit; 2683 int pcpu, thr; 2684 int target_threads; 2685 int controlled_threads; 2686 int trap; 2687 bool is_power8; 2688 bool hpt_on_radix; 2689 2690 /* 2691 * Remove from the list any threads that have a signal pending 2692 * or need a VPA update done 2693 */ 2694 prepare_threads(vc); 2695 2696 /* if the runner is no longer runnable, let the caller pick a new one */ 2697 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) 2698 return; 2699 2700 /* 2701 * Initialize *vc. 2702 */ 2703 init_vcore_to_run(vc); 2704 vc->preempt_tb = TB_NIL; 2705 2706 /* 2707 * Number of threads that we will be controlling: the same as 2708 * the number of threads per subcore, except on POWER9, 2709 * where it's 1 because the threads are (mostly) independent. 2710 */ 2711 controlled_threads = threads_per_vcore(vc->kvm); 2712 2713 /* 2714 * Make sure we are running on primary threads, and that secondary 2715 * threads are offline. Also check if the number of threads in this 2716 * guest are greater than the current system threads per guest. 2717 * On POWER9, we need to be not in independent-threads mode if 2718 * this is a HPT guest on a radix host machine where the 2719 * CPU threads may not be in different MMU modes. 
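 * If any of these checks fails, the runnable vcpus are simply errored out
 * with -EBUSY below rather than being run.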
2720 */ 2721 hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() && 2722 !kvm_is_radix(vc->kvm); 2723 if (((controlled_threads > 1) && 2724 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) || 2725 (hpt_on_radix && vc->kvm->arch.threads_indep)) { 2726 for_each_runnable_thread(i, vcpu, vc) { 2727 vcpu->arch.ret = -EBUSY; 2728 kvmppc_remove_runnable(vc, vcpu); 2729 wake_up(&vcpu->arch.cpu_run); 2730 } 2731 goto out; 2732 } 2733 2734 /* 2735 * See if we could run any other vcores on the physical core 2736 * along with this one. 2737 */ 2738 init_core_info(&core_info, vc); 2739 pcpu = smp_processor_id(); 2740 target_threads = controlled_threads; 2741 if (target_smt_mode && target_smt_mode < target_threads) 2742 target_threads = target_smt_mode; 2743 if (vc->num_threads < target_threads) 2744 collect_piggybacks(&core_info, target_threads); 2745 2746 /* 2747 * On radix, arrange for TLB flushing if necessary. 2748 * This has to be done before disabling interrupts since 2749 * it uses smp_call_function(). 2750 */ 2751 pcpu = smp_processor_id(); 2752 if (kvm_is_radix(vc->kvm)) { 2753 for (sub = 0; sub < core_info.n_subcores; ++sub) 2754 for_each_runnable_thread(i, vcpu, core_info.vc[sub]) 2755 kvmppc_prepare_radix_vcpu(vcpu, pcpu); 2756 } 2757 2758 /* 2759 * Hard-disable interrupts, and check resched flag and signals. 2760 * If we need to reschedule or deliver a signal, clean up 2761 * and return without going into the guest(s). 2762 * If the mmu_ready flag has been cleared, don't go into the 2763 * guest because that means a HPT resize operation is in progress. 2764 */ 2765 local_irq_disable(); 2766 hard_irq_disable(); 2767 if (lazy_irq_pending() || need_resched() || 2768 recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) { 2769 local_irq_enable(); 2770 vc->vcore_state = VCORE_INACTIVE; 2771 /* Unlock all except the primary vcore */ 2772 for (sub = 1; sub < core_info.n_subcores; ++sub) { 2773 pvc = core_info.vc[sub]; 2774 /* Put back on to the preempted vcores list */ 2775 kvmppc_vcore_preempt(pvc); 2776 spin_unlock(&pvc->lock); 2777 } 2778 for (i = 0; i < controlled_threads; ++i) 2779 kvmppc_release_hwthread(pcpu + i); 2780 return; 2781 } 2782 2783 kvmppc_clear_host_core(pcpu); 2784 2785 /* Decide on micro-threading (split-core) mode */ 2786 subcore_size = threads_per_subcore; 2787 cmd_bit = stat_bit = 0; 2788 split = core_info.n_subcores; 2789 sip = NULL; 2790 is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S) 2791 && !cpu_has_feature(CPU_FTR_ARCH_300); 2792 2793 if (split > 1 || hpt_on_radix) { 2794 sip = &split_info; 2795 memset(&split_info, 0, sizeof(split_info)); 2796 for (sub = 0; sub < core_info.n_subcores; ++sub) 2797 split_info.vc[sub] = core_info.vc[sub]; 2798 2799 if (is_power8) { 2800 if (split == 2 && (dynamic_mt_modes & 2)) { 2801 cmd_bit = HID0_POWER8_1TO2LPAR; 2802 stat_bit = HID0_POWER8_2LPARMODE; 2803 } else { 2804 split = 4; 2805 cmd_bit = HID0_POWER8_1TO4LPAR; 2806 stat_bit = HID0_POWER8_4LPARMODE; 2807 } 2808 subcore_size = MAX_SMT_THREADS / split; 2809 split_info.rpr = mfspr(SPRN_RPR); 2810 split_info.pmmar = mfspr(SPRN_PMMAR); 2811 split_info.ldbar = mfspr(SPRN_LDBAR); 2812 split_info.subcore_size = subcore_size; 2813 } else { 2814 split_info.subcore_size = 1; 2815 if (hpt_on_radix) { 2816 /* Use the split_info for LPCR/LPIDR changes */ 2817 split_info.lpcr_req = vc->lpcr; 2818 split_info.lpidr_req = vc->kvm->arch.lpid; 2819 split_info.host_lpcr = vc->kvm->arch.host_lpcr; 2820 split_info.do_set = 1; 2821 } 2822 } 2823 2824 /* order writes to 
split_info before kvm_split_mode pointer */ 2825 smp_wmb(); 2826 } 2827 2828 for (thr = 0; thr < controlled_threads; ++thr) { 2829 struct paca_struct *paca = paca_ptrs[pcpu + thr]; 2830 2831 paca->kvm_hstate.tid = thr; 2832 paca->kvm_hstate.napping = 0; 2833 paca->kvm_hstate.kvm_split_mode = sip; 2834 } 2835 2836 /* Initiate micro-threading (split-core) on POWER8 if required */ 2837 if (cmd_bit) { 2838 unsigned long hid0 = mfspr(SPRN_HID0); 2839 2840 hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS; 2841 mb(); 2842 mtspr(SPRN_HID0, hid0); 2843 isync(); 2844 for (;;) { 2845 hid0 = mfspr(SPRN_HID0); 2846 if (hid0 & stat_bit) 2847 break; 2848 cpu_relax(); 2849 } 2850 } 2851 2852 /* Start all the threads */ 2853 active = 0; 2854 for (sub = 0; sub < core_info.n_subcores; ++sub) { 2855 thr = is_power8 ? subcore_thread_map[sub] : sub; 2856 thr0_done = false; 2857 active |= 1 << thr; 2858 pvc = core_info.vc[sub]; 2859 pvc->pcpu = pcpu + thr; 2860 for_each_runnable_thread(i, vcpu, pvc) { 2861 kvmppc_start_thread(vcpu, pvc); 2862 kvmppc_create_dtl_entry(vcpu, pvc); 2863 trace_kvm_guest_enter(vcpu); 2864 if (!vcpu->arch.ptid) 2865 thr0_done = true; 2866 active |= 1 << (thr + vcpu->arch.ptid); 2867 } 2868 /* 2869 * We need to start the first thread of each subcore 2870 * even if it doesn't have a vcpu. 2871 */ 2872 if (!thr0_done) 2873 kvmppc_start_thread(NULL, pvc); 2874 } 2875 2876 /* 2877 * Ensure that split_info.do_nap is set after setting 2878 * the vcore pointer in the PACA of the secondaries. 2879 */ 2880 smp_mb(); 2881 2882 /* 2883 * When doing micro-threading, poke the inactive threads as well. 2884 * This gets them to the nap instruction after kvm_do_nap, 2885 * which reduces the time taken to unsplit later. 2886 * For POWER9 HPT guest on radix host, we need all the secondary 2887 * threads woken up so they can do the LPCR/LPIDR change. 2888 */ 2889 if (cmd_bit || hpt_on_radix) { 2890 split_info.do_nap = 1; /* ask secondaries to nap when done */ 2891 for (thr = 1; thr < threads_per_subcore; ++thr) 2892 if (!(active & (1 << thr))) 2893 kvmppc_ipi_thread(pcpu + thr); 2894 } 2895 2896 vc->vcore_state = VCORE_RUNNING; 2897 preempt_disable(); 2898 2899 trace_kvmppc_run_core(vc, 0); 2900 2901 for (sub = 0; sub < core_info.n_subcores; ++sub) 2902 spin_unlock(&core_info.vc[sub]->lock); 2903 2904 /* 2905 * Interrupts will be enabled once we get into the guest, 2906 * so tell lockdep that we're about to enable interrupts. 
2907 */ 2908 trace_hardirqs_on(); 2909 2910 guest_enter_irqoff(); 2911 2912 srcu_idx = srcu_read_lock(&vc->kvm->srcu); 2913 2914 trap = __kvmppc_vcore_entry(); 2915 2916 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); 2917 2918 trace_hardirqs_off(); 2919 set_irq_happened(trap); 2920 2921 spin_lock(&vc->lock); 2922 /* prevent other vcpu threads from doing kvmppc_start_thread() now */ 2923 vc->vcore_state = VCORE_EXITING; 2924 2925 /* wait for secondary threads to finish writing their state to memory */ 2926 kvmppc_wait_for_nap(controlled_threads); 2927 2928 /* Return to whole-core mode if we split the core earlier */ 2929 if (cmd_bit) { 2930 unsigned long hid0 = mfspr(SPRN_HID0); 2931 unsigned long loops = 0; 2932 2933 hid0 &= ~HID0_POWER8_DYNLPARDIS; 2934 stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; 2935 mb(); 2936 mtspr(SPRN_HID0, hid0); 2937 isync(); 2938 for (;;) { 2939 hid0 = mfspr(SPRN_HID0); 2940 if (!(hid0 & stat_bit)) 2941 break; 2942 cpu_relax(); 2943 ++loops; 2944 } 2945 } else if (hpt_on_radix) { 2946 /* Wait for all threads to have seen final sync */ 2947 for (thr = 1; thr < controlled_threads; ++thr) { 2948 struct paca_struct *paca = paca_ptrs[pcpu + thr]; 2949 2950 while (paca->kvm_hstate.kvm_split_mode) { 2951 HMT_low(); 2952 barrier(); 2953 } 2954 HMT_medium(); 2955 } 2956 } 2957 split_info.do_nap = 0; 2958 2959 kvmppc_set_host_core(pcpu); 2960 2961 local_irq_enable(); 2962 guest_exit(); 2963 2964 /* Let secondaries go back to the offline loop */ 2965 for (i = 0; i < controlled_threads; ++i) { 2966 kvmppc_release_hwthread(pcpu + i); 2967 if (sip && sip->napped[i]) 2968 kvmppc_ipi_thread(pcpu + i); 2969 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest); 2970 } 2971 2972 spin_unlock(&vc->lock); 2973 2974 /* make sure updates to secondary vcpu structs are visible now */ 2975 smp_mb(); 2976 2977 preempt_enable(); 2978 2979 for (sub = 0; sub < core_info.n_subcores; ++sub) { 2980 pvc = core_info.vc[sub]; 2981 post_guest_process(pvc, pvc == vc); 2982 } 2983 2984 spin_lock(&vc->lock); 2985 2986 out: 2987 vc->vcore_state = VCORE_INACTIVE; 2988 trace_kvmppc_run_core(vc, 1); 2989 } 2990 2991 /* 2992 * Wait for some other vcpu thread to execute us, and 2993 * wake us up when we need to handle something in the host. 
2994 */ 2995 static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc, 2996 struct kvm_vcpu *vcpu, int wait_state) 2997 { 2998 DEFINE_WAIT(wait); 2999 3000 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); 3001 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { 3002 spin_unlock(&vc->lock); 3003 schedule(); 3004 spin_lock(&vc->lock); 3005 } 3006 finish_wait(&vcpu->arch.cpu_run, &wait); 3007 } 3008 3009 static void grow_halt_poll_ns(struct kvmppc_vcore *vc) 3010 { 3011 /* 10us base */ 3012 if (vc->halt_poll_ns == 0 && halt_poll_ns_grow) 3013 vc->halt_poll_ns = 10000; 3014 else 3015 vc->halt_poll_ns *= halt_poll_ns_grow; 3016 } 3017 3018 static void shrink_halt_poll_ns(struct kvmppc_vcore *vc) 3019 { 3020 if (halt_poll_ns_shrink == 0) 3021 vc->halt_poll_ns = 0; 3022 else 3023 vc->halt_poll_ns /= halt_poll_ns_shrink; 3024 } 3025 3026 #ifdef CONFIG_KVM_XICS 3027 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) 3028 { 3029 if (!xive_enabled()) 3030 return false; 3031 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < 3032 vcpu->arch.xive_saved_state.cppr; 3033 } 3034 #else 3035 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) 3036 { 3037 return false; 3038 } 3039 #endif /* CONFIG_KVM_XICS */ 3040 3041 static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu) 3042 { 3043 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || 3044 kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu)) 3045 return true; 3046 3047 return false; 3048 } 3049 3050 /* 3051 * Check to see if any of the runnable vcpus on the vcore have pending 3052 * exceptions or are no longer ceded 3053 */ 3054 static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc) 3055 { 3056 struct kvm_vcpu *vcpu; 3057 int i; 3058 3059 for_each_runnable_thread(i, vcpu, vc) { 3060 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) 3061 return 1; 3062 } 3063 3064 return 0; 3065 } 3066 3067 /* 3068 * All the vcpus in this vcore are idle, so wait for a decrementer 3069 * or external interrupt to one of the vcpus. vc->lock is held. 
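 * We first busy-poll for up to vc->halt_poll_ns before actually sleeping on
 * the wait queue; afterwards the grow/shrink helpers above adjust that polling
 * budget according to how long we ended up blocked.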
3070 */ 3071 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) 3072 { 3073 ktime_t cur, start_poll, start_wait; 3074 int do_sleep = 1; 3075 u64 block_ns; 3076 DECLARE_SWAITQUEUE(wait); 3077 3078 /* Poll for pending exceptions and ceded state */ 3079 cur = start_poll = ktime_get(); 3080 if (vc->halt_poll_ns) { 3081 ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); 3082 ++vc->runner->stat.halt_attempted_poll; 3083 3084 vc->vcore_state = VCORE_POLLING; 3085 spin_unlock(&vc->lock); 3086 3087 do { 3088 if (kvmppc_vcore_check_block(vc)) { 3089 do_sleep = 0; 3090 break; 3091 } 3092 cur = ktime_get(); 3093 } while (single_task_running() && ktime_before(cur, stop)); 3094 3095 spin_lock(&vc->lock); 3096 vc->vcore_state = VCORE_INACTIVE; 3097 3098 if (!do_sleep) { 3099 ++vc->runner->stat.halt_successful_poll; 3100 goto out; 3101 } 3102 } 3103 3104 prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE); 3105 3106 if (kvmppc_vcore_check_block(vc)) { 3107 finish_swait(&vc->wq, &wait); 3108 do_sleep = 0; 3109 /* If we polled, count this as a successful poll */ 3110 if (vc->halt_poll_ns) 3111 ++vc->runner->stat.halt_successful_poll; 3112 goto out; 3113 } 3114 3115 start_wait = ktime_get(); 3116 3117 vc->vcore_state = VCORE_SLEEPING; 3118 trace_kvmppc_vcore_blocked(vc, 0); 3119 spin_unlock(&vc->lock); 3120 schedule(); 3121 finish_swait(&vc->wq, &wait); 3122 spin_lock(&vc->lock); 3123 vc->vcore_state = VCORE_INACTIVE; 3124 trace_kvmppc_vcore_blocked(vc, 1); 3125 ++vc->runner->stat.halt_successful_wait; 3126 3127 cur = ktime_get(); 3128 3129 out: 3130 block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll); 3131 3132 /* Attribute wait time */ 3133 if (do_sleep) { 3134 vc->runner->stat.halt_wait_ns += 3135 ktime_to_ns(cur) - ktime_to_ns(start_wait); 3136 /* Attribute failed poll time */ 3137 if (vc->halt_poll_ns) 3138 vc->runner->stat.halt_poll_fail_ns += 3139 ktime_to_ns(start_wait) - 3140 ktime_to_ns(start_poll); 3141 } else { 3142 /* Attribute successful poll time */ 3143 if (vc->halt_poll_ns) 3144 vc->runner->stat.halt_poll_success_ns += 3145 ktime_to_ns(cur) - 3146 ktime_to_ns(start_poll); 3147 } 3148 3149 /* Adjust poll time */ 3150 if (halt_poll_ns) { 3151 if (block_ns <= vc->halt_poll_ns) 3152 ; 3153 /* We slept and blocked for longer than the max halt time */ 3154 else if (vc->halt_poll_ns && block_ns > halt_poll_ns) 3155 shrink_halt_poll_ns(vc); 3156 /* We slept and our poll time is too small */ 3157 else if (vc->halt_poll_ns < halt_poll_ns && 3158 block_ns < halt_poll_ns) 3159 grow_halt_poll_ns(vc); 3160 if (vc->halt_poll_ns > halt_poll_ns) 3161 vc->halt_poll_ns = halt_poll_ns; 3162 } else 3163 vc->halt_poll_ns = 0; 3164 3165 trace_kvmppc_vcore_wakeup(do_sleep, block_ns); 3166 } 3167 3168 static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) 3169 { 3170 int r = 0; 3171 struct kvm *kvm = vcpu->kvm; 3172 3173 mutex_lock(&kvm->lock); 3174 if (!kvm->arch.mmu_ready) { 3175 if (!kvm_is_radix(kvm)) 3176 r = kvmppc_hv_setup_htab_rma(vcpu); 3177 if (!r) { 3178 if (cpu_has_feature(CPU_FTR_ARCH_300)) 3179 kvmppc_setup_partition_table(kvm); 3180 kvm->arch.mmu_ready = 1; 3181 } 3182 } 3183 mutex_unlock(&kvm->lock); 3184 return r; 3185 } 3186 3187 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 3188 { 3189 int n_ceded, i, r; 3190 struct kvmppc_vcore *vc; 3191 struct kvm_vcpu *v; 3192 3193 trace_kvmppc_run_vcpu_enter(vcpu); 3194 3195 kvm_run->exit_reason = 0; 3196 vcpu->arch.ret = RESUME_GUEST; 3197 vcpu->arch.trap = 0; 3198 kvmppc_update_vpas(vcpu); 3199 3200 /* 3201 * 
Synchronize with other threads in this virtual core 3202 */ 3203 vc = vcpu->arch.vcore; 3204 spin_lock(&vc->lock); 3205 vcpu->arch.ceded = 0; 3206 vcpu->arch.run_task = current; 3207 vcpu->arch.kvm_run = kvm_run; 3208 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); 3209 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; 3210 vcpu->arch.busy_preempt = TB_NIL; 3211 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); 3212 ++vc->n_runnable; 3213 3214 /* 3215 * This happens the first time this is called for a vcpu. 3216 * If the vcore is already running, we may be able to start 3217 * this thread straight away and have it join in. 3218 */ 3219 if (!signal_pending(current)) { 3220 if ((vc->vcore_state == VCORE_PIGGYBACK || 3221 vc->vcore_state == VCORE_RUNNING) && 3222 !VCORE_IS_EXITING(vc)) { 3223 kvmppc_create_dtl_entry(vcpu, vc); 3224 kvmppc_start_thread(vcpu, vc); 3225 trace_kvm_guest_enter(vcpu); 3226 } else if (vc->vcore_state == VCORE_SLEEPING) { 3227 swake_up(&vc->wq); 3228 } 3229 3230 } 3231 3232 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && 3233 !signal_pending(current)) { 3234 /* See if the MMU is ready to go */ 3235 if (!vcpu->kvm->arch.mmu_ready) { 3236 spin_unlock(&vc->lock); 3237 r = kvmhv_setup_mmu(vcpu); 3238 spin_lock(&vc->lock); 3239 if (r) { 3240 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; 3241 kvm_run->fail_entry. 3242 hardware_entry_failure_reason = 0; 3243 vcpu->arch.ret = r; 3244 break; 3245 } 3246 } 3247 3248 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) 3249 kvmppc_vcore_end_preempt(vc); 3250 3251 if (vc->vcore_state != VCORE_INACTIVE) { 3252 kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); 3253 continue; 3254 } 3255 for_each_runnable_thread(i, v, vc) { 3256 kvmppc_core_prepare_to_enter(v); 3257 if (signal_pending(v->arch.run_task)) { 3258 kvmppc_remove_runnable(vc, v); 3259 v->stat.signal_exits++; 3260 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; 3261 v->arch.ret = -EINTR; 3262 wake_up(&v->arch.cpu_run); 3263 } 3264 } 3265 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) 3266 break; 3267 n_ceded = 0; 3268 for_each_runnable_thread(i, v, vc) { 3269 if (!kvmppc_vcpu_woken(v)) 3270 n_ceded += v->arch.ceded; 3271 else 3272 v->arch.ceded = 0; 3273 } 3274 vc->runner = vcpu; 3275 if (n_ceded == vc->n_runnable) { 3276 kvmppc_vcore_blocked(vc); 3277 } else if (need_resched()) { 3278 kvmppc_vcore_preempt(vc); 3279 /* Let something else run */ 3280 cond_resched_lock(&vc->lock); 3281 if (vc->vcore_state == VCORE_PREEMPT) 3282 kvmppc_vcore_end_preempt(vc); 3283 } else { 3284 kvmppc_run_core(vc); 3285 } 3286 vc->runner = NULL; 3287 } 3288 3289 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && 3290 (vc->vcore_state == VCORE_RUNNING || 3291 vc->vcore_state == VCORE_EXITING || 3292 vc->vcore_state == VCORE_PIGGYBACK)) 3293 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); 3294 3295 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) 3296 kvmppc_vcore_end_preempt(vc); 3297 3298 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { 3299 kvmppc_remove_runnable(vc, vcpu); 3300 vcpu->stat.signal_exits++; 3301 kvm_run->exit_reason = KVM_EXIT_INTR; 3302 vcpu->arch.ret = -EINTR; 3303 } 3304 3305 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { 3306 /* Wake up some vcpu to run the core */ 3307 i = -1; 3308 v = next_runnable_thread(vc, &i); 3309 wake_up(&v->arch.cpu_run); 3310 } 3311 3312 trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); 3313 spin_unlock(&vc->lock); 3314 return vcpu->arch.ret; 3315 } 3316 3317 static int 
kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) 3318 { 3319 int r; 3320 int srcu_idx; 3321 unsigned long ebb_regs[3] = {}; /* shut up GCC */ 3322 unsigned long user_tar = 0; 3323 unsigned int user_vrsave; 3324 struct kvm *kvm; 3325 3326 if (!vcpu->arch.sane) { 3327 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3328 return -EINVAL; 3329 } 3330 3331 /* 3332 * Don't allow entry with a suspended transaction, because 3333 * the guest entry/exit code will lose it. 3334 * If the guest has TM enabled, save away their TM-related SPRs 3335 * (they will get restored by the TM unavailable interrupt). 3336 */ 3337 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 3338 if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && 3339 (current->thread.regs->msr & MSR_TM)) { 3340 if (MSR_TM_ACTIVE(current->thread.regs->msr)) { 3341 run->exit_reason = KVM_EXIT_FAIL_ENTRY; 3342 run->fail_entry.hardware_entry_failure_reason = 0; 3343 return -EINVAL; 3344 } 3345 /* Enable TM so we can read the TM SPRs */ 3346 mtmsr(mfmsr() | MSR_TM); 3347 current->thread.tm_tfhar = mfspr(SPRN_TFHAR); 3348 current->thread.tm_tfiar = mfspr(SPRN_TFIAR); 3349 current->thread.tm_texasr = mfspr(SPRN_TEXASR); 3350 current->thread.regs->msr &= ~MSR_TM; 3351 } 3352 #endif 3353 3354 kvmppc_core_prepare_to_enter(vcpu); 3355 3356 /* No need to go into the guest when all we'll do is come back out */ 3357 if (signal_pending(current)) { 3358 run->exit_reason = KVM_EXIT_INTR; 3359 return -EINTR; 3360 } 3361 3362 kvm = vcpu->kvm; 3363 atomic_inc(&kvm->arch.vcpus_running); 3364 /* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */ 3365 smp_mb(); 3366 3367 flush_all_to_thread(current); 3368 3369 /* Save userspace EBB and other register values */ 3370 if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 3371 ebb_regs[0] = mfspr(SPRN_EBBHR); 3372 ebb_regs[1] = mfspr(SPRN_EBBRR); 3373 ebb_regs[2] = mfspr(SPRN_BESCR); 3374 user_tar = mfspr(SPRN_TAR); 3375 } 3376 user_vrsave = mfspr(SPRN_VRSAVE); 3377 3378 vcpu->arch.wqp = &vcpu->arch.vcore->wq; 3379 vcpu->arch.pgdir = current->mm->pgd; 3380 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; 3381 3382 do { 3383 r = kvmppc_run_vcpu(run, vcpu); 3384 3385 if (run->exit_reason == KVM_EXIT_PAPR_HCALL && 3386 !(vcpu->arch.shregs.msr & MSR_PR)) { 3387 trace_kvm_hcall_enter(vcpu); 3388 r = kvmppc_pseries_do_hcall(vcpu); 3389 trace_kvm_hcall_exit(vcpu, r); 3390 kvmppc_core_prepare_to_enter(vcpu); 3391 } else if (r == RESUME_PAGE_FAULT) { 3392 srcu_idx = srcu_read_lock(&kvm->srcu); 3393 r = kvmppc_book3s_hv_page_fault(run, vcpu, 3394 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); 3395 srcu_read_unlock(&kvm->srcu, srcu_idx); 3396 } else if (r == RESUME_PASSTHROUGH) { 3397 if (WARN_ON(xive_enabled())) 3398 r = H_SUCCESS; 3399 else 3400 r = kvmppc_xics_rm_complete(vcpu, 0); 3401 } 3402 } while (is_kvmppc_resume_guest(r)); 3403 3404 /* Restore userspace EBB and other register values */ 3405 if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 3406 mtspr(SPRN_EBBHR, ebb_regs[0]); 3407 mtspr(SPRN_EBBRR, ebb_regs[1]); 3408 mtspr(SPRN_BESCR, ebb_regs[2]); 3409 mtspr(SPRN_TAR, user_tar); 3410 mtspr(SPRN_FSCR, current->thread.fscr); 3411 } 3412 mtspr(SPRN_VRSAVE, user_vrsave); 3413 3414 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; 3415 atomic_dec(&kvm->arch.vcpus_running); 3416 return r; 3417 } 3418 3419 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, 3420 int shift, int sllp) 3421 { 3422 (*sps)->page_shift = shift; 3423 (*sps)->slb_enc = sllp; 3424 (*sps)->enc[0].page_shift = shift; 3425 (*sps)->enc[0].pte_enc 
= kvmppc_pgsize_lp_encoding(shift, shift); 3426 /* 3427 * Add 16MB MPSS support (may get filtered out by userspace) 3428 */ 3429 if (shift != 24) { 3430 int penc = kvmppc_pgsize_lp_encoding(shift, 24); 3431 if (penc != -1) { 3432 (*sps)->enc[1].page_shift = 24; 3433 (*sps)->enc[1].pte_enc = penc; 3434 } 3435 } 3436 (*sps)++; 3437 } 3438 3439 static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, 3440 struct kvm_ppc_smmu_info *info) 3441 { 3442 struct kvm_ppc_one_seg_page_size *sps; 3443 3444 /* 3445 * POWER7, POWER8 and POWER9 all support 32 storage keys for data. 3446 * POWER7 doesn't support keys for instruction accesses, 3447 * POWER8 and POWER9 do. 3448 */ 3449 info->data_keys = 32; 3450 info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0; 3451 3452 /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */ 3453 info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS; 3454 info->slb_size = 32; 3455 3456 /* We only support these sizes for now, and no muti-size segments */ 3457 sps = &info->sps[0]; 3458 kvmppc_add_seg_page_size(&sps, 12, 0); 3459 kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01); 3460 kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L); 3461 3462 return 0; 3463 } 3464 3465 /* 3466 * Get (and clear) the dirty memory log for a memory slot. 3467 */ 3468 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, 3469 struct kvm_dirty_log *log) 3470 { 3471 struct kvm_memslots *slots; 3472 struct kvm_memory_slot *memslot; 3473 int i, r; 3474 unsigned long n; 3475 unsigned long *buf, *p; 3476 struct kvm_vcpu *vcpu; 3477 3478 mutex_lock(&kvm->slots_lock); 3479 3480 r = -EINVAL; 3481 if (log->slot >= KVM_USER_MEM_SLOTS) 3482 goto out; 3483 3484 slots = kvm_memslots(kvm); 3485 memslot = id_to_memslot(slots, log->slot); 3486 r = -ENOENT; 3487 if (!memslot->dirty_bitmap) 3488 goto out; 3489 3490 /* 3491 * Use second half of bitmap area because both HPT and radix 3492 * accumulate bits in the first half. 3493 */ 3494 n = kvm_dirty_bitmap_bytes(memslot); 3495 buf = memslot->dirty_bitmap + n / sizeof(long); 3496 memset(buf, 0, n); 3497 3498 if (kvm_is_radix(kvm)) 3499 r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf); 3500 else 3501 r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf); 3502 if (r) 3503 goto out; 3504 3505 /* 3506 * We accumulate dirty bits in the first half of the 3507 * memslot's dirty_bitmap area, for when pages are paged 3508 * out or modified by the host directly. Pick up these 3509 * bits and add them to the map. 
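 * The xchg() loop below transfers those first-half bits into the buffer and
 * clears them, so each call reports a given dirty page only once.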
3510 */ 3511 p = memslot->dirty_bitmap; 3512 for (i = 0; i < n / sizeof(long); ++i) 3513 buf[i] |= xchg(&p[i], 0); 3514 3515 /* Harvest dirty bits from VPA and DTL updates */ 3516 /* Note: we never modify the SLB shadow buffer areas */ 3517 kvm_for_each_vcpu(i, vcpu, kvm) { 3518 spin_lock(&vcpu->arch.vpa_update_lock); 3519 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); 3520 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); 3521 spin_unlock(&vcpu->arch.vpa_update_lock); 3522 } 3523 3524 r = -EFAULT; 3525 if (copy_to_user(log->dirty_bitmap, buf, n)) 3526 goto out; 3527 3528 r = 0; 3529 out: 3530 mutex_unlock(&kvm->slots_lock); 3531 return r; 3532 } 3533 3534 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free, 3535 struct kvm_memory_slot *dont) 3536 { 3537 if (!dont || free->arch.rmap != dont->arch.rmap) { 3538 vfree(free->arch.rmap); 3539 free->arch.rmap = NULL; 3540 } 3541 } 3542 3543 static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot, 3544 unsigned long npages) 3545 { 3546 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); 3547 if (!slot->arch.rmap) 3548 return -ENOMEM; 3549 3550 return 0; 3551 } 3552 3553 static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, 3554 struct kvm_memory_slot *memslot, 3555 const struct kvm_userspace_memory_region *mem) 3556 { 3557 return 0; 3558 } 3559 3560 static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, 3561 const struct kvm_userspace_memory_region *mem, 3562 const struct kvm_memory_slot *old, 3563 const struct kvm_memory_slot *new) 3564 { 3565 unsigned long npages = mem->memory_size >> PAGE_SHIFT; 3566 3567 /* 3568 * If we are making a new memslot, it might make 3569 * some address that was previously cached as emulated 3570 * MMIO be no longer emulated MMIO, so invalidate 3571 * all the caches of emulated MMIO translations. 3572 */ 3573 if (npages) 3574 atomic64_inc(&kvm->arch.mmio_update); 3575 } 3576 3577 /* 3578 * Update LPCR values in kvm->arch and in vcores. 3579 * Caller must hold kvm->lock. 3580 */ 3581 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) 3582 { 3583 long int i; 3584 u32 cores_done = 0; 3585 3586 if ((kvm->arch.lpcr & mask) == lpcr) 3587 return; 3588 3589 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; 3590 3591 for (i = 0; i < KVM_MAX_VCORES; ++i) { 3592 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; 3593 if (!vc) 3594 continue; 3595 spin_lock(&vc->lock); 3596 vc->lpcr = (vc->lpcr & ~mask) | lpcr; 3597 spin_unlock(&vc->lock); 3598 if (++cores_done >= kvm->arch.online_vcores) 3599 break; 3600 } 3601 } 3602 3603 static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) 3604 { 3605 return; 3606 } 3607 3608 void kvmppc_setup_partition_table(struct kvm *kvm) 3609 { 3610 unsigned long dw0, dw1; 3611 3612 if (!kvm_is_radix(kvm)) { 3613 /* PS field - page size for VRMA */ 3614 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | 3615 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); 3616 /* HTABSIZE and HTABORG fields */ 3617 dw0 |= kvm->arch.sdr1; 3618 3619 /* Second dword as set by userspace */ 3620 dw1 = kvm->arch.process_table; 3621 } else { 3622 dw0 = PATB_HR | radix__get_tree_size() | 3623 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; 3624 dw1 = PATB_GR | kvm->arch.process_table; 3625 } 3626 3627 mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1); 3628 } 3629 3630 /* 3631 * Set up HPT (hashed page table) and RMA (real-mode area). 3632 * Must be called with kvm->lock held. 
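 * (Reached via kvmhv_setup_mmu() above, the first time a vcpu tries to run
 * while mmu_ready is still clear.)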
3633 */ 3634 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) 3635 { 3636 int err = 0; 3637 struct kvm *kvm = vcpu->kvm; 3638 unsigned long hva; 3639 struct kvm_memory_slot *memslot; 3640 struct vm_area_struct *vma; 3641 unsigned long lpcr = 0, senc; 3642 unsigned long psize, porder; 3643 int srcu_idx; 3644 3645 /* Allocate hashed page table (if not done already) and reset it */ 3646 if (!kvm->arch.hpt.virt) { 3647 int order = KVM_DEFAULT_HPT_ORDER; 3648 struct kvm_hpt_info info; 3649 3650 err = kvmppc_allocate_hpt(&info, order); 3651 /* If we get here, it means userspace didn't specify a 3652 * size explicitly. So, try successively smaller 3653 * sizes if the default failed. */ 3654 while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER) 3655 err = kvmppc_allocate_hpt(&info, order); 3656 3657 if (err < 0) { 3658 pr_err("KVM: Couldn't alloc HPT\n"); 3659 goto out; 3660 } 3661 3662 kvmppc_set_hpt(kvm, &info); 3663 } 3664 3665 /* Look up the memslot for guest physical address 0 */ 3666 srcu_idx = srcu_read_lock(&kvm->srcu); 3667 memslot = gfn_to_memslot(kvm, 0); 3668 3669 /* We must have some memory at 0 by now */ 3670 err = -EINVAL; 3671 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) 3672 goto out_srcu; 3673 3674 /* Look up the VMA for the start of this memory slot */ 3675 hva = memslot->userspace_addr; 3676 down_read(&current->mm->mmap_sem); 3677 vma = find_vma(current->mm, hva); 3678 if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) 3679 goto up_out; 3680 3681 psize = vma_kernel_pagesize(vma); 3682 3683 up_read(&current->mm->mmap_sem); 3684 3685 /* We can handle 4k, 64k or 16M pages in the VRMA */ 3686 if (psize >= 0x1000000) 3687 psize = 0x1000000; 3688 else if (psize >= 0x10000) 3689 psize = 0x10000; 3690 else 3691 psize = 0x1000; 3692 porder = __ilog2(psize); 3693 3694 senc = slb_pgsize_encoding(psize); 3695 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | 3696 (VRMA_VSID << SLB_VSID_SHIFT_1T); 3697 /* Create HPTEs in the hash page table for the VRMA */ 3698 kvmppc_map_vrma(vcpu, memslot, porder); 3699 3700 /* Update VRMASD field in the LPCR */ 3701 if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 3702 /* the -4 is to account for senc values starting at 0x10 */ 3703 lpcr = senc << (LPCR_VRMASD_SH - 4); 3704 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); 3705 } 3706 3707 /* Order updates to kvm->arch.lpcr etc. vs.
mmu_ready */ 3708 smp_wmb(); 3709 err = 0; 3710 out_srcu: 3711 srcu_read_unlock(&kvm->srcu, srcu_idx); 3712 out: 3713 return err; 3714 3715 up_out: 3716 up_read(&current->mm->mmap_sem); 3717 goto out_srcu; 3718 } 3719 3720 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */ 3721 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) 3722 { 3723 kvmppc_free_radix(kvm); 3724 kvmppc_update_lpcr(kvm, LPCR_VPM1, 3725 LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); 3726 kvmppc_rmap_reset(kvm); 3727 kvm->arch.radix = 0; 3728 kvm->arch.process_table = 0; 3729 return 0; 3730 } 3731 3732 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */ 3733 int kvmppc_switch_mmu_to_radix(struct kvm *kvm) 3734 { 3735 int err; 3736 3737 err = kvmppc_init_vm_radix(kvm); 3738 if (err) 3739 return err; 3740 3741 kvmppc_free_hpt(&kvm->arch.hpt); 3742 kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR, 3743 LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); 3744 kvm->arch.radix = 1; 3745 return 0; 3746 } 3747 3748 #ifdef CONFIG_KVM_XICS 3749 /* 3750 * Allocate a per-core structure for managing state about which cores are 3751 * running in the host versus the guest and for exchanging data between 3752 * real mode KVM and CPU running in the host. 3753 * This is only done for the first VM. 3754 * The allocated structure stays even if all VMs have stopped. 3755 * It is only freed when the kvm-hv module is unloaded. 3756 * It's OK for this routine to fail, we just don't support host 3757 * core operations like redirecting H_IPI wakeups. 3758 */ 3759 void kvmppc_alloc_host_rm_ops(void) 3760 { 3761 struct kvmppc_host_rm_ops *ops; 3762 unsigned long l_ops; 3763 int cpu, core; 3764 int size; 3765 3766 /* Not the first time here ? */ 3767 if (kvmppc_host_rm_ops_hv != NULL) 3768 return; 3769 3770 ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL); 3771 if (!ops) 3772 return; 3773 3774 size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core); 3775 ops->rm_core = kzalloc(size, GFP_KERNEL); 3776 3777 if (!ops->rm_core) { 3778 kfree(ops); 3779 return; 3780 } 3781 3782 cpus_read_lock(); 3783 3784 for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) { 3785 if (!cpu_online(cpu)) 3786 continue; 3787 3788 core = cpu >> threads_shift; 3789 ops->rm_core[core].rm_state.in_host = 1; 3790 } 3791 3792 ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; 3793 3794 /* 3795 * Make the contents of the kvmppc_host_rm_ops structure visible 3796 * to other CPUs before we assign it to the global variable. 3797 * Do an atomic assignment (no locks used here), but if someone 3798 * beats us to it, just free our copy and return.
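 * The cmpxchg64() below is that atomic assignment: whichever caller installs
 * its pointer first wins, and a loser frees its own copy.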
3799 */ 3800 smp_wmb(); 3801 l_ops = (unsigned long) ops; 3802 3803 if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) { 3804 cpus_read_unlock(); 3805 kfree(ops->rm_core); 3806 kfree(ops); 3807 return; 3808 } 3809 3810 cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE, 3811 "ppc/kvm_book3s:prepare", 3812 kvmppc_set_host_core, 3813 kvmppc_clear_host_core); 3814 cpus_read_unlock(); 3815 } 3816 3817 void kvmppc_free_host_rm_ops(void) 3818 { 3819 if (kvmppc_host_rm_ops_hv) { 3820 cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE); 3821 kfree(kvmppc_host_rm_ops_hv->rm_core); 3822 kfree(kvmppc_host_rm_ops_hv); 3823 kvmppc_host_rm_ops_hv = NULL; 3824 } 3825 } 3826 #endif 3827 3828 static int kvmppc_core_init_vm_hv(struct kvm *kvm) 3829 { 3830 unsigned long lpcr, lpid; 3831 char buf[32]; 3832 int ret; 3833 3834 /* Allocate the guest's logical partition ID */ 3835 3836 lpid = kvmppc_alloc_lpid(); 3837 if ((long)lpid < 0) 3838 return -ENOMEM; 3839 kvm->arch.lpid = lpid; 3840 3841 kvmppc_alloc_host_rm_ops(); 3842 3843 /* 3844 * Since we don't flush the TLB when tearing down a VM, 3845 * and this lpid might have previously been used, 3846 * make sure we flush on each core before running the new VM. 3847 * On POWER9, the tlbie in mmu_partition_table_set_entry() 3848 * does this flush for us. 3849 */ 3850 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 3851 cpumask_setall(&kvm->arch.need_tlb_flush); 3852 3853 /* Start out with the default set of hcalls enabled */ 3854 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, 3855 sizeof(kvm->arch.enabled_hcalls)); 3856 3857 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 3858 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); 3859 3860 /* Init LPCR for virtual RMA mode */ 3861 kvm->arch.host_lpid = mfspr(SPRN_LPID); 3862 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); 3863 lpcr &= LPCR_PECE | LPCR_LPES; 3864 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | 3865 LPCR_VPM0 | LPCR_VPM1; 3866 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | 3867 (VRMA_VSID << SLB_VSID_SHIFT_1T); 3868 /* On POWER8 turn on online bit to enable PURR/SPURR */ 3869 if (cpu_has_feature(CPU_FTR_ARCH_207S)) 3870 lpcr |= LPCR_ONL; 3871 /* 3872 * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed) 3873 * Set HVICE bit to enable hypervisor virtualization interrupts. 3874 * Set HEIC to prevent OS interrupts to go to hypervisor (should 3875 * be unnecessary but better safe than sorry in case we re-enable 3876 * EE in HV mode with this LPCR still set) 3877 */ 3878 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 3879 lpcr &= ~LPCR_VPM0; 3880 lpcr |= LPCR_HVICE | LPCR_HEIC; 3881 3882 /* 3883 * If xive is enabled, we route 0x500 interrupts directly 3884 * to the guest. 3885 */ 3886 if (xive_enabled()) 3887 lpcr |= LPCR_LPES; 3888 } 3889 3890 /* 3891 * If the host uses radix, the guest starts out as radix. 3892 */ 3893 if (radix_enabled()) { 3894 kvm->arch.radix = 1; 3895 kvm->arch.mmu_ready = 1; 3896 lpcr &= ~LPCR_VPM1; 3897 lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; 3898 ret = kvmppc_init_vm_radix(kvm); 3899 if (ret) { 3900 kvmppc_free_lpid(kvm->arch.lpid); 3901 return ret; 3902 } 3903 kvmppc_setup_partition_table(kvm); 3904 } 3905 3906 kvm->arch.lpcr = lpcr; 3907 3908 /* Initialization for future HPT resizes */ 3909 kvm->arch.resize_hpt = NULL; 3910 3911 /* 3912 * Work out how many sets the TLB has, for the use of 3913 * the TLB invalidation loop in book3s_hv_rmhandlers.S. 
        if (radix_enabled())
                kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX;     /* 128 */
        else if (cpu_has_feature(CPU_FTR_ARCH_300))
                kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;      /* 256 */
        else if (cpu_has_feature(CPU_FTR_ARCH_207S))
                kvm->arch.tlb_sets = POWER8_TLB_SETS;           /* 512 */
        else
                kvm->arch.tlb_sets = POWER7_TLB_SETS;           /* 128 */

        /*
         * Track that we now have an HV mode VM active. This blocks secondary
         * CPU threads from coming online.
         * On POWER9, we only need to do this if the "indep_threads_mode"
         * module parameter has been set to N.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                kvm->arch.threads_indep = indep_threads_mode;
        if (!kvm->arch.threads_indep)
                kvm_hv_vm_activated();

        /*
         * Initialize smt_mode depending on processor.
         * POWER8 and earlier have to use "strict" threading, where
         * all vCPUs in a vcore have to run on the same (sub)core,
         * whereas on POWER9 the threads can each run a different
         * guest.
         */
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                kvm->arch.smt_mode = threads_per_subcore;
        else
                kvm->arch.smt_mode = 1;
        kvm->arch.emul_smt_mode = 1;

        /*
         * Create a debugfs directory for the VM
         */
        snprintf(buf, sizeof(buf), "vm%d", current->pid);
        kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
        if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
                kvmppc_mmu_debugfs_init(kvm);

        return 0;
}

static void kvmppc_free_vcores(struct kvm *kvm)
{
        long int i;

        for (i = 0; i < KVM_MAX_VCORES; ++i)
                kfree(kvm->arch.vcores[i]);
        kvm->arch.online_vcores = 0;
}

static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
        debugfs_remove_recursive(kvm->arch.debugfs_dir);

        if (!kvm->arch.threads_indep)
                kvm_hv_vm_deactivated();

        kvmppc_free_vcores(kvm);

        kvmppc_free_lpid(kvm->arch.lpid);

        if (kvm_is_radix(kvm))
                kvmppc_free_radix(kvm);
        else
                kvmppc_free_hpt(&kvm->arch.hpt);

        kvmppc_free_pimap(kvm);
}

/* We don't need to emulate any privileged instructions or dcbz */
static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                     unsigned int inst, int *advance)
{
        return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
                                        ulong spr_val)
{
        return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
                                        ulong *spr_val)
{
        return EMULATE_FAIL;
}

static int kvmppc_core_check_processor_compat_hv(void)
{
        if (!cpu_has_feature(CPU_FTR_HVMODE) ||
            !cpu_has_feature(CPU_FTR_ARCH_206))
                return -EIO;

        return 0;
}

#ifdef CONFIG_KVM_XICS

void kvmppc_free_pimap(struct kvm *kvm)
{
        kfree(kvm->arch.pimap);
}

static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void)
{
        return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL);
}

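/*
 * kvmppc_set_passthru_irq() and kvmppc_clr_passthru_irq() below maintain
 * kvm->arch.pimap, a small table mapping a guest interrupt number
 * (v_hwirq) to the host hardware IRQ (r_hwirq) and its irq_desc.  As a
 * rough sketch of the lifecycle, going by the code below: entries are
 * appended up to KVMPPC_PIRQ_MAPPED and never compacted; unmapping only
 * clears r_hwirq so the slot can be re-used for the same guest IRQ, and
 * the table itself is freed when the VM is destroyed.
 */
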
static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
        struct irq_desc *desc;
        struct kvmppc_irq_map *irq_map;
        struct kvmppc_passthru_irqmap *pimap;
        struct irq_chip *chip;
        int i, rc = 0;

        if (!kvm_irq_bypass)
                return 1;

        desc = irq_to_desc(host_irq);
        if (!desc)
                return -EIO;

        mutex_lock(&kvm->lock);

        pimap = kvm->arch.pimap;
        if (pimap == NULL) {
                /* First call, allocate structure to hold IRQ map */
                pimap = kvmppc_alloc_pimap();
                if (pimap == NULL) {
                        mutex_unlock(&kvm->lock);
                        return -ENOMEM;
                }
                kvm->arch.pimap = pimap;
        }

        /*
         * For now, we only support interrupts for which the EOI operation
         * is an OPAL call followed by a write to XIRR (since that's what
         * our real-mode EOI code does), or a XIVE interrupt.
         */
        chip = irq_data_get_irq_chip(&desc->irq_data);
        if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
                pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
                        host_irq, guest_gsi);
                mutex_unlock(&kvm->lock);
                return -ENOENT;
        }

        /*
         * See if we already have an entry for this guest IRQ number.
         * If it's mapped to a hardware IRQ number, that's an error,
         * otherwise re-use this entry.
         */
        for (i = 0; i < pimap->n_mapped; i++) {
                if (guest_gsi == pimap->mapped[i].v_hwirq) {
                        if (pimap->mapped[i].r_hwirq) {
                                mutex_unlock(&kvm->lock);
                                return -EINVAL;
                        }
                        break;
                }
        }

        if (i == KVMPPC_PIRQ_MAPPED) {
                mutex_unlock(&kvm->lock);
                return -EAGAIN;         /* table is full */
        }

        irq_map = &pimap->mapped[i];

        irq_map->v_hwirq = guest_gsi;
        irq_map->desc = desc;

        /*
         * Order the above two stores before the next to serialize with
         * the KVM real mode handler.
         */
        smp_wmb();
        irq_map->r_hwirq = desc->irq_data.hwirq;

        if (i == pimap->n_mapped)
                pimap->n_mapped++;

        if (xive_enabled())
                rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
        else
                kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
        if (rc)
                irq_map->r_hwirq = 0;

        mutex_unlock(&kvm->lock);

        return 0;
}

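/*
 * On the smp_wmb() in kvmppc_set_passthru_irq() above: v_hwirq and desc are
 * published before r_hwirq, so a reader that observes a non-zero r_hwirq
 * can rely on the other fields of the entry being valid.  The consumer is
 * the real-mode interrupt code (not in this file), which is presumed to
 * pair this with an appropriate read barrier or address dependency when it
 * scans the map.
 */
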
static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
        struct irq_desc *desc;
        struct kvmppc_passthru_irqmap *pimap;
        int i, rc = 0;

        if (!kvm_irq_bypass)
                return 0;

        desc = irq_to_desc(host_irq);
        if (!desc)
                return -EIO;

        mutex_lock(&kvm->lock);
        if (!kvm->arch.pimap)
                goto unlock;

        pimap = kvm->arch.pimap;

        for (i = 0; i < pimap->n_mapped; i++) {
                if (guest_gsi == pimap->mapped[i].v_hwirq)
                        break;
        }

        if (i == pimap->n_mapped) {
                mutex_unlock(&kvm->lock);
                return -ENODEV;
        }

        if (xive_enabled())
                rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
        else
                kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);

        /* invalidate the entry (what to do on error from the above?) */
        pimap->mapped[i].r_hwirq = 0;

        /*
         * We don't free this structure even when the count goes to
         * zero. The structure is freed when we destroy the VM.
         */
 unlock:
        mutex_unlock(&kvm->lock);
        return rc;
}

static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
                                             struct irq_bypass_producer *prod)
{
        int ret = 0;
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        irqfd->producer = prod;

        ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
        if (ret)
                pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n",
                        prod->irq, irqfd->gsi, ret);

        return ret;
}

static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons,
                                              struct irq_bypass_producer *prod)
{
        int ret;
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        irqfd->producer = NULL;

        /*
         * When the producer for a consumer is unregistered, we change back
         * to the default external interrupt handling mode, in which KVM
         * real mode switches interrupts back to the host.
         */
        ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
        if (ret)
                pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n",
                        prod->irq, irqfd->gsi, ret);
}
#endif

static long kvm_arch_vm_ioctl_hv(struct file *filp,
                                 unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {

        case KVM_PPC_ALLOCATE_HTAB: {
                u32 htab_order;

                r = -EFAULT;
                if (get_user(htab_order, (u32 __user *)argp))
                        break;
                r = kvmppc_alloc_reset_hpt(kvm, htab_order);
                if (r)
                        break;
                r = 0;
                break;
        }

        case KVM_PPC_GET_HTAB_FD: {
                struct kvm_get_htab_fd ghf;

                r = -EFAULT;
                if (copy_from_user(&ghf, argp, sizeof(ghf)))
                        break;
                r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
                break;
        }

        case KVM_PPC_RESIZE_HPT_PREPARE: {
                struct kvm_ppc_resize_hpt rhpt;

                r = -EFAULT;
                if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
                        break;

                r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt);
                break;
        }

        case KVM_PPC_RESIZE_HPT_COMMIT: {
                struct kvm_ppc_resize_hpt rhpt;

                r = -EFAULT;
                if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
                        break;

                r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt);
                break;
        }

        default:
                r = -ENOTTY;
        }

        return r;
}

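/*
 * For reference, a rough sketch of how userspace drives the HPT ioctls
 * handled above.  "vm_fd" is a hypothetical VM file descriptor obtained
 * via KVM_CREATE_VM, the order/shift values are illustrative only, and
 * error handling is omitted:
 *
 *      __u32 order = 28;       // log2 of HPT size in bytes, e.g. 256 MiB
 *      ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order);
 *
 *      struct kvm_ppc_resize_hpt rhpt = { .flags = 0, .shift = 29 };
 *      ioctl(vm_fd, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
 *      // userspace typically retries PREPARE until it reports completion
 *      ioctl(vm_fd, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
 *
 * See Documentation/virtual/kvm/api.txt for the authoritative description
 * of these ioctls.
 */
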
/*
 * List of hcall numbers to enable by default.
 * For compatibility with old userspace, we enable by default
 * all hcalls that were implemented before the hcall-enabling
 * facility was added. Note this list should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
        H_REMOVE,
        H_ENTER,
        H_READ,
        H_PROTECT,
        H_BULK_REMOVE,
        H_GET_TCE,
        H_PUT_TCE,
        H_SET_DABR,
        H_SET_XDABR,
        H_CEDE,
        H_PROD,
        H_CONFER,
        H_REGISTER_VPA,
#ifdef CONFIG_KVM_XICS
        H_EOI,
        H_CPPR,
        H_IPI,
        H_IPOLL,
        H_XIRR,
        H_XIRR_X,
#endif
        0
};

static void init_default_hcalls(void)
{
        int i;
        unsigned int hcall;

        for (i = 0; default_hcall_list[i]; ++i) {
                hcall = default_hcall_list[i];
                WARN_ON(!kvmppc_hcall_impl_hv(hcall));
                __set_bit(hcall / 4, default_enabled_hcalls);
        }
}

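/*
 * Why "hcall / 4" above: PAPR hcall numbers are multiples of 4, so dividing
 * by 4 gives a dense bit index into the default_enabled_hcalls bitmap
 * (which is sized MAX_HCALL_OPCODE/4 + 1 bits).  For example, H_CEDE (0xE0)
 * maps to bit 0x38.
 */
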
static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
        unsigned long lpcr;
        int radix;
        int err;

        /* If not on a POWER9, reject it */
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return -ENODEV;

        /* If any unknown flags set, reject it */
        if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE))
                return -EINVAL;

        /* GR (guest radix) bit in process_table field must match */
        radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX);
        if (!!(cfg->process_table & PATB_GR) != radix)
                return -EINVAL;

        /* Process table size field must be reasonable, i.e. <= 24 */
        if ((cfg->process_table & PRTS_MASK) > 24)
                return -EINVAL;

        /* We can change a guest to/from radix now, if the host is radix */
        if (radix && !radix_enabled())
                return -EINVAL;

        mutex_lock(&kvm->lock);
        if (radix != kvm_is_radix(kvm)) {
                if (kvm->arch.mmu_ready) {
                        kvm->arch.mmu_ready = 0;
                        /* order mmu_ready vs. vcpus_running */
                        smp_mb();
                        if (atomic_read(&kvm->arch.vcpus_running)) {
                                kvm->arch.mmu_ready = 1;
                                err = -EBUSY;
                                goto out_unlock;
                        }
                }
                if (radix)
                        err = kvmppc_switch_mmu_to_radix(kvm);
                else
                        err = kvmppc_switch_mmu_to_hpt(kvm);
                if (err)
                        goto out_unlock;
        }

        kvm->arch.process_table = cfg->process_table;
        kvmppc_setup_partition_table(kvm);

        lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
        kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
        err = 0;

 out_unlock:
        mutex_unlock(&kvm->lock);
        return err;
}

static struct kvmppc_ops kvm_ops_hv = {
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
        .get_one_reg = kvmppc_get_one_reg_hv,
        .set_one_reg = kvmppc_set_one_reg_hv,
        .vcpu_load = kvmppc_core_vcpu_load_hv,
        .vcpu_put = kvmppc_core_vcpu_put_hv,
        .set_msr = kvmppc_set_msr_hv,
        .vcpu_run = kvmppc_vcpu_run_hv,
        .vcpu_create = kvmppc_core_vcpu_create_hv,
        .vcpu_free = kvmppc_core_vcpu_free_hv,
        .check_requests = kvmppc_core_check_requests_hv,
        .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
        .flush_memslot = kvmppc_core_flush_memslot_hv,
        .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
        .commit_memory_region = kvmppc_core_commit_memory_region_hv,
        .unmap_hva_range = kvm_unmap_hva_range_hv,
        .age_hva = kvm_age_hva_hv,
        .test_age_hva = kvm_test_age_hva_hv,
        .set_spte_hva = kvm_set_spte_hva_hv,
        .mmu_destroy = kvmppc_mmu_destroy_hv,
        .free_memslot = kvmppc_core_free_memslot_hv,
        .create_memslot = kvmppc_core_create_memslot_hv,
        .init_vm = kvmppc_core_init_vm_hv,
        .destroy_vm = kvmppc_core_destroy_vm_hv,
        .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
        .emulate_op = kvmppc_core_emulate_op_hv,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
        .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
        .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
        .hcall_implemented = kvmppc_hcall_impl_hv,
#ifdef CONFIG_KVM_XICS
        .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
        .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
#endif
        .configure_mmu = kvmhv_configure_mmu,
        .get_rmmu_info = kvmhv_get_rmmu_info,
        .set_smt_mode = kvmhv_set_smt_mode,
};

static int kvm_init_subcore_bitmap(void)
{
        int i, j;
        int nr_cores = cpu_nr_cores();
        struct sibling_subcore_state *sibling_subcore_state;

        for (i = 0; i < nr_cores; i++) {
                int first_cpu = i * threads_per_core;
                int node = cpu_to_node(first_cpu);

                /* Ignore if it is already allocated. */
                if (paca_ptrs[first_cpu]->sibling_subcore_state)
                        continue;

                sibling_subcore_state =
                        kmalloc_node(sizeof(struct sibling_subcore_state),
                                     GFP_KERNEL, node);
                if (!sibling_subcore_state)
                        return -ENOMEM;

                memset(sibling_subcore_state, 0,
                       sizeof(struct sibling_subcore_state));

                for (j = 0; j < threads_per_core; j++) {
                        int cpu = first_cpu + j;

                        paca_ptrs[cpu]->sibling_subcore_state =
                                sibling_subcore_state;
                }
        }
        return 0;
}

static int kvmppc_radix_possible(void)
{
        return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled();
}

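/*
 * Note on the PVR check in kvmppc_book3s_init_hv() below: it distinguishes
 * two POWER9 PVR encodings via the 0xe000 bits of the low halfword and
 * treats minor versions below 2.02 (first encoding) or 1.01 (second
 * encoding) as unable to mix HPT-mode and radix-mode threads on one core,
 * setting no_mixing_hpt_and_radix for them.  This is a reading of the
 * version comparisons in the code; the specific chip variants are not
 * named here.
 */
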
static int kvmppc_book3s_init_hv(void)
{
        int r;
        /*
         * FIXME!! Do we need to check on all cpus ?
         */
        r = kvmppc_core_check_processor_compat_hv();
        if (r < 0)
                return -ENODEV;

        r = kvm_init_subcore_bitmap();
        if (r)
                return r;

        /*
         * We need a way of accessing the XICS interrupt controller,
         * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or
         * indirectly, via OPAL.
         */
#ifdef CONFIG_SMP
        if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
                struct device_node *np;

                np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
                if (!np) {
                        pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
                        return -ENODEV;
                }
        }
#endif

        kvm_ops_hv.owner = THIS_MODULE;
        kvmppc_hv_ops = &kvm_ops_hv;

        init_default_hcalls();

        init_vcore_lists();

        r = kvmppc_mmu_hv_init();
        if (r)
                return r;

        if (kvmppc_radix_possible())
                r = kvmppc_radix_init();

        /*
         * POWER9 chips before version 2.02 can't have some threads in
         * HPT mode and some in radix mode on the same core.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                unsigned int pvr = mfspr(SPRN_PVR);
                if ((pvr >> 16) == PVR_POWER9 &&
                    (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
                     ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
                        no_mixing_hpt_and_radix = true;
        }

        return r;
}

static void kvmppc_book3s_exit_hv(void)
{
        kvmppc_free_host_rm_ops();
        if (kvmppc_radix_possible())
                kvmppc_radix_exit();
        kvmppc_hv_ops = NULL;
}

module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");