1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * KVM paravirt_ops implementation 4 * 5 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 6 * Copyright IBM Corporation, 2007 7 * Authors: Anthony Liguori <aliguori@us.ibm.com> 8 */ 9 10 #include <linux/context_tracking.h> 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/kvm_para.h> 14 #include <linux/cpu.h> 15 #include <linux/mm.h> 16 #include <linux/highmem.h> 17 #include <linux/hardirq.h> 18 #include <linux/notifier.h> 19 #include <linux/reboot.h> 20 #include <linux/hash.h> 21 #include <linux/sched.h> 22 #include <linux/slab.h> 23 #include <linux/kprobes.h> 24 #include <linux/debugfs.h> 25 #include <linux/nmi.h> 26 #include <linux/swait.h> 27 #include <asm/timer.h> 28 #include <asm/cpu.h> 29 #include <asm/traps.h> 30 #include <asm/desc.h> 31 #include <asm/tlbflush.h> 32 #include <asm/apic.h> 33 #include <asm/apicdef.h> 34 #include <asm/hypervisor.h> 35 #include <asm/tlb.h> 36 37 static int kvmapf = 1; 38 39 static int __init parse_no_kvmapf(char *arg) 40 { 41 kvmapf = 0; 42 return 0; 43 } 44 45 early_param("no-kvmapf", parse_no_kvmapf); 46 47 static int steal_acc = 1; 48 static int __init parse_no_stealacc(char *arg) 49 { 50 steal_acc = 0; 51 return 0; 52 } 53 54 early_param("no-steal-acc", parse_no_stealacc); 55 56 static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64); 57 DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible; 58 static int has_steal_clock = 0; 59 60 /* 61 * No need for any "IO delay" on KVM 62 */ 63 static void kvm_io_delay(void) 64 { 65 } 66 67 #define KVM_TASK_SLEEP_HASHBITS 8 68 #define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS) 69 70 struct kvm_task_sleep_node { 71 struct hlist_node link; 72 struct swait_queue_head wq; 73 u32 token; 74 int cpu; 75 bool halted; 76 }; 77 78 static struct kvm_task_sleep_head { 79 raw_spinlock_t lock; 80 struct hlist_head list; 81 } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE]; 82 83 static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b, 84 u32 token) 85 { 86 struct hlist_node *p; 87 88 hlist_for_each(p, &b->list) { 89 struct kvm_task_sleep_node *n = 90 hlist_entry(p, typeof(*n), link); 91 if (n->token == token) 92 return n; 93 } 94 95 return NULL; 96 } 97 98 /* 99 * @interrupt_kernel: Is this called from a routine which interrupts the kernel 100 * (other than user space)? 101 */ 102 void kvm_async_pf_task_wait(u32 token, int interrupt_kernel) 103 { 104 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); 105 struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; 106 struct kvm_task_sleep_node n, *e; 107 DECLARE_SWAITQUEUE(wait); 108 109 rcu_irq_enter(); 110 111 raw_spin_lock(&b->lock); 112 e = _find_apf_task(b, token); 113 if (e) { 114 /* dummy entry exist -> wake up was delivered ahead of PF */ 115 hlist_del(&e->link); 116 kfree(e); 117 raw_spin_unlock(&b->lock); 118 119 rcu_irq_exit(); 120 return; 121 } 122 123 n.token = token; 124 n.cpu = smp_processor_id(); 125 n.halted = is_idle_task(current) || 126 (IS_ENABLED(CONFIG_PREEMPT_COUNT) 127 ? preempt_count() > 1 || rcu_preempt_depth() 128 : interrupt_kernel); 129 init_swait_queue_head(&n.wq); 130 hlist_add_head(&n.link, &b->list); 131 raw_spin_unlock(&b->lock); 132 133 for (;;) { 134 if (!n.halted) 135 prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE); 136 if (hlist_unhashed(&n.link)) 137 break; 138 139 rcu_irq_exit(); 140 141 if (!n.halted) { 142 local_irq_enable(); 143 schedule(); 144 local_irq_disable(); 145 } else { 146 /* 147 * We cannot reschedule. So halt. 148 */ 149 native_safe_halt(); 150 local_irq_disable(); 151 } 152 153 rcu_irq_enter(); 154 } 155 if (!n.halted) 156 finish_swait(&n.wq, &wait); 157 158 rcu_irq_exit(); 159 return; 160 } 161 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); 162 163 static void apf_task_wake_one(struct kvm_task_sleep_node *n) 164 { 165 hlist_del_init(&n->link); 166 if (n->halted) 167 smp_send_reschedule(n->cpu); 168 else if (swq_has_sleeper(&n->wq)) 169 swake_up_one(&n->wq); 170 } 171 172 static void apf_task_wake_all(void) 173 { 174 int i; 175 176 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) { 177 struct hlist_node *p, *next; 178 struct kvm_task_sleep_head *b = &async_pf_sleepers[i]; 179 raw_spin_lock(&b->lock); 180 hlist_for_each_safe(p, next, &b->list) { 181 struct kvm_task_sleep_node *n = 182 hlist_entry(p, typeof(*n), link); 183 if (n->cpu == smp_processor_id()) 184 apf_task_wake_one(n); 185 } 186 raw_spin_unlock(&b->lock); 187 } 188 } 189 190 void kvm_async_pf_task_wake(u32 token) 191 { 192 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); 193 struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; 194 struct kvm_task_sleep_node *n; 195 196 if (token == ~0) { 197 apf_task_wake_all(); 198 return; 199 } 200 201 again: 202 raw_spin_lock(&b->lock); 203 n = _find_apf_task(b, token); 204 if (!n) { 205 /* 206 * async PF was not yet handled. 207 * Add dummy entry for the token. 208 */ 209 n = kzalloc(sizeof(*n), GFP_ATOMIC); 210 if (!n) { 211 /* 212 * Allocation failed! Busy wait while other cpu 213 * handles async PF. 214 */ 215 raw_spin_unlock(&b->lock); 216 cpu_relax(); 217 goto again; 218 } 219 n->token = token; 220 n->cpu = smp_processor_id(); 221 init_swait_queue_head(&n->wq); 222 hlist_add_head(&n->link, &b->list); 223 } else 224 apf_task_wake_one(n); 225 raw_spin_unlock(&b->lock); 226 return; 227 } 228 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); 229 230 u32 kvm_read_and_reset_pf_reason(void) 231 { 232 u32 reason = 0; 233 234 if (__this_cpu_read(apf_reason.enabled)) { 235 reason = __this_cpu_read(apf_reason.reason); 236 __this_cpu_write(apf_reason.reason, 0); 237 } 238 239 return reason; 240 } 241 EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason); 242 NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason); 243 244 dotraplinkage void 245 do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) 246 { 247 enum ctx_state prev_state; 248 249 switch (kvm_read_and_reset_pf_reason()) { 250 default: 251 do_page_fault(regs, error_code, address); 252 break; 253 case KVM_PV_REASON_PAGE_NOT_PRESENT: 254 /* page is swapped out by the host. */ 255 prev_state = exception_enter(); 256 kvm_async_pf_task_wait((u32)address, !user_mode(regs)); 257 exception_exit(prev_state); 258 break; 259 case KVM_PV_REASON_PAGE_READY: 260 rcu_irq_enter(); 261 kvm_async_pf_task_wake((u32)address); 262 rcu_irq_exit(); 263 break; 264 } 265 } 266 NOKPROBE_SYMBOL(do_async_page_fault); 267 268 static void __init paravirt_ops_setup(void) 269 { 270 pv_info.name = "KVM"; 271 272 if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) 273 pv_ops.cpu.io_delay = kvm_io_delay; 274 275 #ifdef CONFIG_X86_IO_APIC 276 no_timer_check = 1; 277 #endif 278 } 279 280 static void kvm_register_steal_time(void) 281 { 282 int cpu = smp_processor_id(); 283 struct kvm_steal_time *st = &per_cpu(steal_time, cpu); 284 285 if (!has_steal_clock) 286 return; 287 288 wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); 289 pr_info("kvm-stealtime: cpu %d, msr %llx\n", 290 cpu, (unsigned long long) slow_virt_to_phys(st)); 291 } 292 293 static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED; 294 295 static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val) 296 { 297 /** 298 * This relies on __test_and_clear_bit to modify the memory 299 * in a way that is atomic with respect to the local CPU. 300 * The hypervisor only accesses this memory from the local CPU so 301 * there's no need for lock or memory barriers. 302 * An optimization barrier is implied in apic write. 303 */ 304 if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) 305 return; 306 apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK); 307 } 308 309 static void kvm_guest_cpu_init(void) 310 { 311 if (!kvm_para_available()) 312 return; 313 314 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { 315 u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); 316 317 #ifdef CONFIG_PREEMPT 318 pa |= KVM_ASYNC_PF_SEND_ALWAYS; 319 #endif 320 pa |= KVM_ASYNC_PF_ENABLED; 321 322 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) 323 pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; 324 325 wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); 326 __this_cpu_write(apf_reason.enabled, 1); 327 printk(KERN_INFO"KVM setup async PF for cpu %d\n", 328 smp_processor_id()); 329 } 330 331 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) { 332 unsigned long pa; 333 /* Size alignment is implied but just to make it explicit. */ 334 BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); 335 __this_cpu_write(kvm_apic_eoi, 0); 336 pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) 337 | KVM_MSR_ENABLED; 338 wrmsrl(MSR_KVM_PV_EOI_EN, pa); 339 } 340 341 if (has_steal_clock) 342 kvm_register_steal_time(); 343 } 344 345 static void kvm_pv_disable_apf(void) 346 { 347 if (!__this_cpu_read(apf_reason.enabled)) 348 return; 349 350 wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); 351 __this_cpu_write(apf_reason.enabled, 0); 352 353 printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", 354 smp_processor_id()); 355 } 356 357 static void kvm_pv_guest_cpu_reboot(void *unused) 358 { 359 /* 360 * We disable PV EOI before we load a new kernel by kexec, 361 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory. 362 * New kernel can re-enable when it boots. 363 */ 364 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) 365 wrmsrl(MSR_KVM_PV_EOI_EN, 0); 366 kvm_pv_disable_apf(); 367 kvm_disable_steal_time(); 368 } 369 370 static int kvm_pv_reboot_notify(struct notifier_block *nb, 371 unsigned long code, void *unused) 372 { 373 if (code == SYS_RESTART) 374 on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1); 375 return NOTIFY_DONE; 376 } 377 378 static struct notifier_block kvm_pv_reboot_nb = { 379 .notifier_call = kvm_pv_reboot_notify, 380 }; 381 382 static u64 kvm_steal_clock(int cpu) 383 { 384 u64 steal; 385 struct kvm_steal_time *src; 386 int version; 387 388 src = &per_cpu(steal_time, cpu); 389 do { 390 version = src->version; 391 virt_rmb(); 392 steal = src->steal; 393 virt_rmb(); 394 } while ((version & 1) || (version != src->version)); 395 396 return steal; 397 } 398 399 void kvm_disable_steal_time(void) 400 { 401 if (!has_steal_clock) 402 return; 403 404 wrmsr(MSR_KVM_STEAL_TIME, 0, 0); 405 } 406 407 static inline void __set_percpu_decrypted(void *ptr, unsigned long size) 408 { 409 early_set_memory_decrypted((unsigned long) ptr, size); 410 } 411 412 /* 413 * Iterate through all possible CPUs and map the memory region pointed 414 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once. 415 * 416 * Note: we iterate through all possible CPUs to ensure that CPUs 417 * hotplugged will have their per-cpu variable already mapped as 418 * decrypted. 419 */ 420 static void __init sev_map_percpu_data(void) 421 { 422 int cpu; 423 424 if (!sev_active()) 425 return; 426 427 for_each_possible_cpu(cpu) { 428 __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason)); 429 __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time)); 430 __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi)); 431 } 432 } 433 434 #ifdef CONFIG_SMP 435 #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) 436 437 static void __send_ipi_mask(const struct cpumask *mask, int vector) 438 { 439 unsigned long flags; 440 int cpu, apic_id, icr; 441 int min = 0, max = 0; 442 #ifdef CONFIG_X86_64 443 __uint128_t ipi_bitmap = 0; 444 #else 445 u64 ipi_bitmap = 0; 446 #endif 447 long ret; 448 449 if (cpumask_empty(mask)) 450 return; 451 452 local_irq_save(flags); 453 454 switch (vector) { 455 default: 456 icr = APIC_DM_FIXED | vector; 457 break; 458 case NMI_VECTOR: 459 icr = APIC_DM_NMI; 460 break; 461 } 462 463 for_each_cpu(cpu, mask) { 464 apic_id = per_cpu(x86_cpu_to_apicid, cpu); 465 if (!ipi_bitmap) { 466 min = max = apic_id; 467 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { 468 ipi_bitmap <<= min - apic_id; 469 min = apic_id; 470 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { 471 max = apic_id < max ? max : apic_id; 472 } else { 473 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, 474 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); 475 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); 476 min = max = apic_id; 477 ipi_bitmap = 0; 478 } 479 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap); 480 } 481 482 if (ipi_bitmap) { 483 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, 484 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); 485 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); 486 } 487 488 local_irq_restore(flags); 489 } 490 491 static void kvm_send_ipi_mask(const struct cpumask *mask, int vector) 492 { 493 __send_ipi_mask(mask, vector); 494 } 495 496 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) 497 { 498 unsigned int this_cpu = smp_processor_id(); 499 struct cpumask new_mask; 500 const struct cpumask *local_mask; 501 502 cpumask_copy(&new_mask, mask); 503 cpumask_clear_cpu(this_cpu, &new_mask); 504 local_mask = &new_mask; 505 __send_ipi_mask(local_mask, vector); 506 } 507 508 static void kvm_send_ipi_allbutself(int vector) 509 { 510 kvm_send_ipi_mask_allbutself(cpu_online_mask, vector); 511 } 512 513 static void kvm_send_ipi_all(int vector) 514 { 515 __send_ipi_mask(cpu_online_mask, vector); 516 } 517 518 /* 519 * Set the IPI entry points 520 */ 521 static void kvm_setup_pv_ipi(void) 522 { 523 apic->send_IPI_mask = kvm_send_ipi_mask; 524 apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself; 525 apic->send_IPI_allbutself = kvm_send_ipi_allbutself; 526 apic->send_IPI_all = kvm_send_ipi_all; 527 pr_info("KVM setup pv IPIs\n"); 528 } 529 530 static void kvm_smp_send_call_func_ipi(const struct cpumask *mask) 531 { 532 int cpu; 533 534 native_send_call_func_ipi(mask); 535 536 /* Make sure other vCPUs get a chance to run if they need to. */ 537 for_each_cpu(cpu, mask) { 538 if (vcpu_is_preempted(cpu)) { 539 kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu)); 540 break; 541 } 542 } 543 } 544 545 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus) 546 { 547 native_smp_prepare_cpus(max_cpus); 548 if (kvm_para_has_hint(KVM_HINTS_REALTIME)) 549 static_branch_disable(&virt_spin_lock_key); 550 } 551 552 static void __init kvm_smp_prepare_boot_cpu(void) 553 { 554 /* 555 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init() 556 * shares the guest physical address with the hypervisor. 557 */ 558 sev_map_percpu_data(); 559 560 kvm_guest_cpu_init(); 561 native_smp_prepare_boot_cpu(); 562 kvm_spinlock_init(); 563 } 564 565 static void kvm_guest_cpu_offline(void) 566 { 567 kvm_disable_steal_time(); 568 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) 569 wrmsrl(MSR_KVM_PV_EOI_EN, 0); 570 kvm_pv_disable_apf(); 571 apf_task_wake_all(); 572 } 573 574 static int kvm_cpu_online(unsigned int cpu) 575 { 576 local_irq_disable(); 577 kvm_guest_cpu_init(); 578 local_irq_enable(); 579 return 0; 580 } 581 582 static int kvm_cpu_down_prepare(unsigned int cpu) 583 { 584 local_irq_disable(); 585 kvm_guest_cpu_offline(); 586 local_irq_enable(); 587 return 0; 588 } 589 #endif 590 591 static void __init kvm_apf_trap_init(void) 592 { 593 update_intr_gate(X86_TRAP_PF, async_page_fault); 594 } 595 596 static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask); 597 598 static void kvm_flush_tlb_others(const struct cpumask *cpumask, 599 const struct flush_tlb_info *info) 600 { 601 u8 state; 602 int cpu; 603 struct kvm_steal_time *src; 604 struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask); 605 606 cpumask_copy(flushmask, cpumask); 607 /* 608 * We have to call flush only on online vCPUs. And 609 * queue flush_on_enter for pre-empted vCPUs 610 */ 611 for_each_cpu(cpu, flushmask) { 612 src = &per_cpu(steal_time, cpu); 613 state = READ_ONCE(src->preempted); 614 if ((state & KVM_VCPU_PREEMPTED)) { 615 if (try_cmpxchg(&src->preempted, &state, 616 state | KVM_VCPU_FLUSH_TLB)) 617 __cpumask_clear_cpu(cpu, flushmask); 618 } 619 } 620 621 native_flush_tlb_others(flushmask, info); 622 } 623 624 static void __init kvm_guest_init(void) 625 { 626 int i; 627 628 if (!kvm_para_available()) 629 return; 630 631 paravirt_ops_setup(); 632 register_reboot_notifier(&kvm_pv_reboot_nb); 633 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) 634 raw_spin_lock_init(&async_pf_sleepers[i].lock); 635 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) 636 x86_init.irqs.trap_init = kvm_apf_trap_init; 637 638 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { 639 has_steal_clock = 1; 640 pv_ops.time.steal_clock = kvm_steal_clock; 641 } 642 643 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && 644 !kvm_para_has_hint(KVM_HINTS_REALTIME) && 645 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { 646 pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; 647 pv_ops.mmu.tlb_remove_table = tlb_remove_table; 648 } 649 650 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) 651 apic_set_eoi_write(kvm_guest_apic_eoi_write); 652 653 #ifdef CONFIG_SMP 654 smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus; 655 smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; 656 if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) && 657 !kvm_para_has_hint(KVM_HINTS_REALTIME) && 658 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { 659 smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi; 660 pr_info("KVM setup pv sched yield\n"); 661 } 662 if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online", 663 kvm_cpu_online, kvm_cpu_down_prepare) < 0) 664 pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n"); 665 #else 666 sev_map_percpu_data(); 667 kvm_guest_cpu_init(); 668 #endif 669 670 /* 671 * Hard lockup detection is enabled by default. Disable it, as guests 672 * can get false positives too easily, for example if the host is 673 * overcommitted. 674 */ 675 hardlockup_detector_disable(); 676 } 677 678 static noinline uint32_t __kvm_cpuid_base(void) 679 { 680 if (boot_cpu_data.cpuid_level < 0) 681 return 0; /* So we don't blow up on old processors */ 682 683 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 684 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0); 685 686 return 0; 687 } 688 689 static inline uint32_t kvm_cpuid_base(void) 690 { 691 static int kvm_cpuid_base = -1; 692 693 if (kvm_cpuid_base == -1) 694 kvm_cpuid_base = __kvm_cpuid_base(); 695 696 return kvm_cpuid_base; 697 } 698 699 bool kvm_para_available(void) 700 { 701 return kvm_cpuid_base() != 0; 702 } 703 EXPORT_SYMBOL_GPL(kvm_para_available); 704 705 unsigned int kvm_arch_para_features(void) 706 { 707 return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES); 708 } 709 710 unsigned int kvm_arch_para_hints(void) 711 { 712 return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES); 713 } 714 715 static uint32_t __init kvm_detect(void) 716 { 717 return kvm_cpuid_base(); 718 } 719 720 static void __init kvm_apic_init(void) 721 { 722 #if defined(CONFIG_SMP) 723 if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI)) 724 kvm_setup_pv_ipi(); 725 #endif 726 } 727 728 static void __init kvm_init_platform(void) 729 { 730 kvmclock_init(); 731 x86_platform.apic_post_init = kvm_apic_init; 732 } 733 734 const __initconst struct hypervisor_x86 x86_hyper_kvm = { 735 .name = "KVM", 736 .detect = kvm_detect, 737 .type = X86_HYPER_KVM, 738 .init.guest_late_init = kvm_guest_init, 739 .init.x2apic_available = kvm_para_available, 740 .init.init_platform = kvm_init_platform, 741 }; 742 743 static __init int activate_jump_labels(void) 744 { 745 if (has_steal_clock) { 746 static_key_slow_inc(¶virt_steal_enabled); 747 if (steal_acc) 748 static_key_slow_inc(¶virt_steal_rq_enabled); 749 } 750 751 return 0; 752 } 753 arch_initcall(activate_jump_labels); 754 755 static __init int kvm_setup_pv_tlb_flush(void) 756 { 757 int cpu; 758 759 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && 760 !kvm_para_has_hint(KVM_HINTS_REALTIME) && 761 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { 762 for_each_possible_cpu(cpu) { 763 zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), 764 GFP_KERNEL, cpu_to_node(cpu)); 765 } 766 pr_info("KVM setup pv remote TLB flush\n"); 767 } 768 769 return 0; 770 } 771 arch_initcall(kvm_setup_pv_tlb_flush); 772 773 #ifdef CONFIG_PARAVIRT_SPINLOCKS 774 775 /* Kick a cpu by its apicid. Used to wake up a halted vcpu */ 776 static void kvm_kick_cpu(int cpu) 777 { 778 int apicid; 779 unsigned long flags = 0; 780 781 apicid = per_cpu(x86_cpu_to_apicid, cpu); 782 kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid); 783 } 784 785 #include <asm/qspinlock.h> 786 787 static void kvm_wait(u8 *ptr, u8 val) 788 { 789 unsigned long flags; 790 791 if (in_nmi()) 792 return; 793 794 local_irq_save(flags); 795 796 if (READ_ONCE(*ptr) != val) 797 goto out; 798 799 /* 800 * halt until it's our turn and kicked. Note that we do safe halt 801 * for irq enabled case to avoid hang when lock info is overwritten 802 * in irq spinlock slowpath and no spurious interrupt occur to save us. 803 */ 804 if (arch_irqs_disabled_flags(flags)) 805 halt(); 806 else 807 safe_halt(); 808 809 out: 810 local_irq_restore(flags); 811 } 812 813 #ifdef CONFIG_X86_32 814 __visible bool __kvm_vcpu_is_preempted(long cpu) 815 { 816 struct kvm_steal_time *src = &per_cpu(steal_time, cpu); 817 818 return !!(src->preempted & KVM_VCPU_PREEMPTED); 819 } 820 PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted); 821 822 #else 823 824 #include <asm/asm-offsets.h> 825 826 extern bool __raw_callee_save___kvm_vcpu_is_preempted(long); 827 828 /* 829 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and 830 * restoring to/from the stack. 831 */ 832 asm( 833 ".pushsection .text;" 834 ".global __raw_callee_save___kvm_vcpu_is_preempted;" 835 ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;" 836 "__raw_callee_save___kvm_vcpu_is_preempted:" 837 "movq __per_cpu_offset(,%rdi,8), %rax;" 838 "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);" 839 "setne %al;" 840 "ret;" 841 ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;" 842 ".popsection"); 843 844 #endif 845 846 /* 847 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present. 848 */ 849 void __init kvm_spinlock_init(void) 850 { 851 if (!kvm_para_available()) 852 return; 853 /* Does host kernel support KVM_FEATURE_PV_UNHALT? */ 854 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) 855 return; 856 857 if (kvm_para_has_hint(KVM_HINTS_REALTIME)) 858 return; 859 860 /* Don't use the pvqspinlock code if there is only 1 vCPU. */ 861 if (num_possible_cpus() == 1) 862 return; 863 864 __pv_init_lock_hash(); 865 pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; 866 pv_ops.lock.queued_spin_unlock = 867 PV_CALLEE_SAVE(__pv_queued_spin_unlock); 868 pv_ops.lock.wait = kvm_wait; 869 pv_ops.lock.kick = kvm_kick_cpu; 870 871 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { 872 pv_ops.lock.vcpu_is_preempted = 873 PV_CALLEE_SAVE(__kvm_vcpu_is_preempted); 874 } 875 } 876 877 #endif /* CONFIG_PARAVIRT_SPINLOCKS */ 878