/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

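/*
 * Sleep on the wait queue hashed under @token until the host signals
 * "page ready" for the same token via kvm_async_pf_task_wake().  If the
 * wake-up raced ahead of us, a dummy node carrying the token is already
 * hashed and we return immediately.  Contexts that must not schedule
 * (the idle task, or a preempt count above 1) halt instead of sleeping
 * and are woken with a reschedule IPI.
 */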
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake-up was delivered ahead of the #PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swait_active(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another cpu
			 * handles the async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

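/*
 * Page-fault entry point used when async page faults are enabled.  The
 * host stores the reason code in the per-cpu apf_reason area registered
 * through MSR_KVM_ASYNC_PF_EN and passes the token in CR2; a reason of
 * zero means an ordinary page fault.
 */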
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	/*
	 * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
	 * guest kernel works like a bare metal kernel with additional
	 * features, and paravirt_enabled is about features that are
	 * missing.
	 */
	pv_info.paravirt_enabled = 0;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

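/*
 * The host increments steal_time.version before and after it updates
 * the record, so an odd version means an update is in flight.  Retry
 * until we read an even version that is unchanged around the load of
 * the steal value (a seqcount-style protocol).
 */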
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

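/*
 * Find the KVM leaves in the hypervisor CPUID range (0x40000000 and up)
 * by their "KVMKVMKVM\0\0\0" signature.  Zero means no KVM signature
 * was found.
 */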
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>

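/*
 * pv_lock_ops.wait callback for queued spinlocks: halt this vCPU until
 * it is kicked via kvm_kick_cpu().  The final check of *ptr is done
 * with interrupts disabled so we never halt on a value that is already
 * stale.
 */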
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we are kicked.  Note that we do
	 * a safe halt for the irq-enabled case, to avoid hanging if the
	 * lock info is overwritten in the irq spinlock slowpath and no
	 * spurious interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#else /* !CONFIG_QUEUED_SPINLOCKS */

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS	30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = READ_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one cpu resets the stats */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);
	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

static struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm-guest' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

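/*
 * Slowpath entry for the paravirt ticket lock: advertise the (lock,
 * ticket) pair we are waiting for in klock_waiting, mark the lock as
 * contended so the unlocker takes its slowpath, and halt until
 * kvm_unlock_kick() kicks this vCPU when our ticket comes up.
 */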
__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;
	__ticket_t head;

	if (in_nmi())
		return;

	w = this_cpu_ptr(&klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic but we should not rely on its
	 * reordering guarantees. So a barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);

	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic, does not cross the read */
	smp_mb__after_atomic();

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * Halt until it's our turn and we are kicked.  Note that we do
	 * a safe halt for the irq-enabled case, to avoid hanging if the
	 * lock info is overwritten in the irq spinlock slowpath and no
	 * spurious interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}

#endif /* !CONFIG_QUEUED_SPINLOCKS */

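/*
 * KVM_FEATURE_PV_UNHALT advertises that the host implements the
 * KVM_HC_KICK_CPU hypercall used by kvm_kick_cpu(); without it the
 * pv_lock_ops callbacks below are left on their native defaults.
 */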
/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

#ifdef CONFIG_QUEUED_SPINLOCKS
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;
#else /* !CONFIG_QUEUED_SPINLOCKS */
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
#endif
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */