xref: /openbmc/linux/arch/x86/kernel/kvm.c (revision aa3fc090)
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
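
/*
 * Example (not in the original source): each early_param() handler
 * above is wired to a kernel command line token, so a guest can opt
 * out of individual paravirt features at boot, e.g.:
 *
 *	linux ... no-kvmapf no-steal-acc no-kvmclock-vsyscall
 *
 * Each handler just clears the corresponding enable flag before the
 * feature would be registered in kvm_guest_init()/kvm_guest_cpu_init().
 */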

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
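
/*
 * Example (not in the original source): a token handed to us by the
 * host is mapped to one of the 256 buckets above with hash_32(), e.g.
 *
 *	u32 token = 0x1234;
 *	struct kvm_task_sleep_head *b =
 *		&async_pf_sleepers[hash_32(token, KVM_TASK_SLEEP_HASHBITS)];
 *
 * Wait and wake must agree on the bucket, which they do because both
 * kvm_async_pf_task_wait() and kvm_async_pf_task_wake() hash the same
 * token the same way.
 */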

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	rcu_irq_enter();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* A dummy entry exists: the wakeup was delivered before the #PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule, so halt until the host
			 * delivers the wakeup.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
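
/*
 * Protocol sketch (not in the original source): the host pairs each
 * "page not present" async #PF with a later "page ready" notification
 * carrying the same token, so a typical sequence on one vcpu is
 *
 *	do_async_page_fault()			do_async_page_fault()
 *	  -> kvm_async_pf_task_wait(token);	  -> kvm_async_pf_task_wake(token);
 *	     sleeps (or halts) ...		     finds the node, wakes it
 *
 * If the wake arrives first, kvm_async_pf_task_wake() leaves a dummy
 * node in the bucket and the subsequent wait returns immediately.
 */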

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * The async #PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another CPU
			 * handles the async #PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* The page was swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}
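
/*
 * Example (not in the original source): the value written to
 * MSR_KVM_STEAL_TIME is the guest physical address of the per-cpu
 * kvm_steal_time area with the enable bit (KVM_MSR_ENABLED, bit 0)
 * set. Since the area is __aligned(64), the low address bits are free
 * for flags:
 *
 *	pa = slow_virt_to_phys(st);	// e.g. 0x7a3c40 (64-byte aligned)
 *	wrmsrl(MSR_KVM_STEAL_TIME, pa | KVM_MSR_ENABLED);	// 0x7a3c41
 *
 * Writing 0 (see kvm_disable_steal_time()) tells the host to stop
 * updating the area.
 */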

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU, so
	 * there's no need for locks or memory barriers.
	 * An optimization barrier is implied by the apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}
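
/*
 * Sketch of the PV EOI handshake (not in the original source): when
 * injecting an interrupt whose EOI exit can be skipped, the host sets
 * KVM_PV_EOI_BIT in the guest's kvm_apic_eoi word. On EOI the guest
 * atomically tests and clears the bit:
 *
 *	host: bit set	-> guest: __test_and_clear_bit() == 1
 *			   -> no APIC_EOI register write needed
 *	host: bit clear	-> guest: __test_and_clear_bit() == 0
 *			   -> fall back to apic_write(APIC_EOI, ...)
 *
 * which saves an MSR/MMIO exit on the hot interrupt path.
 */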

void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied by the type, but make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}
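
/*
 * Example (not in the original source): layout of the value written to
 * MSR_KVM_ASYNC_PF_EN above, assuming the usual bit definitions from
 * <uapi/asm/kvm_para.h> (KVM_ASYNC_PF_ENABLED is bit 0,
 * KVM_ASYNC_PF_SEND_ALWAYS is bit 1):
 *
 *	bits 63..6	physical address of the 64-byte apf_reason area
 *	bit  1		also deliver async #PFs in non-preemptible context
 *	bit  0		enable async page faults for this vcpu
 */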

static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
	 * memory. The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}
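
/*
 * Reader-side sketch of the version protocol above (not in the
 * original source): the host increments "version" once before and once
 * after updating the steal-time area, so an odd value means an update
 * is in flight and a changed value means we raced with one:
 *
 *	v1 = src->version;	// odd -> retry
 *	steal = src->steal;
 *	v2 = src->version;	// v2 != v1 -> retry
 *
 * This is the same even/odd scheme as a seqcount, done by hand because
 * the writer is the hypervisor rather than another kernel thread.
 */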

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}
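
/*
 * Detection sketch (not in the original source): hypervisor_cpuid_base()
 * probes the hypervisor CPUID leaf range, comparing the 12-byte
 * signature returned in EBX:ECX:EDX against "KVMKVMKVM\0\0\0", roughly:
 *
 *	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
 *		cpuid(base, &eax, &ebx, &ecx, &edx);
 *		if (!memcmp("KVMKVMKVM\0\0\0", &ebx, 12))
 *			return base;	// usually 0x40000000
 *	}
 *
 * The result is cached in kvm_cpuid_base above, and the feature flags
 * are then read from cpuid_eax(base | KVM_CPUID_FEATURES).
 */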

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
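
/*
 * Example (not in the original source): kvm_hypercall2() issues a
 * two-argument VMCALL/VMMCALL to the host, so the call above is
 * morally
 *
 *	eax = KVM_HC_KICK_CPU;	// hypercall number
 *	ebx = flags;		// currently always 0
 *	ecx = apicid;		// which vcpu to unhalt
 *	vmcall;
 *
 * The host responds by waking the target vcpu out of the halt it
 * entered in kvm_lock_spinning() below.
 */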

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS	30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = ACCESS_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one CPU resets the stats */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);
	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
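
/*
 * Example (not in the original source): __spin_time_accum() buckets a
 * blocked time of delta nanoseconds by its power of two, so with
 * HISTO_BUCKETS == 30:
 *
 *	delta = 5000 (5us)	-> ilog2(5000) = 12	-> histo[12]++
 *	delta = 2000000 (2ms)	-> ilog2(2000000) = 20	-> histo[20]++
 *	delta >= 2^30 ns (~1s)	-> histo[HISTO_BUCKETS]++ (overflow bucket)
 */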

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm-guest' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
		   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
		   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
		   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
		   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
		     spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
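
/*
 * Example (not in the original source): with debugfs mounted at the
 * usual /sys/kernel/debug, the initcall above produces
 *
 *	/sys/kernel/debug/kvm-guest/spinlocks/zero_stats
 *	/sys/kernel/debug/kvm-guest/spinlocks/taken_slow
 *	/sys/kernel/debug/kvm-guest/spinlocks/taken_slow_pickup
 *	/sys/kernel/debug/kvm-guest/spinlocks/released_slow
 *	/sys/kernel/debug/kvm-guest/spinlocks/released_slow_kicked
 *	/sys/kernel/debug/kvm-guest/spinlocks/time_blocked
 *	/sys/kernel/debug/kvm-guest/spinlocks/histo_blocked
 *
 * Writing a non-zero value to zero_stats arms a one-shot reset of all
 * counters (see check_zero()).
 */
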
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;

	if (in_nmi())
		return;

	w = &__get_cpu_var(klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially set-up state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic, but we should not rely on
	 * its reordering guarantees, so a barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);

	barrier();

	/*
	 * Mark entry to the slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	if (ACCESS_ONCE(lock->tickets.head) == want) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * Halt until it's our turn and we've been kicked. Note that we
	 * do a safe halt in the irqs-enabled case, to avoid hanging if
	 * the lock info is overwritten in the irq spinlock slowpath and
	 * no spurious interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick the vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
		if (ACCESS_ONCE(w->lock) == lock &&
		    ACCESS_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}
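
/*
 * Walkthrough (not in the original source): the waiter/kicker pair
 * relies on the publication order set up in kvm_lock_spinning():
 *
 *	waiter (vcpu A)			unlocker (vcpu B)
 *	w->lock = NULL;
 *	w->want = want;			increments lock->tickets.head,
 *	w->lock = lock;			sees the slowpath flag set,
 *	halt();				scans waiting_cpus, matches
 *					(w->lock, w->want), kicks A
 *
 * Because "lock" is only non-NULL while "want" is valid, the unlocker
 * never kicks a cpu based on a stale (lock, want) pair.
 */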

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does the host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */