// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/svm.h>

DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

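/*
 * Async page fault bookkeeping: each outstanding "page not present"
 * notification is identified by a token chosen by the host. Sleeping tasks
 * are tracked in the async_pf_sleepers hash, keyed by hash_32(token). A
 * wakeup that arrives before the corresponding wait leaves a dummy node
 * behind so that the later waiter can return immediately.
 */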
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

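/*
 * Distinguish a host-injected async #PF from a regular page fault: returns
 * false when no async PF flags are pending so the caller falls back to
 * normal #PF handling, and otherwise consumes the event by waiting on the
 * token for a "page not present" notification.
 */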
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 flags = kvm_read_and_reset_apf_flags();
	irqentry_state_t state;

	if (!flags)
		return false;

	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then die hard as this is not going to end well
	 * and the host side is seriously broken.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
	irqentry_exit(regs, state);
	return true;
}

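/*
 * Interrupt handler for the "page ready" notification. The host raises
 * HYPERVISOR_CALLBACK_VECTOR once a previously announced page has been
 * brought back in; the token identifying the sleeping task is read from the
 * shared apf_reason area and acknowledged via MSR_KVM_ASYNC_PF_ACK.
 */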
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	u32 token;

	ack_APIC_irq();

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(apf_reason.enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("stealtime: cpu %d, msr %llx\n", cpu,
		(unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

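/*
 * Per-CPU guest setup: registers the shared apf_reason area for async page
 * faults, the PV EOI flag word and the steal time area with the hypervisor.
 * Runs on the boot CPU from kvm_smp_prepare_boot_cpu() and on secondary
 * CPUs from the kvm_cpu_online() hotplug callback.
 */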
static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa;

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec, since
	 * MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
	 * The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

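/*
 * Steal time is published by the host in a version/sequence protected
 * structure: an odd version means an update is in progress, so the value is
 * re-read until two reads of the version agree and the version is even.
 */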
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged later will have their per-cpu variables already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
				  ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
			  ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("setup PV IPIs\n");
}

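/*
 * smp_call_function variant that, after sending the IPIs, yields to one of
 * the target vCPUs if the host reports it as preempted, so the cross-CPU
 * function call is not stalled behind a descheduled vCPU.
 */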
static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

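/*
 * PV remote TLB flush: vCPUs that the host reports as preempted do not need
 * an IPI right now; instead, KVM_VCPU_FLUSH_TLB is set in their steal time
 * area so the host flushes their TLB before running them again, and they
 * are dropped from the mask passed to the native flush.
 */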
static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		/*
		 * The local vCPU is never preempted, so we do not explicitly
		 * skip check for local vCPU - it will never be cleared from
		 * flushmask.
		 */
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_multi(flushmask, info);
}

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}

#endif

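/*
 * Main guest-side initialization, run late in boot via
 * x86_hyper_kvm.init.guest_late_init below. Wires up the paravirt hooks
 * that the CPUID feature and hint bits advertise: steal time accounting,
 * PV EOI, async page faults, PV TLB flush and PV sched yield.
 */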
static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		static_call_update(pv_steal_clock, kvm_steal_clock);
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("setup PV sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#ifdef CONFIG_SMP
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static bool __init kvm_msi_ext_dest_id(void)
{
	return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name				= "KVM",
	.detect				= kvm_detect,
	.type				= X86_HYPER_KVM,
	.init.guest_late_init		= kvm_guest_init,
	.init.x2apic_available		= kvm_para_available,
	.init.msi_ext_dest_id		= kvm_msi_ext_dest_id,
	.init.init_platform		= kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

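/*
 * Paravirtual spinlock support: a waiter halts in kvm_wait() until the lock
 * holder kicks its vCPU with the KVM_HC_KICK_CPU hypercall, avoiding wasted
 * spinning while the holder's vCPU is not running.
 */
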
/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	if (in_nmi())
		return;

	/*
	 * Halt until it's our turn and we got kicked. Note that we do a safe
	 * halt for the IRQs-enabled case, to avoid hanging when the lock word
	 * is overwritten in the IRQ spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (irqs_disabled()) {
		if (READ_ONCE(*ptr) == val)
			halt();
	} else {
		local_irq_disable();

		if (READ_ONCE(*ptr) == val)
			safe_halt();

		local_irq_enable();
	}
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid saving and restoring 8 64-bit
 * registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
	 * preferred over native qspinlock when vCPU is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
	/*
	 * When PV spinlock is enabled which is preferred over
	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enabling guest halt polling disables host halt polling */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disabling guest halt polling re-enables host halt polling */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif
983