xref: /openbmc/linux/arch/x86/kernel/kvm.c (revision c8dbaa22)
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

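/*
 * Per-CPU regions shared with the host. apf_reason is where the host
 * writes the async page fault reason before injecting a #PF, and
 * steal_time is where it publishes stolen-time accounting. Both are
 * registered with the host via MSR writes in kvm_guest_cpu_init() and
 * kvm_register_steal_time(); the 64-byte alignment keeps each structure
 * within one cache line and satisfies the paravirt MSR ABI, which
 * reuses the low address bits as flag/enable bits.
 */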
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

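/*
 * Tasks sleeping on an async page fault are tracked in a small hash
 * table keyed by the host-supplied token. A "wake" can arrive before
 * the corresponding "wait" (for instance when the page becomes ready
 * before the faulting vCPU gets around to sleeping), so both sides
 * must cope with finding a pre-existing node for their token.
 */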
struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

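/*
 * Called from the page fault handler when the host reports
 * KVM_PV_REASON_PAGE_NOT_PRESENT: the page we faulted on is being
 * paged in by the host, so sleep until the matching wake token
 * arrives. If the current context cannot schedule (the idle task, or
 * something besides this exception already held preemption off), spin
 * in halt instead so the host can run other vCPUs in the meantime.
 */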
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_enter();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* A dummy entry exists: the wakeup was delivered before the #PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1;
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                rcu_irq_exit();

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }

                rcu_irq_enter();
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swait_active(&n->wq))
                swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}

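/*
 * Called when the host reports KVM_PV_REASON_PAGE_READY: the page for
 * the given token is resident again. A token of ~0 is a broadcast
 * telling us to wake every waiter on this CPU. If the waiter has not
 * gone to sleep yet, leave a dummy node behind so the wait side can
 * detect that its wakeup already happened.
 */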
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * The async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another CPU
                         * handles the async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

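/*
 * Replacement page fault handler, installed via kvm_apf_trap_init().
 * A reason of 0 means an ordinary page fault, which is passed on to
 * the normal handler; for the two PV reasons, CR2 carries the async PF
 * token rather than a fault address.
 */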
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                trace_do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
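        /*
         * Skip the IO-APIC timer pin check: it is unnecessary in a KVM
         * guest and, with scheduling jitter from the host, can
         * spuriously fail.
         */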
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

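/*
 * PV EOI protocol: when the host injects an interrupt whose EOI needs
 * no extra work on its side, it sets KVM_PV_EOI_BIT in this per-CPU
 * word. The guest then acknowledges the interrupt by clearing the bit
 * (which the host notices on the next exit) instead of taking a
 * vmexit for the APIC EOI register write.
 */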
static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for locks or memory barriers.
         * An optimization barrier is implied in the apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                pa |= KVM_ASYNC_PF_ENABLED;

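                /*
                 * MSR_KVM_ASYNC_PF_EN layout: the upper bits hold the
                 * physical address of apf_reason (64-byte aligned), and
                 * the low bits are flags (enable, send-always,
                 * deliver-as-#PF-vmexit).
                 */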
                /* Async page fault support for the L1 hypervisor is optional */
                if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
                        (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
                        wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO "KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
         * memory. The new kernel can re-enable it when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

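/*
 * The host bumps steal_time.version before and after updating the
 * record, so an odd version means an update is in flight. Re-read
 * until we observe the same even version on both sides of the read,
 * seqlock-style, to get a consistent steal value.
 */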
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_init();
        local_irq_enable();
        return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_offline();
        local_irq_enable();
        return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

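/*
 * Hypervisors advertise themselves through CPUID leaves starting at
 * 0x40000000; KVM identifies itself with the signature "KVMKVMKVM" in
 * ebx/ecx/edx of its base leaf. hypervisor_cpuid_base() scans the
 * candidate base addresses for that signature.
 */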
static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

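/*
 * Park a vCPU that is spinning in the pv-qspinlock slow path. The vCPU
 * halts until it is either kicked via KVM_HC_KICK_CPU by the vCPU that
 * releases the lock or receives some other interrupt. The lock byte is
 * re-checked with interrupts disabled before halting so that a kick
 * arriving in that window cannot be lost.
 */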
static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * Halt until it is our turn and we are kicked. Note that we do a
         * safe halt when interrupts were enabled on entry, to avoid hanging
         * if the lock word is overwritten in an irq-context spinlock slow
         * path and no spurious interrupt arrives to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

        return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid saving and restoring eight
 * 64-bit registers to/from the stack.
 */
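/*
 * %rdi holds the cpu argument. __per_cpu_offset(,%rdi,8) loads that
 * CPU's per-cpu base address, and KVM_STEAL_TIME_preempted (generated
 * by asm-offsets) is the offset of the preempted field within
 * steal_time. The result is returned in %al via setne.
 */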
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                pv_lock_ops.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
        }
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
655