/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

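/*
 * Per-CPU memory shared with the host: the async page fault reason word
 * and the steal time accounting record. Both are handed to the
 * hypervisor by the MSR writes in kvm_guest_cpu_init() and
 * kvm_register_steal_time(), and both are __aligned(64) so each sits in
 * its own cache line.
 */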
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

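/*
 * Sleep until the host signals that the page is resident again.
 *
 * Called from the page fault handler when the host reports
 * KVM_PV_REASON_PAGE_NOT_PRESENT for @token. The task is parked on a
 * hash bucket keyed by the token, and kvm_async_pf_task_wake() unhashes
 * it when the matching PAGE_READY notification arrives. If scheduling
 * is not safe here (idle task, or preemption disabled), the vCPU
 * busy-waits in halt instead of sleeping.
 */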
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

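/*
 * Wake one waiter: unhash the node, then either send a reschedule IPI
 * (the waiter is spinning in halt on its CPU) or wake it from its swait
 * queue. Called with the bucket lock held.
 */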
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swait_active(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

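/*
 * Handle a PAGE_READY notification for @token. A token of ~0 is a
 * broadcast: wake every waiter parked from this CPU. Otherwise wake the
 * matching waiter, or leave a dummy node behind if the notification
 * raced ahead of the corresponding PAGE_NOT_PRESENT fault.
 */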
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * Async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another CPU
			 * handles the async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

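/*
 * Paravirtual page fault entry point, installed in place of the native
 * handler by kvm_apf_trap_init(). If the host injected the fault, CR2
 * carries the async PF token rather than a faulting address; a reason
 * of zero means a genuine page fault, which is forwarded to the normal
 * handler.
 */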
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* Page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

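/*
 * Tell the host where this CPU's steal time record lives so it can
 * start accounting time during which the vCPU was runnable but not
 * actually running.
 */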
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU, so
	 * there's no need for locks or memory barriers.
	 * An optimization barrier is implied by the apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

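/*
 * Per-CPU guest setup: register the async PF reason word and the PV EOI
 * flag with the hypervisor, and enable steal time accounting. Runs on
 * the boot CPU from kvm_smp_prepare_boot_cpu() and on secondary CPUs
 * from the hotplug online callback.
 */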
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel via kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
	 * memory. The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

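/*
 * Read the steal time for @cpu, using the version field as a seqlock:
 * the host increments ->version before and after updating the record,
 * so an odd value, or a value that changed during the read, means we
 * raced with an update and must retry.
 */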
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

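/*
 * Route vector 14 (#PF) to the async_page_fault stub so that host
 * injected async page faults reach do_async_page_fault() above.
 */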
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

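/*
 * Locate KVM's CPUID leaf range by scanning the hypervisor leaves
 * (0x40000000 and up) for the "KVMKVMKVM" signature. Returns 0 when not
 * running on KVM, which is why the cached result doubles as the
 * kvm_para_available() check.
 */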
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a CPU by its APIC ID. Used to wake up a halted vCPU. */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

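/*
 * pv qspinlock wait hook: halt this vCPU until the lock holder kicks it
 * awake with the KVM_HC_KICK_CPU hypercall (see kvm_kick_cpu() above).
 * The lock word is re-checked with interrupts disabled so a kick that
 * arrives between the check and the halt cannot be lost. Halting inside
 * an NMI is not safe, so in that case just return and let the caller
 * spin.
 */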
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we get kicked. Note that we do a
	 * safe halt in the IRQ-enabled case, to avoid hanging if the lock
	 * info is overwritten in the IRQ spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

/*
 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does the host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */