xref: /openbmc/linux/arch/x86/kernel/cpu/mshyperv.c (revision a531b0c2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hyper-V detection code.
 *
 * Copyright (C) 2010, Novell, Inc.
 * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
 */

#include <linux/types.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/i8253.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/desc.h>
#include <asm/idtentry.h>
#include <asm/irq_regs.h>
#include <asm/i8259.h>
#include <asm/apic.h>
#include <asm/timer.h>
#include <asm/reboot.h>
#include <asm/nmi.h>
#include <clocksource/hyperv_timer.h>
#include <asm/numa.h>

/* Is Linux running as the root partition? */
bool hv_root_partition;
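/*
 * Feature, hint and limit data read from the Hyper-V CPUID leaves;
 * filled in by ms_hyperv_init_platform().
 */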
struct ms_hyperv_info ms_hyperv;

#if IS_ENABLED(CONFIG_HYPERV)
static void (*vmbus_handler)(void);
static void (*hv_stimer0_handler)(void);
static void (*hv_kexec_handler)(void);
static void (*hv_crash_handler)(struct pt_regs *regs);

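/*
 * C-level handler for the HYPERVISOR_CALLBACK_VECTOR interrupt. The gate is
 * allocated in ms_hyperv_init_platform() below; the VMBus driver installs
 * its interrupt handler at runtime via hv_setup_vmbus_handler().
 */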
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	inc_irq_stat(irq_hv_callback_count);
	if (vmbus_handler)
		vmbus_handler();

	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
		ack_APIC_irq();

	set_irq_regs(old_regs);
}

void hv_setup_vmbus_handler(void (*handler)(void))
{
	vmbus_handler = handler;
}

void hv_remove_vmbus_handler(void)
{
	/* We have no way to deallocate the interrupt gate */
	vmbus_handler = NULL;
}

/*
 * Routines to do per-architecture handling of stimer0
 * interrupts when in Direct Mode
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	inc_irq_stat(hyperv_stimer0_count);
	if (hv_stimer0_handler)
		hv_stimer0_handler();
	add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
	ack_APIC_irq();

	set_irq_regs(old_regs);
}

/* For x86/x64, override weak placeholders in hyperv_timer.c */
void hv_setup_stimer0_handler(void (*handler)(void))
{
	hv_stimer0_handler = handler;
}

void hv_remove_stimer0_handler(void)
{
	/* We have no way to deallocate the interrupt gate */
	hv_stimer0_handler = NULL;
}

void hv_setup_kexec_handler(void (*handler)(void))
{
	hv_kexec_handler = handler;
}

void hv_remove_kexec_handler(void)
{
	hv_kexec_handler = NULL;
}

void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
	hv_crash_handler = handler;
}

void hv_remove_crash_handler(void)
{
	hv_crash_handler = NULL;
}

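/*
 * kexec/crash support: the handlers registered above give the VMBus driver a
 * chance to clean up (e.g. send CHANNELMSG_UNLOAD to the host); afterwards
 * the per-CPU state and the hypercall page are torn down before the new
 * kernel takes over.
 */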
#ifdef CONFIG_KEXEC_CORE
static void hv_machine_shutdown(void)
{
	if (kexec_in_progress && hv_kexec_handler)
		hv_kexec_handler();

	/*
	 * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
	 * corrupts the old VP Assist Pages and can crash the kexec kernel.
	 */
	if (kexec_in_progress && hyperv_init_cpuhp > 0)
		cpuhp_remove_state(hyperv_init_cpuhp);

	/* The function calls stop_other_cpus(). */
	native_machine_shutdown();

	/* Disable the hypercall page when there is only 1 active CPU. */
	if (kexec_in_progress)
		hyperv_cleanup();
}

static void hv_machine_crash_shutdown(struct pt_regs *regs)
{
	if (hv_crash_handler)
		hv_crash_handler(regs);

	/* The function calls crash_smp_send_stop(). */
	native_machine_crash_shutdown(regs);

	/* Disable the hypercall page when there is only 1 active CPU. */
	hyperv_cleanup();
}
#endif /* CONFIG_KEXEC_CORE */
#endif /* CONFIG_HYPERV */

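/*
 * Detect Hyper-V: the CPU must report a hypervisor, and the vendor leaf
 * (0x40000000) must carry the "Microsoft Hv" signature and a sane maximum
 * leaf. The HYPERCALL and VP_INDEX MSRs are required for everything else,
 * so their absence fails detection as well.
 */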
static uint32_t __init ms_hyperv_platform(void)
{
	u32 eax;
	u32 hyp_signature[3];

	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return 0;

	cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
	      &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);

	if (eax < HYPERV_CPUID_MIN || eax > HYPERV_CPUID_MAX ||
	    memcmp("Microsoft Hv", hyp_signature, 12))
		return 0;

	/* HYPERCALL and VP_INDEX MSRs are mandatory for all features. */
	eax = cpuid_eax(HYPERV_CPUID_FEATURES);
	if (!(eax & HV_MSR_HYPERCALL_AVAILABLE)) {
		pr_warn("x86/hyperv: HYPERCALL MSR not available.\n");
		return 0;
	}
	if (!(eax & HV_MSR_VP_INDEX_AVAILABLE)) {
		pr_warn("x86/hyperv: VP_INDEX MSR not available.\n");
		return 0;
	}

	return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
}

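/*
 * Generation 2 (EFI-booted) VMs don't emulate I/O port 0x61, so there is no
 * NMI status to read; report "no reason". ms_hyperv_init_platform() installs
 * this as x86_platform.get_nmi_reason on EFI-booted guests.
 */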
static unsigned char hv_get_nmi_reason(void)
{
	return 0;
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Prior to WS2016, Debug-VM sends NMIs to all CPUs, which makes it
 * difficult to process CHANNELMSG_UNLOAD in case of crash. Handle the
 * unknown NMI on the first CPU that gets it.
 */
static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
{
	static atomic_t nmi_cpu = ATOMIC_INIT(-1);

	if (!unknown_nmi_panic)
		return NMI_DONE;

	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
		return NMI_HANDLED;

	return NMI_DONE;
}
#endif

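/*
 * TSC/CPU frequency calibration: HV_X64_MSR_TSC_FREQUENCY reports the
 * frequency in Hz; convert it to the kHz value the calibration hooks expect.
 */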
static unsigned long hv_get_tsc_khz(void)
{
	unsigned long freq;

	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);

	return freq / 1000;
}

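/*
 * When Linux runs as the root partition, the hypervisor must be told about
 * each present CPU (other than the boot CPU) before it can be brought
 * online: add it as a logical processor and create a virtual processor for
 * it. See hv_smp_prepare_cpus() below.
 */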
#if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV)
static void __init hv_smp_prepare_boot_cpu(void)
{
	native_smp_prepare_boot_cpu();
#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS)
	hv_init_spinlocks();
#endif
}

static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
{
#ifdef CONFIG_X86_64
	int i;
	int ret;
#endif

	native_smp_prepare_cpus(max_cpus);

#ifdef CONFIG_X86_64
	for_each_present_cpu(i) {
		if (i == 0)
			continue;
		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
		BUG_ON(ret);
	}

	for_each_present_cpu(i) {
		if (i == 0)
			continue;
		ret = hv_call_create_vp(numa_cpu_node(i), hv_current_partition_id, i, i);
		BUG_ON(ret);
	}
#endif
}
#endif

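/*
 * Main platform setup, run once the hypervisor has been detected: cache the
 * feature/hint CPUID leaves in ms_hyperv, detect root partition and
 * isolation support, and wire up the timer, NMI, kexec/crash and
 * interrupt-vector hooks used by the rest of the Hyper-V code.
 */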
static void __init ms_hyperv_init_platform(void)
{
	int hv_max_functions_eax;
	int hv_host_info_eax;
	int hv_host_info_ebx;
	int hv_host_info_ecx;
	int hv_host_info_edx;

#ifdef CONFIG_PARAVIRT
	pv_info.name = "Hyper-V";
#endif

	/*
	 * Extract the features and hints.
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.priv_high = cpuid_ebx(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	hv_max_functions_eax = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);

	pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n",
		ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
		ms_hyperv.misc_features);

	ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
	ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);

	pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n",
		 ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);

	/*
	 * Check CPU management privilege.
	 *
	 * To mirror what Windows does, we should extract CPU management
	 * features and use the ReservedIdentityBit to detect if Linux is the
	 * root partition. But that requires negotiating the CPU management
	 * interface (a process to be finalized).
	 *
	 * For now, use the privilege flag as the indicator for running as
	 * root.
	 */
	if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) {
		hv_root_partition = true;
		pr_info("Hyper-V: running as root partition\n");
	}

	/*
	 * Extract host information.
	 */
	if (hv_max_functions_eax >= HYPERV_CPUID_VERSION) {
		hv_host_info_eax = cpuid_eax(HYPERV_CPUID_VERSION);
		hv_host_info_ebx = cpuid_ebx(HYPERV_CPUID_VERSION);
		hv_host_info_ecx = cpuid_ecx(HYPERV_CPUID_VERSION);
		hv_host_info_edx = cpuid_edx(HYPERV_CPUID_VERSION);

		pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d\n",
			hv_host_info_eax, hv_host_info_ebx >> 16,
			hv_host_info_ebx & 0xFFFF, hv_host_info_ecx,
			hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
	}

	if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
		x86_platform.calibrate_tsc = hv_get_tsc_khz;
		x86_platform.calibrate_cpu = hv_get_tsc_khz;
	}

	if (ms_hyperv.priv_high & HV_ISOLATION) {
		ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG);
		ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG);
		ms_hyperv.shared_gpa_boundary =
			BIT_ULL(ms_hyperv.shared_gpa_boundary_bits);

		pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n",
			ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);

		if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP)
			static_branch_enable(&isolation_type_snp);
	}

	if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
		ms_hyperv.nested_features =
			cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
		pr_info("Hyper-V: Nested features: 0x%x\n",
			ms_hyperv.nested_features);
	}

#ifdef CONFIG_X86_LOCAL_APIC
	if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
		/*
		 * Get the APIC frequency.
		 */
		u64	hv_lapic_frequency;

		rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
		lapic_timer_period = hv_lapic_frequency;
		pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
			lapic_timer_period);
	}

	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
			     "hv_nmi_unknown");
#endif

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
	machine_ops.shutdown = hv_machine_shutdown;
	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
	if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
		/*
		 * Writing to synthetic MSR 0x40000118 updates/changes the
		 * guest visible CPUIDs. Setting bit 0 of this MSR enables
		 * guests to report the invariant TSC feature through CPUID
		 * leaf 0x80000007, EDX bit 8. See the code in
		 * early_init_intel() where this bit is examined. The
		 * setting of this MSR bit should happen before init_intel()
		 * is called.
		 */
		wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x1);
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
	}

	/*
	 * Generation 2 instances don't support reading the NMI status from
	 * I/O port 0x61.
	 */
	if (efi_enabled(EFI_BOOT))
		x86_platform.get_nmi_reason = hv_get_nmi_reason;

	/*
	 * Hyper-V VMs have a PIT emulation quirk such that zeroing the
	 * counter register during PIT shutdown restarts the PIT. So it
	 * continues to interrupt at 18.2 Hz. Setting
	 * i8253_clear_counter_on_shutdown to false tells pit_shutdown() not
	 * to zero the counter, so that the PIT really is shut down.
	 * Generation 2 VMs don't have a PIT, and setting this value has no
	 * effect.
	 */
	i8253_clear_counter_on_shutdown = false;

#if IS_ENABLED(CONFIG_HYPERV)
	/*
	 * Set up the hook to get control after APIC initialization.
	 */
	x86_platform.apic_post_init = hyperv_init;
	hyperv_setup_mmu_ops();
	/* Set up the IDT for the hypervisor callback */
	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_hyperv_callback);

	/* Set up the IDT for reenlightenment notifications */
	if (ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT) {
		alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR,
				asm_sysvec_hyperv_reenlightenment);
	}

	/* Set up the IDT for stimer0 */
	if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE) {
		alloc_intr_gate(HYPERV_STIMER0_VECTOR,
				asm_sysvec_hyperv_stimer0);
	}

# ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
	if (hv_root_partition)
		smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus;
# endif

	/*
	 * Hyper-V doesn't provide irq remapping for the IO-APIC. To enable
	 * x2apic, set the x2apic destination mode to physical mode when
	 * x2apic is available; the Hyper-V IOMMU driver makes sure that CPUs
	 * assigned IO-APIC irqs have an 8-bit APIC id.
	 */
# ifdef CONFIG_X86_X2APIC
	if (x2apic_supported())
		x2apic_phys = 1;
# endif

	/* Register the Hyper-V specific clocksource */
	hv_init_clocksource();
#endif
	/*
	 * The TSC should be marked as unstable only after the Hyper-V
	 * clocksource has been initialized. This ensures that the
	 * stability of sched_clock is not altered.
	 */
	if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
		mark_tsc_unstable("running on Hyper-V");
}

static bool __init ms_hyperv_x2apic_available(void)
{
	return x2apic_supported();
}

/*
 * If ms_hyperv_msi_ext_dest_id() returns true, hyperv_prepare_irq_remapping()
 * returns -ENODEV and the Hyper-V IOMMU driver is not used; instead, the
 * generic support of the 15-bit APIC ID is used: see __irq_msi_compose_msg().
 *
 * Note: for a VM on Hyper-V, the I/O-APIC is the only device which
 * (logically) generates MSIs directly to the system APIC irq domain.
 * There is no HPET, and PCI MSI/MSI-X interrupts are remapped by the
 * pci-hyperv host bridge.
 */
static bool __init ms_hyperv_msi_ext_dest_id(void)
{
	u32 eax;

	eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_INTERFACE);
	if (eax != HYPERV_VS_INTERFACE_EAX_SIGNATURE)
		return false;

	eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_PROPERTIES);
	return eax & HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE;
}

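/*
 * Detection descriptor consumed by the generic x86 hypervisor detection code
 * (arch/x86/kernel/cpu/hypervisor.c): if .detect reports Hyper-V, the .init
 * hooks above are wired into boot.
 */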
const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
	.name			= "Microsoft Hyper-V",
	.detect			= ms_hyperv_platform,
	.type			= X86_HYPER_MS_HYPERV,
	.init.x2apic_available	= ms_hyperv_x2apic_available,
	.init.msi_ext_dest_id	= ms_hyperv_msi_ext_dest_id,
	.init.init_platform	= ms_hyperv_init_platform,
};