xref: /openbmc/linux/drivers/hv/hv_common.c (revision 45cc35e8)
// SPDX-License-Identifier: GPL-2.0

/*
 * Architecture neutral utility routines for interacting with
 * Hyper-V. This file is specifically for code that must be
 * built-in to the kernel image when CONFIG_HYPERV is set
 * (vs. being in a module) because it is called from architecture
 * specific code under arch/.
 *
 * Copyright (C) 2021, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>

/*
 * hv_root_partition, ms_hyperv and hv_nested are defined here with other
 * Hyper-V specific globals so they are shared across all architectures and are
 * built only when CONFIG_HYPERV is defined.  But on x86,
 * ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
 * defined, and it uses these three variables.  So mark them as __weak
 * here, allowing for an overriding definition in the module containing
 * ms_hyperv_init_platform().
 */
bool __weak hv_root_partition;
EXPORT_SYMBOL_GPL(hv_root_partition);

bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);

struct ms_hyperv_info __weak ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
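
/*
 * Illustrative sketch of the override described above (not code in this
 * file): the arch module that contains ms_hyperv_init_platform() provides
 * plain, non-weak definitions, which the linker prefers over the __weak
 * ones here whenever both are built:
 *
 *	bool hv_root_partition;
 *	bool hv_nested;
 *	struct ms_hyperv_info ms_hyperv;
 */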

u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);

void * __percpu *hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

void * __percpu *hyperv_pcpu_output_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);

/*
 * Hyper-V specific initialization and shutdown code that is
 * common across all architectures.  Called from architecture
 * specific initialization functions.
 */

void __init hv_common_free(void)
{
	kfree(hv_vp_index);
	hv_vp_index = NULL;

	free_percpu(hyperv_pcpu_output_arg);
	hyperv_pcpu_output_arg = NULL;

	free_percpu(hyperv_pcpu_input_arg);
	hyperv_pcpu_input_arg = NULL;
}

int __init hv_common_init(void)
{
	int i;

	/*
	 * Hyper-V expects to get crash register data or kmsg when
	 * crash enlightenment is available and the system crashes. Set
	 * crash_kexec_post_notifiers to true to make sure that the
	 * crash enlightenment interface is called before the kdump
	 * kernel runs.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		crash_kexec_post_notifiers = true;
		pr_info("Hyper-V: enabling crash_kexec_post_notifiers\n");
	}

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to set up the
	 * (per-CPU) hypercall input page, and thus this failure is
	 * fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);
	BUG_ON(!hyperv_pcpu_input_arg);

	/* Allocate the per-CPU state for output arg for root */
	if (hv_root_partition) {
		hyperv_pcpu_output_arg = alloc_percpu(void *);
		BUG_ON(!hyperv_pcpu_output_arg);
	}

	hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index) {
		hv_common_free();
		return -ENOMEM;
	}

	for (i = 0; i < num_possible_cpus(); i++)
		hv_vp_index[i] = VP_INVAL;

	return 0;
}
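
/*
 * Illustrative arch-side usage (a hedged sketch, not code in this file):
 * an architecture's Hyper-V setup routine, e.g. hyperv_init() on x86, is
 * expected to call hv_common_init() once early and to unwind with
 * hv_common_free() if one of its own later setup steps fails:
 *
 *	if (hv_common_init())
 *		return;
 *	...
 *	if (later_step_failed)		// hypothetical stand-in for any
 *		hv_common_free();	// later arch-specific failure path
 */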

/*
 * Hyper-V specific initialization and die code for
 * individual CPUs that is common across all architectures.
 * Called by the CPU hotplug mechanism.
 */

int hv_common_cpu_init(unsigned int cpu)
{
	void **inputarg, **outputarg;
	u64 msr_vp_index;
	gfp_t flags;
	int pgcount = hv_root_partition ? 2 : 1;

	/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
	flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;

	inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	*inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
	if (!(*inputarg))
		return -ENOMEM;

	if (hv_root_partition) {
		outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
		*outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE;
	}

	msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);

	hv_vp_index[cpu] = msr_vp_index;

	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	return 0;
}

int hv_common_cpu_die(unsigned int cpu)
{
	unsigned long flags;
	void **inputarg, **outputarg;
	void *mem;

	local_irq_save(flags);

	inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	mem = *inputarg;
	*inputarg = NULL;

	if (hv_root_partition) {
		outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
		*outputarg = NULL;
	}

	local_irq_restore(flags);

	kfree(mem);

	return 0;
}
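
/*
 * Illustrative hookup (a hedged sketch, not code in this file): architecture
 * code registers the two routines above, directly or via thin arch wrappers
 * such as x86's hv_cpu_init()/hv_cpu_die(), with the CPU hotplug state
 * machine. The "arch/hyperv_init:online" name below is a placeholder:
 *
 *	cpuhp_setup_state(CPUHP_AP_HYPERV_INIT, "arch/hyperv_init:online",
 *			  hv_common_cpu_init, hv_common_cpu_die);
 */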

/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
bool hv_query_ext_cap(u64 cap_query)
{
	/*
	 * The address of the 'hv_extended_cap' variable is used as an
	 * output parameter to the hypercall below, so it must be
	 * compatible with 'virt_to_phys'; that is, its address must be
	 * directly mapped. Use 'static' to keep it compatible; stack
	 * variables can be virtually mapped, making them incompatible with
	 * 'virt_to_phys'.
	 * Hypercall input/output addresses should also be 8-byte aligned.
	 */
	static u64 hv_extended_cap __aligned(8);
	static bool hv_extended_cap_queried;
	u64 status;

	/*
	 * Querying extended capabilities is an extended hypercall. Check
	 * first whether the partition supports extended hypercalls at all.
	 */
	if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
		return false;

	/* Extended capabilities do not change at runtime. */
	if (hv_extended_cap_queried)
		return hv_extended_cap & cap_query;

	status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
				 &hv_extended_cap);

	/*
	 * The query extended capabilities hypercall should not fail under
	 * any normal circumstances. Avoid repeatedly making the hypercall
	 * on error.
	 */
	hv_extended_cap_queried = true;
	if (!hv_result_success(status)) {
		pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
		       status);
		return false;
	}

	return hv_extended_cap & cap_query;
}
EXPORT_SYMBOL_GPL(hv_query_ext_cap);
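
/*
 * Illustrative usage (a sketch, not code in this file): a caller passes one
 * of the HV_EXT_CAPABILITY_* bits mentioned above and treats the result as a
 * boolean; repeat calls are cheap because the answer is cached after the
 * first hypercall. The capability bit and helper below are hypothetical:
 *
 *	if (hv_query_ext_cap(HV_EXT_CAPABILITY_xxx))
 *		enable_optional_feature();
 */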

void hv_setup_dma_ops(struct device *dev, bool coherent)
{
	/*
	 * Hyper-V does not offer a vIOMMU in the guest
	 * VM, so pass 0/NULL for the IOMMU settings
	 */
	arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
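
/*
 * Illustrative usage (a sketch, not code in this file): a Hyper-V bus driver
 * might call this while registering a child device, passing whether the
 * device is DMA coherent. 'child_dev' is a hypothetical variable:
 *
 *	hv_setup_dma_ops(&child_dev->device, false);
 */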

bool hv_is_hibernation_supported(void)
{
	return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);

/*
 * Default function to read the Hyper-V reference counter, independent
 * of whether Hyper-V enlightened clocks/timers are being used. But on
 * architectures where the enlightened clocks/timers are used, the
 * Hyper-V enlightenment code in hyperv_timer.c may override this function.
 */
static u64 __hv_read_ref_counter(void)
{
	return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
}

u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
EXPORT_SYMBOL_GPL(hv_read_reference_counter);
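
/*
 * Illustrative usage (a sketch, not code in this file): callers read the
 * current partition reference time through the function pointer; per the
 * TLFS the reference counter counts in 100 ns units:
 *
 *	u64 now_100ns = hv_read_reference_counter();
 */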

/*
 * These __weak functions provide default "no-op" behavior and
 * may be overridden by architecture specific versions. Architectures
 * for which the default "no-op" behavior is sufficient can leave
 * them unimplemented and not be cluttered with a bunch of stub
 * functions in arch-specific code.
 */

bool __weak hv_is_isolation_supported(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);

bool __weak hv_isolation_type_snp(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);

void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);
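
/*
 * Illustrative override (a hedged sketch, not code in this file): an
 * architecture that needs real behavior simply supplies a strong definition
 * that replaces the no-op above, e.g. saving the handler for its VMBus
 * interrupt entry point. 'vmbus_handler' is assumed to be an arch-private
 * function pointer:
 *
 *	void hv_setup_vmbus_handler(void (*handler)(void))
 *	{
 *		vmbus_handler = handler;
 *	}
 */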

void __weak hv_remove_vmbus_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);

void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);

void __weak hv_remove_kexec_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);

void __weak hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
}
EXPORT_SYMBOL_GPL(hv_setup_crash_handler);

void __weak hv_remove_crash_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);

void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);

void __weak *hv_map_memory(void *addr, unsigned long size)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(hv_map_memory);

void __weak hv_unmap_memory(void *addr)
{
}
EXPORT_SYMBOL_GPL(hv_unmap_memory);