/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Linux-specific definitions for managing interactions with Microsoft's
 * Hyper-V hypervisor. The definitions in this file are architecture
 * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
 * that are specific to architecture <arch>.
 *
 * Definitions that are specified in the Hyper-V Top Level Functional
 * Spec (TLFS) should not go in this file, but should instead go in
 * hyperv-tlfs.h.
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>

#define VTPM_BASE_ADDRESS 0xfed40000

struct ms_hyperv_info {
	u32 features;
	u32 priv_high;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
	union {
		u32 isolation_config_a;
		struct {
			u32 paravisor_present : 1;
			u32 reserved_a1 : 31;
		};
	};
	union {
		u32 isolation_config_b;
		struct {
			u32 cvm_type : 4;
			u32 reserved_b1 : 1;
			u32 shared_gpa_boundary_active : 1;
			u32 shared_gpa_boundary_bits : 6;
			u32 reserved_b2 : 20;
		};
	};
	u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;
extern bool hv_nested;
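
/*
 * Illustrative sketch, not part of this header: architecture-specific
 * setup code that has read isolation_config_b might derive the shared
 * GPA boundary from the reported bit position, roughly:
 *
 *	if (ms_hyperv.shared_gpa_boundary_active)
 *		ms_hyperv.shared_gpa_boundary =
 *			BIT_ULL(ms_hyperv.shared_gpa_boundary_bits);
 */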

extern void * __percpu *hyperv_pcpu_input_arg;
extern void * __percpu *hyperv_pcpu_output_arg;

extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
extern bool hv_isolation_type_snp(void);
extern bool hv_isolation_type_en_snp(void);

/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
{
	return status & HV_HYPERCALL_RESULT_MASK;
}

static inline bool hv_result_success(u64 status)
{
	return hv_result(status) == HV_STATUS_SUCCESS;
}

static inline unsigned int hv_repcomp(u64 status)
{
	/* Bits [43:32] of status have 'Reps completed' data. */
	return (status & HV_HYPERCALL_REP_COMP_MASK) >>
			 HV_HYPERCALL_REP_COMP_OFFSET;
}

/*
 * Rep hypercalls. Callers of this function are expected to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if (!hv_result_success(status))
			return status;

		rep_comp = hv_repcomp(status);

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}
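
/*
 * Illustrative caller sketch, not part of this header: a TLB-flush-style
 * rep hypercall packs its rep entries into the input page and checks the
 * overall status (the hypercall code and arguments here are examples):
 *
 *	status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
 *				     gva_n, 0, flush, NULL);
 *	if (!hv_result_success(status))
 *		pr_err("rep hypercall failed: 0x%llx\n", status);
 */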

/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline u64 hv_generate_guest_id(u64 kernel_version)
{
	u64 guest_id;

	guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (kernel_version << 16);

	return guest_id;
}
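
/*
 * Illustrative sketch, not part of this header: architecture init code
 * typically generates the ID from LINUX_VERSION_CODE and writes it to
 * the guest OS ID synthetic register, roughly:
 *
 *	u64 guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
 *
 *	hv_set_register(HV_REGISTER_GUEST_OSID, guest_id);
 */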

/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may have already cleared the header
	 * and the host may have already delivered some other message there.
	 * If we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to MessageType (i.e., set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		hv_set_register(HV_REGISTER_EOM, 0);
	}
}
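
/*
 * Illustrative usage sketch, not part of this header: a message handler
 * reads the type from its slot in the per-CPU message page, consumes the
 * payload, and then frees the slot so the host can deliver the next one:
 *
 *	u32 message_type = READ_ONCE(msg->header.message_type);
 *
 *	if (message_type != HVMSG_NONE) {
 *		... consume msg->u.payload ...
 *		vmbus_signal_eom(msg, message_type);
 *	}
 */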

void hv_setup_vmbus_handler(void (*handler)(void));
void hv_remove_vmbus_handler(void);
void hv_setup_stimer0_handler(void (*handler)(void));
void hv_remove_stimer0_handler(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

extern int vmbus_interrupt;
extern int vmbus_irq;

extern bool hv_root_partition;

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;

extern u64 (*hv_read_reference_counter)(void);

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL	U32_MAX

int __init hv_common_init(void);
void __init hv_common_free(void);
int hv_common_cpu_init(unsigned int cpu);
int hv_common_cpu_die(unsigned int cpu);

void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(unsigned long addr);

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, which is
 * useful when making hypercalls that operate on specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}
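
/*
 * Illustrative sketch, not part of this header: a caller building a
 * hypercall input that names the current processor might do:
 *
 *	int vp = hv_cpu_number_to_vp_number(smp_processor_id());
 */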

static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
				    const struct cpumask *cpus,
				    bool (*func)(int cpu))
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
	int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK;

	/* vpset.valid_bank_mask can represent up to HV_MAX_SPARSE_VCPU_BANKS banks */
	if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank, as hv_tlb_flush_ex
	 * structs are not cleared between calls; otherwise we risk flushing
	 * unneeded vCPUs.
	 */
	for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/* Some banks may end up being empty but this is acceptable. */
	for_each_cpu(cpu, cpus) {
		if (func && func(cpu))
			continue;
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK;
		vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}

/*
 * Convert a Linux cpumask into a Hyper-V VPset. In the _skip variant,
 * 'func' is called for each CPU present in cpumask. If 'func' returns
 * true, that CPU is skipped -- i.e., that CPU from cpumask is *not*
 * added to the Hyper-V VPset. If 'func' is NULL, no CPUs are
 * skipped.
 */
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				    const struct cpumask *cpus)
{
	return __cpumask_to_vpset(vpset, cpus, NULL);
}

static inline int cpumask_to_vpset_skip(struct hv_vpset *vpset,
				    const struct cpumask *cpus,
				    bool (*func)(int cpu))
{
	return __cpumask_to_vpset(vpset, cpus, func);
}
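
/*
 * Illustrative sketch with a hypothetical filter, not part of this
 * header: exclude the calling CPU from the VPset, e.g. when the sender
 * does not need to signal itself:
 *
 *	static bool is_self(int cpu)
 *	{
 *		return cpu == smp_processor_id();
 *	}
 *
 *	nr_bank = cpumask_to_vpset_skip(&vpset, mask, is_self);
 *	if (nr_bank < 0)
 *		... fall back to a non-VPset mechanism ...
 */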

void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline bool hv_is_isolation_supported(void) { return false; }
static inline enum hv_isolation_type hv_get_isolation_type(void)
{
	return HV_ISOLATION_TYPE_NONE;
}
#endif /* CONFIG_HYPERV */

#endif /* _ASM_GENERIC_MSHYPERV_H */