xref: /openbmc/linux/arch/x86/include/asm/mshyperv.h (revision b9d8cf2eb3ceecdee3434b87763492aee9e28845)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2e08cae41SH. Peter Anvin #ifndef _ASM_X86_MSHYPER_H
3e08cae41SH. Peter Anvin #define _ASM_X86_MSHYPER_H
4a2a47c6cSKy Srinivasan 
5e08cae41SH. Peter Anvin #include <linux/types.h>
6806c8927SVitaly Kuznetsov #include <linux/nmi.h>
71cf106d9SBoqun Feng #include <linux/msi.h>
8fc53662fSVitaly Kuznetsov #include <asm/io.h>
95a485803SVitaly Kuznetsov #include <asm/hyperv-tlfs.h>
10e70e5892SDavid Woodhouse #include <asm/nospec-branch.h>
11*b9d8cf2eSMichael Kelley #include <asm/paravirt.h>
12e08cae41SH. Peter Anvin 
13cc4edae4SLan Tianyu typedef int (*hyperv_fill_flush_list_func)(
14cc4edae4SLan Tianyu 		struct hv_guest_mapping_flush_list *flush,
15cc4edae4SLan Tianyu 		void *data);
16cc4edae4SLan Tianyu 
17619a4c8bSMichael Kelley #define hv_init_timer(timer, tick) \
18619a4c8bSMichael Kelley 	wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*(timer)), tick)
19619a4c8bSMichael Kelley #define hv_init_timer_config(timer, val) \
20619a4c8bSMichael Kelley 	wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*(timer)), val)
21d5116b40SK. Y. Srinivasan 
22155e4a2fSK. Y. Srinivasan #define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
23155e4a2fSK. Y. Srinivasan #define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
24155e4a2fSK. Y. Srinivasan 
258e307bf8SK. Y. Srinivasan #define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
268e307bf8SK. Y. Srinivasan #define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)
278e307bf8SK. Y. Srinivasan 
2806d1d98aSK. Y. Srinivasan #define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
2906d1d98aSK. Y. Srinivasan #define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)
3006d1d98aSK. Y. Srinivasan 
317297ff0cSK. Y. Srinivasan #define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
327297ff0cSK. Y. Srinivasan 
33765e33f5SMichael Kelley #define hv_signal_eom() wrmsrl(HV_X64_MSR_EOM, 0)
34765e33f5SMichael Kelley 
35619a4c8bSMichael Kelley #define hv_get_synint_state(int_num, val) \
36619a4c8bSMichael Kelley 	rdmsrl(HV_X64_MSR_SINT0 + (int_num), val)
37619a4c8bSMichael Kelley #define hv_set_synint_state(int_num, val) \
38619a4c8bSMichael Kelley 	wrmsrl(HV_X64_MSR_SINT0 + (int_num), val)
392ddddd0bSMichael Kelley #define hv_recommend_using_aeoi() \
402ddddd0bSMichael Kelley 	(!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))
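
/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * how a SynIC interrupt source might be programmed with the accessors
 * above. sint_index and vector are hypothetical values supplied by the
 * caller (e.g. the VMBus driver):
 *
 *	union hv_synic_sint sint;
 *
 *	hv_get_synint_state(sint_index, sint.as_uint64);
 *	sint.vector = vector;
 *	sint.masked = false;
 *	sint.auto_eoi = hv_recommend_using_aeoi();
 *	hv_set_synint_state(sint_index, sint.as_uint64);
 */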
4137e11d5cSK. Y. Srinivasan 
429d9c9656SSunil Muthuswamy #define hv_get_crash_ctl(val) \
439d9c9656SSunil Muthuswamy 	rdmsrl(HV_X64_MSR_CRASH_CTL, val)
44cf910e83SSeiji Aguchi 
45dd2cb348SMichael Kelley #define hv_get_time_ref_count(val) \
46dd2cb348SMichael Kelley 	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, val)
47dd2cb348SMichael Kelley 
48dd2cb348SMichael Kelley #define hv_get_reference_tsc(val) \
49dd2cb348SMichael Kelley 	rdmsrl(HV_X64_MSR_REFERENCE_TSC, val)
50dd2cb348SMichael Kelley #define hv_set_reference_tsc(val) \
51dd2cb348SMichael Kelley 	wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
52dd2cb348SMichael Kelley #define hv_set_clocksource_vdso(val) \
53b95a8a27SThomas Gleixner 	((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
54eec399ddSThomas Gleixner #define hv_enable_vdso_clocksource() \
55b95a8a27SThomas Gleixner 	vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK)
56dd2cb348SMichael Kelley #define hv_get_raw_timer() rdtsc_ordered()
57dd2cb348SMichael Kelley 
58*b9d8cf2eSMichael Kelley /*
59*b9d8cf2eSMichael Kelley  * The reference to pv_ops must be inline so that objtool's
60*b9d8cf2eSMichael Kelley  * detection of noinstr violations can work correctly.
61*b9d8cf2eSMichael Kelley  */
62*b9d8cf2eSMichael Kelley static __always_inline void hv_setup_sched_clock(void *sched_clock)
63*b9d8cf2eSMichael Kelley {
64*b9d8cf2eSMichael Kelley #ifdef CONFIG_PARAVIRT
65*b9d8cf2eSMichael Kelley 	pv_ops.time.sched_clock = sched_clock;
66*b9d8cf2eSMichael Kelley #endif
67*b9d8cf2eSMichael Kelley }
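
/*
 * Illustrative only, not part of the original header: the Hyper-V
 * clocksource driver is expected to register its sched_clock callback
 * through this helper, e.g. (read_hv_sched_clock_tsc is an assumed name
 * for such a callback):
 *
 *	hv_setup_sched_clock(read_hv_sched_clock_tsc);
 *
 * Keeping the pv_ops assignment in an __always_inline helper means the
 * pv_ops reference is visible to objtool at the call site.
 */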
68*b9d8cf2eSMichael Kelley 
69bc2b0331SK. Y. Srinivasan void hyperv_vector_handler(struct pt_regs *regs);
708730046cSK. Y. Srinivasan 
71248e742aSMichael Kelley static inline void hv_enable_stimer0_percpu_irq(int irq) {}
72248e742aSMichael Kelley static inline void hv_disable_stimer0_percpu_irq(int irq) {}
73248e742aSMichael Kelley 
74248e742aSMichael Kelley 
758730046cSK. Y. Srinivasan #if IS_ENABLED(CONFIG_HYPERV)
76fc53662fSVitaly Kuznetsov extern void *hv_hypercall_pg;
7768bb7bfbSK. Y. Srinivasan extern void  __percpu  **hyperv_pcpu_input_arg;
78fc53662fSVitaly Kuznetsov 
79fc53662fSVitaly Kuznetsov static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
80fc53662fSVitaly Kuznetsov {
81fc53662fSVitaly Kuznetsov 	u64 input_address = input ? virt_to_phys(input) : 0;
82fc53662fSVitaly Kuznetsov 	u64 output_address = output ? virt_to_phys(output) : 0;
83fc53662fSVitaly Kuznetsov 	u64 hv_status;
84fc53662fSVitaly Kuznetsov 
85fc53662fSVitaly Kuznetsov #ifdef CONFIG_X86_64
86fc53662fSVitaly Kuznetsov 	if (!hv_hypercall_pg)
87fc53662fSVitaly Kuznetsov 		return U64_MAX;
88fc53662fSVitaly Kuznetsov 
89fc53662fSVitaly Kuznetsov 	__asm__ __volatile__("mov %4, %%r8\n"
90e70e5892SDavid Woodhouse 			     CALL_NOSPEC
91f5caf621SJosh Poimboeuf 			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
92fc53662fSVitaly Kuznetsov 			       "+c" (control), "+d" (input_address)
93e70e5892SDavid Woodhouse 			     :  "r" (output_address),
94e70e5892SDavid Woodhouse 				THUNK_TARGET(hv_hypercall_pg)
95fc53662fSVitaly Kuznetsov 			     : "cc", "memory", "r8", "r9", "r10", "r11");
96fc53662fSVitaly Kuznetsov #else
97fc53662fSVitaly Kuznetsov 	u32 input_address_hi = upper_32_bits(input_address);
98fc53662fSVitaly Kuznetsov 	u32 input_address_lo = lower_32_bits(input_address);
99fc53662fSVitaly Kuznetsov 	u32 output_address_hi = upper_32_bits(output_address);
100fc53662fSVitaly Kuznetsov 	u32 output_address_lo = lower_32_bits(output_address);
101fc53662fSVitaly Kuznetsov 
102fc53662fSVitaly Kuznetsov 	if (!hv_hypercall_pg)
103fc53662fSVitaly Kuznetsov 		return U64_MAX;
104fc53662fSVitaly Kuznetsov 
105e70e5892SDavid Woodhouse 	__asm__ __volatile__(CALL_NOSPEC
106fc53662fSVitaly Kuznetsov 			     : "=A" (hv_status),
107f5caf621SJosh Poimboeuf 			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
108fc53662fSVitaly Kuznetsov 			     : "A" (control),
109fc53662fSVitaly Kuznetsov 			       "b" (input_address_hi),
110fc53662fSVitaly Kuznetsov 			       "D"(output_address_hi), "S"(output_address_lo),
111e70e5892SDavid Woodhouse 			       THUNK_TARGET(hv_hypercall_pg)
112fc53662fSVitaly Kuznetsov 			     : "cc", "memory");
113fc53662fSVitaly Kuznetsov #endif /* !x86_64 */
114fc53662fSVitaly Kuznetsov 	return hv_status;
115fc53662fSVitaly Kuznetsov }
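
/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * a "slow" hypercall whose input lives in the per-cpu hypercall input page
 * declared above. The POST_MESSAGE fields are abbreviated; this is not a
 * complete hv_post_message() implementation:
 *
 *	void **input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
 *	struct hv_input_post_message *msg = *input_arg;
 *	u64 status;
 *
 *	... fill in msg->connectionid, msg->message_type,
 *	    msg->payload_size and msg->payload ...
 *
 *	status = hv_do_hypercall(HVCALL_POST_MESSAGE, msg, NULL);
 *	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *		... handle failure ...
 */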
116dee863b5SVitaly Kuznetsov 
1176a8edbd0SVitaly Kuznetsov /* Fast hypercall with 8 bytes of input and no output */
1186a8edbd0SVitaly Kuznetsov static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
1196a8edbd0SVitaly Kuznetsov {
1206a8edbd0SVitaly Kuznetsov 	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
1216a8edbd0SVitaly Kuznetsov 
1226a8edbd0SVitaly Kuznetsov #ifdef CONFIG_X86_64
1236a8edbd0SVitaly Kuznetsov 	{
124e70e5892SDavid Woodhouse 		__asm__ __volatile__(CALL_NOSPEC
125f5caf621SJosh Poimboeuf 				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
1266a8edbd0SVitaly Kuznetsov 				       "+c" (control), "+d" (input1)
127e70e5892SDavid Woodhouse 				     : THUNK_TARGET(hv_hypercall_pg)
1286a8edbd0SVitaly Kuznetsov 				     : "cc", "r8", "r9", "r10", "r11");
1296a8edbd0SVitaly Kuznetsov 	}
1306a8edbd0SVitaly Kuznetsov #else
1316a8edbd0SVitaly Kuznetsov 	{
1326a8edbd0SVitaly Kuznetsov 		u32 input1_hi = upper_32_bits(input1);
1336a8edbd0SVitaly Kuznetsov 		u32 input1_lo = lower_32_bits(input1);
1346a8edbd0SVitaly Kuznetsov 
135e70e5892SDavid Woodhouse 		__asm__ __volatile__ (CALL_NOSPEC
1366a8edbd0SVitaly Kuznetsov 				      : "=A"(hv_status),
1376a8edbd0SVitaly Kuznetsov 					"+c"(input1_lo),
138f5caf621SJosh Poimboeuf 					ASM_CALL_CONSTRAINT
1396a8edbd0SVitaly Kuznetsov 				      :	"A" (control),
1406a8edbd0SVitaly Kuznetsov 					"b" (input1_hi),
141e70e5892SDavid Woodhouse 					THUNK_TARGET(hv_hypercall_pg)
1426a8edbd0SVitaly Kuznetsov 				      : "cc", "edi", "esi");
1436a8edbd0SVitaly Kuznetsov 	}
1446a8edbd0SVitaly Kuznetsov #endif
1456a8edbd0SVitaly Kuznetsov 	return hv_status;
1466a8edbd0SVitaly Kuznetsov }
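
/*
 * Illustrative only, not part of the original header: fast hypercalls pass
 * their input in registers, so no hypercall input page is needed. For
 * example, signalling a host event takes a single 64-bit connection ID
 * (sig_event is an assumed value here):
 *
 *	u64 status = hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, sig_event);
 *
 *	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *		... handle failure ...
 */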
1476a8edbd0SVitaly Kuznetsov 
14853e52966SVitaly Kuznetsov /* Fast hypercall with 16 bytes of input */
14953e52966SVitaly Kuznetsov static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
15053e52966SVitaly Kuznetsov {
15153e52966SVitaly Kuznetsov 	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
15253e52966SVitaly Kuznetsov 
15353e52966SVitaly Kuznetsov #ifdef CONFIG_X86_64
15453e52966SVitaly Kuznetsov 	{
15553e52966SVitaly Kuznetsov 		__asm__ __volatile__("mov %4, %%r8\n"
15653e52966SVitaly Kuznetsov 				     CALL_NOSPEC
15753e52966SVitaly Kuznetsov 				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
15853e52966SVitaly Kuznetsov 				       "+c" (control), "+d" (input1)
15953e52966SVitaly Kuznetsov 				     : "r" (input2),
16053e52966SVitaly Kuznetsov 				       THUNK_TARGET(hv_hypercall_pg)
16153e52966SVitaly Kuznetsov 				     : "cc", "r8", "r9", "r10", "r11");
16253e52966SVitaly Kuznetsov 	}
16353e52966SVitaly Kuznetsov #else
16453e52966SVitaly Kuznetsov 	{
16553e52966SVitaly Kuznetsov 		u32 input1_hi = upper_32_bits(input1);
16653e52966SVitaly Kuznetsov 		u32 input1_lo = lower_32_bits(input1);
16753e52966SVitaly Kuznetsov 		u32 input2_hi = upper_32_bits(input2);
16853e52966SVitaly Kuznetsov 		u32 input2_lo = lower_32_bits(input2);
16953e52966SVitaly Kuznetsov 
17053e52966SVitaly Kuznetsov 		__asm__ __volatile__ (CALL_NOSPEC
17153e52966SVitaly Kuznetsov 				      : "=A"(hv_status),
17253e52966SVitaly Kuznetsov 					"+c"(input1_lo), ASM_CALL_CONSTRAINT
17353e52966SVitaly Kuznetsov 				      :	"A" (control), "b" (input1_hi),
17453e52966SVitaly Kuznetsov 					"D"(input2_hi), "S"(input2_lo),
17553e52966SVitaly Kuznetsov 					THUNK_TARGET(hv_hypercall_pg)
17653e52966SVitaly Kuznetsov 				      : "cc");
17753e52966SVitaly Kuznetsov 	}
17853e52966SVitaly Kuznetsov #endif
17953e52966SVitaly Kuznetsov 	return hv_status;
18053e52966SVitaly Kuznetsov }
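
/*
 * Illustrative only, not part of the original header: a sketch of a
 * 16-byte fast hypercall in the style of the Hyper-V send-IPI path, where
 * the first quadword carries the IPI parameters and the second the target
 * CPU mask (ipi_arg and mask are hypothetical names):
 *
 *	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI,
 *					ipi_arg.as_uint64, mask);
 */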
18153e52966SVitaly Kuznetsov 
182806c8927SVitaly Kuznetsov /*
183806c8927SVitaly Kuznetsov  * Rep hypercalls. Callers of this function are expected to ensure that
184806c8927SVitaly Kuznetsov  * rep_count and varhead_size comply with the Hyper-V hypercall definition.
185806c8927SVitaly Kuznetsov  */
186806c8927SVitaly Kuznetsov static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
187806c8927SVitaly Kuznetsov 				      void *input, void *output)
188806c8927SVitaly Kuznetsov {
189806c8927SVitaly Kuznetsov 	u64 control = code;
190806c8927SVitaly Kuznetsov 	u64 status;
191806c8927SVitaly Kuznetsov 	u16 rep_comp;
192806c8927SVitaly Kuznetsov 
193806c8927SVitaly Kuznetsov 	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
194806c8927SVitaly Kuznetsov 	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
195806c8927SVitaly Kuznetsov 
196806c8927SVitaly Kuznetsov 	do {
197806c8927SVitaly Kuznetsov 		status = hv_do_hypercall(control, input, output);
198806c8927SVitaly Kuznetsov 		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
199806c8927SVitaly Kuznetsov 			return status;
200806c8927SVitaly Kuznetsov 
201806c8927SVitaly Kuznetsov 		/* Bits 32-43 of status have 'Reps completed' data. */
202806c8927SVitaly Kuznetsov 		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
203806c8927SVitaly Kuznetsov 			HV_HYPERCALL_REP_COMP_OFFSET;
204806c8927SVitaly Kuznetsov 
205806c8927SVitaly Kuznetsov 		control &= ~HV_HYPERCALL_REP_START_MASK;
206806c8927SVitaly Kuznetsov 		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
207806c8927SVitaly Kuznetsov 
208806c8927SVitaly Kuznetsov 		touch_nmi_watchdog();
209806c8927SVitaly Kuznetsov 	} while (rep_comp < rep_count);
210806c8927SVitaly Kuznetsov 
211806c8927SVitaly Kuznetsov 	return status;
212806c8927SVitaly Kuznetsov }
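
/*
 * Illustrative only, not part of the original header: a sketch of a rep
 * hypercall in the style of the Hyper-V TLB-flush path, assuming gva_n
 * entries have already been packed into flush->gva_list[] in the per-cpu
 * input page (flush and gva_n are hypothetical names):
 *
 *	status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
 *				     gva_n, 0, flush, NULL);
 *
 * The loop above re-issues the hypercall until all gva_n reps are reported
 * complete, so the caller only needs to check the final status.
 */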
213806c8927SVitaly Kuznetsov 
214a46d15ccSVitaly Kuznetsov extern struct hv_vp_assist_page **hv_vp_assist_page;
215a46d15ccSVitaly Kuznetsov 
216a46d15ccSVitaly Kuznetsov static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
217a46d15ccSVitaly Kuznetsov {
218a46d15ccSVitaly Kuznetsov 	if (!hv_vp_assist_page)
219a46d15ccSVitaly Kuznetsov 		return NULL;
220a46d15ccSVitaly Kuznetsov 
221a46d15ccSVitaly Kuznetsov 	return hv_vp_assist_page[cpu];
222a46d15ccSVitaly Kuznetsov }
2237415aea6SVitaly Kuznetsov 
2246b48cb5fSK. Y. Srinivasan void __init hyperv_init(void);
2252ffd9e33SVitaly Kuznetsov void hyperv_setup_mmu_ops(void);
2268c3e44bdSMaya Nakamura void *hv_alloc_hyperv_page(void);
227fa36dcdfSHimadri Pandya void *hv_alloc_hyperv_zeroed_page(void);
2288c3e44bdSMaya Nakamura void hv_free_hyperv_page(unsigned long addr);
22993286261SVitaly Kuznetsov void set_hv_tscchange_cb(void (*cb)(void));
23093286261SVitaly Kuznetsov void clear_hv_tscchange_cb(void);
23193286261SVitaly Kuznetsov void hyperv_stop_tsc_emulation(void);
232eb914cfeSTianyu Lan int hyperv_flush_guest_mapping(u64 as);
233cc4edae4SLan Tianyu int hyperv_flush_guest_mapping_range(u64 as,
234cc4edae4SLan Tianyu 		hyperv_fill_flush_list_func fill_func, void *data);
235cc4edae4SLan Tianyu int hyperv_fill_flush_guest_mapping_list(
236cc4edae4SLan Tianyu 		struct hv_guest_mapping_flush_list *flush,
237cc4edae4SLan Tianyu 		u64 start_gfn, u64 end_gfn);
2382d2ccf24SThomas Gleixner 
2392d2ccf24SThomas Gleixner #ifdef CONFIG_X86_64
2406b48cb5fSK. Y. Srinivasan void hv_apic_init(void);
2413a025de6SYi Sun void __init hv_init_spinlocks(void);
2423a025de6SYi Sun bool hv_vcpu_is_preempted(int vcpu);
2432d2ccf24SThomas Gleixner #else
2442d2ccf24SThomas Gleixner static inline void hv_apic_init(void) {}
2452d2ccf24SThomas Gleixner #endif
2462d2ccf24SThomas Gleixner 
2471cf106d9SBoqun Feng static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
2481cf106d9SBoqun Feng 					      struct msi_desc *msi_desc)
2491cf106d9SBoqun Feng {
2501cf106d9SBoqun Feng 	msi_entry->address = msi_desc->msg.address_lo;
2511cf106d9SBoqun Feng 	msi_entry->data = msi_desc->msg.data;
2521cf106d9SBoqun Feng }
2531cf106d9SBoqun Feng 
25479cadff2SVitaly Kuznetsov #else /* CONFIG_HYPERV */
25579cadff2SVitaly Kuznetsov static inline void hyperv_init(void) {}
2562ffd9e33SVitaly Kuznetsov static inline void hyperv_setup_mmu_ops(void) {}
2578c3e44bdSMaya Nakamura static inline void *hv_alloc_hyperv_page(void) { return NULL; }
2588c3e44bdSMaya Nakamura static inline void hv_free_hyperv_page(unsigned long addr) {}
25993286261SVitaly Kuznetsov static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
26093286261SVitaly Kuznetsov static inline void clear_hv_tscchange_cb(void) {}
26193286261SVitaly Kuznetsov static inline void hyperv_stop_tsc_emulation(void) {}
262a46d15ccSVitaly Kuznetsov static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
263a46d15ccSVitaly Kuznetsov {
264a46d15ccSVitaly Kuznetsov 	return NULL;
265a46d15ccSVitaly Kuznetsov }
266eb914cfeSTianyu Lan static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
267cc4edae4SLan Tianyu static inline int hyperv_flush_guest_mapping_range(u64 as,
268cc4edae4SLan Tianyu 		hyperv_fill_flush_list_func fill_func, void *data)
269cc4edae4SLan Tianyu {
270cc4edae4SLan Tianyu 	return -1;
271cc4edae4SLan Tianyu }
27279cadff2SVitaly Kuznetsov #endif /* CONFIG_HYPERV */
27379cadff2SVitaly Kuznetsov 
274765e33f5SMichael Kelley 
275765e33f5SMichael Kelley #include <asm-generic/mshyperv.h>
276765e33f5SMichael Kelley 
277a2a47c6cSKy Srinivasan #endif /* _ASM_X86_MSHYPER_H */