/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>

/*
 * Callback used by hyperv_flush_guest_mapping_range() to fill one entry
 * of a guest-mapping flush list.
 */
typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);

/*
 * Accessors for the Hyper-V synthetic MSRs. Each synthetic timer has a
 * CONFIG and a COUNT MSR pair, hence the (2*timer) stride below.
 */
#define hv_init_timer(timer, tick) \
	wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick)
#define hv_init_timer_config(timer, val) \
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*timer), val)

/* Synthetic Interrupt Message Page */
#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)

/* Synthetic Interrupt Event Flags Page */
#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)

/* SynIC control (SCONTROL) */
#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)

#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)

/* Signal end-of-message to the hypervisor */
#define hv_signal_eom() wrmsrl(HV_X64_MSR_EOM, 0)

#define hv_get_synint_state(int_num, val) \
	rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
#define hv_set_synint_state(int_num, val) \
	wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
/* Auto-EOI is usable unless the hypervisor recommends deprecating it */
#define hv_recommend_using_aeoi() \
	(!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))

#define hv_get_crash_ctl(val) \
	rdmsrl(HV_X64_MSR_CRASH_CTL, val)

#define hv_get_time_ref_count(val) \
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, val)

#define hv_get_reference_tsc(val) \
	rdmsrl(HV_X64_MSR_REFERENCE_TSC, val)
#define hv_set_reference_tsc(val) \
	wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
#define hv_set_clocksource_vdso(val) \
	((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
#define hv_enable_vdso_clocksource() \
	vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
#define hv_get_raw_timer() rdtsc_ordered()
#define hv_get_vector() HYPERVISOR_CALLBACK_VECTOR

/*
 * Reference to pv_ops must be inline so objtool
 * detection of noinstr violations can work correctly.
 */
static __always_inline void hv_setup_sched_clock(void *sched_clock)
{
#ifdef CONFIG_PARAVIRT
	pv_ops.time.sched_clock = sched_clock;
#endif
}

void hyperv_vector_handler(struct pt_regs *regs);

/* stimer0 runs via the SynIC message path here; no percpu IRQ to manage */
static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}


#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;

extern void *hv_hypercall_pg;
extern void __percpu **hyperv_pcpu_input_arg;
extern void __percpu **hyperv_pcpu_output_arg;

extern u64 hv_current_partition_id;

/*
 * Issue a standard (memory-based) hypercall through the hypercall page.
 * @control: hypercall control word (call code plus flags)
 * @input/@output: virtual addresses of the in/out parameter blocks;
 *		   translated to physical addresses here, NULL allowed.
 * Returns the hypervisor status, or U64_MAX if the hypercall page is
 * not yet set up. The indirect call goes through CALL_NOSPEC to honor
 * retpoline mitigations.
 */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	/* x86_64 ABI: control in RCX, input GPA in RDX, output GPA in R8 */
	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     :  "r" (output_address),
				THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	/* 32-bit ABI passes each 64-bit value as a hi/lo register pair */
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	/* Fast-bit set: input is passed in registers, no memory pages used */
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		/* 32-bit: 64-bit input is split into a hi/lo register pair */
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo),
					ASM_CALL_CONSTRAINT
				      :	"A" (control),
					"b" (input1_hi),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}

/* Fast hypercall with 16 bytes of input */
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		/* Second input quadword goes in R8 on x86_64 */
		__asm__ __volatile__("mov %4, %%r8\n"
				     CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);
		u32 input2_hi = upper_32_bits(input2);
		u32 input2_lo = lower_32_bits(input2);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo), ASM_CALL_CONSTRAINT
				      :	"A" (control), "b" (input1_hi),
					"D"(input2_hi), "S"(input2_lo),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc");
	}
#endif
	return hv_status;
}

/*
 * Rep hypercalls. Callers of this functions are supposed to ensure that
 * rep_count and varhead_size comply with Hyper-V hypercall definition.
 */
/*
 * Issue a rep (repeated) hypercall, re-invoking it until all rep_count
 * elements have been processed. The hypervisor may complete only part of
 * the list per invocation; the 'reps completed' field of the returned
 * status is fed back as the rep start index of the next invocation.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			return status;

		/* Bits 32-43 of status have 'Reps completed' data. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
			HV_HYPERCALL_REP_COMP_OFFSET;

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		/* Long rep sequences can take a while; keep NMI watchdog calm */
		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}

extern struct hv_vp_assist_page **hv_vp_assist_page;

/*
 * Return the VP assist page for @cpu, or NULL if the assist page array
 * has not been allocated yet.
 */
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(unsigned long addr);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 end_gfn);

extern bool hv_root_partition;

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

/* Copy the low address and data of an MSI descriptor into a Hyper-V MSI entry */
static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
					      struct msi_desc *msi_desc)
{
	msi_entry->address = msi_desc->msg.address_lo;
	msi_entry->data = msi_desc->msg.data;
}

#else /* CONFIG_HYPERV */
/* Stubs so callers build unchanged when Hyper-V support is compiled out */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void *hv_alloc_hyperv_page(void) { return NULL; }
static inline void hv_free_hyperv_page(unsigned long addr) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {};
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data)
{
	return -1;
}
#endif /* CONFIG_HYPERV */


#include <asm-generic/mshyperv.h>

#endif