/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>

struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
};

extern struct ms_hyperv_info ms_hyperv;


/*
 * Generate the guest ID.
 */

static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}


/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may already have cleared the header
	 * and the host may already have delivered some other message there.
	 * In case we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * Make sure the write to MessageType (i.e. set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
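
/*
 * Illustrative sketch only (not part of the upstream header): a per-CPU
 * message handler typically consumes the SynIC message slot and then
 * releases it, e.g.
 *
 *	msgtype = READ_ONCE(msg->header.message_type);
 *	if (msgtype != HVMSG_NONE) {
 *		... dispatch the message ...
 *		vmbus_signal_eom(msg, msgtype);
 *	}
 *
 * where 'msg' points at this CPU's slot in the SynIC message page.
 */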

#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
#define hv_init_timer_config(config, val) wrmsrl(config, val)

#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)

#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)

#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)

#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)

#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)

void hyperv_callback_vector(void);
void hyperv_reenlightenment_vector(void);
#ifdef CONFIG_TRACING
#define trace_hyperv_callback_vector hyperv_callback_vector
#endif
void hyperv_vector_handler(struct pt_regs *regs);
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

/*
 * Routines for stimer0 Direct Mode handling.
 * On x86/x64, there are no percpu actions to take.
 */
void hv_stimer0_vector_handler(struct pt_regs *regs);
void hv_stimer0_callback_vector(void);
int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
void hv_remove_stimer0_irq(int irq);

static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}


#if IS_ENABLED(CONFIG_HYPERV)
extern struct clocksource *hyperv_cs;
extern void *hv_hypercall_pg;
extern void __percpu **hyperv_pcpu_input_arg;

static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     : "r" (output_address),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
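
/*
 * Illustrative sketch only: a "slow" hypercall passes guest-physical
 * addresses of caller-prepared input/output buffers. Posting a message to
 * the host is a typical user ('aligned_msg' names a suitably aligned
 * buffer and is an assumption of this sketch):
 *
 *	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
 *	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *		... handle the error ...
 */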

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo),
					ASM_CALL_CONSTRAINT
				      : "A" (control),
					"b" (input1_hi),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}
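
/*
 * Illustrative sketch only: fast hypercalls pass their input in registers,
 * so no buffer needs to be mapped. Signalling a host event is a typical
 * 8-byte user ('event_id' stands in for whatever connection ID the caller
 * owns and is an assumption of this sketch):
 *
 *	hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, event_id);
 */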

/* Fast hypercall with 16 bytes of input */
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__("mov %4, %%r8\n"
				     CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);
		u32 input2_hi = upper_32_bits(input2);
		u32 input2_lo = lower_32_bits(input2);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo), ASM_CALL_CONSTRAINT
				      : "A" (control), "b" (input1_hi),
					"D"(input2_hi), "S"(input2_lo),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc");
	}
#endif
	return hv_status;
}

/*
 * Rep hypercalls. Callers of this function are supposed to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			return status;

		/* Bits 32-43 of status have 'Reps completed' data. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
			HV_HYPERCALL_REP_COMP_OFFSET;

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}
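
/*
 * Illustrative sketch only: a rep hypercall asks for 'rep_count'
 * repetitions of the operation and is automatically re-issued from the
 * first incomplete rep. Flushing a list of guest virtual addresses is a
 * typical caller ('flush' and 'gva_n' are caller-prepared values and an
 * assumption of this sketch):
 *
 *	status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
 *				     gva_n, 0, flush, NULL);
 */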

/*
 * Hypervisor's notion of virtual processor ID is different from
 * Linux' notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}

static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				   const struct cpumask *cpus)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
	 * structs are not cleared between calls; we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		vcpu = hv_cpu_number_to_vp_number(cpu);
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}
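
/*
 * Illustrative sketch only: callers of the extended (_EX) hypercalls
 * typically convert the target cpumask into a sparse VP set first
 * ('flush' is a caller-prepared _EX argument structure and an assumption
 * of this sketch). A return value of 0 means the set could not be
 * represented and the caller should fall back to the non-_EX variant:
 *
 *	nr_bank = cpumask_to_vpset(&flush->hv_vp_set, cpus);
 *	if (!nr_bank)
 *		... fall back to the non-_EX hypercall ...
 */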

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);

void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
#else
static inline void hv_apic_init(void) {}
#endif

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {};
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
#endif /* CONFIG_HYPERV */

#ifdef CONFIG_HYPERV_TSCPAGE
struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	u64 scale, offset;
	u32 sequence;

	/*
	 * The protocol for reading Hyper-V TSC page is specified in Hypervisor
	 * Top-Level Functional Specification ver. 3.0 and above. To get the
	 * reference time we must do the following:
	 * - READ ReferenceTscSequence
	 *   A special '0' value indicates the time source is unreliable and we
	 *   need to use something else. The currently published specification
	 *   versions (up to 4.0b) contain a mistake and wrongly claim '-1'
	 *   instead of '0' as the special value, see commit c35b82ef0294.
	 * - ReferenceTime =
	 *       ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
	 * - READ ReferenceTscSequence again. In case its value has changed
	 *   since our first reading we need to discard ReferenceTime and repeat
	 *   the whole sequence as the hypervisor was updating the page in
	 *   between.
	 */
	do {
		sequence = READ_ONCE(tsc_pg->tsc_sequence);
		if (!sequence)
			return U64_MAX;
		/*
		 * Make sure we read sequence before we read other values from
		 * TSC page.
		 */
		smp_rmb();

		scale = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		*cur_tsc = rdtsc_ordered();

		/*
		 * Make sure we read sequence after we read all other values
		 * from TSC page.
		 */
		smp_rmb();

	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

	return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
}

static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
{
	u64 cur_tsc;

	return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
}

#else
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
	return NULL;
}

static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	BUG();
	return U64_MAX;
}
#endif
#endif