/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/mshyperv.h>

/*
 * Hyper-V always provides a single IO-APIC at this MMIO address.
 * Ideally, the value should be looked up in ACPI tables, but it
 * is needed for mapping the IO-APIC early in boot on Confidential
 * VMs, before ACPI functions can be used.
 */
#define HV_IOAPIC_BASE_ADDRESS	0xfec00000

union hv_ghcb;

DECLARE_STATIC_KEY_FALSE(isolation_type_snp);

typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);

void hyperv_vector_handler(struct pt_regs *regs);

#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;

extern void *hv_hypercall_pg;

extern u64 hv_current_partition_id;

extern union hv_ghcb * __percpu *hv_ghcb_pg;

int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);

static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     : "r" (output_address),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}

/* Hypercall to the L0 hypervisor */
static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output)
{
	return hv_do_hypercall(control | HV_HYPERCALL_NESTED, input, output);
}

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{
	u64 hv_status;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo),
					ASM_CALL_CONSTRAINT
				      : "A" (control),
					"b" (input1_hi),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}

static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall8(control, input1);
}
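/*
 * Illustrative sketch, not taken from this header: a "fast" hypercall passes
 * its input directly in registers, so the caller does not need to set up a
 * hypercall input page.  VMBus, for example, signals a host event roughly
 * like this ("event_id" is a hypothetical placeholder for the per-channel
 * connection ID):
 *
 *	u64 status = hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, event_id);
 *
 *	if (hv_result(status) != HV_STATUS_SUCCESS)
 *		...;
 *
 * HVCALL_SIGNAL_EVENT and HV_STATUS_SUCCESS come from asm/hyperv-tlfs.h;
 * hv_result() comes from asm-generic/mshyperv.h, included at the end of
 * this file.
 */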

static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;

	return _hv_do_fast_hypercall8(control, input1);
}

/* Fast hypercall with 16 bytes of input */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{
	u64 hv_status;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__("mov %4, %%r8\n"
				     CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);
		u32 input2_hi = upper_32_bits(input2);
		u32 input2_lo = lower_32_bits(input2);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo), ASM_CALL_CONSTRAINT
				      : "A" (control), "b" (input1_hi),
					"D"(input2_hi), "S"(input2_lo),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc");
	}
#endif
	return hv_status;
}

static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall16(control, input1, input2);
}

static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;

	return _hv_do_fast_hypercall16(control, input1, input2);
}

extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 end_gfn);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

struct irq_domain *hv_create_pci_msi_domain(void);

int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
		struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);

#ifdef CONFIG_AMD_MEM_ENCRYPT
void hv_ghcb_msr_write(u64 msr, u64 value);
void hv_ghcb_msr_read(u64 msr, u64 *value);
bool hv_ghcb_negotiate_protocol(void);
void hv_ghcb_terminate(unsigned int set, unsigned int reason);
void hv_vtom_init(void);
#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline void hv_vtom_init(void) {}
#endif

extern bool hv_isolation_type_snp(void);
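/*
 * Illustrative sketch, not taken from this header: in an SEV-SNP isolated
 * guest the SynIC MSRs cannot be reached with plain rdmsr/wrmsr and are
 * accessed through the per-CPU GHCB page instead, so a register read
 * dispatches roughly like this:
 *
 *	u64 value;
 *
 *	if (hv_is_synic_reg(reg) && hv_isolation_type_snp())
 *		hv_ghcb_msr_read(reg, &value);
 *	else
 *		rdmsrl(reg, value);
 *
 * hv_is_synic_reg() is defined just below; the real dispatch sits behind
 * hv_get_register()/hv_set_register() and their non-nested variants,
 * declared after it.
 */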

static inline bool hv_is_synic_reg(unsigned int reg)
{
	return (reg >= HV_REGISTER_SCONTROL) &&
	       (reg <= HV_REGISTER_SINT15);
}

static inline bool hv_is_sint_reg(unsigned int reg)
{
	return (reg >= HV_REGISTER_SINT0) &&
	       (reg <= HV_REGISTER_SINT15);
}

u64 hv_get_register(unsigned int reg);
void hv_set_register(unsigned int reg, u64 value);
u64 hv_get_non_nested_register(unsigned int reg);
void hv_set_non_nested_register(unsigned int reg, u64 value);

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {};
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data)
{
	return -1;
}
static inline void hv_set_register(unsigned int reg, u64 value) { }
static inline u64 hv_get_register(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_register(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_register(unsigned int reg) { return 0; }
#endif /* CONFIG_HYPERV */


#include <asm-generic/mshyperv.h>

#endif