/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#include <linux/efi.h>
#include <linux/types.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>

#ifdef CONFIG_HYPERV_TSCPAGE

static struct ms_hyperv_tsc_page *tsc_pg;

struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
	return tsc_pg;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);

static u64 read_hv_clock_tsc(struct clocksource *arg)
{
	u64 current_tick = hv_read_tsc_page(tsc_pg);

	if (current_tick == U64_MAX)
		rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);

	return current_tick;
}

static struct clocksource hyperv_cs_tsc = {
	.name		= "hyperv_clocksource_tsc_page",
	.rating		= 400,
	.read		= read_hv_clock_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif

static u64 read_hv_clock_msr(struct clocksource *arg)
{
	u64 current_tick;
	/*
	 * Read the partition counter to get the current tick count. This count
	 * is set to 0 when the partition is created and is incremented in
	 * 100 nanosecond units.
	 */
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_msr = {
	.name		= "hyperv_clocksource_msr",
	.rating		= 400,
	.read		= read_hv_clock_msr,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
struct clocksource *hyperv_cs;
EXPORT_SYMBOL_GPL(hyperv_cs);

u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);

void __percpu **hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

u32 hv_max_vp_index;

static int hv_cpu_init(unsigned int cpu)
{
	u64 msr_vp_index;
	struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
	void **input_arg;

	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	*input_arg = page_address(alloc_page(GFP_KERNEL));

	hv_get_vp_index(msr_vp_index);

	hv_vp_index[smp_processor_id()] = msr_vp_index;

	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	if (!hv_vp_assist_page)
		return 0;

	if (!*hvp)
		*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);

	if (*hvp) {
		u64 val;

		val = vmalloc_to_pfn(*hvp);
		val = (val << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) |
			HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;

		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, val);
	}

	return 0;
}

static void (*hv_reenlightenment_cb)(void);

static void hv_reenlightenment_notify(struct work_struct *dummy)
{
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	/* Don't issue the callback if TSC accesses are not emulated */
	if (hv_reenlightenment_cb && emu_status.inprogress)
		hv_reenlightenment_cb();
}
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);

void hyperv_stop_tsc_emulation(void)
{
	u64 freq;
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	emu_status.inprogress = 0;
	wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
	tsc_khz = div64_u64(freq, 1000);
}
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);

static inline bool hv_reenlightenment_available(void)
{
	/*
	 * Check for required features and privileges to make TSC frequency
	 * change notifications work.
	 */
	return ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
		ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
		ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT;
}

__visible void __irq_entry hyperv_reenlightenment_intr(struct pt_regs *regs)
{
	entering_ack_irq();

	inc_irq_stat(irq_hv_reenlightenment_count);

	schedule_delayed_work(&hv_reenlightenment_work, HZ/10);

	exiting_irq();
}

void set_hv_tscchange_cb(void (*cb)(void))
{
	struct hv_reenlightenment_control re_ctrl = {
		.vector = HYPERV_REENLIGHTENMENT_VECTOR,
		.enabled = 1,
		.target_vp = hv_vp_index[smp_processor_id()]
	};
	struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};

	if (!hv_reenlightenment_available()) {
		pr_warn("Hyper-V: reenlightenment support is unavailable\n");
		return;
	}

	hv_reenlightenment_cb = cb;

	/* Make sure callback is registered before we write to MSRs */
	wmb();

	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
}
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);

void clear_hv_tscchange_cb(void)
{
	struct hv_reenlightenment_control re_ctrl;

	if (!hv_reenlightenment_available())
		return;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);

	hv_reenlightenment_cb = NULL;
}
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);

static int hv_cpu_die(unsigned int cpu)
{
	struct hv_reenlightenment_control re_ctrl;
	unsigned int new_cpu;
	unsigned long flags;
	void **input_arg;
	void *input_pg = NULL;

	local_irq_save(flags);
	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	input_pg = *input_arg;
	*input_arg = NULL;
	local_irq_restore(flags);
	free_page((unsigned long)input_pg);

	if (hv_vp_assist_page && hv_vp_assist_page[cpu])
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0);

	if (hv_reenlightenment_cb == NULL)
		return 0;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
		/* Reassign to some other online CPU */
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);

		re_ctrl.target_vp = hv_vp_index[new_cpu];
		wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	}

	return 0;
}

static int __init hv_pci_init(void)
{
	int gen2vm = efi_enabled(EFI_BOOT);

	/*
	 * For Generation-2 VM, we exit from pci_arch_init() by returning 0.
	 * The purpose is to suppress the harmless warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	if (gen2vm)
		return 0;

	/* For Generation-1 VM, we'll proceed in pci_arch_init(). */
	return 1;
}

/*
 * This function is to be invoked early in the boot sequence after the
 * hypervisor has been detected.
 *
 * 1. Setup the hypercall page.
 * 2. Register Hyper-V specific clocksource.
 * 3. Setup Hyper-V specific APIC entry points.
 */
void __init hyperv_init(void)
{
	u64 guest_id, required_msrs;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int cpuhp, i;

	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return;

	/* Absolutely required MSRs */
	required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE |
			HV_X64_MSR_VP_INDEX_AVAILABLE;

	if ((ms_hyperv.features & required_msrs) != required_msrs)
		return;

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to set up the
	 * (per-CPU) hypercall input page, so this failure is fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);

	BUG_ON(hyperv_pcpu_input_arg == NULL);

	/* Allocate percpu VP index */
	hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index)
		return;

	for (i = 0; i < num_possible_cpus(); i++)
		hv_vp_index[i] = VP_INVAL;

	hv_vp_assist_page = kcalloc(num_possible_cpus(),
				    sizeof(*hv_vp_assist_page), GFP_KERNEL);
	if (!hv_vp_assist_page) {
		ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
		goto free_vp_index;
	}

	cpuhp = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
				  hv_cpu_init, hv_cpu_die);
	if (cpuhp < 0)
		goto free_vp_assist_page;

	/*
	 * Setup the hypercall page and enable hypercalls.
	 * 1. Register the guest ID
	 * 2. Enable the hypercall and register the hypercall page
	 */
	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	hv_hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
	if (hv_hypercall_pg == NULL) {
		wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
		goto remove_cpuhp_state;
	}

	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	hv_apic_init();

	x86_init.pci.arch_init = hv_pci_init;

	/*
	 * Register Hyper-V specific clocksource.
	 */
#ifdef CONFIG_HYPERV_TSCPAGE
	if (ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE) {
		union hv_x64_msr_hypercall_contents tsc_msr;

		tsc_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
		if (!tsc_pg)
			goto register_msr_cs;

		hyperv_cs = &hyperv_cs_tsc;

		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		tsc_msr.enable = 1;
		tsc_msr.guest_physical_address = vmalloc_to_pfn(tsc_pg);

		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		hyperv_cs_tsc.archdata.vclock_mode = VCLOCK_HVCLOCK;

		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
		return;
	}
register_msr_cs:
#endif
	/*
	 * For 32 bit guests just use the MSR based mechanism for reading
	 * the partition counter.
	 */
	hyperv_cs = &hyperv_cs_msr;
	if (ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)
		clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);

	return;

remove_cpuhp_state:
	cpuhp_remove_state(cpuhp);
free_vp_assist_page:
	kfree(hv_vp_assist_page);
	hv_vp_assist_page = NULL;
free_vp_index:
	kfree(hv_vp_index);
	hv_vp_index = NULL;
}

/*
 * This routine is called before kexec/kdump; it does the required cleanup.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	/* Reset the hypercall page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Reset the TSC page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

void hyperv_report_panic(struct pt_regs *regs, long err)
{
	static bool panic_reported;
	u64 guest_id;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	wrmsrl(HV_X64_MSR_CRASH_P0, err);
	wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);

/**
 * hyperv_report_panic_msg - report panic message to Hyper-V
 * @pa: physical address of the panic page containing the message
 * @size: size of the message in the page
 */
void hyperv_report_panic_msg(phys_addr_t pa, size_t size)
{
	/*
	 * P3 contains the physical address of the panic page and P4 contains
	 * the size of the panic data in that page. The remaining crash
	 * registers are ignored when the NOTIFY_MSG flag is set.
	 */
	wrmsrl(HV_X64_MSR_CRASH_P0, 0);
	wrmsrl(HV_X64_MSR_CRASH_P1, 0);
	wrmsrl(HV_X64_MSR_CRASH_P2, 0);
	wrmsrl(HV_X64_MSR_CRASH_P3, pa);
	wrmsrl(HV_X64_MSR_CRASH_P4, size);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL,
	       (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
EXPORT_SYMBOL_GPL(hyperv_report_panic_msg);

bool hv_is_hyperv_initialized(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/*
	 * Ensure that we're really on Hyper-V, and not a KVM or Xen
	 * emulation of Hyper-V.
	 */
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return false;

	/*
	 * Verify that earlier initialization succeeded by checking
	 * that the hypercall page is set up.
	 */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);