1649472a1SPeng Hao // SPDX-License-Identifier: GPL-2.0-or-later
2790c73f6SGlauber de Oliveira Costa /* KVM paravirtual clock driver. A clocksource implementation
3790c73f6SGlauber de Oliveira Costa Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
4790c73f6SGlauber de Oliveira Costa */
5790c73f6SGlauber de Oliveira Costa
6790c73f6SGlauber de Oliveira Costa #include <linux/clocksource.h>
7790c73f6SGlauber de Oliveira Costa #include <linux/kvm_para.h>
8f6e16d5aSGerd Hoffmann #include <asm/pvclock.h>
9790c73f6SGlauber de Oliveira Costa #include <asm/msr.h>
10790c73f6SGlauber de Oliveira Costa #include <asm/apic.h>
11790c73f6SGlauber de Oliveira Costa #include <linux/percpu.h>
123b5d56b9SEric B Munson #include <linux/hardirq.h>
1395a3d445SThomas Gleixner #include <linux/cpuhotplug.h>
140ad83caaSLuiz Capitulino #include <linux/sched.h>
15e6017571SIngo Molnar #include <linux/sched/clock.h>
16368a540eSPavel Tatashin #include <linux/mm.h>
17958f338eSLinus Torvalds #include <linux/slab.h>
186a1cac56SBrijesh Singh #include <linux/set_memory.h>
194d96f910STom Lendacky #include <linux/cc_platform.h>
20736decacSThomas Gleixner
21e499a9b6SThomas Gleixner #include <asm/hypervisor.h>
22736decacSThomas Gleixner #include <asm/x86_init.h>
23f4066c2bSMarcelo Tosatti #include <asm/kvmclock.h>
24790c73f6SGlauber de Oliveira Costa
2542f8df93SThomas Gleixner static int kvmclock __initdata = 1;
26e499a9b6SThomas Gleixner static int kvmclock_vsyscall __initdata = 1;
27*db448ac9SKirill A. Shutemov static int msr_kvm_system_time __ro_after_init;
28*db448ac9SKirill A. Shutemov static int msr_kvm_wall_clock __ro_after_init;
2942f8df93SThomas Gleixner static u64 kvm_sched_clock_offset __ro_after_init;
30790c73f6SGlauber de Oliveira Costa
/*
 * "no-kvmclock" kernel parameter: disable the kvmclock clocksource
 * entirely; the guest falls back to other time sources (e.g. TSC).
 */
static int __init parse_no_kvmclock(char *arg)
{
	kvmclock = 0;
	return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);
37790c73f6SGlauber de Oliveira Costa
/*
 * "no-kvmclock-vsyscall" kernel parameter: keep kvmclock but do not
 * expose it through the vDSO (userspace clock reads go via syscall).
 */
static int __init parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
44e499a9b6SThomas Gleixner
45368a540eSPavel Tatashin /* Aligned to page sizes to match whats mapped via vsyscalls to userspace */
4695a3d445SThomas Gleixner #define HVC_BOOT_ARRAY_SIZE \
4795a3d445SThomas Gleixner (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
48368a540eSPavel Tatashin
4995a3d445SThomas Gleixner static struct pvclock_vsyscall_time_info
506a1cac56SBrijesh Singh hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
516a1cac56SBrijesh Singh static struct pvclock_wall_clock wall_clock __bss_decrypted;
526a1cac56SBrijesh Singh static struct pvclock_vsyscall_time_info *hvclock_mem;
53ad9af930SZelin Deng DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
54ad9af930SZelin Deng EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
55790c73f6SGlauber de Oliveira Costa
56790c73f6SGlauber de Oliveira Costa /*
57790c73f6SGlauber de Oliveira Costa * The wallclock is the time of day when we booted. Since then, some time may
58790c73f6SGlauber de Oliveira Costa * have elapsed since the hypervisor wrote the data. So we try to account for
59790c73f6SGlauber de Oliveira Costa * that with system time
60790c73f6SGlauber de Oliveira Costa */
/*
 * Read the boot wall clock from the hypervisor: writing the physical
 * address of @wall_clock to the wall clock MSR asks the host to fill
 * it in, then pvclock converts it to a timespec64 using this CPU's
 * time info.  Preemption is disabled so this_cpu_pvti() stays bound
 * to the CPU whose pvti we pass in.
 */
static void kvm_get_wallclock(struct timespec64 *now)
{
	wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
	preempt_disable();
	pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
	preempt_enable();
}
68790c73f6SGlauber de Oliveira Costa
/* The KVM wall clock is host-owned and read-only from the guest. */
static int kvm_set_wallclock(const struct timespec64 *now)
{
	return -ENODEV;
}
73790c73f6SGlauber de Oliveira Costa
/*
 * Read the current kvmclock value from this CPU's pvclock time info.
 * Uses the _notrace preempt variants because this runs underneath
 * sched_clock/tracing and must not recurse into instrumentation.
 */
static u64 kvm_clock_read(void)
{
	u64 ret;

	preempt_disable_notrace();
	ret = pvclock_clocksource_read_nowd(this_cpu_pvti());
	preempt_enable_notrace();
	return ret;
}
83f6e16d5aSGerd Hoffmann
/* clocksource ->read() callback; @cs is unused (kvmclock is global). */
static u64 kvm_clock_get_cycles(struct clocksource *cs)
{
	return kvm_clock_read();
}
888e19608eSMagnus Damm
/*
 * sched_clock backend: kvmclock minus the boot-time offset recorded in
 * kvm_sched_clock_init(), so sched_clock starts near zero.  noinstr:
 * runs with instrumentation disabled and must not call traceable code.
 */
static noinstr u64 kvm_sched_clock_read(void)
{
	return pvclock_clocksource_read_nowd(this_cpu_pvti()) - kvm_sched_clock_offset;
}
9372c930dcSRadim Krčmář
/*
 * Route sched_clock through kvmclock.  @stable tells us whether the
 * hypervisor guarantees a stable clock (PVCLOCK_TSC_STABLE_BIT).
 */
static inline void kvm_sched_clock_init(bool stable)
{
	if (!stable)
		clear_sched_clock_stable();
	/* Snapshot the current reading so sched_clock starts at ~0. */
	kvm_sched_clock_offset = kvm_clock_read();
	paravirt_set_sched_clock(kvm_sched_clock_read);

	pr_info("kvm-clock: using sched offset of %llu cycles",
		kvm_sched_clock_offset);

	/* The offset must fit in the pvti system_time field it mirrors. */
	BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
		     sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
}
10772c930dcSRadim Krčmář
/*
 * If we don't do that, there is the possibility that the guest
 * will calibrate under heavy load - thus, getting a lower lpj -
 * and execute the delays themselves without load. This is wrong,
 * because no delay loop can finish beforehand.
 * Any heuristics is subject to fail, because ultimately, a large
 * pool of guests can be running and trouble each other. So we preset
 * lpj here
 */
static unsigned long kvm_get_tsc_khz(void)
{
	/* The host supplies the frequency; skip native TSC calibration. */
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
	return pvclock_tsc_khz(this_cpu_pvti());
}
1220293615fSGlauber Costa
kvm_get_preset_lpj(void)1231088c6eeSDou Liyang static void __init kvm_get_preset_lpj(void)
1240293615fSGlauber Costa {
1250293615fSGlauber Costa unsigned long khz;
1260293615fSGlauber Costa u64 lpj;
1270293615fSGlauber Costa
128e93353c9SEduardo Habkost khz = kvm_get_tsc_khz();
1290293615fSGlauber Costa
1300293615fSGlauber Costa lpj = ((u64)khz * 1000);
1310293615fSGlauber Costa do_div(lpj, HZ);
1320293615fSGlauber Costa preset_lpj = lpj;
1330293615fSGlauber Costa }
1340293615fSGlauber Costa
kvm_check_and_clear_guest_paused(void)1353b5d56b9SEric B Munson bool kvm_check_and_clear_guest_paused(void)
1363b5d56b9SEric B Munson {
13795a3d445SThomas Gleixner struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
138146c394dSThomas Gleixner bool ret = false;
1393b5d56b9SEric B Munson
14095a3d445SThomas Gleixner if (!src)
1417069ed67SMarcelo Tosatti return ret;
1427069ed67SMarcelo Tosatti
14395a3d445SThomas Gleixner if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
14495a3d445SThomas Gleixner src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
145d63285e9SMarcelo Tosatti pvclock_touch_watchdogs();
1463b5d56b9SEric B Munson ret = true;
1473b5d56b9SEric B Munson }
1483b5d56b9SEric B Munson return ret;
1493b5d56b9SEric B Munson }
1503b5d56b9SEric B Munson
/* clocksource ->enable(): mark the pvclock vDSO clock mode as in use. */
static int kvm_cs_enable(struct clocksource *cs)
{
	vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
	return 0;
}
156eec399ddSThomas Gleixner
/*
 * The kvmclock clocksource.  Rating 400 prefers it over the TSC; it is
 * demoted to 299 in kvmclock_init() when the host exposes an invariant
 * TSC that makes kvmclock unnecessary.
 */
struct clocksource kvm_clock = {
	.name	= "kvm-clock",
	.read	= kvm_clock_get_cycles,
	.rating	= 400,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.enable	= kvm_cs_enable,
};
EXPORT_SYMBOL_GPL(kvm_clock);
166790c73f6SGlauber de Oliveira Costa
/*
 * Register this CPU's pvclock time info with the hypervisor by writing
 * its physical address to the system time MSR.  @txt is only used for
 * the debug message.
 */
static void kvm_register_clock(char *txt)
{
	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
	u64 pa;

	if (!src)
		return;

	/* Bit 0 of the MSR value is the enable bit. */
	pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
	wrmsrl(msr_kvm_system_time, pa);
	pr_debug("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
}
179790c73f6SGlauber de Oliveira Costa
/* Nothing to save across suspend; the host keeps the clock state. */
static void kvm_save_sched_clock_state(void)
{
}
183b74f05d6SMarcelo Tosatti
/* On resume, re-register the boot CPU's clock area with the host. */
static void kvm_restore_sched_clock_state(void)
{
	kvm_register_clock("primary cpu clock, resume");
}
188b74f05d6SMarcelo Tosatti
189b8ba5f10SGlauber Costa #ifdef CONFIG_X86_LOCAL_APIC
/* Early per-cpu hook: register kvmclock for a newly booted AP. */
static void kvm_setup_secondary_clock(void)
{
	kvm_register_clock("secondary cpu clock");
}
194b8ba5f10SGlauber Costa #endif
195790c73f6SGlauber de Oliveira Costa
/*
 * Tell the hypervisor to stop updating this CPU's clock area (used on
 * shutdown/kexec paths).  A zero MSR value disables the clock; skip
 * the write entirely if kvmclock was never registered.
 */
void kvmclock_disable(void)
{
	if (msr_kvm_system_time)
		native_write_msr(msr_kvm_system_time, 0, 0);
}
2011e977aa1SGlauber Costa
/*
 * Allocate pvclock time info for CPUs beyond those covered by the
 * static hv_clock_boot[] page.  The memory must be shared with the
 * hypervisor, so on memory-encrypted guests it is remapped decrypted
 * before first use.  On failure hvclock_mem stays NULL and the extra
 * CPUs will fail kvmclock_setup_percpu() with -ENOMEM.
 */
static void __init kvmclock_init_mem(void)
{
	unsigned long ncpus;
	unsigned int order;
	struct page *p;
	int r;

	/* The static boot array already covers every possible CPU. */
	if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
		return;

	ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
	order = get_order(ncpus * sizeof(*hvclock_mem));

	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
		return;
	}

	hvclock_mem = page_address(p);

	/*
	 * hvclock is shared between the guest and the hypervisor, must
	 * be mapped decrypted.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		r = set_memory_decrypted((unsigned long) hvclock_mem,
					 1UL << order);
		if (r) {
			__free_pages(p, order);
			hvclock_mem = NULL;
			pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
			return;
		}
	}

	/* Clear only after the mapping attributes are final. */
	memset(hvclock_mem, 0, PAGE_SIZE << order);
}
2406a1cac56SBrijesh Singh
kvm_setup_vsyscall_timeinfo(void)241e499a9b6SThomas Gleixner static int __init kvm_setup_vsyscall_timeinfo(void)
242e499a9b6SThomas Gleixner {
24377d72792SWanpeng Li if (!kvm_para_available() || !kvmclock || nopv)
2443c51d0a6SWanpeng Li return 0;
2453c51d0a6SWanpeng Li
246d7eb79c6SWanpeng Li kvmclock_init_mem();
247e499a9b6SThomas Gleixner
248d7eb79c6SWanpeng Li #ifdef CONFIG_X86_64
249d7eb79c6SWanpeng Li if (per_cpu(hv_clock_per_cpu, 0) && kvmclock_vsyscall) {
250d7eb79c6SWanpeng Li u8 flags;
251e499a9b6SThomas Gleixner
25295a3d445SThomas Gleixner flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
253e499a9b6SThomas Gleixner if (!(flags & PVCLOCK_TSC_STABLE_BIT))
25495a3d445SThomas Gleixner return 0;
255e499a9b6SThomas Gleixner
256b95a8a27SThomas Gleixner kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
257d7eb79c6SWanpeng Li }
258e499a9b6SThomas Gleixner #endif
2596a1cac56SBrijesh Singh
260e499a9b6SThomas Gleixner return 0;
261e499a9b6SThomas Gleixner }
262e499a9b6SThomas Gleixner early_initcall(kvm_setup_vsyscall_timeinfo);
263e499a9b6SThomas Gleixner
/*
 * CPU hotplug prepare callback: assign @cpu its pvclock time info,
 * either from the static boot array or from the dynamically allocated
 * overflow area.  Returns -ENOMEM if neither is available.
 */
static int kvmclock_setup_percpu(unsigned int cpu)
{
	struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);

	/*
	 * The per cpu area setup replicates CPU0 data to all cpu
	 * pointers. So carefully check. CPU0 has been set up in init
	 * already.
	 */
	if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
		return 0;

	/* Use the static page for the first CPUs, allocate otherwise */
	if (cpu < HVC_BOOT_ARRAY_SIZE)
		p = &hv_clock_boot[cpu];
	else if (hvclock_mem)
		p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
	else
		return -ENOMEM;

	per_cpu(hv_clock_per_cpu, cpu) = p;
	return p ? 0 : -ENOMEM;
}
28795a3d445SThomas Gleixner
/*
 * Boot-time kvmclock initialization: detect the MSR interface, wire up
 * the per-cpu clock areas, register the boot CPU's clock with the
 * host, hook sched_clock, TSC calibration and wallclock ops, and
 * finally register the clocksource.  Bails out silently when KVM
 * paravirt or kvmclock is unavailable/disabled.
 */
void __init kvmclock_init(void)
{
	u8 flags;

	if (!kvm_para_available() || !kvmclock)
		return;

	/* Prefer the new MSR pair; fall back to the legacy interface. */
	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
	} else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
	} else {
		return;
	}

	/* Per-cpu clock areas must be assigned before CPUs come up. */
	if (cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "kvmclock:setup_percpu",
			      kvmclock_setup_percpu, NULL) < 0) {
		return;
	}

	pr_info("kvm-clock: Using msrs %x and %x",
		msr_kvm_system_time, msr_kvm_wall_clock);

	this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);
	kvm_register_clock("primary cpu clock");
	pvclock_set_pvti_cpu0_va(hv_clock_boot);

	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);

	/* Read flags only after the clock has been registered above. */
	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
	kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);

	x86_platform.calibrate_tsc = kvm_get_tsc_khz;
	x86_platform.calibrate_cpu = kvm_get_tsc_khz;
	x86_platform.get_wallclock = kvm_get_wallclock;
	x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
	x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
#endif
	x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
	x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
	kvm_get_preset_lpj();

	/*
	 * X86_FEATURE_NONSTOP_TSC is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * Invariant TSC exposed by host means kvmclock is not necessary:
	 * can use TSC as clocksource.
	 *
	 */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
	    !check_tsc_unstable())
		kvm_clock.rating = 299;

	clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
	pv_info.name = "KVM";
}
350