// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual PTP 1588 clock for use with KVM guests
 *
 * Copyright (C) 2017 Red Hat Inc.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <asm/pvclock.h>
#include <asm/kvmclock.h>
#include <linux/module.h>
#include <uapi/asm/kvm_para.h>
#include <uapi/linux/kvm_para.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_kvm.h>
#include <linux/set_memory.h>

static phys_addr_t clock_pair_gpa;
static struct kvm_clock_pairing clock_pair_glbl;
static struct kvm_clock_pairing *clock_pair;

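/*
 * Set up the shared kvm_clock_pairing area, make sure kvmclock is usable,
 * and probe the host for KVM_HC_CLOCK_PAIRING support.
 */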
int kvm_arch_ptp_init(void)
{
	struct page *p;
	long ret;

	if (!kvm_para_available())
		return -EOPNOTSUPP;

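	/*
	 * With guest memory encryption (e.g. SEV) the host can only write
	 * the clock pairing data into memory the guest has mapped
	 * decrypted, so use a dedicated shared page instead of the static
	 * structure.
	 */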
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		p = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!p)
			return -ENOMEM;

		clock_pair = page_address(p);
		ret = set_memory_decrypted((unsigned long)clock_pair, 1);
		if (ret) {
			__free_page(p);
			clock_pair = NULL;
			goto nofree;
		}
	} else {
		clock_pair = &clock_pair_glbl;
	}

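	/*
	 * The host fills the structure at this guest physical address on
	 * each KVM_HC_CLOCK_PAIRING hypercall.  Without an active pvclock
	 * (kvmclock) mapping the cross timestamp cannot be related to a
	 * guest clocksource, so bail out.
	 */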
	clock_pair_gpa = slow_virt_to_phys(clock_pair);
	if (!pvclock_get_pvti_cpu0_va()) {
		ret = -EOPNOTSUPP;
		goto err;
	}

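	/*
	 * Probe for host support: -KVM_ENOSYS means the host does not
	 * implement KVM_HC_CLOCK_PAIRING.
	 */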
	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
			     KVM_CLOCK_PAIRING_WALLCLOCK);
	if (ret == -KVM_ENOSYS) {
		ret = -EOPNOTSUPP;
		goto err;
	}

	return ret;

err:
	kvm_arch_ptp_exit();
nofree:
	return ret;
}

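/*
 * Undo kvm_arch_ptp_init(): on encrypted guests, re-encrypt and free the
 * shared page.
 */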
void kvm_arch_ptp_exit(void)
{
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		WARN_ON(set_memory_encrypted((unsigned long)clock_pair, 1));
		free_page((unsigned long)clock_pair);
		clock_pair = NULL;
	}
}

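/*
 * Read the host's wallclock time by asking it to refresh the shared
 * clock pairing structure.
 */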
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
{
	long ret;

	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
			     clock_pair_gpa,
			     KVM_CLOCK_PAIRING_WALLCLOCK);
	if (ret != 0) {
		pr_err_ratelimited("clock offset hypercall ret %ld\n", ret);
		return -EOPNOTSUPP;
	}

	ts->tv_sec = clock_pair->sec;
	ts->tv_nsec = clock_pair->nsec;

	return 0;
}

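/*
 * Return a (host wallclock time, guest clocksource cycles) pair for
 * get_device_system_crosststamp().  The host returns its wallclock time
 * together with the TSC value it read while sampling it; that TSC is
 * converted to kvmclock time using the current pvclock parameters.
 */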
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
				 struct clocksource **cs)
{
	struct pvclock_vcpu_time_info *src;
	unsigned int version;
	long ret;

	src = this_cpu_pvti();

	do {
		/*
		 * We are using a TSC value read in the host's
		 * kvm_hc_clock_pairing handling, so any changes to
		 * tsc_to_system_mul, tsc_shift or any other pvclock
		 * data invalidate that measurement.
		 */
		version = pvclock_read_begin(src);

		ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
				     clock_pair_gpa,
				     KVM_CLOCK_PAIRING_WALLCLOCK);
		if (ret != 0) {
			pr_err_ratelimited("clock pairing hypercall ret %ld\n", ret);
			return -EOPNOTSUPP;
		}
		tspec->tv_sec = clock_pair->sec;
		tspec->tv_nsec = clock_pair->nsec;
		*cycle = __pvclock_read_cycles(src, clock_pair->tsc);
	} while (pvclock_read_retry(src, version));

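	/* Tell the caller which clocksource the returned cycle value refers to. */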
	*cs = &kvm_clock;

	return 0;
}