xref: /openbmc/linux/arch/arm64/kvm/pvtime.c (revision 82df5b73)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2019 Arm Ltd.
3 
4 #include <linux/arm-smccc.h>
5 #include <linux/kvm_host.h>
6 
7 #include <asm/kvm_mmu.h>
8 #include <asm/pvclock-abi.h>
9 
10 #include <kvm/arm_hypercalls.h>
11 
/*
 * Accumulate the time this vCPU's task spent runnable-but-not-running and
 * publish the new total to the guest's stolen-time record.
 *
 * Called on the vCPU's own thread; reads current->sched_info.run_delay as
 * the monotonically increasing "time spent waiting on a runqueue" counter.
 * No-op until the guest has enabled the feature (steal.base set by
 * kvm_init_stolen_time()/kvm_arm_pvtime_set_attr()).
 */
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 steal;
	__le64 steal_le;
	u64 offset;
	int idx;
	u64 base = vcpu->arch.steal.base;

	/* Stolen-time reporting not enabled for this vCPU. */
	if (base == GPA_INVALID)
		return;

	/* Let's do the local bookkeeping */
	/* Add the run_delay accrued since the last snapshot to the total. */
	steal = vcpu->arch.steal.steal;
	steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
	vcpu->arch.steal.steal = steal;

	/* Guest ABI is little-endian (struct pvclock_vcpu_stolen_time). */
	steal_le = cpu_to_le64(steal);
	/* SRCU protects the memslot lookup done by the guest write below. */
	idx = srcu_read_lock(&kvm->srcu);
	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	kvm_put_guest(kvm, base + offset, steal_le, u64);
	srcu_read_unlock(&kvm->srcu, idx);
}
36 
37 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
38 {
39 	u32 feature = smccc_get_arg1(vcpu);
40 	long val = SMCCC_RET_NOT_SUPPORTED;
41 
42 	switch (feature) {
43 	case ARM_SMCCC_HV_PV_TIME_FEATURES:
44 	case ARM_SMCCC_HV_PV_TIME_ST:
45 		val = SMCCC_RET_SUCCESS;
46 		break;
47 	}
48 
49 	return val;
50 }
51 
52 gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
53 {
54 	struct pvclock_vcpu_stolen_time init_values = {};
55 	struct kvm *kvm = vcpu->kvm;
56 	u64 base = vcpu->arch.steal.base;
57 	int idx;
58 
59 	if (base == GPA_INVALID)
60 		return base;
61 
62 	/*
63 	 * Start counting stolen time from the time the guest requests
64 	 * the feature enabled.
65 	 */
66 	vcpu->arch.steal.steal = 0;
67 	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
68 
69 	idx = srcu_read_lock(&kvm->srcu);
70 	kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
71 	srcu_read_unlock(&kvm->srcu, idx);
72 
73 	return base;
74 }
75 
76 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
77 			    struct kvm_device_attr *attr)
78 {
79 	u64 __user *user = (u64 __user *)attr->addr;
80 	struct kvm *kvm = vcpu->kvm;
81 	u64 ipa;
82 	int ret = 0;
83 	int idx;
84 
85 	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
86 		return -ENXIO;
87 
88 	if (get_user(ipa, user))
89 		return -EFAULT;
90 	if (!IS_ALIGNED(ipa, 64))
91 		return -EINVAL;
92 	if (vcpu->arch.steal.base != GPA_INVALID)
93 		return -EEXIST;
94 
95 	/* Check the address is in a valid memslot */
96 	idx = srcu_read_lock(&kvm->srcu);
97 	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
98 		ret = -EINVAL;
99 	srcu_read_unlock(&kvm->srcu, idx);
100 
101 	if (!ret)
102 		vcpu->arch.steal.base = ipa;
103 
104 	return ret;
105 }
106 
107 int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
108 			    struct kvm_device_attr *attr)
109 {
110 	u64 __user *user = (u64 __user *)attr->addr;
111 	u64 ipa;
112 
113 	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
114 		return -ENXIO;
115 
116 	ipa = vcpu->arch.steal.base;
117 
118 	if (put_user(ipa, user))
119 		return -EFAULT;
120 	return 0;
121 }
122 
123 int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
124 			    struct kvm_device_attr *attr)
125 {
126 	switch (attr->attr) {
127 	case KVM_ARM_VCPU_PVTIME_IPA:
128 		return 0;
129 	}
130 	return -ENXIO;
131 }
132