xref: /openbmc/linux/arch/arm64/kvm/pvtime.c (revision 4eb5928d)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2019 Arm Ltd.
3 
4 #include <linux/arm-smccc.h>
5 #include <linux/kvm_host.h>
6 #include <linux/sched/stat.h>
7 
8 #include <asm/kvm_mmu.h>
9 #include <asm/pvclock-abi.h>
10 
11 #include <kvm/arm_hypercalls.h>
12 
/*
 * Accumulate this vCPU's stolen time (scheduler run_delay accrued since
 * the last update) into the host-side counter and publish the total to
 * the guest-shared pvclock_vcpu_stolen_time record.
 *
 * Does nothing until the guest has enabled the feature by setting a
 * base IPA (base == GPA_INVALID means not enabled).
 */
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 steal;
	__le64 steal_le;
	u64 offset;
	int idx;
	u64 base = vcpu->arch.steal.base;

	if (base == GPA_INVALID)
		return;

	/*
	 * Let's do the local bookkeeping.
	 *
	 * NOTE(review): run_delay is read twice here; if it advances
	 * between the two reads, the delta is slightly miscounted.
	 * Also, the running total lives host-side in steal.steal, so the
	 * guest-visible value presumably diverges after a migration
	 * unless that field is migrated too — confirm against the VMM.
	 */
	steal = vcpu->arch.steal.steal;
	steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
	vcpu->arch.steal.steal = steal;

	/* The shared record is little-endian. */
	steal_le = cpu_to_le64(steal);
	/* SRCU protects the memslot lookup done inside kvm_put_guest(). */
	idx = srcu_read_lock(&kvm->srcu);
	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	kvm_put_guest(kvm, base + offset, steal_le, u64);
	srcu_read_unlock(&kvm->srcu, idx);
}
37 
38 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
39 {
40 	u32 feature = smccc_get_arg1(vcpu);
41 	long val = SMCCC_RET_NOT_SUPPORTED;
42 
43 	switch (feature) {
44 	case ARM_SMCCC_HV_PV_TIME_FEATURES:
45 	case ARM_SMCCC_HV_PV_TIME_ST:
46 		val = SMCCC_RET_SUCCESS;
47 		break;
48 	}
49 
50 	return val;
51 }
52 
53 gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
54 {
55 	struct pvclock_vcpu_stolen_time init_values = {};
56 	struct kvm *kvm = vcpu->kvm;
57 	u64 base = vcpu->arch.steal.base;
58 	int idx;
59 
60 	if (base == GPA_INVALID)
61 		return base;
62 
63 	/*
64 	 * Start counting stolen time from the time the guest requests
65 	 * the feature enabled.
66 	 */
67 	vcpu->arch.steal.steal = 0;
68 	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
69 
70 	idx = srcu_read_lock(&kvm->srcu);
71 	kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
72 	srcu_read_unlock(&kvm->srcu, idx);
73 
74 	return base;
75 }
76 
77 static bool kvm_arm_pvtime_supported(void)
78 {
79 	return !!sched_info_on();
80 }
81 
82 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
83 			    struct kvm_device_attr *attr)
84 {
85 	u64 __user *user = (u64 __user *)attr->addr;
86 	struct kvm *kvm = vcpu->kvm;
87 	u64 ipa;
88 	int ret = 0;
89 	int idx;
90 
91 	if (!kvm_arm_pvtime_supported() ||
92 	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
93 		return -ENXIO;
94 
95 	if (get_user(ipa, user))
96 		return -EFAULT;
97 	if (!IS_ALIGNED(ipa, 64))
98 		return -EINVAL;
99 	if (vcpu->arch.steal.base != GPA_INVALID)
100 		return -EEXIST;
101 
102 	/* Check the address is in a valid memslot */
103 	idx = srcu_read_lock(&kvm->srcu);
104 	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
105 		ret = -EINVAL;
106 	srcu_read_unlock(&kvm->srcu, idx);
107 
108 	if (!ret)
109 		vcpu->arch.steal.base = ipa;
110 
111 	return ret;
112 }
113 
114 int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
115 			    struct kvm_device_attr *attr)
116 {
117 	u64 __user *user = (u64 __user *)attr->addr;
118 	u64 ipa;
119 
120 	if (!kvm_arm_pvtime_supported() ||
121 	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
122 		return -ENXIO;
123 
124 	ipa = vcpu->arch.steal.base;
125 
126 	if (put_user(ipa, user))
127 		return -EFAULT;
128 	return 0;
129 }
130 
131 int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
132 			    struct kvm_device_attr *attr)
133 {
134 	switch (attr->attr) {
135 	case KVM_ARM_VCPU_PVTIME_IPA:
136 		if (kvm_arm_pvtime_supported())
137 			return 0;
138 	}
139 	return -ENXIO;
140 }
141