--- hypercalls.c (b22498c4846b52a5df2cc821d97c4049df0cf67a)
+++ hypercalls.c (6dcf7316e05eccded11fc640813c8a8879f271a6)
 // SPDX-License-Identifier: GPL-2.0
 // Copyright (C) 2019 Arm Ltd.
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_emulate.h>
 
[... 51 unchanged lines hidden ...]
          * nobody will give a damn about it).
          */
         val[0] = upper_32_bits(systime_snapshot.real);
         val[1] = lower_32_bits(systime_snapshot.real);
         val[2] = upper_32_bits(cycles);
         val[3] = lower_32_bits(cycles);
 }
 
-static bool kvm_hvc_call_default_allowed(u32 func_id)
+static bool kvm_smccc_default_allowed(u32 func_id)
 {
         switch (func_id) {
         /*
          * List of function-ids that are not gated with the bitmapped
          * feature firmware registers, and are to be allowed for
          * servicing the call by default.
          */
         case ARM_SMCCC_VERSION_FUNC_ID:
[... 11 unchanged lines hidden ...]
                  */
                 if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
                         return true;
 
                 return false;
         }
 }
 
-static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id)
+static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
 {
         struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
 
         switch (func_id) {
         case ARM_SMCCC_TRNG_VERSION:
         case ARM_SMCCC_TRNG_FEATURES:
         case ARM_SMCCC_TRNG_GET_UUID:
         case ARM_SMCCC_TRNG_RND32:
[... 7 unchanged lines hidden ...]
         case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
         case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
                 return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
                                 &smccc_feat->vendor_hyp_bmap);
         case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
                 return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
                                 &smccc_feat->vendor_hyp_bmap);
         default:
-                return kvm_hvc_call_default_allowed(func_id);
+                return false;
         }
 }
 
+#define SMC32_ARCH_RANGE_BEGIN  ARM_SMCCC_VERSION_FUNC_ID
+#define SMC32_ARCH_RANGE_END    ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+                                                   ARM_SMCCC_SMC_32,   \
+                                                   0, ARM_SMCCC_FUNC_MASK)
+
+#define SMC64_ARCH_RANGE_BEGIN  ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+                                                   ARM_SMCCC_SMC_64,   \
+                                                   0, 0)
+#define SMC64_ARCH_RANGE_END    ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+                                                   ARM_SMCCC_SMC_64,   \
+                                                   0, ARM_SMCCC_FUNC_MASK)
+
+static void init_smccc_filter(struct kvm *kvm)
+{
+        int r;
+
+        mt_init(&kvm->arch.smccc_filter);
+
+        /*
+         * Prevent userspace from handling any SMCCC calls in the architecture
+         * range, avoiding the risk of misrepresenting Spectre mitigation status
+         * to the guest.
+         */
+        r = mtree_insert_range(&kvm->arch.smccc_filter,
+                               SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
+                               xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
+                               GFP_KERNEL_ACCOUNT);
+        WARN_ON_ONCE(r);
+
+        r = mtree_insert_range(&kvm->arch.smccc_filter,
+                               SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
+                               xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
+                               GFP_KERNEL_ACCOUNT);
+        WARN_ON_ONCE(r);
+
+}
+
+static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
+{
+        const void *zero_page = page_to_virt(ZERO_PAGE(0));
+        struct kvm_smccc_filter filter;
+        u32 start, end;
+        int r;
+
+        if (copy_from_user(&filter, uaddr, sizeof(filter)))
+                return -EFAULT;
+
+        if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))
+                return -EINVAL;
+
+        start = filter.base;
+        end = start + filter.nr_functions - 1;
+
+        if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
+                return -EINVAL;
+
+        mutex_lock(&kvm->arch.config_lock);
+
+        if (kvm_vm_has_ran_once(kvm)) {
+                r = -EBUSY;
+                goto out_unlock;
+        }
+
+        r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
+                               xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
+        if (r)
+                goto out_unlock;
+
+        set_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags);
+
+out_unlock:
+        mutex_unlock(&kvm->arch.config_lock);
+        return r;
+}
+
+static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
+{
+        unsigned long idx = func_id;
+        void *val;
+
+        if (!test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags))
+                return KVM_SMCCC_FILTER_HANDLE;
+
+        /*
+         * But where's the error handling, you say?
+         *
+         * mt_find() returns NULL if no entry was found, which just so happens
+         * to match KVM_SMCCC_FILTER_HANDLE.
+         */
+        val = mt_find(&kvm->arch.smccc_filter, &idx, idx);
+        return xa_to_value(val);
+}
+
+static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id)
+{
+        /*
+         * Intervening actions in the SMCCC filter take precedence over the
+         * pseudo-firmware register bitmaps.
+         */
+        u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id);
+        if (action != KVM_SMCCC_FILTER_HANDLE)
+                return action;
+
+        if (kvm_smccc_test_fw_bmap(vcpu, func_id) ||
+            kvm_smccc_default_allowed(func_id))
+                return KVM_SMCCC_FILTER_HANDLE;
+
+        return KVM_SMCCC_FILTER_DENY;
+}
+
+static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id)
+{
+        u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
+        struct kvm_run *run = vcpu->run;
+        u64 flags = 0;
+
+        if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64)
+                flags |= KVM_HYPERCALL_EXIT_SMC;
+
+        if (!kvm_vcpu_trap_il_is32bit(vcpu))
+                flags |= KVM_HYPERCALL_EXIT_16BIT;
+
+        run->exit_reason = KVM_EXIT_HYPERCALL;
+        run->hypercall = (typeof(run->hypercall)) {
+                .nr     = func_id,
+                .flags  = flags,
+        };
+}
+
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
 {
         struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
         u32 func_id = smccc_get_function(vcpu);
         u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
         u32 feature;
+        u8 action;
         gpa_t gpa;
 
-        if (!kvm_hvc_call_allowed(vcpu, func_id))
+        action = kvm_smccc_get_action(vcpu, func_id);
+        switch (action) {
+        case KVM_SMCCC_FILTER_HANDLE:
+                break;
+        case KVM_SMCCC_FILTER_DENY:
                 goto out;
+        case KVM_SMCCC_FILTER_FWD_TO_USER:
+                kvm_prepare_hypercall_exit(vcpu, func_id);
+                return 0;
+        default:
+                WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action);
+                goto out;
+        }
 
         switch (func_id) {
         case ARM_SMCCC_VERSION_FUNC_ID:
                 val[0] = ARM_SMCCC_VERSION_1_1;
                 break;
         case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                 feature = smccc_get_arg1(vcpu);
                 switch (feature) {
[... 98 unchanged lines hidden ...]
 
 void kvm_arm_init_hypercalls(struct kvm *kvm)
 {
         struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
 
         smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
         smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
         smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
+
+        init_smccc_filter(kvm);
 }
 
+void kvm_arm_teardown_hypercalls(struct kvm *kvm)
+{
+        mtree_destroy(&kvm->arch.smccc_filter);
+}
+
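For context: when the filter resolves a call to KVM_SMCCC_FILTER_FWD_TO_USER, kvm_prepare_hypercall_exit() above completes KVM_RUN with KVM_EXIT_HYPERCALL, leaving the function ID in run->hypercall.nr and conduit hints in run->hypercall.flags. A minimal sketch of the userspace side follows; it is not part of this patch, the usual VM/vCPU setup is assumed, and the register write-back is only described in a comment.

/*
 * Sketch only: userspace side of a KVM_SMCCC_FILTER_FWD_TO_USER exit.
 * Assumes vcpu_fd is an open vCPU descriptor and run is its mmap'd
 * struct kvm_run. Error handling is trimmed for brevity.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                return -1;

        switch (run->exit_reason) {
        case KVM_EXIT_HYPERCALL: {
                /* Function ID of the guest HVC/SMC that matched the filter */
                __u64 func_id = run->hypercall.nr;
                /* Set when the guest used SMC rather than HVC */
                __u64 used_smc = run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC;
                /* Set when the trapped instruction was a 16-bit (Thumb) one */
                __u64 is_16bit = run->hypercall.flags & KVM_HYPERCALL_EXIT_16BIT;

                /*
                 * Emulate func_id here, then place the SMCCC return value(s)
                 * in the guest's X0-X3 (e.g. with KVM_SET_ONE_REG) before the
                 * next KVM_RUN; SMCCC_RET_NOT_SUPPORTED (-1) in X0 is the
                 * safe fallback.
                 */
                (void)func_id; (void)used_smc; (void)is_16bit;
                return 0;
        }
        default:
                return 0;
        }
}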
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
 {
         return ARRAY_SIZE(kvm_arm_fw_reg_ids);
 }
 
 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
         int i;
[... 116 unchanged lines hidden ...]
         }
 
         /* Check for unsupported bit */
         if (val & ~fw_reg_features)
                 return -EINVAL;
 
         mutex_lock(&kvm->arch.config_lock);
 
-        if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
-            val != *fw_reg_bmap) {
+        if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) {
                 ret = -EBUSY;
                 goto out;
         }
 
         WRITE_ONCE(*fw_reg_bmap, val);
 out:
         mutex_unlock(&kvm->arch.config_lock);
         return ret;
[... 82 unchanged lines hidden ...]
         case KVM_REG_ARM_VENDOR_HYP_BMAP:
                 return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
         default:
                 return -ENOENT;
         }
 
         return -EINVAL;
 }
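For context: the bitmap pseudo-firmware registers whose EBUSY check was just simplified to kvm_vm_has_ran_once() are still read and written through the ordinary vCPU one-reg interface, and changing them is refused once the VM has run. A minimal sketch, assuming the register and bit names KVM_REG_ARM_STD_BMAP and KVM_REG_ARM_STD_BIT_TRNG_V1_0 from the arm64 KVM UAPI header (not part of this patch):

/*
 * Sketch only: clear the TRNG feature bit in the std-hypercall bitmap
 * before any vCPU has run. Assumes vcpu_fd is an open vCPU descriptor;
 * the register/bit names are taken from <asm/kvm.h> (assumption).
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int disable_guest_trng(int vcpu_fd)
{
        __u64 bmap;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_STD_BMAP,
                .addr = (__u64)&bmap,
        };

        /* Read the current feature bitmap... */
        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                return -1;

        /* ...drop TRNG and write it back; changing it after the VM has run fails with EBUSY. */
        bmap &= ~(1ULL << KVM_REG_ARM_STD_BIT_TRNG_V1_0);
        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}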
+
+int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+        switch (attr->attr) {
+        case KVM_ARM_VM_SMCCC_FILTER:
+                return 0;
+        default:
+                return -ENXIO;
+        }
+}
+
+int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+        void __user *uaddr = (void __user *)attr->addr;
+
+        switch (attr->attr) {
+        case KVM_ARM_VM_SMCCC_FILTER:
+                return kvm_smccc_set_filter(kvm, uaddr);
+        default:
+                return -ENXIO;
+        }
+}
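For context: kvm_vm_smccc_set_attr() above is reached through the VM-scoped KVM_SET_DEVICE_ATTR ioctl. A minimal userspace sketch of installing a forward-to-userspace range follows; the attribute group constant KVM_ARM_VM_SMCCC_CTRL and the KVM_HAS_DEVICE_ATTR probe are assumptions about the surrounding UAPI rather than something shown in this file, while the struct kvm_smccc_filter layout, KVM_ARM_VM_SMCCC_FILTER, and the zeroed-pad and EBUSY rules match the code above.

/*
 * Sketch only: route an SMCCC function-ID range to userspace.
 * Assumes vm_fd is an open VM descriptor; KVM_ARM_VM_SMCCC_CTRL is the
 * assumed attribute group for KVM_ARM_VM_SMCCC_FILTER.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int forward_smccc_range(int vm_fd, __u32 base, __u32 nr_functions)
{
        struct kvm_smccc_filter filter;
        struct kvm_device_attr attr = {
                .group = KVM_ARM_VM_SMCCC_CTRL,
                .attr  = KVM_ARM_VM_SMCCC_FILTER,
                .addr  = (__u64)&filter,
        };

        memset(&filter, 0, sizeof(filter));     /* pad[] must be zero */
        filter.base         = base;             /* first function ID covered */
        filter.nr_functions = nr_functions;     /* size of the range */
        filter.action       = KVM_SMCCC_FILTER_FWD_TO_USER;

        /* Probe for the attribute first; kernels without it return ENXIO. */
        if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
                return -1;

        /* Must be done before any vCPU runs, or KVM returns EBUSY. */
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}

On the kernel side, mtree_insert_range() refuses a range that overlaps anything already inserted, including the SMC32/SMC64 architecture ranges reserved by init_smccc_filter(), and kvm_smccc_set_filter() rejects any request once the VM has run.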
|