// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/adjust_pc.h>

#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

/*
 * Copy the host's view of the vCPU state into the hypervisor-owned shadow
 * vCPU before running it, converting kernel VAs to hyp VAs where needed.
 */
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

	hyp_vcpu->vcpu.arch.ctxt	= host_vcpu->arch.ctxt;

	hyp_vcpu->vcpu.arch.sve_state	= kern_hyp_va(host_vcpu->arch.sve_state);
	hyp_vcpu->vcpu.arch.sve_max_vl	= host_vcpu->arch.sve_max_vl;

	hyp_vcpu->vcpu.arch.hw_mmu	= host_vcpu->arch.hw_mmu;

	hyp_vcpu->vcpu.arch.hcr_el2	= host_vcpu->arch.hcr_el2;
	hyp_vcpu->vcpu.arch.mdcr_el2	= host_vcpu->arch.mdcr_el2;
	hyp_vcpu->vcpu.arch.cptr_el2	= host_vcpu->arch.cptr_el2;

	hyp_vcpu->vcpu.arch.iflags	= host_vcpu->arch.iflags;
	hyp_vcpu->vcpu.arch.fp_state	= host_vcpu->arch.fp_state;

	hyp_vcpu->vcpu.arch.debug_ptr	= kern_hyp_va(host_vcpu->arch.debug_ptr);
	hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;

	hyp_vcpu->vcpu.arch.vsesr_el2	= host_vcpu->arch.vsesr_el2;

	hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
}

/*
 * Copy the state the host is allowed to observe back from the shadow vCPU
 * after a run, including any vGIC list registers that were used.
 */
static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
	struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
	struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
	unsigned int i;

	host_vcpu->arch.ctxt		= hyp_vcpu->vcpu.arch.ctxt;

	host_vcpu->arch.hcr_el2		= hyp_vcpu->vcpu.arch.hcr_el2;
	host_vcpu->arch.cptr_el2	= hyp_vcpu->vcpu.arch.cptr_el2;

	host_vcpu->arch.fault		= hyp_vcpu->vcpu.arch.fault;

	host_vcpu->arch.iflags		= hyp_vcpu->vcpu.arch.iflags;
	host_vcpu->arch.fp_state	= hyp_vcpu->vcpu.arch.fp_state;

	host_cpu_if->vgic_hcr		= hyp_cpu_if->vgic_hcr;
	for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
}

static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
	int ret;

	host_vcpu = kern_hyp_va(host_vcpu);

	if (unlikely(is_protected_kvm_enabled())) {
		struct pkvm_hyp_vcpu *hyp_vcpu;
		struct kvm *host_kvm;

		host_kvm = kern_hyp_va(host_vcpu->kvm);
		hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
					      host_vcpu->vcpu_idx);
		if (!hyp_vcpu) {
			ret = -EINVAL;
			goto out;
		}

		flush_hyp_vcpu(hyp_vcpu);

		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);

		sync_hyp_vcpu(hyp_vcpu);
		pkvm_put_hyp_vcpu(hyp_vcpu);
	} else {
		/* The host is fully trusted, run its vCPU directly. */
		ret = __kvm_vcpu_run(host_vcpu);
	}

out:
	cpu_reg(host_ctxt, 1) = ret;
}
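/*
 * For reference, a sketch of the host-side counterpart (assumed, not part
 * of this file): the host passes its vCPU pointer as a kernel VA, which
 * handle___kvm_vcpu_run() converts with kern_hyp_va() before use:
 *
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */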
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	__kvm_adjust_pc(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
	__kvm_flush_vm_context();
}

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
}

static void
handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
	DECLARE_REG(unsigned long, pages, host_ctxt, 3);

	__kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
	u64 tmp;

	/* Set SCTLR_EL2.DSSBS so PSTATE.SSBS defaults to 1 on entry to EL2. */
	tmp = read_sysreg_el2(SYS_SCTLR);
	tmp |= SCTLR_ELx_DSSBS;
	write_sysreg_el2(tmp, SYS_SCTLR);
}

static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_init_lrs();
}

static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}
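/*
 * Host-side usage sketch (assumed, not part of this file): each of the
 * thin wrappers above unpacks arguments that the host loaded into GPRs
 * via kvm_call_hyp() and friends, e.g.:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 */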
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(unsigned long, size, host_ctxt, 2);
	DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
	DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
	DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);

	/*
	 * __pkvm_init() will return only if an error occurred, otherwise it
	 * tail-calls __pkvm_init_finalise(), which has to deal with the host
	 * context directly.
	 */
	cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
					    hyp_va_bits);
}

static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}

static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}

static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
}

static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(size_t, size, host_ctxt, 2);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);

	/*
	 * __pkvm_create_private_mapping() populates a pointer with the
	 * hypervisor start address of the allocation.
	 *
	 * However, the handle___pkvm_create_private_mapping() hypercall
	 * crosses the EL1/EL2 boundary, so the pointer would not be valid
	 * in this context.
	 *
	 * Instead, pass the allocation address as the return value (or
	 * return ERR_PTR() on failure).
	 */
	unsigned long haddr;
	int err = __pkvm_create_private_mapping(phys, size, prot, &haddr);

	if (err)
		haddr = (unsigned long)ERR_PTR(err);

	cpu_reg(host_ctxt, 1) = haddr;
}
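/*
 * Host-side decode sketch (assumed, not part of this file): the caller
 * recovers either the hyp VA or the error code from the single return
 * register:
 *
 *	unsigned long addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
 *					       phys, size, prot);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 */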
static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}

static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
	DECLARE_REG(unsigned long, vm_hva, host_ctxt, 2);
	DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 3);

	host_kvm = kern_hyp_va(host_kvm);
	cpu_reg(host_ctxt, 1) = __pkvm_init_vm(host_kvm, vm_hva, pgd_hva);
}

static void handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
	DECLARE_REG(unsigned long, vcpu_hva, host_ctxt, 3);

	host_vcpu = kern_hyp_va(host_vcpu);
	cpu_reg(host_ctxt, 1) = __pkvm_init_vcpu(handle, host_vcpu, vcpu_hva);
}

static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
}

typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

static const hcall_t host_hcall[] = {
	/* ___kvm_hyp_init */
	HANDLE_FUNC(__kvm_get_mdcr_el2),
	HANDLE_FUNC(__pkvm_init),
	HANDLE_FUNC(__pkvm_create_private_mapping),
	HANDLE_FUNC(__pkvm_cpu_set_vector),
	HANDLE_FUNC(__kvm_enable_ssbs),
	HANDLE_FUNC(__vgic_v3_init_lrs),
	HANDLE_FUNC(__vgic_v3_get_gic_config),
	HANDLE_FUNC(__pkvm_prot_finalize),

	HANDLE_FUNC(__pkvm_host_share_hyp),
	HANDLE_FUNC(__pkvm_host_unshare_hyp),
	HANDLE_FUNC(__kvm_adjust_pc),
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
	HANDLE_FUNC(__kvm_flush_cpu_context),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__vgic_v3_read_vmcr),
	HANDLE_FUNC(__vgic_v3_write_vmcr),
	HANDLE_FUNC(__vgic_v3_save_aprs),
	HANDLE_FUNC(__vgic_v3_restore_aprs),
	HANDLE_FUNC(__pkvm_vcpu_init_traps),
	HANDLE_FUNC(__pkvm_init_vm),
	HANDLE_FUNC(__pkvm_init_vcpu),
	HANDLE_FUNC(__pkvm_teardown_vm),
};

static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	unsigned long hcall_min = 0;
	hcall_t hfn;

	/*
	 * If pKVM has been initialised then reject any calls to the
	 * early "privileged" hypercalls. Note that we cannot reject
	 * calls to __pkvm_prot_finalize for two reasons: (1) The static
	 * key used to determine initialisation must be toggled prior to
	 * finalisation and (2) finalisation is performed on a per-CPU
	 * basis. This is all fine, however, since __pkvm_prot_finalize
	 * returns -EPERM after the first call for a given CPU.
	 */
	if (static_branch_unlikely(&kvm_protected_mode_initialized))
		hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;

	id &= ~ARM_SMCCC_CALL_HINTS;
	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	hfn = host_hcall[id];
	if (unlikely(!hfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);

	return;
inval:
	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
	__kvm_hyp_host_forward_smc(host_ctxt);
}

static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	bool handled;

	func_id &= ~ARM_SMCCC_CALL_HINTS;

	handled = kvm_host_psci_handler(host_ctxt, func_id);
	if (!handled)
		handled = kvm_host_ffa_handler(host_ctxt, func_id);
	if (!handled)
		default_host_smc_handler(host_ctxt);

	/* SMC was trapped, move ELR past the current PC. */
	kvm_skip_host_instr();
}

/* C entry point for host exceptions taken to EL2 (see nvhe/host.S). */
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_HVC64:
		handle_host_hcall(host_ctxt);
		break;
	case ESR_ELx_EC_SMC64:
		handle_host_smc(host_ctxt);
		break;
	case ESR_ELx_EC_SVE:
		/* Re-enable SVE for the host and reset ZCR_EL2 to the max VL. */
		if (has_hvhe())
			sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
							CPACR_EL1_ZEN_EL0EN));
		else
			sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
		isb();
		sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
		break;
	case ESR_ELx_EC_IABT_LOW:
	case ESR_ELx_EC_DABT_LOW:
		handle_host_mem_abort(host_ctxt);
		break;
	default:
		BUG();
	}
}
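/*
 * End-to-end flow sketch (assumed, not part of this file), tying the
 * pieces above together for one hypercall:
 *
 *	host:	kvm_call_hyp_nvhe(__pkvm_teardown_vm, handle);
 *	hyp:	handle_trap()			ESR_ELx_EC_HVC64
 *		  -> handle_host_hcall()	id -= KVM_HOST_SMCCC_ID(0)
 *		    -> host_hcall[id]		handle___pkvm_teardown_vm()
 */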