// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#include <hyp/adjust_pc.h>

#include <nvhe/fixed_config.h>

#include "../../sys_regs.h"

/*
 * Copies of the host's CPU feature registers holding sanitized values at hyp.
 */
u64 id_aa64pfr0_el1_sys_val;
u64 id_aa64pfr1_el1_sys_val;
u64 id_aa64isar0_el1_sys_val;
u64 id_aa64isar1_el1_sys_val;
u64 id_aa64isar2_el1_sys_val;
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;
u64 id_aa64mmfr2_el1_sys_val;

/*
 * Inject an unknown/undefined exception into an AArch64 guest while most of
 * its sysregs are live.
 */
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
			     KVM_ARM64_PENDING_EXCEPTION);

	__kvm_adjust_pc(vcpu);

	write_sysreg_el1(esr, SYS_ESR);
	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
}

/*
 * Returns the restricted feature values of the feature register, based on the
 * limitations in restrict_fields.
 * A feature id field value of 0b0000 does not impose any restrictions.
 * Note: Use only for unsigned feature field values.
 */
static u64 get_restricted_features_unsigned(u64 sys_reg_val,
					    u64 restrict_fields)
{
	u64 value = 0UL;
	u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	/*
	 * According to the Arm Architecture Reference Manual, feature fields
	 * use increasing values to indicate increases in functionality.
	 * Iterate over the restricted feature fields and calculate the minimum
	 * unsigned value between the one supported by the system, and what the
	 * value is being restricted to.
	 */
	while (sys_reg_val && restrict_fields) {
		value |= min(sys_reg_val & mask, restrict_fields & mask);
		sys_reg_val &= ~mask;
		restrict_fields &= ~mask;
		mask <<= ARM64_FEATURE_FIELD_BITS;
	}

	return value;
}
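/*
 * Worked example for get_restricted_features_unsigned(), using hypothetical
 * values rather than any real register contents: with 4-bit feature fields
 * (ARM64_FEATURE_FIELD_BITS == 4), sys_reg_val = 0x21 (field0 = 1,
 * field1 = 2) and restrict_fields = 0x11 (field0 = 1, field1 = 1) yield
 * min(1, 1) = 1 and min(2, 1) = 1, i.e. a result of 0x11: each field is
 * clamped to the lower of what the hardware supports and what the
 * restriction allows.
 */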
/*
 * Functions that return the value of feature id registers for protected VMs
 * based on allowed features, system features, and KVM support.
 */

static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 set_mask = 0;
	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;

	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);

	/* Spectre and Meltdown mitigation in KVM */
	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
			       (u64)kvm->arch.pfr0_csv2);
	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
			       (u64)kvm->arch.pfr0_csv3);

	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
}

static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;

	if (!kvm_has_mte(kvm))
		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);

	return id_aa64pfr1_el1_sys_val & allow_mask;
}
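/*
 * Illustrative note (hypothetical scenario): the pattern above is
 * "allow-list, then conditionally narrow". For example, even if
 * PVM_ID_AA64PFR1_ALLOW permits MTE, a protected VM created without MTE
 * enabled (kvm_has_mte() == false) reads ID_AA64PFR1_EL1.MTE as 0, so the
 * guest never sees a feature the VMM did not opt into.
 */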
static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for Scalable Vectors; therefore, hyp has no sanitized
	 * copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug, including breakpoints and watchpoints;
	 * therefore, pKVM has no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug; therefore, hyp has no sanitized copy of the
	 * feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
{
	return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
}

static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;

	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));

	return id_aa64isar1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;

	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
				ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));

	return id_aa64isar2_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
{
	u64 set_mask;

	set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
		PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);

	return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
}

static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
}

static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
}

/* Read a sanitized cpufeature ID register by its encoding */
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		return get_pvm_id_aa64pfr0(vcpu);
	case SYS_ID_AA64PFR1_EL1:
		return get_pvm_id_aa64pfr1(vcpu);
	case SYS_ID_AA64ZFR0_EL1:
		return get_pvm_id_aa64zfr0(vcpu);
	case SYS_ID_AA64DFR0_EL1:
		return get_pvm_id_aa64dfr0(vcpu);
	case SYS_ID_AA64DFR1_EL1:
		return get_pvm_id_aa64dfr1(vcpu);
	case SYS_ID_AA64AFR0_EL1:
		return get_pvm_id_aa64afr0(vcpu);
	case SYS_ID_AA64AFR1_EL1:
		return get_pvm_id_aa64afr1(vcpu);
	case SYS_ID_AA64ISAR0_EL1:
		return get_pvm_id_aa64isar0(vcpu);
	case SYS_ID_AA64ISAR1_EL1:
		return get_pvm_id_aa64isar1(vcpu);
	case SYS_ID_AA64ISAR2_EL1:
		return get_pvm_id_aa64isar2(vcpu);
	case SYS_ID_AA64MMFR0_EL1:
		return get_pvm_id_aa64mmfr0(vcpu);
	case SYS_ID_AA64MMFR1_EL1:
		return get_pvm_id_aa64mmfr1(vcpu);
	case SYS_ID_AA64MMFR2_EL1:
		return get_pvm_id_aa64mmfr2(vcpu);
	default:
		/*
		 * Should never happen because all cases are covered in
		 * pvm_sys_reg_descs[].
		 */
		WARN_ON(1);
		break;
	}

	return 0;
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r)
{
	return pvm_read_id_reg(vcpu, reg_to_encoding(r));
}

/* Handler for RAZ/WI sysregs */
static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	if (!p->is_write)
		p->regval = 0;

	return true;
}
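/*
 * Note on RAZ/WI semantics (illustrative): for a register handled by
 * pvm_access_raz_wi(), a guest MRS reads 0 and a guest MSR is silently
 * ignored; in both cases the handler returns true, so the trapping
 * instruction is skipped and the guest continues, with no exception raised.
 */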
/*
 * Accessor for AArch32 feature id registers.
 *
 * The value of these registers is "unknown" according to the spec if AArch32
 * isn't supported.
 */
static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	/*
	 * No support for AArch32 guests, therefore, pKVM has no sanitized copy
	 * of AArch32 feature id registers.
	 */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);

	return pvm_access_raz_wi(vcpu, p, r);
}

/*
 * Accessor for AArch64 feature id registers.
 *
 * If access is allowed, set the regval to the protected VM's view of the
 * register and return true.
 * Otherwise, inject an undefined exception and return false.
 */
static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	p->regval = read_id_reg(vcpu, r);
	return true;
}

static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	/* pVMs only support GICv3. 'nuf said. */
	if (!p->is_write)
		p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;

	return true;
}
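/*
 * For reference: in the value returned by pvm_gic_read_sre(), SRE enables
 * the GICv3 system-register interface, while DIB and DFB report IRQ and FIQ
 * bypass as disabled. Writes are ignored (the handler returns true without
 * touching any state), so the guest cannot downgrade to the unsupported
 * memory-mapped GICv2 interface.
 */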
/* Mark the specified system register as an AArch32 feature id register. */
#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }

/* Mark the specified system register as an AArch64 feature id register. */
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }

/* Mark the specified system register as Read-As-Zero/Write-Ignored */
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }

/* Mark the specified system register as not being handled in hyp. */
#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
 * it will lead to injecting an exception into the guest.
 */
static const struct sys_reg_desc pvm_sys_reg_descs[] = {
	/* Cache maintenance by set/way operations are restricted. */

	/* Debug and Trace Registers are restricted. */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
	AARCH32(SYS_ID_PFR1_EL1),
	AARCH32(SYS_ID_DFR0_EL1),
	AARCH32(SYS_ID_AFR0_EL1),
	AARCH32(SYS_ID_MMFR0_EL1),
	AARCH32(SYS_ID_MMFR1_EL1),
	AARCH32(SYS_ID_MMFR2_EL1),
	AARCH32(SYS_ID_MMFR3_EL1),

	/* CRm=2 */
	AARCH32(SYS_ID_ISAR0_EL1),
	AARCH32(SYS_ID_ISAR1_EL1),
	AARCH32(SYS_ID_ISAR2_EL1),
	AARCH32(SYS_ID_ISAR3_EL1),
	AARCH32(SYS_ID_ISAR4_EL1),
	AARCH32(SYS_ID_ISAR5_EL1),
	AARCH32(SYS_ID_MMFR4_EL1),
	AARCH32(SYS_ID_ISAR6_EL1),

	/* CRm=3 */
	AARCH32(SYS_MVFR0_EL1),
	AARCH32(SYS_MVFR1_EL1),
	AARCH32(SYS_MVFR2_EL1),
	AARCH32(SYS_ID_PFR2_EL1),
	AARCH32(SYS_ID_DFR1_EL1),
	AARCH32(SYS_ID_MMFR5_EL1),

	/* AArch64 ID registers */
	/* CRm=4 */
	AARCH64(SYS_ID_AA64PFR0_EL1),
	AARCH64(SYS_ID_AA64PFR1_EL1),
	AARCH64(SYS_ID_AA64ZFR0_EL1),
	AARCH64(SYS_ID_AA64DFR0_EL1),
	AARCH64(SYS_ID_AA64DFR1_EL1),
	AARCH64(SYS_ID_AA64AFR0_EL1),
	AARCH64(SYS_ID_AA64AFR1_EL1),
	AARCH64(SYS_ID_AA64ISAR0_EL1),
	AARCH64(SYS_ID_AA64ISAR1_EL1),
	AARCH64(SYS_ID_AA64ISAR2_EL1),
	AARCH64(SYS_ID_AA64MMFR0_EL1),
	AARCH64(SYS_ID_AA64MMFR1_EL1),
	AARCH64(SYS_ID_AA64MMFR2_EL1),

	/* Scalable Vector Registers are restricted. */

	RAZ_WI(SYS_ERRIDR_EL1),
	RAZ_WI(SYS_ERRSELR_EL1),
	RAZ_WI(SYS_ERXFR_EL1),
	RAZ_WI(SYS_ERXCTLR_EL1),
	RAZ_WI(SYS_ERXSTATUS_EL1),
	RAZ_WI(SYS_ERXADDR_EL1),
	RAZ_WI(SYS_ERXMISC0_EL1),
	RAZ_WI(SYS_ERXMISC1_EL1),

	/* Performance Monitoring Registers are restricted. */

	/* Limited Ordering Regions Registers are restricted. */

	HOST_HANDLED(SYS_ICC_SGI1R_EL1),
	HOST_HANDLED(SYS_ICC_ASGI1R_EL1),
	HOST_HANDLED(SYS_ICC_SGI0R_EL1),
	{ SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, },

	HOST_HANDLED(SYS_CCSIDR_EL1),
	HOST_HANDLED(SYS_CLIDR_EL1),
	HOST_HANDLED(SYS_CSSELR_EL1),
	HOST_HANDLED(SYS_CTR_EL0),

	/* Performance Monitoring Registers are restricted. */

	/* Activity Monitoring Registers are restricted. */

	HOST_HANDLED(SYS_CNTP_TVAL_EL0),
	HOST_HANDLED(SYS_CNTP_CTL_EL0),
	HOST_HANDLED(SYS_CNTP_CVAL_EL0),

	/* Performance Monitoring Registers are restricted. */
};

/*
 * Checks that the sysreg table is unique and in-order.
 *
 * Returns 0 if the table is consistent, or 1 otherwise.
 */
int kvm_check_pvm_sysreg_table(void)
{
	unsigned int i;

	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
		if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0)
			return 1;
	}

	return 0;
}
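/*
 * kvm_check_pvm_sysreg_table() is intended to run once, early during pKVM
 * initialization (before any guest can trap), so an out-of-order table is
 * caught at setup time rather than at first guest access; find_reg() relies
 * on the table being sorted for its binary search.
 */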
/*
 * Handler for protected VM MSR, MRS or System instruction execution.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't, to be handled by the host.
 */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

	/* Undefined (RESTRICTED). */
	if (r == NULL) {
		inject_undef64(vcpu);
		return true;
	}

	/* Handled by the host (HOST_HANDLED) */
	if (r->access == NULL)
		return false;

	/* Handled by hyp: skip the instruction if the handler says so. */
	if (r->access(vcpu, &params, r))
		__kvm_skip_instr(vcpu);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return true;
}

/*
 * Handler for protected VM restricted exceptions.
 *
 * Inject an undefined exception into the guest and return true to indicate
 * that the hypervisor has handled the exit, and that control should return to
 * the guest.
 */
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	inject_undef64(vcpu);

	return true;
}