1 /* 2 * QEMU AArch64 CPU 3 * 4 * Copyright (c) 2013 Linaro Ltd 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 2 9 * of the License, or (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see 18 * <http://www.gnu.org/licenses/gpl-2.0.html> 19 */ 20 21 #include "qemu/osdep.h" 22 #include "qapi/error.h" 23 #include "cpu.h" 24 #include "cpregs.h" 25 #include "qemu/module.h" 26 #include "qemu/units.h" 27 #include "system/kvm.h" 28 #include "system/hvf.h" 29 #include "system/qtest.h" 30 #include "system/tcg.h" 31 #include "kvm_arm.h" 32 #include "hvf_arm.h" 33 #include "qapi/visitor.h" 34 #include "hw/qdev-properties.h" 35 #include "internals.h" 36 #include "cpu-features.h" 37 #include "cpregs.h" 38 39 /* convert between <register>_IDX and SYS_<register> */ 40 #define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \ 41 [NAME##_IDX] = SYS_##NAME, 42 43 const uint32_t id_register_sysreg[NUM_ID_IDX] = { 44 #include "cpu-sysregs.h.inc" 45 }; 46 47 #undef DEF 48 #define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \ 49 case SYS_##NAME: return NAME##_IDX; 50 51 int get_sysreg_idx(ARMSysRegs sysreg) 52 { 53 switch (sysreg) { 54 #include "cpu-sysregs.h.inc" 55 } 56 g_assert_not_reached(); 57 } 58 59 #undef DEF 60 61 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) 62 { 63 /* 64 * If any vector lengths are explicitly enabled with sve<N> properties, 65 * then all other lengths are implicitly disabled. If sve-max-vq is 66 * specified then it is the same as explicitly enabling all lengths 67 * up to and including the specified maximum, which means all larger 68 * lengths will be implicitly disabled. If no sve<N> properties 69 * are enabled and sve-max-vq is not specified, then all lengths not 70 * explicitly disabled will be enabled. Additionally, all power-of-two 71 * vector lengths less than the maximum enabled length will be 72 * automatically enabled and all vector lengths larger than the largest 73 * disabled power-of-two vector length will be automatically disabled. 74 * Errors are generated if the user provided input that interferes with 75 * any of the above. Finally, if SVE is not disabled, then at least one 76 * vector length must be enabled. 77 */ 78 uint32_t vq_map = cpu->sve_vq.map; 79 uint32_t vq_init = cpu->sve_vq.init; 80 uint32_t vq_supported; 81 uint32_t vq_mask = 0; 82 uint32_t tmp, vq, max_vq = 0; 83 84 /* 85 * CPU models specify a set of supported vector lengths which are 86 * enabled by default. Attempting to enable any vector length not set 87 * in the supported bitmap results in an error. When KVM is enabled we 88 * fetch the supported bitmap from the host. 89 */ 90 if (kvm_enabled()) { 91 if (kvm_arm_sve_supported()) { 92 cpu->sve_vq.supported = kvm_arm_sve_get_vls(cpu); 93 vq_supported = cpu->sve_vq.supported; 94 } else { 95 assert(!cpu_isar_feature(aa64_sve, cpu)); 96 vq_supported = 0; 97 } 98 } else { 99 vq_supported = cpu->sve_vq.supported; 100 } 101 102 /* 103 * Process explicit sve<N> properties. 104 * From the properties, sve_vq_map<N> implies sve_vq_init<N>. 105 * Check first for any sve<N> enabled. 106 */ 107 if (vq_map != 0) { 108 max_vq = 32 - clz32(vq_map); 109 vq_mask = MAKE_64BIT_MASK(0, max_vq); 110 111 if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) { 112 error_setg(errp, "cannot enable sve%d", max_vq * 128); 113 error_append_hint(errp, "sve%d is larger than the maximum vector " 114 "length, sve-max-vq=%d (%d bits)\n", 115 max_vq * 128, cpu->sve_max_vq, 116 cpu->sve_max_vq * 128); 117 return; 118 } 119 120 if (kvm_enabled()) { 121 /* 122 * For KVM we have to automatically enable all supported uninitialized 123 * lengths, even when the smaller lengths are not all powers-of-two. 124 */ 125 vq_map |= vq_supported & ~vq_init & vq_mask; 126 } else { 127 /* Propagate enabled bits down through required powers-of-two. */ 128 vq_map |= SVE_VQ_POW2_MAP & ~vq_init & vq_mask; 129 } 130 } else if (cpu->sve_max_vq == 0) { 131 /* 132 * No explicit bits enabled, and no implicit bits from sve-max-vq. 133 */ 134 if (!cpu_isar_feature(aa64_sve, cpu)) { 135 /* 136 * SVE is disabled and so are all vector lengths. Good. 137 * Disable all SVE extensions as well. 138 */ 139 SET_IDREG(&cpu->isar, ID_AA64ZFR0, 0); 140 return; 141 } 142 143 if (kvm_enabled()) { 144 /* Disabling a supported length disables all larger lengths. */ 145 tmp = vq_init & vq_supported; 146 } else { 147 /* Disabling a power-of-two disables all larger lengths. */ 148 tmp = vq_init & SVE_VQ_POW2_MAP; 149 } 150 vq = ctz32(tmp) + 1; 151 152 max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ; 153 vq_mask = max_vq > 0 ? MAKE_64BIT_MASK(0, max_vq) : 0; 154 vq_map = vq_supported & ~vq_init & vq_mask; 155 156 if (vq_map == 0) { 157 error_setg(errp, "cannot disable sve%d", vq * 128); 158 error_append_hint(errp, "Disabling sve%d results in all " 159 "vector lengths being disabled.\n", 160 vq * 128); 161 error_append_hint(errp, "With SVE enabled, at least one " 162 "vector length must be enabled.\n"); 163 return; 164 } 165 166 max_vq = 32 - clz32(vq_map); 167 vq_mask = MAKE_64BIT_MASK(0, max_vq); 168 } 169 170 /* 171 * Process the sve-max-vq property. 172 * Note that we know from the above that no bit above 173 * sve-max-vq is currently set. 174 */ 175 if (cpu->sve_max_vq != 0) { 176 max_vq = cpu->sve_max_vq; 177 vq_mask = MAKE_64BIT_MASK(0, max_vq); 178 179 if (vq_init & ~vq_map & (1 << (max_vq - 1))) { 180 error_setg(errp, "cannot disable sve%d", max_vq * 128); 181 error_append_hint(errp, "The maximum vector length must be " 182 "enabled, sve-max-vq=%d (%d bits)\n", 183 max_vq, max_vq * 128); 184 return; 185 } 186 187 /* Set all bits not explicitly set within sve-max-vq. */ 188 vq_map |= ~vq_init & vq_mask; 189 } 190 191 /* 192 * We should know what max-vq is now. Also, as we're done 193 * manipulating sve-vq-map, we ensure any bits above max-vq 194 * are clear, just in case anybody looks. 195 */ 196 assert(max_vq != 0); 197 assert(vq_mask != 0); 198 vq_map &= vq_mask; 199 200 /* Ensure the set of lengths matches what is supported. */ 201 tmp = vq_map ^ (vq_supported & vq_mask); 202 if (tmp) { 203 vq = 32 - clz32(tmp); 204 if (vq_map & (1 << (vq - 1))) { 205 if (cpu->sve_max_vq) { 206 error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq); 207 error_append_hint(errp, "This CPU does not support " 208 "the vector length %d-bits.\n", vq * 128); 209 error_append_hint(errp, "It may not be possible to use " 210 "sve-max-vq with this CPU. Try " 211 "using only sve<N> properties.\n"); 212 } else { 213 error_setg(errp, "cannot enable sve%d", vq * 128); 214 if (vq_supported) { 215 error_append_hint(errp, "This CPU does not support " 216 "the vector length %d-bits.\n", vq * 128); 217 } else { 218 error_append_hint(errp, "SVE not supported by KVM " 219 "on this host\n"); 220 } 221 } 222 return; 223 } else { 224 if (kvm_enabled()) { 225 error_setg(errp, "cannot disable sve%d", vq * 128); 226 error_append_hint(errp, "The KVM host requires all " 227 "supported vector lengths smaller " 228 "than %d bits to also be enabled.\n", 229 max_vq * 128); 230 return; 231 } else { 232 /* Ensure all required powers-of-two are enabled. */ 233 tmp = SVE_VQ_POW2_MAP & vq_mask & ~vq_map; 234 if (tmp) { 235 vq = 32 - clz32(tmp); 236 error_setg(errp, "cannot disable sve%d", vq * 128); 237 error_append_hint(errp, "sve%d is required as it " 238 "is a power-of-two length smaller " 239 "than the maximum, sve%d\n", 240 vq * 128, max_vq * 128); 241 return; 242 } 243 } 244 } 245 } 246 247 /* 248 * Now that we validated all our vector lengths, the only question 249 * left to answer is if we even want SVE at all. 250 */ 251 if (!cpu_isar_feature(aa64_sve, cpu)) { 252 error_setg(errp, "cannot enable sve%d", max_vq * 128); 253 error_append_hint(errp, "SVE must be enabled to enable vector " 254 "lengths.\n"); 255 error_append_hint(errp, "Add sve=on to the CPU property list.\n"); 256 return; 257 } 258 259 /* From now on sve_max_vq is the actual maximum supported length. */ 260 cpu->sve_max_vq = max_vq; 261 cpu->sve_vq.map = vq_map; 262 } 263 264 /* 265 * Note that cpu_arm_{get,set}_vq cannot use the simpler 266 * object_property_add_bool interface because they make use of the 267 * contents of "name" to determine which bit on which to operate. 268 */ 269 static void cpu_arm_get_vq(Object *obj, Visitor *v, const char *name, 270 void *opaque, Error **errp) 271 { 272 ARMCPU *cpu = ARM_CPU(obj); 273 ARMVQMap *vq_map = opaque; 274 uint32_t vq = atoi(&name[3]) / 128; 275 bool sve = vq_map == &cpu->sve_vq; 276 bool value; 277 278 /* All vector lengths are disabled when feature is off. */ 279 if (sve 280 ? !cpu_isar_feature(aa64_sve, cpu) 281 : !cpu_isar_feature(aa64_sme, cpu)) { 282 value = false; 283 } else { 284 value = extract32(vq_map->map, vq - 1, 1); 285 } 286 visit_type_bool(v, name, &value, errp); 287 } 288 289 static void cpu_arm_set_vq(Object *obj, Visitor *v, const char *name, 290 void *opaque, Error **errp) 291 { 292 ARMVQMap *vq_map = opaque; 293 uint32_t vq = atoi(&name[3]) / 128; 294 bool value; 295 296 if (!visit_type_bool(v, name, &value, errp)) { 297 return; 298 } 299 300 vq_map->map = deposit32(vq_map->map, vq - 1, 1, value); 301 vq_map->init |= 1 << (vq - 1); 302 } 303 304 static bool cpu_arm_get_sve(Object *obj, Error **errp) 305 { 306 ARMCPU *cpu = ARM_CPU(obj); 307 return cpu_isar_feature(aa64_sve, cpu); 308 } 309 310 static void cpu_arm_set_sve(Object *obj, bool value, Error **errp) 311 { 312 ARMCPU *cpu = ARM_CPU(obj); 313 314 if (value && kvm_enabled() && !kvm_arm_sve_supported()) { 315 error_setg(errp, "'sve' feature not supported by KVM on this host"); 316 return; 317 } 318 319 FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, value); 320 } 321 322 void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp) 323 { 324 uint32_t vq_map = cpu->sme_vq.map; 325 uint32_t vq_init = cpu->sme_vq.init; 326 uint32_t vq_supported = cpu->sme_vq.supported; 327 uint32_t vq; 328 329 if (vq_map == 0) { 330 if (!cpu_isar_feature(aa64_sme, cpu)) { 331 SET_IDREG(&cpu->isar, ID_AA64SMFR0, 0); 332 return; 333 } 334 335 /* TODO: KVM will require limitations via SMCR_EL2. */ 336 vq_map = vq_supported & ~vq_init; 337 338 if (vq_map == 0) { 339 vq = ctz32(vq_supported) + 1; 340 error_setg(errp, "cannot disable sme%d", vq * 128); 341 error_append_hint(errp, "All SME vector lengths are disabled.\n"); 342 error_append_hint(errp, "With SME enabled, at least one " 343 "vector length must be enabled.\n"); 344 return; 345 } 346 } else { 347 if (!cpu_isar_feature(aa64_sme, cpu)) { 348 vq = 32 - clz32(vq_map); 349 error_setg(errp, "cannot enable sme%d", vq * 128); 350 error_append_hint(errp, "SME must be enabled to enable " 351 "vector lengths.\n"); 352 error_append_hint(errp, "Add sme=on to the CPU property list.\n"); 353 return; 354 } 355 /* TODO: KVM will require limitations via SMCR_EL2. */ 356 } 357 358 cpu->sme_vq.map = vq_map; 359 } 360 361 static bool cpu_arm_get_sme(Object *obj, Error **errp) 362 { 363 ARMCPU *cpu = ARM_CPU(obj); 364 return cpu_isar_feature(aa64_sme, cpu); 365 } 366 367 static void cpu_arm_set_sme(Object *obj, bool value, Error **errp) 368 { 369 ARMCPU *cpu = ARM_CPU(obj); 370 371 FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR1, SME, value); 372 } 373 374 static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp) 375 { 376 ARMCPU *cpu = ARM_CPU(obj); 377 return cpu_isar_feature(aa64_sme, cpu) && 378 cpu_isar_feature(aa64_sme_fa64, cpu); 379 } 380 381 static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp) 382 { 383 ARMCPU *cpu = ARM_CPU(obj); 384 385 FIELD_DP64_IDREG(&cpu->isar, ID_AA64SMFR0, FA64, value); 386 } 387 388 #ifdef CONFIG_USER_ONLY 389 /* Mirror linux /proc/sys/abi/{sve,sme}_default_vector_length. */ 390 static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v, 391 const char *name, void *opaque, 392 Error **errp) 393 { 394 uint32_t *ptr_default_vq = opaque; 395 int32_t default_len, default_vq, remainder; 396 397 if (!visit_type_int32(v, name, &default_len, errp)) { 398 return; 399 } 400 401 /* Undocumented, but the kernel allows -1 to indicate "maximum". */ 402 if (default_len == -1) { 403 *ptr_default_vq = ARM_MAX_VQ; 404 return; 405 } 406 407 default_vq = default_len / 16; 408 remainder = default_len % 16; 409 410 /* 411 * Note that the 512 max comes from include/uapi/asm/sve_context.h 412 * and is the maximum architectural width of ZCR_ELx.LEN. 413 */ 414 if (remainder || default_vq < 1 || default_vq > 512) { 415 ARMCPU *cpu = ARM_CPU(obj); 416 const char *which = 417 (ptr_default_vq == &cpu->sve_default_vq ? "sve" : "sme"); 418 419 error_setg(errp, "cannot set %s-default-vector-length", which); 420 if (remainder) { 421 error_append_hint(errp, "Vector length not a multiple of 16\n"); 422 } else if (default_vq < 1) { 423 error_append_hint(errp, "Vector length smaller than 16\n"); 424 } else { 425 error_append_hint(errp, "Vector length larger than %d\n", 426 512 * 16); 427 } 428 return; 429 } 430 431 *ptr_default_vq = default_vq; 432 } 433 434 static void cpu_arm_get_default_vec_len(Object *obj, Visitor *v, 435 const char *name, void *opaque, 436 Error **errp) 437 { 438 uint32_t *ptr_default_vq = opaque; 439 int32_t value = *ptr_default_vq * 16; 440 441 visit_type_int32(v, name, &value, errp); 442 } 443 #endif 444 445 void aarch64_add_sve_properties(Object *obj) 446 { 447 ARMCPU *cpu = ARM_CPU(obj); 448 uint32_t vq; 449 450 object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve); 451 452 for (vq = 1; vq <= ARM_MAX_VQ; ++vq) { 453 char name[8]; 454 snprintf(name, sizeof(name), "sve%d", vq * 128); 455 object_property_add(obj, name, "bool", cpu_arm_get_vq, 456 cpu_arm_set_vq, NULL, &cpu->sve_vq); 457 } 458 459 #ifdef CONFIG_USER_ONLY 460 /* Mirror linux /proc/sys/abi/sve_default_vector_length. */ 461 object_property_add(obj, "sve-default-vector-length", "int32", 462 cpu_arm_get_default_vec_len, 463 cpu_arm_set_default_vec_len, NULL, 464 &cpu->sve_default_vq); 465 #endif 466 } 467 468 void aarch64_add_sme_properties(Object *obj) 469 { 470 ARMCPU *cpu = ARM_CPU(obj); 471 uint32_t vq; 472 473 object_property_add_bool(obj, "sme", cpu_arm_get_sme, cpu_arm_set_sme); 474 object_property_add_bool(obj, "sme_fa64", cpu_arm_get_sme_fa64, 475 cpu_arm_set_sme_fa64); 476 477 for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) { 478 char name[8]; 479 snprintf(name, sizeof(name), "sme%d", vq * 128); 480 object_property_add(obj, name, "bool", cpu_arm_get_vq, 481 cpu_arm_set_vq, NULL, &cpu->sme_vq); 482 } 483 484 #ifdef CONFIG_USER_ONLY 485 /* Mirror linux /proc/sys/abi/sme_default_vector_length. */ 486 object_property_add(obj, "sme-default-vector-length", "int32", 487 cpu_arm_get_default_vec_len, 488 cpu_arm_set_default_vec_len, NULL, 489 &cpu->sme_default_vq); 490 #endif 491 } 492 493 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) 494 { 495 ARMPauthFeature features = cpu_isar_feature(pauth_feature, cpu); 496 ARMISARegisters *isar = &cpu->isar; 497 uint64_t isar1, isar2; 498 499 /* 500 * These properties enable or disable Pauth as a whole, or change 501 * the pauth algorithm, but do not change the set of features that 502 * are present. We have saved a copy of those features above and 503 * will now place it into the field that chooses the algorithm. 504 * 505 * Begin by disabling all fields. 506 */ 507 isar1 = GET_IDREG(isar, ID_AA64ISAR1); 508 isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, 0); 509 isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 0); 510 isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, 0); 511 isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 0); 512 513 isar2 = GET_IDREG(isar, ID_AA64ISAR2); 514 isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, 0); 515 isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 0); 516 517 if (kvm_enabled() || hvf_enabled()) { 518 /* 519 * Exit early if PAuth is enabled and fall through to disable it. 520 * The algorithm selection properties are not present. 521 */ 522 if (cpu->prop_pauth) { 523 if (features == 0) { 524 error_setg(errp, "'pauth' feature not supported by " 525 "%s on this host", current_accel_name()); 526 } 527 return; 528 } 529 } else { 530 /* Pauth properties are only present when the model supports it. */ 531 if (features == 0) { 532 assert(!cpu->prop_pauth); 533 return; 534 } 535 536 if (cpu->prop_pauth) { 537 if ((cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) || 538 (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma5) || 539 (cpu->prop_pauth_qarma3 && cpu->prop_pauth_qarma5)) { 540 error_setg(errp, 541 "cannot enable pauth-impdef, pauth-qarma3 and " 542 "pauth-qarma5 at the same time"); 543 return; 544 } 545 546 bool use_default = !cpu->prop_pauth_qarma5 && 547 !cpu->prop_pauth_qarma3 && 548 !cpu->prop_pauth_impdef; 549 550 if (cpu->prop_pauth_qarma5 || 551 (use_default && 552 cpu->backcompat_pauth_default_use_qarma5)) { 553 isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features); 554 isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1); 555 } else if (cpu->prop_pauth_qarma3) { 556 isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, features); 557 isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 1); 558 } else if (cpu->prop_pauth_impdef || 559 (use_default && 560 !cpu->backcompat_pauth_default_use_qarma5)) { 561 isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features); 562 isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1); 563 } else { 564 g_assert_not_reached(); 565 } 566 } else if (cpu->prop_pauth_impdef || 567 cpu->prop_pauth_qarma3 || 568 cpu->prop_pauth_qarma5) { 569 error_setg(errp, "cannot enable pauth-impdef, pauth-qarma3 or " 570 "pauth-qarma5 without pauth"); 571 error_append_hint(errp, "Add pauth=on to the CPU property list.\n"); 572 } 573 } 574 575 SET_IDREG(isar, ID_AA64ISAR1, isar1); 576 SET_IDREG(isar, ID_AA64ISAR2, isar2); 577 } 578 579 static const Property arm_cpu_pauth_property = 580 DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true); 581 static const Property arm_cpu_pauth_impdef_property = 582 DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false); 583 static const Property arm_cpu_pauth_qarma3_property = 584 DEFINE_PROP_BOOL("pauth-qarma3", ARMCPU, prop_pauth_qarma3, false); 585 static Property arm_cpu_pauth_qarma5_property = 586 DEFINE_PROP_BOOL("pauth-qarma5", ARMCPU, prop_pauth_qarma5, false); 587 588 void aarch64_add_pauth_properties(Object *obj) 589 { 590 ARMCPU *cpu = ARM_CPU(obj); 591 592 /* Default to PAUTH on, with the architected algorithm on TCG. */ 593 qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property); 594 if (kvm_enabled() || hvf_enabled()) { 595 /* 596 * Mirror PAuth support from the probed sysregs back into the 597 * property for KVM or hvf. Is it just a bit backward? Yes it is! 598 * Note that prop_pauth is true whether the host CPU supports the 599 * architected QARMA5 algorithm or the IMPDEF one. We don't 600 * provide the separate pauth-impdef property for KVM or hvf, 601 * only for TCG. 602 */ 603 cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu); 604 } else { 605 qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property); 606 qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma3_property); 607 qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma5_property); 608 } 609 } 610 611 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp) 612 { 613 uint64_t t; 614 615 /* 616 * We only install the property for tcg -cpu max; this is the 617 * only situation in which the cpu field can be true. 618 */ 619 if (!cpu->prop_lpa2) { 620 return; 621 } 622 623 t = GET_IDREG(&cpu->isar, ID_AA64MMFR0); 624 t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2); /* 16k pages w/ LPA2 */ 625 t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1); /* 4k pages w/ LPA2 */ 626 t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */ 627 t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3); /* 4k stage2 w/ LPA2 */ 628 SET_IDREG(&cpu->isar, ID_AA64MMFR0, t); 629 } 630 631 static void aarch64_a57_initfn(Object *obj) 632 { 633 ARMCPU *cpu = ARM_CPU(obj); 634 ARMISARegisters *isar = &cpu->isar; 635 636 cpu->dtb_compatible = "arm,cortex-a57"; 637 set_feature(&cpu->env, ARM_FEATURE_V8); 638 set_feature(&cpu->env, ARM_FEATURE_NEON); 639 set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); 640 set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ); 641 set_feature(&cpu->env, ARM_FEATURE_AARCH64); 642 set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); 643 set_feature(&cpu->env, ARM_FEATURE_EL2); 644 set_feature(&cpu->env, ARM_FEATURE_EL3); 645 set_feature(&cpu->env, ARM_FEATURE_PMU); 646 cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57; 647 cpu->midr = 0x411fd070; 648 cpu->revidr = 0x00000000; 649 cpu->reset_fpsid = 0x41034070; 650 cpu->isar.mvfr0 = 0x10110222; 651 cpu->isar.mvfr1 = 0x12111111; 652 cpu->isar.mvfr2 = 0x00000043; 653 cpu->ctr = 0x8444c004; 654 cpu->reset_sctlr = 0x00c50838; 655 SET_IDREG(isar, ID_PFR0, 0x00000131); 656 SET_IDREG(isar, ID_PFR1, 0x00011011); 657 SET_IDREG(isar, ID_DFR0, 0x03010066); 658 cpu->id_afr0 = 0x00000000; 659 SET_IDREG(isar, ID_MMFR0, 0x10101105); 660 SET_IDREG(isar, ID_MMFR1, 0x40000000); 661 SET_IDREG(isar, ID_MMFR2, 0x01260000); 662 SET_IDREG(isar, ID_MMFR3, 0x02102211); 663 SET_IDREG(isar, ID_ISAR0, 0x02101110); 664 SET_IDREG(isar, ID_ISAR1, 0x13112111); 665 SET_IDREG(isar, ID_ISAR2, 0x21232042); 666 SET_IDREG(isar, ID_ISAR3, 0x01112131); 667 SET_IDREG(isar, ID_ISAR4, 0x00011142); 668 SET_IDREG(isar, ID_ISAR5, 0x00011121); 669 SET_IDREG(isar, ID_ISAR6, 0); 670 SET_IDREG(isar, ID_AA64PFR0, 0x00002222); 671 SET_IDREG(isar, ID_AA64DFR0, 0x10305106); 672 SET_IDREG(isar, ID_AA64ISAR0, 0x00011120); 673 SET_IDREG(isar, ID_AA64MMFR0, 0x00001124); 674 cpu->isar.dbgdidr = 0x3516d000; 675 cpu->isar.dbgdevid = 0x01110f13; 676 cpu->isar.dbgdevid1 = 0x2; 677 cpu->isar.reset_pmcr_el0 = 0x41013000; 678 cpu->clidr = 0x0a200023; 679 /* 32KB L1 dcache */ 680 cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7); 681 /* 48KB L1 icache */ 682 cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 3, 64, 48 * KiB, 2); 683 /* 2048KB L2 cache */ 684 cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 2 * MiB, 7); 685 cpu->dcz_blocksize = 4; /* 64 bytes */ 686 cpu->gic_num_lrs = 4; 687 cpu->gic_vpribits = 5; 688 cpu->gic_vprebits = 5; 689 cpu->gic_pribits = 5; 690 define_cortex_a72_a57_a53_cp_reginfo(cpu); 691 } 692 693 static void aarch64_a53_initfn(Object *obj) 694 { 695 ARMCPU *cpu = ARM_CPU(obj); 696 ARMISARegisters *isar = &cpu->isar; 697 698 cpu->dtb_compatible = "arm,cortex-a53"; 699 set_feature(&cpu->env, ARM_FEATURE_V8); 700 set_feature(&cpu->env, ARM_FEATURE_NEON); 701 set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); 702 set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ); 703 set_feature(&cpu->env, ARM_FEATURE_AARCH64); 704 set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); 705 set_feature(&cpu->env, ARM_FEATURE_EL2); 706 set_feature(&cpu->env, ARM_FEATURE_EL3); 707 set_feature(&cpu->env, ARM_FEATURE_PMU); 708 cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53; 709 cpu->midr = 0x410fd034; 710 cpu->revidr = 0x00000100; 711 cpu->reset_fpsid = 0x41034070; 712 cpu->isar.mvfr0 = 0x10110222; 713 cpu->isar.mvfr1 = 0x12111111; 714 cpu->isar.mvfr2 = 0x00000043; 715 cpu->ctr = 0x84448004; /* L1Ip = VIPT */ 716 cpu->reset_sctlr = 0x00c50838; 717 SET_IDREG(isar, ID_PFR0, 0x00000131); 718 SET_IDREG(isar, ID_PFR1, 0x00011011); 719 SET_IDREG(isar, ID_DFR0, 0x03010066); 720 cpu->id_afr0 = 0x00000000; 721 SET_IDREG(isar, ID_MMFR0, 0x10101105); 722 SET_IDREG(isar, ID_MMFR1, 0x40000000); 723 SET_IDREG(isar, ID_MMFR2, 0x01260000); 724 SET_IDREG(isar, ID_MMFR3, 0x02102211); 725 SET_IDREG(isar, ID_ISAR0, 0x02101110); 726 SET_IDREG(isar, ID_ISAR1, 0x13112111); 727 SET_IDREG(isar, ID_ISAR2, 0x21232042); 728 SET_IDREG(isar, ID_ISAR3, 0x01112131); 729 SET_IDREG(isar, ID_ISAR4, 0x00011142); 730 SET_IDREG(isar, ID_ISAR5, 0x00011121); 731 SET_IDREG(isar, ID_ISAR6, 0); 732 SET_IDREG(isar, ID_AA64PFR0, 0x00002222); 733 SET_IDREG(isar, ID_AA64DFR0, 0x10305106); 734 SET_IDREG(isar, ID_AA64ISAR0, 0x00011120); 735 SET_IDREG(isar, ID_AA64MMFR0, 0x00001122); /* 40 bit physical addr */ 736 cpu->isar.dbgdidr = 0x3516d000; 737 cpu->isar.dbgdevid = 0x00110f13; 738 cpu->isar.dbgdevid1 = 0x1; 739 cpu->isar.reset_pmcr_el0 = 0x41033000; 740 cpu->clidr = 0x0a200023; 741 /* 32KB L1 dcache */ 742 cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7); 743 /* 32KB L1 icache */ 744 cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 1, 64, 32 * KiB, 2); 745 /* 1024KB L2 cache */ 746 cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 1 * MiB, 7); 747 cpu->dcz_blocksize = 4; /* 64 bytes */ 748 cpu->gic_num_lrs = 4; 749 cpu->gic_vpribits = 5; 750 cpu->gic_vprebits = 5; 751 cpu->gic_pribits = 5; 752 define_cortex_a72_a57_a53_cp_reginfo(cpu); 753 } 754 755 static void aarch64_host_initfn(Object *obj) 756 { 757 #if defined(CONFIG_KVM) 758 ARMCPU *cpu = ARM_CPU(obj); 759 kvm_arm_set_cpu_features_from_host(cpu); 760 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 761 aarch64_add_sve_properties(obj); 762 aarch64_add_pauth_properties(obj); 763 } 764 #elif defined(CONFIG_HVF) 765 ARMCPU *cpu = ARM_CPU(obj); 766 hvf_arm_set_cpu_features_from_host(cpu); 767 aarch64_add_pauth_properties(obj); 768 #else 769 g_assert_not_reached(); 770 #endif 771 } 772 773 static void aarch64_max_initfn(Object *obj) 774 { 775 if (kvm_enabled() || hvf_enabled()) { 776 /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */ 777 aarch64_host_initfn(obj); 778 return; 779 } 780 781 if (tcg_enabled() || qtest_enabled()) { 782 aarch64_a57_initfn(obj); 783 } 784 785 /* '-cpu max' for TCG: we currently do this as "A57 with extra things" */ 786 if (tcg_enabled()) { 787 aarch64_max_tcg_initfn(obj); 788 } 789 } 790 791 static const ARMCPUInfo aarch64_cpus[] = { 792 { .name = "cortex-a57", .initfn = aarch64_a57_initfn }, 793 { .name = "cortex-a53", .initfn = aarch64_a53_initfn }, 794 { .name = "max", .initfn = aarch64_max_initfn }, 795 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 796 { .name = "host", .initfn = aarch64_host_initfn }, 797 #endif 798 }; 799 800 static void aarch64_cpu_register_types(void) 801 { 802 size_t i; 803 804 for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) { 805 arm_cpu_register(&aarch64_cpus[i]); 806 } 807 } 808 809 type_init(aarch64_cpu_register_types) 810