/*
 * QEMU AArch64 CPU
 *
 * Copyright (c) 2013 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "cpregs.h"
#include "qemu/module.h"
#include "qemu/units.h"
#include "system/kvm.h"
#include "system/hvf.h"
#include "system/qtest.h"
#include "system/tcg.h"
#include "kvm_arm.h"
#include "hvf_arm.h"
#include "qapi/visitor.h"
#include "hw/qdev-properties.h"
#include "internals.h"
#include "cpu-features.h"

/* convert between <register>_IDX and SYS_<register> */
#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
    [NAME##_IDX] = SYS_##NAME,

const uint32_t id_register_sysreg[NUM_ID_IDX] = {
#include "cpu-sysregs.h.inc"
};

#undef DEF
#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
    case SYS_##NAME: return NAME##_IDX;

int get_sysreg_idx(ARMSysRegs sysreg)
{
    switch (sysreg) {
#include "cpu-sysregs.h.inc"
    }
    g_assert_not_reached();
}

#undef DEF
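
/*
 * For illustration (assuming a representative entry in cpu-sysregs.h.inc):
 * DEF(ID_AA64PFR0, 3, 0, 0, 4, 0) expands to
 *     [ID_AA64PFR0_IDX] = SYS_ID_AA64PFR0,
 * in the lookup table above and to
 *     case SYS_ID_AA64PFR0: return ID_AA64PFR0_IDX;
 * in get_sysreg_idx(), so both directions of the mapping stay in sync
 * from a single definition.
 */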

void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
    /*
     * If any vector lengths are explicitly enabled with sve<N> properties,
     * then all other lengths are implicitly disabled. If sve-max-vq is
     * specified then it is the same as explicitly enabling all lengths
     * up to and including the specified maximum, which means all larger
     * lengths will be implicitly disabled. If no sve<N> properties
     * are enabled and sve-max-vq is not specified, then all lengths not
     * explicitly disabled will be enabled. Additionally, all power-of-two
     * vector lengths less than the maximum enabled length will be
     * automatically enabled and all vector lengths larger than the largest
     * disabled power-of-two vector length will be automatically disabled.
     * Errors are generated if the user provided input that interferes with
     * any of the above. Finally, if SVE is not disabled, then at least one
     * vector length must be enabled.
     */
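    /*
     * Throughout, bit N - 1 of a vq bitmap corresponds to vq N, i.e. a
     * vector length of N * 128 bits. As a worked example (hypothetical
     * command line), "-cpu max,sve512=on" explicitly enables vq 4; the
     * power-of-two lengths below it (vq 1 and vq 2) are then implicitly
     * enabled, while sve384 and everything above vq 4 remain disabled.
     */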
    uint32_t vq_map = cpu->sve_vq.map;
    uint32_t vq_init = cpu->sve_vq.init;
    uint32_t vq_supported;
    uint32_t vq_mask = 0;
    uint32_t tmp, vq, max_vq = 0;

    /*
     * CPU models specify a set of supported vector lengths which are
     * enabled by default. Attempting to enable any vector length not set
     * in the supported bitmap results in an error. When KVM is enabled we
     * fetch the supported bitmap from the host.
     */
    if (kvm_enabled()) {
        if (kvm_arm_sve_supported()) {
            cpu->sve_vq.supported = kvm_arm_sve_get_vls(cpu);
            vq_supported = cpu->sve_vq.supported;
        } else {
            assert(!cpu_isar_feature(aa64_sve, cpu));
            vq_supported = 0;
        }
    } else {
        vq_supported = cpu->sve_vq.supported;
    }

    /*
     * Process explicit sve<N> properties.
     * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
     * Check first for any sve<N> enabled.
     */
    if (vq_map != 0) {
        max_vq = 32 - clz32(vq_map);
        vq_mask = MAKE_64BIT_MASK(0, max_vq);

        if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
            error_setg(errp, "cannot enable sve%d", max_vq * 128);
            error_append_hint(errp, "sve%d is larger than the maximum vector "
                              "length, sve-max-vq=%d (%d bits)\n",
                              max_vq * 128, cpu->sve_max_vq,
                              cpu->sve_max_vq * 128);
            return;
        }

        if (kvm_enabled()) {
            /*
             * For KVM we have to automatically enable all supported
             * uninitialized lengths, even when the smaller lengths are
             * not all powers-of-two.
             */
            vq_map |= vq_supported & ~vq_init & vq_mask;
        } else {
            /* Propagate enabled bits down through required powers-of-two. */
            vq_map |= SVE_VQ_POW2_MAP & ~vq_init & vq_mask;
        }
    } else if (cpu->sve_max_vq == 0) {
        /*
         * No explicit bits enabled, and no implicit bits from sve-max-vq.
         */
        if (!cpu_isar_feature(aa64_sve, cpu)) {
            /*
             * SVE is disabled and so are all vector lengths. Good.
             * Disable all SVE extensions as well.
             */
            SET_IDREG(&cpu->isar, ID_AA64ZFR0, 0);
            return;
        }

        if (kvm_enabled()) {
            /* Disabling a supported length disables all larger lengths. */
            tmp = vq_init & vq_supported;
        } else {
            /* Disabling a power-of-two disables all larger lengths. */
            tmp = vq_init & SVE_VQ_POW2_MAP;
        }
        vq = ctz32(tmp) + 1;

        max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
        vq_mask = max_vq > 0 ? MAKE_64BIT_MASK(0, max_vq) : 0;
        vq_map = vq_supported & ~vq_init & vq_mask;

        if (vq_map == 0) {
            error_setg(errp, "cannot disable sve%d", vq * 128);
            error_append_hint(errp, "Disabling sve%d results in all "
                              "vector lengths being disabled.\n",
                              vq * 128);
            error_append_hint(errp, "With SVE enabled, at least one "
                              "vector length must be enabled.\n");
            return;
        }

        max_vq = 32 - clz32(vq_map);
        vq_mask = MAKE_64BIT_MASK(0, max_vq);
    }

    /*
     * Process the sve-max-vq property.
     * Note that we know from the above that no bit above
     * sve-max-vq is currently set.
     */
    if (cpu->sve_max_vq != 0) {
        max_vq = cpu->sve_max_vq;
        vq_mask = MAKE_64BIT_MASK(0, max_vq);

        if (vq_init & ~vq_map & (1 << (max_vq - 1))) {
            error_setg(errp, "cannot disable sve%d", max_vq * 128);
            error_append_hint(errp, "The maximum vector length must be "
                              "enabled, sve-max-vq=%d (%d bits)\n",
                              max_vq, max_vq * 128);
            return;
        }

        /* Set all bits not explicitly set within sve-max-vq. */
        vq_map |= ~vq_init & vq_mask;
    }

    /*
     * We should know what max-vq is now. Also, as we're done
     * manipulating sve-vq-map, we ensure any bits above max-vq
     * are clear, just in case anybody looks.
     */
    assert(max_vq != 0);
    assert(vq_mask != 0);
    vq_map &= vq_mask;

    /* Ensure the set of lengths matches what is supported. */
    tmp = vq_map ^ (vq_supported & vq_mask);
    if (tmp) {
        vq = 32 - clz32(tmp);
        if (vq_map & (1 << (vq - 1))) {
            if (cpu->sve_max_vq) {
                error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
                error_append_hint(errp, "It may not be possible to use "
                                  "sve-max-vq with this CPU. Try "
                                  "using only sve<N> properties.\n");
            } else {
                error_setg(errp, "cannot enable sve%d", vq * 128);
                if (vq_supported) {
                    error_append_hint(errp, "This CPU does not support "
                                      "the vector length %d-bits.\n",
                                      vq * 128);
                } else {
                    error_append_hint(errp, "SVE not supported by KVM "
                                      "on this host\n");
                }
            }
            return;
        } else {
            if (kvm_enabled()) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "The KVM host requires all "
                                  "supported vector lengths smaller "
                                  "than %d bits to also be enabled.\n",
                                  max_vq * 128);
                return;
            } else {
                /* Ensure all required powers-of-two are enabled. */
                tmp = SVE_VQ_POW2_MAP & vq_mask & ~vq_map;
                if (tmp) {
                    vq = 32 - clz32(tmp);
                    error_setg(errp, "cannot disable sve%d", vq * 128);
                    error_append_hint(errp, "sve%d is required as it "
                                      "is a power-of-two length smaller "
                                      "than the maximum, sve%d\n",
                                      vq * 128, max_vq * 128);
                    return;
                }
            }
        }
    }

    /*
     * Now that we validated all our vector lengths, the only question
     * left to answer is if we even want SVE at all.
     */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        error_setg(errp, "cannot enable sve%d", max_vq * 128);
        error_append_hint(errp, "SVE must be enabled to enable vector "
                          "lengths.\n");
        error_append_hint(errp, "Add sve=on to the CPU property list.\n");
        return;
    }

    /* From now on sve_max_vq is the actual maximum supported length. */
    cpu->sve_max_vq = max_vq;
    cpu->sve_vq.map = vq_map;

    /* FEAT_F64MM requires the existence of a 256-bit vector size. */
    if (max_vq < 2) {
        uint64_t t = GET_IDREG(&cpu->isar, ID_AA64ZFR0);
        t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 0);
        SET_IDREG(&cpu->isar, ID_AA64ZFR0, t);
    }
}

/*
 * Note that cpu_arm_{get,set}_vq cannot use the simpler
 * object_property_add_bool interface because they make use of the
 * contents of "name" to determine which bit to operate on.
 */
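/*
 * For example, the property name "sve256" parses as vq = 256 / 128 = 2,
 * which selects bit 1 (vq - 1) of the ARMVQMap bitmaps.
 */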
static void cpu_arm_get_vq(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMVQMap *vq_map = opaque;
    uint32_t vq = atoi(&name[3]) / 128;
    bool sve = vq_map == &cpu->sve_vq;
    bool value;

    /* All vector lengths are disabled when feature is off. */
    if (sve
        ? !cpu_isar_feature(aa64_sve, cpu)
        : !cpu_isar_feature(aa64_sme, cpu)) {
        value = false;
    } else {
        value = extract32(vq_map->map, vq - 1, 1);
    }
    visit_type_bool(v, name, &value, errp);
}

static void cpu_arm_set_vq(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    ARMVQMap *vq_map = opaque;
    uint32_t vq = atoi(&name[3]) / 128;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    vq_map->map = deposit32(vq_map->map, vq - 1, 1, value);
    vq_map->init |= 1 << (vq - 1);
}

static bool cpu_arm_get_sve(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    return cpu_isar_feature(aa64_sve, cpu);
}

static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
        error_setg(errp, "'sve' feature not supported by KVM on this host");
        return;
    }

    FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, value);
}

void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
{
    uint32_t vq_map = cpu->sme_vq.map;
    uint32_t vq_init = cpu->sme_vq.init;
    uint32_t vq_supported = cpu->sme_vq.supported;
    uint32_t vq;

    if (vq_map == 0) {
        if (!cpu_isar_feature(aa64_sme, cpu)) {
            SET_IDREG(&cpu->isar, ID_AA64SMFR0, 0);
            return;
        }

        /* TODO: KVM will require limitations via SMCR_EL2. */
        vq_map = vq_supported & ~vq_init;

        if (vq_map == 0) {
            vq = ctz32(vq_supported) + 1;
            error_setg(errp, "cannot disable sme%d", vq * 128);
            error_append_hint(errp, "All SME vector lengths are disabled.\n");
            error_append_hint(errp, "With SME enabled, at least one "
                              "vector length must be enabled.\n");
            return;
        }
    } else {
        if (!cpu_isar_feature(aa64_sme, cpu)) {
            vq = 32 - clz32(vq_map);
            error_setg(errp, "cannot enable sme%d", vq * 128);
            error_append_hint(errp, "SME must be enabled to enable "
                              "vector lengths.\n");
            error_append_hint(errp, "Add sme=on to the CPU property list.\n");
            return;
        }
        /* TODO: KVM will require limitations via SMCR_EL2. */
    }

    cpu->sme_vq.map = vq_map;
    cpu->sme_max_vq = 32 - clz32(vq_map);
}

static bool cpu_arm_get_sme(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    return cpu_isar_feature(aa64_sme, cpu);
}

static void cpu_arm_set_sme(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR1, SME, value);
}

static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    return cpu_isar_feature(aa64_sme, cpu) &&
           cpu_isar_feature(aa64_sme_fa64, cpu);
}

static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    FIELD_DP64_IDREG(&cpu->isar, ID_AA64SMFR0, FA64, value);
}

#ifdef CONFIG_USER_ONLY
/* Mirror linux /proc/sys/abi/{sve,sme}_default_vector_length. */
static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    uint32_t *ptr_default_vq = opaque;
    int32_t default_len, default_vq, remainder;

    if (!visit_type_int32(v, name, &default_len, errp)) {
        return;
    }

    /* Undocumented, but the kernel allows -1 to indicate "maximum". */
    if (default_len == -1) {
        *ptr_default_vq = ARM_MAX_VQ;
        return;
    }

    default_vq = default_len / 16;
    remainder = default_len % 16;

    /*
     * Note that the 512 max comes from include/uapi/asm/sve_context.h
     * and is the maximum architectural width of ZCR_ELx.LEN.
     */
    if (remainder || default_vq < 1 || default_vq > 512) {
        ARMCPU *cpu = ARM_CPU(obj);
        const char *which =
            (ptr_default_vq == &cpu->sve_default_vq ? "sve" : "sme");

        error_setg(errp, "cannot set %s-default-vector-length", which);
        if (remainder) {
            error_append_hint(errp, "Vector length not a multiple of 16\n");
        } else if (default_vq < 1) {
            error_append_hint(errp, "Vector length smaller than 16\n");
        } else {
            error_append_hint(errp, "Vector length larger than %d\n",
                              512 * 16);
        }
        return;
    }

    *ptr_default_vq = default_vq;
}
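
/*
 * Example (for illustration): the property value is in bytes, like the
 * kernel sysctl, so sve-default-vector-length=64 yields
 * default_vq = 64 / 16 = 4, i.e. a 512-bit default vector length.
 */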

static void cpu_arm_get_default_vec_len(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    uint32_t *ptr_default_vq = opaque;
    int32_t value = *ptr_default_vq * 16;

    visit_type_int32(v, name, &value, errp);
}
#endif

void aarch64_add_sve_properties(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t vq;

    object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);

    for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
        char name[8];
        snprintf(name, sizeof(name), "sve%d", vq * 128);
        object_property_add(obj, name, "bool", cpu_arm_get_vq,
                            cpu_arm_set_vq, NULL, &cpu->sve_vq);
    }

#ifdef CONFIG_USER_ONLY
    /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
    object_property_add(obj, "sve-default-vector-length", "int32",
                        cpu_arm_get_default_vec_len,
                        cpu_arm_set_default_vec_len, NULL,
                        &cpu->sve_default_vq);
#endif
}
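
/*
 * Illustrative command-line usage: "-cpu max,sve=on,sve256=on,sve512=on"
 * enables SVE with the 256-bit and 512-bit vector lengths; per the rules
 * in arm_cpu_sve_finalize(), the 128-bit power-of-two length is then
 * enabled implicitly as well.
 */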

void aarch64_add_sme_properties(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t vq;

    object_property_add_bool(obj, "sme", cpu_arm_get_sme, cpu_arm_set_sme);
    object_property_add_bool(obj, "sme_fa64", cpu_arm_get_sme_fa64,
                             cpu_arm_set_sme_fa64);

    for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
        char name[8];
        snprintf(name, sizeof(name), "sme%d", vq * 128);
        object_property_add(obj, name, "bool", cpu_arm_get_vq,
                            cpu_arm_set_vq, NULL, &cpu->sme_vq);
    }

#ifdef CONFIG_USER_ONLY
    /* Mirror linux /proc/sys/abi/sme_default_vector_length. */
    object_property_add(obj, "sme-default-vector-length", "int32",
                        cpu_arm_get_default_vec_len,
                        cpu_arm_set_default_vec_len, NULL,
                        &cpu->sme_default_vq);
#endif
}
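
/*
 * Note that the loop above registers only power-of-two lengths (sme128,
 * sme256, sme512, ...), matching the architectural requirement that SME
 * vector lengths be powers of two. An illustrative command line:
 * "-cpu max,sme=on,sme512=on".
 */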

void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
    ARMPauthFeature features = cpu_isar_feature(pauth_feature, cpu);
    ARMISARegisters *isar = &cpu->isar;
    uint64_t isar1, isar2;

    /*
     * These properties enable or disable Pauth as a whole, or change
     * the pauth algorithm, but do not change the set of features that
     * are present. We have saved a copy of those features above and
     * will now place it into the field that chooses the algorithm.
     *
     * Begin by disabling all fields.
     */
    isar1 = GET_IDREG(isar, ID_AA64ISAR1);
    isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, 0);
    isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 0);
    isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, 0);
    isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 0);

    isar2 = GET_IDREG(isar, ID_AA64ISAR2);
    isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, 0);
    isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 0);

    if (kvm_enabled() || hvf_enabled()) {
        /*
         * If PAuth is enabled, exit early; otherwise fall through and
         * leave it disabled. The algorithm selection properties are
         * not present.
         */
        if (cpu->prop_pauth) {
            if (features == 0) {
                error_setg(errp, "'pauth' feature not supported by "
                           "%s on this host", current_accel_name());
            }
            return;
        }
    } else {
        /* Pauth properties are only present when the model supports it. */
        if (features == 0) {
            assert(!cpu->prop_pauth);
            return;
        }

        if (cpu->prop_pauth) {
            if ((cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) ||
                (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma5) ||
                (cpu->prop_pauth_qarma3 && cpu->prop_pauth_qarma5)) {
                error_setg(errp,
                           "cannot enable pauth-impdef, pauth-qarma3 and "
                           "pauth-qarma5 at the same time");
                return;
            }

            bool use_default = !cpu->prop_pauth_qarma5 &&
                               !cpu->prop_pauth_qarma3 &&
                               !cpu->prop_pauth_impdef;

            if (cpu->prop_pauth_qarma5 ||
                (use_default &&
                 cpu->backcompat_pauth_default_use_qarma5)) {
                isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
                isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
            } else if (cpu->prop_pauth_qarma3) {
                isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, features);
                isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 1);
            } else if (cpu->prop_pauth_impdef ||
                       (use_default &&
                        !cpu->backcompat_pauth_default_use_qarma5)) {
                isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
                isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
            } else {
                g_assert_not_reached();
            }
        } else if (cpu->prop_pauth_impdef ||
                   cpu->prop_pauth_qarma3 ||
                   cpu->prop_pauth_qarma5) {
            error_setg(errp, "cannot enable pauth-impdef, pauth-qarma3 or "
                       "pauth-qarma5 without pauth");
            error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
        }
    }

    SET_IDREG(isar, ID_AA64ISAR1, isar1);
    SET_IDREG(isar, ID_AA64ISAR2, isar2);
}

static const Property arm_cpu_pauth_property =
    DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
static const Property arm_cpu_pauth_impdef_property =
    DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
static const Property arm_cpu_pauth_qarma3_property =
    DEFINE_PROP_BOOL("pauth-qarma3", ARMCPU, prop_pauth_qarma3, false);
static const Property arm_cpu_pauth_qarma5_property =
    DEFINE_PROP_BOOL("pauth-qarma5", ARMCPU, prop_pauth_qarma5, false);

void aarch64_add_pauth_properties(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* Default to PAUTH on, with the architected algorithm on TCG. */
    qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
    if (kvm_enabled() || hvf_enabled()) {
        /*
         * Mirror PAuth support from the probed sysregs back into the
         * property for KVM or hvf. Is it just a bit backward? Yes it is!
         * Note that prop_pauth is true whether the host CPU supports the
         * architected QARMA5 algorithm or the IMPDEF one. We don't
         * provide the separate pauth-impdef property for KVM or hvf,
         * only for TCG.
         */
        cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu);
    } else {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma3_property);
        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma5_property);
    }
}
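
/*
 * Illustrative TCG usage: "-cpu max,pauth=on,pauth-qarma3=on" selects
 * the QARMA3 algorithm (APA3/GPA3 in ID_AA64ISAR2), while plain
 * "pauth=on" picks QARMA5 or the IMPDEF algorithm depending on
 * backcompat_pauth_default_use_qarma5.
 */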

void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
{
    uint64_t t;

    /*
     * We only install the property for tcg -cpu max; this is the
     * only situation in which the cpu field can be true.
     */
    if (!cpu->prop_lpa2) {
        return;
    }

    t = GET_IDREG(&cpu->isar, ID_AA64MMFR0);
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2);   /* 16k pages w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1);    /*  4k pages w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3);  /*  4k stage2 w/ LPA2 */
    SET_IDREG(&cpu->isar, ID_AA64MMFR0, t);
}
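
/*
 * For reference: in ID_AA64MMFR0, TGRAN4 == 1 and TGRAN16 == 2 are the
 * field values that advertise 52-bit address (FEAT_LPA2) support for the
 * 4k and 16k granules at stage 1, and the corresponding *_2 fields
 * advertise the same for stage 2 translation.
 */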

static void aarch64_a57_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMISARegisters *isar = &cpu->isar;

    cpu->dtb_compatible = "arm,cortex-a57";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
    cpu->midr = 0x411fd070;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    SET_IDREG(isar, ID_PFR0, 0x00000131);
    SET_IDREG(isar, ID_PFR1, 0x00011011);
    SET_IDREG(isar, ID_DFR0, 0x03010066);
    SET_IDREG(isar, ID_AFR0, 0x00000000);
    SET_IDREG(isar, ID_MMFR0, 0x10101105);
    SET_IDREG(isar, ID_MMFR1, 0x40000000);
    SET_IDREG(isar, ID_MMFR2, 0x01260000);
    SET_IDREG(isar, ID_MMFR3, 0x02102211);
    SET_IDREG(isar, ID_ISAR0, 0x02101110);
    SET_IDREG(isar, ID_ISAR1, 0x13112111);
    SET_IDREG(isar, ID_ISAR2, 0x21232042);
    SET_IDREG(isar, ID_ISAR3, 0x01112131);
    SET_IDREG(isar, ID_ISAR4, 0x00011142);
    SET_IDREG(isar, ID_ISAR5, 0x00011121);
    SET_IDREG(isar, ID_ISAR6, 0);
    SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
    SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
    SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
    SET_IDREG(isar, ID_AA64MMFR0, 0x00001124);
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->isar.dbgdevid = 0x01110f13;
    cpu->isar.dbgdevid1 = 0x2;
    cpu->isar.reset_pmcr_el0 = 0x41013000;
    SET_IDREG(isar, CLIDR, 0x0a200023);
    /* 32KB L1 dcache */
    cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
    /* 48KB L1 icache */
    cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 3, 64, 48 * KiB, 2);
    /* 2048KB L2 cache */
    cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 2 * MiB, 7);
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}

static void aarch64_a53_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMISARegisters *isar = &cpu->isar;

    cpu->dtb_compatible = "arm,cortex-a53";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
    cpu->midr = 0x410fd034;
    cpu->revidr = 0x00000100;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->reset_sctlr = 0x00c50838;
    SET_IDREG(isar, ID_PFR0, 0x00000131);
    SET_IDREG(isar, ID_PFR1, 0x00011011);
    SET_IDREG(isar, ID_DFR0, 0x03010066);
    SET_IDREG(isar, ID_AFR0, 0x00000000);
    SET_IDREG(isar, ID_MMFR0, 0x10101105);
    SET_IDREG(isar, ID_MMFR1, 0x40000000);
    SET_IDREG(isar, ID_MMFR2, 0x01260000);
    SET_IDREG(isar, ID_MMFR3, 0x02102211);
    SET_IDREG(isar, ID_ISAR0, 0x02101110);
    SET_IDREG(isar, ID_ISAR1, 0x13112111);
    SET_IDREG(isar, ID_ISAR2, 0x21232042);
    SET_IDREG(isar, ID_ISAR3, 0x01112131);
    SET_IDREG(isar, ID_ISAR4, 0x00011142);
    SET_IDREG(isar, ID_ISAR5, 0x00011121);
    SET_IDREG(isar, ID_ISAR6, 0);
    SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
    SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
    SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
    SET_IDREG(isar, ID_AA64MMFR0, 0x00001122); /* 40 bit physical addr */
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->isar.dbgdevid = 0x00110f13;
    cpu->isar.dbgdevid1 = 0x1;
    cpu->isar.reset_pmcr_el0 = 0x41033000;
    SET_IDREG(isar, CLIDR, 0x0a200023);
    /* 32KB L1 dcache */
    cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
    /* 32KB L1 icache */
    cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 1, 64, 32 * KiB, 2);
    /* 1024KB L2 cache */
    cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 1 * MiB, 7);
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}

static void aarch64_host_initfn(Object *obj)
{
#if defined(CONFIG_KVM)
    ARMCPU *cpu = ARM_CPU(obj);
    kvm_arm_set_cpu_features_from_host(cpu);
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        aarch64_add_sve_properties(obj);
        aarch64_add_pauth_properties(obj);
    }
#elif defined(CONFIG_HVF)
    ARMCPU *cpu = ARM_CPU(obj);
    hvf_arm_set_cpu_features_from_host(cpu);
    aarch64_add_pauth_properties(obj);
#else
    g_assert_not_reached();
#endif
}

static void aarch64_max_initfn(Object *obj)
{
    if (kvm_enabled() || hvf_enabled()) {
        /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
        aarch64_host_initfn(obj);
        return;
    }

    if (tcg_enabled() || qtest_enabled()) {
        aarch64_a57_initfn(obj);
    }

    /* '-cpu max' for TCG: we currently do this as "A57 with extra things" */
    if (tcg_enabled()) {
        aarch64_max_tcg_initfn(obj);
    }
}

static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a57",         .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53",         .initfn = aarch64_a53_initfn },
    { .name = "max",                .initfn = aarch64_max_initfn },
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    { .name = "host",               .initfn = aarch64_host_initfn },
#endif
};

static void aarch64_cpu_register_types(void)
{
    size_t i;

    for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
        arm_cpu_register(&aarch64_cpus[i]);
    }
}

type_init(aarch64_cpu_register_types)