xref: /openbmc/qemu/target/arm/cpu64.c (revision 033a4f15336646c5dbc07587a7924d71c12a9525)
1 /*
2  * QEMU AArch64 CPU
3  *
4  * Copyright (c) 2013 Linaro Ltd
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "cpu.h"
24 #ifdef CONFIG_TCG
25 #include "hw/core/tcg-cpu-ops.h"
26 #endif /* CONFIG_TCG */
27 #include "qemu/module.h"
28 #if !defined(CONFIG_USER_ONLY)
29 #include "hw/loader.h"
30 #endif
31 #include "sysemu/kvm.h"
32 #include "sysemu/hvf.h"
33 #include "kvm_arm.h"
34 #include "hvf_arm.h"
35 #include "qapi/visitor.h"
36 #include "hw/qdev-properties.h"
37 #include "internals.h"
38 
39 
/*
 * QOM instance-init for the Cortex-A57 CPU model: set the feature flags
 * and the reset values of the ID/config registers for this core.
 * The hex values are the A57's fixed ID-register contents — assumed to
 * match the Cortex-A57 TRM; confirm against the TRM before changing.
 */
static void aarch64_a57_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a57";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* When running under KVM, ask the kernel for its A57 target. */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
    cpu->midr = 0x411fd070;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    /* System registers shared by the A72/A57/A53 family (CBAR etc.). */
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
92 
/*
 * QOM instance-init for the Cortex-A53 CPU model: set the feature flags
 * and the reset values of the ID/config registers for this core.
 * The hex values are the A53's fixed ID-register contents — assumed to
 * match the Cortex-A53 TRM; confirm against the TRM before changing.
 */
static void aarch64_a53_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a53";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* When running under KVM, ask the kernel for its A53 target. */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
    cpu->midr = 0x410fd034;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    /* System registers shared by the A72/A57/A53 family (CBAR etc.). */
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
145 
146 static void aarch64_a72_initfn(Object *obj)
147 {
148     ARMCPU *cpu = ARM_CPU(obj);
149 
150     cpu->dtb_compatible = "arm,cortex-a72";
151     set_feature(&cpu->env, ARM_FEATURE_V8);
152     set_feature(&cpu->env, ARM_FEATURE_NEON);
153     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
154     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
155     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
156     set_feature(&cpu->env, ARM_FEATURE_EL2);
157     set_feature(&cpu->env, ARM_FEATURE_EL3);
158     set_feature(&cpu->env, ARM_FEATURE_PMU);
159     cpu->midr = 0x410fd083;
160     cpu->revidr = 0x00000000;
161     cpu->reset_fpsid = 0x41034080;
162     cpu->isar.mvfr0 = 0x10110222;
163     cpu->isar.mvfr1 = 0x12111111;
164     cpu->isar.mvfr2 = 0x00000043;
165     cpu->ctr = 0x8444c004;
166     cpu->reset_sctlr = 0x00c50838;
167     cpu->isar.id_pfr0 = 0x00000131;
168     cpu->isar.id_pfr1 = 0x00011011;
169     cpu->isar.id_dfr0 = 0x03010066;
170     cpu->id_afr0 = 0x00000000;
171     cpu->isar.id_mmfr0 = 0x10201105;
172     cpu->isar.id_mmfr1 = 0x40000000;
173     cpu->isar.id_mmfr2 = 0x01260000;
174     cpu->isar.id_mmfr3 = 0x02102211;
175     cpu->isar.id_isar0 = 0x02101110;
176     cpu->isar.id_isar1 = 0x13112111;
177     cpu->isar.id_isar2 = 0x21232042;
178     cpu->isar.id_isar3 = 0x01112131;
179     cpu->isar.id_isar4 = 0x00011142;
180     cpu->isar.id_isar5 = 0x00011121;
181     cpu->isar.id_aa64pfr0 = 0x00002222;
182     cpu->isar.id_aa64dfr0 = 0x10305106;
183     cpu->isar.id_aa64isar0 = 0x00011120;
184     cpu->isar.id_aa64mmfr0 = 0x00001124;
185     cpu->isar.dbgdidr = 0x3516d000;
186     cpu->clidr = 0x0a200023;
187     cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
188     cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
189     cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
190     cpu->dcz_blocksize = 4; /* 64 bytes */
191     cpu->gic_num_lrs = 4;
192     cpu->gic_vpribits = 5;
193     cpu->gic_vprebits = 5;
194     define_cortex_a72_a57_a53_cp_reginfo(cpu);
195 }
196 
/*
 * Reconcile the user-supplied sve<N>, sve-max-vq and sve properties into
 * the final cpu->sve_vq_map and cpu->sve_max_vq, reporting any conflicting
 * combination via errp.  Called at realize time.
 */
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
    /*
     * If any vector lengths are explicitly enabled with sve<N> properties,
     * then all other lengths are implicitly disabled.  If sve-max-vq is
     * specified then it is the same as explicitly enabling all lengths
     * up to and including the specified maximum, which means all larger
     * lengths will be implicitly disabled.  If no sve<N> properties
     * are enabled and sve-max-vq is not specified, then all lengths not
     * explicitly disabled will be enabled.  Additionally, all power-of-two
     * vector lengths less than the maximum enabled length will be
     * automatically enabled and all vector lengths larger than the largest
     * disabled power-of-two vector length will be automatically disabled.
     * Errors are generated if the user provided input that interferes with
     * any of the above.  Finally, if SVE is not disabled, then at least one
     * vector length must be enabled.
     */
    DECLARE_BITMAP(tmp, ARM_MAX_VQ);
    uint32_t vq, max_vq = 0;

    /*
     * CPU models specify a set of supported vector lengths which are
     * enabled by default.  Attempting to enable any vector length not set
     * in the supported bitmap results in an error.  When KVM is enabled we
     * fetch the supported bitmap from the host.
     */
    if (kvm_enabled() && kvm_arm_sve_supported()) {
        kvm_arm_sve_get_vls(CPU(cpu), cpu->sve_vq_supported);
    } else if (kvm_enabled()) {
        /* KVM without host SVE support must not advertise SVE at all. */
        assert(!cpu_isar_feature(aa64_sve, cpu));
    }

    /*
     * Process explicit sve<N> properties.
     * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
     * Check first for any sve<N> enabled.
     */
    if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
        max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;

        if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
            error_setg(errp, "cannot enable sve%d", max_vq * 128);
            error_append_hint(errp, "sve%d is larger than the maximum vector "
                              "length, sve-max-vq=%d (%d bits)\n",
                              max_vq * 128, cpu->sve_max_vq,
                              cpu->sve_max_vq * 128);
            return;
        }

        if (kvm_enabled()) {
            /*
             * For KVM we have to automatically enable all supported
             * uninitialized lengths, even when the smaller lengths are
             * not all powers-of-two.
             */
            bitmap_andnot(tmp, cpu->sve_vq_supported, cpu->sve_vq_init, max_vq);
            bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
        } else {
            /* Propagate enabled bits down through required powers-of-two. */
            for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                if (!test_bit(vq - 1, cpu->sve_vq_init)) {
                    set_bit(vq - 1, cpu->sve_vq_map);
                }
            }
        }
    } else if (cpu->sve_max_vq == 0) {
        /*
         * No explicit bits enabled, and no implicit bits from sve-max-vq.
         */
        if (!cpu_isar_feature(aa64_sve, cpu)) {
            /* SVE is disabled and so are all vector lengths.  Good. */
            return;
        }

        if (kvm_enabled()) {
            /* Disabling a supported length disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
                if (test_bit(vq - 1, cpu->sve_vq_init) &&
                    test_bit(vq - 1, cpu->sve_vq_supported)) {
                    break;
                }
            }
        } else {
            /* Disabling a power-of-two disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
                if (test_bit(vq - 1, cpu->sve_vq_init)) {
                    break;
                }
            }
        }

        /*
         * vq is now the first relevant explicitly-disabled length, or
         * ARM_MAX_VQ + <step> if none was found; bound max_vq below it.
         */
        max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
        bitmap_andnot(cpu->sve_vq_map, cpu->sve_vq_supported,
                      cpu->sve_vq_init, max_vq);
        if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
            error_setg(errp, "cannot disable sve%d", vq * 128);
            error_append_hint(errp, "Disabling sve%d results in all "
                              "vector lengths being disabled.\n",
                              vq * 128);
            error_append_hint(errp, "With SVE enabled, at least one "
                              "vector length must be enabled.\n");
            return;
        }

        max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
    }

    /*
     * Process the sve-max-vq property.
     * Note that we know from the above that no bit above
     * sve-max-vq is currently set.
     */
    if (cpu->sve_max_vq != 0) {
        max_vq = cpu->sve_max_vq;

        if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
            test_bit(max_vq - 1, cpu->sve_vq_init)) {
            error_setg(errp, "cannot disable sve%d", max_vq * 128);
            error_append_hint(errp, "The maximum vector length must be "
                              "enabled, sve-max-vq=%d (%d bits)\n",
                              max_vq, max_vq * 128);
            return;
        }

        /* Set all bits not explicitly set within sve-max-vq. */
        bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
        bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
    }

    /*
     * We should know what max-vq is now.  Also, as we're done
     * manipulating sve-vq-map, we ensure any bits above max-vq
     * are clear, just in case anybody looks.
     */
    assert(max_vq != 0);
    bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);

    /* Ensure the set of lengths matches what is supported. */
    bitmap_xor(tmp, cpu->sve_vq_map, cpu->sve_vq_supported, max_vq);
    if (!bitmap_empty(tmp, max_vq)) {
        vq = find_last_bit(tmp, max_vq) + 1;
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            /* Largest mismatch is an enabled-but-unsupported length. */
            if (cpu->sve_max_vq) {
                error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
                error_append_hint(errp, "It may not be possible to use "
                                  "sve-max-vq with this CPU. Try "
                                  "using only sve<N> properties.\n");
            } else {
                error_setg(errp, "cannot enable sve%d", vq * 128);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
            }
            return;
        } else {
            /* Largest mismatch is a supported-but-disabled length. */
            if (kvm_enabled()) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "The KVM host requires all "
                                  "supported vector lengths smaller "
                                  "than %d bits to also be enabled.\n",
                                  max_vq * 128);
                return;
            } else {
                /* Ensure all required powers-of-two are enabled. */
                for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                    if (!test_bit(vq - 1, cpu->sve_vq_map)) {
                        error_setg(errp, "cannot disable sve%d", vq * 128);
                        error_append_hint(errp, "sve%d is required as it "
                                          "is a power-of-two length smaller "
                                          "than the maximum, sve%d\n",
                                          vq * 128, max_vq * 128);
                        return;
                    }
                }
            }
        }
    }

    /*
     * Now that we validated all our vector lengths, the only question
     * left to answer is if we even want SVE at all.
     */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        error_setg(errp, "cannot enable sve%d", max_vq * 128);
        error_append_hint(errp, "SVE must be enabled to enable vector "
                          "lengths.\n");
        error_append_hint(errp, "Add sve=on to the CPU property list.\n");
        return;
    }

    /* From now on sve_max_vq is the actual maximum supported length. */
    cpu->sve_max_vq = max_vq;
}
390 
391 static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
392                                    void *opaque, Error **errp)
393 {
394     ARMCPU *cpu = ARM_CPU(obj);
395     uint32_t value;
396 
397     /* All vector lengths are disabled when SVE is off. */
398     if (!cpu_isar_feature(aa64_sve, cpu)) {
399         value = 0;
400     } else {
401         value = cpu->sve_max_vq;
402     }
403     visit_type_uint32(v, name, &value, errp);
404 }
405 
406 static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
407                                    void *opaque, Error **errp)
408 {
409     ARMCPU *cpu = ARM_CPU(obj);
410     uint32_t max_vq;
411 
412     if (!visit_type_uint32(v, name, &max_vq, errp)) {
413         return;
414     }
415 
416     if (kvm_enabled() && !kvm_arm_sve_supported()) {
417         error_setg(errp, "cannot set sve-max-vq");
418         error_append_hint(errp, "SVE not supported by KVM on this host\n");
419         return;
420     }
421 
422     if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
423         error_setg(errp, "unsupported SVE vector length");
424         error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
425                           ARM_MAX_VQ);
426         return;
427     }
428 
429     cpu->sve_max_vq = max_vq;
430 }
431 
432 /*
433  * Note that cpu_arm_get/set_sve_vq cannot use the simpler
434  * object_property_add_bool interface because they make use
435  * of the contents of "name" to determine which bit on which
436  * to operate.
437  */
438 static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
439                                void *opaque, Error **errp)
440 {
441     ARMCPU *cpu = ARM_CPU(obj);
442     uint32_t vq = atoi(&name[3]) / 128;
443     bool value;
444 
445     /* All vector lengths are disabled when SVE is off. */
446     if (!cpu_isar_feature(aa64_sve, cpu)) {
447         value = false;
448     } else {
449         value = test_bit(vq - 1, cpu->sve_vq_map);
450     }
451     visit_type_bool(v, name, &value, errp);
452 }
453 
454 static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
455                                void *opaque, Error **errp)
456 {
457     ARMCPU *cpu = ARM_CPU(obj);
458     uint32_t vq = atoi(&name[3]) / 128;
459     bool value;
460 
461     if (!visit_type_bool(v, name, &value, errp)) {
462         return;
463     }
464 
465     if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
466         error_setg(errp, "cannot enable %s", name);
467         error_append_hint(errp, "SVE not supported by KVM on this host\n");
468         return;
469     }
470 
471     if (value) {
472         set_bit(vq - 1, cpu->sve_vq_map);
473     } else {
474         clear_bit(vq - 1, cpu->sve_vq_map);
475     }
476     set_bit(vq - 1, cpu->sve_vq_init);
477 }
478 
479 static bool cpu_arm_get_sve(Object *obj, Error **errp)
480 {
481     ARMCPU *cpu = ARM_CPU(obj);
482     return cpu_isar_feature(aa64_sve, cpu);
483 }
484 
485 static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
486 {
487     ARMCPU *cpu = ARM_CPU(obj);
488     uint64_t t;
489 
490     if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
491         error_setg(errp, "'sve' feature not supported by KVM on this host");
492         return;
493     }
494 
495     t = cpu->isar.id_aa64pfr0;
496     t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
497     cpu->isar.id_aa64pfr0 = t;
498 }
499 
500 #ifdef CONFIG_USER_ONLY
501 /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
/*
 * QOM setter for "sve-default-vector-length" (user-mode only).
 * The visited value is a vector length in bytes (one vq == 16 bytes),
 * mirroring the Linux sysctl; it is converted to quadwords and stored
 * in cpu->sve_default_vq.
 */
static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
                                            const char *name, void *opaque,
                                            Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    int32_t default_len, default_vq, remainder;

    if (!visit_type_int32(v, name, &default_len, errp)) {
        return;
    }

    /* Undocumented, but the kernel allows -1 to indicate "maximum". */
    if (default_len == -1) {
        cpu->sve_default_vq = ARM_MAX_VQ;
        return;
    }

    default_vq = default_len / 16;
    remainder = default_len % 16;

    /*
     * Note that the 512 max comes from include/uapi/asm/sve_context.h
     * and is the maximum architectural width of ZCR_ELx.LEN.
     */
    if (remainder || default_vq < 1 || default_vq > 512) {
        error_setg(errp, "cannot set sve-default-vector-length");
        /* Pick the most specific hint for the failing condition. */
        if (remainder) {
            error_append_hint(errp, "Vector length not a multiple of 16\n");
        } else if (default_vq < 1) {
            error_append_hint(errp, "Vector length smaller than 16\n");
        } else {
            error_append_hint(errp, "Vector length larger than %d\n",
                              512 * 16);
        }
        return;
    }

    cpu->sve_default_vq = default_vq;
}
541 
542 static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,
543                                             const char *name, void *opaque,
544                                             Error **errp)
545 {
546     ARMCPU *cpu = ARM_CPU(obj);
547     int32_t value = cpu->sve_default_vq * 16;
548 
549     visit_type_int32(v, name, &value, errp);
550 }
551 #endif
552 
553 void aarch64_add_sve_properties(Object *obj)
554 {
555     uint32_t vq;
556 
557     object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
558 
559     for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
560         char name[8];
561         sprintf(name, "sve%d", vq * 128);
562         object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
563                             cpu_arm_set_sve_vq, NULL, NULL);
564     }
565 
566 #ifdef CONFIG_USER_ONLY
567     /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
568     object_property_add(obj, "sve-default-vector-length", "int32",
569                         cpu_arm_get_sve_default_vec_len,
570                         cpu_arm_set_sve_default_vec_len, NULL, NULL);
571 #endif
572 }
573 
/*
 * Validate the "pauth"/"pauth-impdef" properties and fold them into the
 * ID_AA64ISAR1 {APA, GPA, API, GPI} fields.  Sets errp on an invalid
 * combination.  For KVM/hvf the host's PAuth support is authoritative.
 */
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
    int arch_val = 0, impdef_val = 0;
    uint64_t t;

    /* Exit early if PAuth is enabled, and fall through to disable it */
    if ((kvm_enabled() || hvf_enabled()) && cpu->prop_pauth) {
        if (!cpu_isar_feature(aa64_pauth, cpu)) {
            error_setg(errp, "'pauth' feature not supported by %s on this host",
                       kvm_enabled() ? "KVM" : "hvf");
        }

        return;
    }

    /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
    if (cpu->prop_pauth) {
        if (cpu->prop_pauth_impdef) {
            impdef_val = 1;
        } else {
            arch_val = 1;
        }
    } else if (cpu->prop_pauth_impdef) {
        /* pauth-impdef only picks an algorithm; it requires pauth itself. */
        error_setg(errp, "cannot enable pauth-impdef without pauth");
        error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
    }

    /*
     * APA/GPA advertise the architected algorithm, API/GPI the IMPDEF
     * one.  On the error path above both values are still 0, so the
     * fields are simply cleared.
     */
    t = cpu->isar.id_aa64isar1;
    t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
    cpu->isar.id_aa64isar1 = t;
}
608 
/*
 * "pauth" enables pointer authentication (default on); "pauth-impdef"
 * selects the IMPDEF algorithm variant over the architected one and is
 * only registered for TCG (see aarch64_add_pauth_properties).
 */
static Property arm_cpu_pauth_property =
    DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
static Property arm_cpu_pauth_impdef_property =
    DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
613 
/*
 * Register the pointer-authentication QOM properties on a CPU object.
 * Under KVM/hvf the property value is seeded from the probed host
 * features; under TCG the extra "pauth-impdef" knob is also exposed.
 */
void aarch64_add_pauth_properties(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* Default to PAUTH on, with the architected algorithm on TCG. */
    qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
    if (kvm_enabled() || hvf_enabled()) {
        /*
         * Mirror PAuth support from the probed sysregs back into the
         * property for KVM or hvf. Is it just a bit backward? Yes it is!
         * Note that prop_pauth is true whether the host CPU supports the
         * architected QARMA5 algorithm or the IMPDEF one. We don't
         * provide the separate pauth-impdef property for KVM or hvf,
         * only for TCG.
         */
        cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu);
    } else {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
    }
}
634 
/*
 * "lpa2" (default on) gates the LPA2 page-table features applied in
 * arm_cpu_lpa2_finalize; only installed for TCG '-cpu max'.
 */
static Property arm_cpu_lpa2_property =
    DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true);
637 
/*
 * Apply the "lpa2" property: upgrade the ID_AA64MMFR0 TGRAN fields to
 * advertise LPA2 support for 4k/16k granules at stage 1 and stage 2.
 * No-op when the property is absent or set to off.
 */
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
{
    uint64_t t;

    /*
     * We only install the property for tcg -cpu max; this is the
     * only situation in which the prop_lpa2 field can be true.
     */
    if (!cpu->prop_lpa2) {
        return;
    }

    t = cpu->isar.id_aa64mmfr0;
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2);   /* 16k pages w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1);    /*  4k pages w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3);  /*  4k stage2 w/ LPA2 */
    cpu->isar.id_aa64mmfr0 = t;
}
657 
/*
 * Instance-init for the 'host' CPU model: probe the accelerator (KVM or
 * hvf) for the host CPU's features.  Reaching this with neither
 * accelerator built in is a bug, hence the assertion.
 */
static void aarch64_host_initfn(Object *obj)
{
#if defined(CONFIG_KVM)
    ARMCPU *cpu = ARM_CPU(obj);
    kvm_arm_set_cpu_features_from_host(cpu);
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        /* SVE and PAuth properties only make sense on an AArch64 host. */
        aarch64_add_sve_properties(obj);
        aarch64_add_pauth_properties(obj);
    }
#elif defined(CONFIG_HVF)
    ARMCPU *cpu = ARM_CPU(obj);
    hvf_arm_set_cpu_features_from_host(cpu);
    aarch64_add_pauth_properties(obj);
#else
    g_assert_not_reached();
#endif
}
675 
676 /* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
677  * otherwise, a CPU with as many features enabled as our emulation supports.
678  * The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
679  * this only needs to handle 64 bits.
680  */
681 static void aarch64_max_initfn(Object *obj)
682 {
683     ARMCPU *cpu = ARM_CPU(obj);
684     uint64_t t;
685 
686     if (kvm_enabled() || hvf_enabled()) {
687         /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
688         aarch64_host_initfn(obj);
689         return;
690     }
691 
692     /* '-cpu max' for TCG: we currently do this as "A57 with extra things" */
693 
694     aarch64_a57_initfn(obj);
695 
696     /*
697      * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
698      * one and try to apply errata workarounds or use impdef features we
699      * don't provide.
700      * An IMPLEMENTER field of 0 means "reserved for software use";
701      * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
702      * to see which features are present";
703      * the VARIANT, PARTNUM and REVISION fields are all implementation
704      * defined and we choose to define PARTNUM just in case guest
705      * code needs to distinguish this QEMU CPU from other software
706      * implementations, though this shouldn't be needed.
707      */
708     t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
709     t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
710     t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
711     t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
712     t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
713     cpu->midr = t;
714 
715     t = cpu->isar.id_aa64isar0;
716     t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2);      /* FEAT_PMULL */
717     t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);     /* FEAT_SHA1 */
718     t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2);     /* FEAT_SHA512 */
719     t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
720     t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);   /* FEAT_LSE */
721     t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);      /* FEAT_RDM */
722     t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);     /* FEAT_SHA3 */
723     t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);      /* FEAT_SM3 */
724     t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);      /* FEAT_SM4 */
725     t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);       /* FEAT_DotProd */
726     t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);      /* FEAT_FHM */
727     t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2);       /* FEAT_FlagM2 */
728     t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2);      /* FEAT_TLBIRANGE */
729     t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);     /* FEAT_RNG */
730     cpu->isar.id_aa64isar0 = t;
731 
732     t = cpu->isar.id_aa64isar1;
733     t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);      /* FEAT_DPB2 */
734     t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);    /* FEAT_JSCVT */
735     t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);     /* FEAT_FCMA */
736     t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2);    /* FEAT_LRCPC2 */
737     t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);  /* FEAT_FRINTTS */
738     t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);       /* FEAT_SB */
739     t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);  /* FEAT_SPECRES */
740     t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);     /* FEAT_BF16 */
741     t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);     /* FEAT_I8MM */
742     cpu->isar.id_aa64isar1 = t;
743 
744     t = cpu->isar.id_aa64pfr0;
745     t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);        /* FEAT_FP16 */
746     t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);   /* FEAT_FP16 */
747     t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
748     t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);      /* FEAT_SEL2 */
749     t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);       /* FEAT_DIT */
750     cpu->isar.id_aa64pfr0 = t;
751 
752     t = cpu->isar.id_aa64pfr1;
753     t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);        /* FEAT_BTI */
754     t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);      /* FEAT_SSBS2 */
755     /*
756      * Begin with full support for MTE. This will be downgraded to MTE=0
757      * during realize if the board provides no tag memory, much like
758      * we do for EL2 with the virtualization=on property.
759      */
760     t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);       /* FEAT_MTE3 */
761     cpu->isar.id_aa64pfr1 = t;
762 
763     t = cpu->isar.id_aa64mmfr0;
764     t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
765     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1);   /* 16k pages supported */
766     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */
767     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */
768     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2);  /*  4k stage2 supported */
769     cpu->isar.id_aa64mmfr0 = t;
770 
771     t = cpu->isar.id_aa64mmfr1;
772     t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
773     t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);       /* FEAT_VHE */
774     t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1);     /* FEAT_HPDS */
775     t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);       /* FEAT_LOR */
776     t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2);      /* FEAT_PAN2 */
777     t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1);      /* FEAT_XNX */
778     cpu->isar.id_aa64mmfr1 = t;
779 
780     t = cpu->isar.id_aa64mmfr2;
781     t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1);      /* FEAT_TTCNP */
782     t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);      /* FEAT_UAO */
783     t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1);  /* FEAT_LVA */
784     t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1);       /* FEAT_TTST */
785     t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1);      /* FEAT_TTL */
786     t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2);      /* FEAT_BBM at level 2 */
787     cpu->isar.id_aa64mmfr2 = t;
788 
789     t = cpu->isar.id_aa64zfr0;
790     t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
791     t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);       /* FEAT_SVE_PMULL128 */
792     t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);   /* FEAT_SVE_BitPerm */
793     t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);  /* FEAT_BF16 */
794     t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);      /* FEAT_SVE_SHA3 */
795     t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);       /* FEAT_SVE_SM4 */
796     t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);      /* FEAT_I8MM */
797     t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);     /* FEAT_F32MM */
798     t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);     /* FEAT_F64MM */
799     cpu->isar.id_aa64zfr0 = t;
800 
801     t = cpu->isar.id_aa64dfr0;
802     t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 8);  /* FEAT_Debugv8p2 */
803     t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5);    /* FEAT_PMUv3p4 */
804     cpu->isar.id_aa64dfr0 = t;
805 
806     /* Replicate the same data to the 32-bit id registers.  */
807     aa32_max_features(cpu);
808 
809 #ifdef CONFIG_USER_ONLY
810     /*
811      * For usermode -cpu max we can use a larger and more efficient DCZ
812      * blocksize since we don't have to follow what the hardware does.
813      */
814     cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
815     cpu->dcz_blocksize = 7; /*  512 bytes */
816 #endif
817 
818     bitmap_fill(cpu->sve_vq_supported, ARM_MAX_VQ);
819 
820     aarch64_add_pauth_properties(obj);
821     aarch64_add_sve_properties(obj);
822     object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
823                         cpu_max_set_sve_max_vq, NULL, NULL);
824     qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
825 }
826 
/*
 * Instance init for the Fujitsu A64FX CPU model.
 *
 * Sets the v8-A feature bits and then loads the ID/cache registers with
 * raw values — presumably taken from real A64FX hardware; confirm against
 * the A64FX specification if touching these.
 */
static void aarch64_a64fx_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,a64fx";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* Implementer field 0x46 is ASCII 'F' — Fujitsu. */
    cpu->midr = 0x461f0010;
    cpu->revidr = 0x00000000;
    cpu->ctr = 0x86668006;
    cpu->reset_sctlr = 0x30000180;
    cpu->isar.id_aa64pfr0 =   0x0000000101111111; /* No RAS Extensions */
    cpu->isar.id_aa64pfr1 = 0x0000000000000000;
    cpu->isar.id_aa64dfr0 = 0x0000000010305408;
    cpu->isar.id_aa64dfr1 = 0x0000000000000000;
    cpu->id_aa64afr0 = 0x0000000000000000;
    cpu->id_aa64afr1 = 0x0000000000000000;
    cpu->isar.id_aa64mmfr0 = 0x0000000000001122;
    cpu->isar.id_aa64mmfr1 = 0x0000000011212100;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011;
    cpu->isar.id_aa64isar0 = 0x0000000010211120;
    cpu->isar.id_aa64isar1 = 0x0000000000010001;
    cpu->isar.id_aa64zfr0 = 0x0000000000000000;
    cpu->clidr = 0x0000000080000023;
    cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */
    cpu->dcz_blocksize = 6; /* 256 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;

    /*
     * A64FX supports only 128-, 256- and 512-bit SVE vector lengths.
     * Bit N of sve_vq_supported enables vector length (N + 1) * 128 bits.
     */
    aarch64_add_sve_properties(obj);
    bitmap_zero(cpu->sve_vq_supported, ARM_MAX_VQ);
    set_bit(0, cpu->sve_vq_supported); /* 128bit */
    set_bit(1, cpu->sve_vq_supported); /* 256bit */
    set_bit(3, cpu->sve_vq_supported); /* 512bit */

    /* TODO:  Add A64FX specific HPC extension registers */
}
873 
/*
 * Table of AArch64 CPU models registered by this file.  Each entry is
 * turned into a QOM type named "<name>-arm-cpu" by aarch64_cpu_register()
 * via aarch64_cpu_register_types() below.
 */
static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a57",         .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53",         .initfn = aarch64_a53_initfn },
    { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
    { .name = "a64fx",              .initfn = aarch64_a64fx_initfn },
    { .name = "max",                .initfn = aarch64_max_initfn },
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    /* "host" mirrors the host CPU; only meaningful with an accelerator. */
    { .name = "host",               .initfn = aarch64_host_initfn },
#endif
};
884 
885 static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
886 {
887     ARMCPU *cpu = ARM_CPU(obj);
888 
889     return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
890 }
891 
892 static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
893 {
894     ARMCPU *cpu = ARM_CPU(obj);
895 
896     /* At this time, this property is only allowed if KVM is enabled.  This
897      * restriction allows us to avoid fixing up functionality that assumes a
898      * uniform execution state like do_interrupt.
899      */
900     if (value == false) {
901         if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
902             error_setg(errp, "'aarch64' feature cannot be disabled "
903                              "unless KVM is enabled and 32-bit EL1 "
904                              "is supported");
905             return;
906         }
907         unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
908     } else {
909         set_feature(&cpu->env, ARM_FEATURE_AARCH64);
910     }
911 }
912 
/* Instance finalizer: AArch64 CPUs allocate nothing beyond the base class,
 * so there is nothing to release here. */
static void aarch64_cpu_finalizefn(Object *obj)
{
}
916 
/* gdbstub hook: architecture name reported to GDB.  Caller frees the
 * returned string. */
static gchar *aarch64_gdb_arch_name(CPUState *cs)
{
    return g_strdup("aarch64");
}
921 
922 static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
923 {
924     CPUClass *cc = CPU_CLASS(oc);
925 
926     cc->gdb_read_register = aarch64_cpu_gdb_read_register;
927     cc->gdb_write_register = aarch64_cpu_gdb_write_register;
928     cc->gdb_num_core_regs = 34;
929     cc->gdb_core_xml_file = "aarch64-core.xml";
930     cc->gdb_arch_name = aarch64_gdb_arch_name;
931 
932     object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
933                                    aarch64_cpu_set_aarch64);
934     object_class_property_set_description(oc, "aarch64",
935                                           "Set on/off to enable/disable aarch64 "
936                                           "execution state ");
937 }
938 
939 static void aarch64_cpu_instance_init(Object *obj)
940 {
941     ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
942 
943     acc->info->initfn(obj);
944     arm_cpu_post_init(obj);
945 }
946 
947 static void cpu_register_class_init(ObjectClass *oc, void *data)
948 {
949     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
950 
951     acc->info = data;
952 }
953 
954 void aarch64_cpu_register(const ARMCPUInfo *info)
955 {
956     TypeInfo type_info = {
957         .parent = TYPE_AARCH64_CPU,
958         .instance_size = sizeof(ARMCPU),
959         .instance_init = aarch64_cpu_instance_init,
960         .class_size = sizeof(ARMCPUClass),
961         .class_init = info->class_init ?: cpu_register_class_init,
962         .class_data = (void *)info,
963     };
964 
965     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
966     type_register(&type_info);
967     g_free((void *)type_info.name);
968 }
969 
/* Abstract base type shared by all AArch64 CPU models; concrete models
 * are registered against it by aarch64_cpu_register(). */
static const TypeInfo aarch64_cpu_type_info = {
    .name = TYPE_AARCH64_CPU,
    .parent = TYPE_ARM_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_finalize = aarch64_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(AArch64CPUClass),
    .class_init = aarch64_cpu_class_init,
};
979 
980 static void aarch64_cpu_register_types(void)
981 {
982     size_t i;
983 
984     type_register_static(&aarch64_cpu_type_info);
985 
986     for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
987         aarch64_cpu_register(&aarch64_cpus[i]);
988     }
989 }
990 
991 type_init(aarch64_cpu_register_types)
992