xref: /openbmc/qemu/target/arm/cpu64.c (revision cf7c6d1004eaaae85fd6156556e2f38ff493ef48)
1 /*
2  * QEMU AArch64 CPU
3  *
4  * Copyright (c) 2013 Linaro Ltd
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "cpu.h"
24 #ifdef CONFIG_TCG
25 #include "hw/core/tcg-cpu-ops.h"
26 #endif /* CONFIG_TCG */
27 #include "qemu/module.h"
28 #if !defined(CONFIG_USER_ONLY)
29 #include "hw/loader.h"
30 #endif
31 #include "sysemu/kvm.h"
32 #include "sysemu/hvf.h"
33 #include "kvm_arm.h"
34 #include "hvf_arm.h"
35 #include "qapi/visitor.h"
36 #include "hw/qdev-properties.h"
37 #include "cpregs.h"
38 
39 
40 #ifndef CONFIG_USER_ONLY
/*
 * Read handler for the IMPDEF L2CTLR register on A57/A53/A72.
 * Only the core-count field is modelled; all other bits read as zero.
 */
static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Number of cores is in [25:24]; otherwise we RAZ */
    return (cpu->core_count - 1) << 24;
}
48 #endif
49 
/*
 * Implementation-defined system registers shared by Cortex-A72/A57/A53.
 * L2CTLR reports the core count via a57_a53_l2ctlr_read and ignores
 * writes; everything else is a constant-zero (RAZ/WI-style) stub.
 * Each AArch64 *_EL1 register is paired with its AArch32 cp15 view.
 */
static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
    { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
      .writefn = arm_cp_write_ignore },
    { .name = "L2CTLR",
      .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
      .writefn = arm_cp_write_ignore },
#endif
    { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2ECTLR",
      .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR",
      .cp = 15, .opc1 = 0, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUECTLR",
      .cp = 15, .opc1 = 1, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUMERRSR",
      .cp = 15, .opc1 = 2, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2MERRSR",
      .cp = 15, .opc1 = 3, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
96 
/*
 * Instance init for the Cortex-A57 CPU model: set the feature bits and
 * the fixed ID-register, cache-geometry and GIC virtualization values
 * for this core.  (Values presumably mirror the Cortex-A57 TRM —
 * verify against the published manual before changing any of them.)
 */
static void aarch64_a57_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a57";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
    cpu->midr = 0x411fd070;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}
149 
/*
 * Instance init for the Cortex-A53 CPU model.  Structure mirrors
 * aarch64_a57_initfn; the ID-register values differ where the cores
 * differ (MIDR, CTR, AA64MMFR0 physical-address range, cache sizes).
 */
static void aarch64_a53_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a53";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
    cpu->midr = 0x410fd034;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}
202 
/*
 * Instance init for the Cortex-A72 CPU model.  Same structure as the
 * A57/A53 initfns; note it sets no kvm_target, so this model is only
 * usable under TCG.
 */
static void aarch64_a72_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a72";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x410fd083;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034080;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}
253 
/*
 * Reconcile the user-supplied SVE properties (sve=on/off, sve<N>=on/off,
 * sve-max-vq=N) into a final, validated cpu->sve_vq_map and sve_max_vq.
 * Sets *errp and returns early on any inconsistent combination.
 */
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
    /*
     * If any vector lengths are explicitly enabled with sve<N> properties,
     * then all other lengths are implicitly disabled.  If sve-max-vq is
     * specified then it is the same as explicitly enabling all lengths
     * up to and including the specified maximum, which means all larger
     * lengths will be implicitly disabled.  If no sve<N> properties
     * are enabled and sve-max-vq is not specified, then all lengths not
     * explicitly disabled will be enabled.  Additionally, all power-of-two
     * vector lengths less than the maximum enabled length will be
     * automatically enabled and all vector lengths larger than the largest
     * disabled power-of-two vector length will be automatically disabled.
     * Errors are generated if the user provided input that interferes with
     * any of the above.  Finally, if SVE is not disabled, then at least one
     * vector length must be enabled.
     */
    DECLARE_BITMAP(tmp, ARM_MAX_VQ);
    uint32_t vq, max_vq = 0;

    /*
     * CPU models specify a set of supported vector lengths which are
     * enabled by default.  Attempting to enable any vector length not set
     * in the supported bitmap results in an error.  When KVM is enabled we
     * fetch the supported bitmap from the host.
     */
    if (kvm_enabled() && kvm_arm_sve_supported()) {
        kvm_arm_sve_get_vls(CPU(cpu), cpu->sve_vq_supported);
    } else if (kvm_enabled()) {
        /* KVM without host SVE: the property code must not have enabled it. */
        assert(!cpu_isar_feature(aa64_sve, cpu));
    }

    /*
     * Process explicit sve<N> properties.
     * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
     * Check first for any sve<N> enabled.
     */
    if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
        max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;

        if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
            error_setg(errp, "cannot enable sve%d", max_vq * 128);
            error_append_hint(errp, "sve%d is larger than the maximum vector "
                              "length, sve-max-vq=%d (%d bits)\n",
                              max_vq * 128, cpu->sve_max_vq,
                              cpu->sve_max_vq * 128);
            return;
        }

        if (kvm_enabled()) {
            /*
             * For KVM we have to automatically enable all supported
             * uninitialized lengths, even when the smaller lengths are not
             * all powers-of-two.
             */
            bitmap_andnot(tmp, cpu->sve_vq_supported, cpu->sve_vq_init, max_vq);
            bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
        } else {
            /* Propagate enabled bits down through required powers-of-two. */
            for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                if (!test_bit(vq - 1, cpu->sve_vq_init)) {
                    set_bit(vq - 1, cpu->sve_vq_map);
                }
            }
        }
    } else if (cpu->sve_max_vq == 0) {
        /*
         * No explicit bits enabled, and no implicit bits from sve-max-vq.
         */
        if (!cpu_isar_feature(aa64_sve, cpu)) {
            /* SVE is disabled and so are all vector lengths.  Good. */
            return;
        }

        if (kvm_enabled()) {
            /* Disabling a supported length disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
                if (test_bit(vq - 1, cpu->sve_vq_init) &&
                    test_bit(vq - 1, cpu->sve_vq_supported)) {
                    break;
                }
            }
        } else {
            /* Disabling a power-of-two disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
                if (test_bit(vq - 1, cpu->sve_vq_init)) {
                    break;
                }
            }
        }

        /* Enable everything supported below the first explicit disable. */
        max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
        bitmap_andnot(cpu->sve_vq_map, cpu->sve_vq_supported,
                      cpu->sve_vq_init, max_vq);
        if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
            error_setg(errp, "cannot disable sve%d", vq * 128);
            error_append_hint(errp, "Disabling sve%d results in all "
                              "vector lengths being disabled.\n",
                              vq * 128);
            error_append_hint(errp, "With SVE enabled, at least one "
                              "vector length must be enabled.\n");
            return;
        }

        max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
    }

    /*
     * Process the sve-max-vq property.
     * Note that we know from the above that no bit above
     * sve-max-vq is currently set.
     */
    if (cpu->sve_max_vq != 0) {
        max_vq = cpu->sve_max_vq;

        if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
            test_bit(max_vq - 1, cpu->sve_vq_init)) {
            error_setg(errp, "cannot disable sve%d", max_vq * 128);
            error_append_hint(errp, "The maximum vector length must be "
                              "enabled, sve-max-vq=%d (%d bits)\n",
                              max_vq, max_vq * 128);
            return;
        }

        /* Set all bits not explicitly set within sve-max-vq. */
        bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
        bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
    }

    /*
     * We should know what max-vq is now.  Also, as we're done
     * manipulating sve-vq-map, we ensure any bits above max-vq
     * are clear, just in case anybody looks.
     */
    assert(max_vq != 0);
    bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);

    /* Ensure the set of lengths matches what is supported. */
    bitmap_xor(tmp, cpu->sve_vq_map, cpu->sve_vq_supported, max_vq);
    if (!bitmap_empty(tmp, max_vq)) {
        /* The highest mismatching length determines which error to report. */
        vq = find_last_bit(tmp, max_vq) + 1;
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            if (cpu->sve_max_vq) {
                error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
                error_append_hint(errp, "It may not be possible to use "
                                  "sve-max-vq with this CPU. Try "
                                  "using only sve<N> properties.\n");
            } else {
                error_setg(errp, "cannot enable sve%d", vq * 128);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
            }
            return;
        } else {
            if (kvm_enabled()) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "The KVM host requires all "
                                  "supported vector lengths smaller "
                                  "than %d bits to also be enabled.\n",
                                  max_vq * 128);
                return;
            } else {
                /* Ensure all required powers-of-two are enabled. */
                for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                    if (!test_bit(vq - 1, cpu->sve_vq_map)) {
                        error_setg(errp, "cannot disable sve%d", vq * 128);
                        error_append_hint(errp, "sve%d is required as it "
                                          "is a power-of-two length smaller "
                                          "than the maximum, sve%d\n",
                                          vq * 128, max_vq * 128);
                        return;
                    }
                }
            }
        }
    }

    /*
     * Now that we validated all our vector lengths, the only question
     * left to answer is if we even want SVE at all.
     */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        error_setg(errp, "cannot enable sve%d", max_vq * 128);
        error_append_hint(errp, "SVE must be enabled to enable vector "
                          "lengths.\n");
        error_append_hint(errp, "Add sve=on to the CPU property list.\n");
        return;
    }

    /* From now on sve_max_vq is the actual maximum supported length. */
    cpu->sve_max_vq = max_vq;
}
447 
448 static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
449                                    void *opaque, Error **errp)
450 {
451     ARMCPU *cpu = ARM_CPU(obj);
452     uint32_t value;
453 
454     /* All vector lengths are disabled when SVE is off. */
455     if (!cpu_isar_feature(aa64_sve, cpu)) {
456         value = 0;
457     } else {
458         value = cpu->sve_max_vq;
459     }
460     visit_type_uint32(v, name, &value, errp);
461 }
462 
463 static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
464                                    void *opaque, Error **errp)
465 {
466     ARMCPU *cpu = ARM_CPU(obj);
467     uint32_t max_vq;
468 
469     if (!visit_type_uint32(v, name, &max_vq, errp)) {
470         return;
471     }
472 
473     if (kvm_enabled() && !kvm_arm_sve_supported()) {
474         error_setg(errp, "cannot set sve-max-vq");
475         error_append_hint(errp, "SVE not supported by KVM on this host\n");
476         return;
477     }
478 
479     if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
480         error_setg(errp, "unsupported SVE vector length");
481         error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
482                           ARM_MAX_VQ);
483         return;
484     }
485 
486     cpu->sve_max_vq = max_vq;
487 }
488 
489 /*
490  * Note that cpu_arm_get/set_sve_vq cannot use the simpler
491  * object_property_add_bool interface because they make use
492  * of the contents of "name" to determine which bit on which
493  * to operate.
494  */
495 static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
496                                void *opaque, Error **errp)
497 {
498     ARMCPU *cpu = ARM_CPU(obj);
499     uint32_t vq = atoi(&name[3]) / 128;
500     bool value;
501 
502     /* All vector lengths are disabled when SVE is off. */
503     if (!cpu_isar_feature(aa64_sve, cpu)) {
504         value = false;
505     } else {
506         value = test_bit(vq - 1, cpu->sve_vq_map);
507     }
508     visit_type_bool(v, name, &value, errp);
509 }
510 
511 static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
512                                void *opaque, Error **errp)
513 {
514     ARMCPU *cpu = ARM_CPU(obj);
515     uint32_t vq = atoi(&name[3]) / 128;
516     bool value;
517 
518     if (!visit_type_bool(v, name, &value, errp)) {
519         return;
520     }
521 
522     if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
523         error_setg(errp, "cannot enable %s", name);
524         error_append_hint(errp, "SVE not supported by KVM on this host\n");
525         return;
526     }
527 
528     if (value) {
529         set_bit(vq - 1, cpu->sve_vq_map);
530     } else {
531         clear_bit(vq - 1, cpu->sve_vq_map);
532     }
533     set_bit(vq - 1, cpu->sve_vq_init);
534 }
535 
/* QOM getter for "sve": reports whether the SVE feature is present. */
static bool cpu_arm_get_sve(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    return cpu_isar_feature(aa64_sve, cpu);
}
541 
542 static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
543 {
544     ARMCPU *cpu = ARM_CPU(obj);
545     uint64_t t;
546 
547     if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
548         error_setg(errp, "'sve' feature not supported by KVM on this host");
549         return;
550     }
551 
552     t = cpu->isar.id_aa64pfr0;
553     t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
554     cpu->isar.id_aa64pfr0 = t;
555 }
556 
557 #ifdef CONFIG_USER_ONLY
558 /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
559 static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
560                                             const char *name, void *opaque,
561                                             Error **errp)
562 {
563     ARMCPU *cpu = ARM_CPU(obj);
564     int32_t default_len, default_vq, remainder;
565 
566     if (!visit_type_int32(v, name, &default_len, errp)) {
567         return;
568     }
569 
570     /* Undocumented, but the kernel allows -1 to indicate "maximum". */
571     if (default_len == -1) {
572         cpu->sve_default_vq = ARM_MAX_VQ;
573         return;
574     }
575 
576     default_vq = default_len / 16;
577     remainder = default_len % 16;
578 
579     /*
580      * Note that the 512 max comes from include/uapi/asm/sve_context.h
581      * and is the maximum architectural width of ZCR_ELx.LEN.
582      */
583     if (remainder || default_vq < 1 || default_vq > 512) {
584         error_setg(errp, "cannot set sve-default-vector-length");
585         if (remainder) {
586             error_append_hint(errp, "Vector length not a multiple of 16\n");
587         } else if (default_vq < 1) {
588             error_append_hint(errp, "Vector length smaller than 16\n");
589         } else {
590             error_append_hint(errp, "Vector length larger than %d\n",
591                               512 * 16);
592         }
593         return;
594     }
595 
596     cpu->sve_default_vq = default_vq;
597 }
598 
599 static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,
600                                             const char *name, void *opaque,
601                                             Error **errp)
602 {
603     ARMCPU *cpu = ARM_CPU(obj);
604     int32_t value = cpu->sve_default_vq * 16;
605 
606     visit_type_int32(v, name, &value, errp);
607 }
608 #endif
609 
610 void aarch64_add_sve_properties(Object *obj)
611 {
612     uint32_t vq;
613 
614     object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
615 
616     for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
617         char name[8];
618         sprintf(name, "sve%d", vq * 128);
619         object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
620                             cpu_arm_set_sve_vq, NULL, NULL);
621     }
622 
623 #ifdef CONFIG_USER_ONLY
624     /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
625     object_property_add(obj, "sve-default-vector-length", "int32",
626                         cpu_arm_get_sve_default_vec_len,
627                         cpu_arm_set_sve_default_vec_len, NULL, NULL);
628 #endif
629 }
630 
/*
 * Apply the "pauth"/"pauth-impdef" properties to ID_AA64ISAR1.
 * Under KVM/hvf the host dictates support, so we only validate the
 * request; under TCG we advertise either the architected algorithm
 * (APA/GPA) or the QEMU IMPDEF one (API/GPI).
 */
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
    int arch_val = 0, impdef_val = 0;
    uint64_t t;

    /* Exit early if PAuth is enabled, and fall through to disable it */
    if ((kvm_enabled() || hvf_enabled()) && cpu->prop_pauth) {
        if (!cpu_isar_feature(aa64_pauth, cpu)) {
            error_setg(errp, "'pauth' feature not supported by %s on this host",
                       kvm_enabled() ? "KVM" : "hvf");
        }

        return;
    }

    /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
    if (cpu->prop_pauth) {
        if (cpu->prop_pauth_impdef) {
            impdef_val = 1;
        } else {
            arch_val = 1;
        }
    } else if (cpu->prop_pauth_impdef) {
        /* pauth-impdef=on without pauth=on is a contradiction, not a no-op. */
        error_setg(errp, "cannot enable pauth-impdef without pauth");
        error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
    }

    t = cpu->isar.id_aa64isar1;
    t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
    cpu->isar.id_aa64isar1 = t;
}
665 
/* "pauth": pointer authentication master switch, default on. */
static Property arm_cpu_pauth_property =
    DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
/* "pauth-impdef": select the IMPDEF algorithm instead of the architected
 * one (see arm_cpu_pauth_finalize); TCG only, default off. */
static Property arm_cpu_pauth_impdef_property =
    DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
670 
/* Register the pointer-authentication QOM properties on @obj. */
void aarch64_add_pauth_properties(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* Default to PAUTH on, with the architected algorithm on TCG. */
    qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
    if (kvm_enabled() || hvf_enabled()) {
        /*
         * Mirror PAuth support from the probed sysregs back into the
         * property for KVM or hvf. Is it just a bit backward? Yes it is!
         * Note that prop_pauth is true whether the host CPU supports the
         * architected QARMA5 algorithm or the IMPDEF one. We don't
         * provide the separate pauth-impdef property for KVM or hvf,
         * only for TCG.
         */
        cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu);
    } else {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
    }
}
691 
/* "lpa2": advertise FEAT_LPA2 (see arm_cpu_lpa2_finalize); default on. */
static Property arm_cpu_lpa2_property =
    DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true);
694 
/*
 * Apply the "lpa2" property: upgrade the ID_AA64MMFR0 TGRAN fields to
 * advertise 52-bit (LPA2) support for the 4k and 16k granules.
 */
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
{
    uint64_t t;

    /*
     * We only install the property for tcg -cpu max; this is the
     * only situation in which the cpu field can be true.
     */
    if (!cpu->prop_lpa2) {
        return;
    }

    t = cpu->isar.id_aa64mmfr0;
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2);   /* 16k pages w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1);    /*  4k pages w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3);  /*  4k stage2 w/ LPA2 */
    cpu->isar.id_aa64mmfr0 = t;
}
714 
/*
 * '-cpu host': probe the CPU features from the accelerator (KVM or hvf).
 * Building this CPU type without either accelerator is a bug, hence the
 * g_assert_not_reached() in the fallback arm.
 */
static void aarch64_host_initfn(Object *obj)
{
#if defined(CONFIG_KVM)
    ARMCPU *cpu = ARM_CPU(obj);
    kvm_arm_set_cpu_features_from_host(cpu);
    /* SVE/PAuth properties only apply when the host CPU is AArch64. */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        aarch64_add_sve_properties(obj);
        aarch64_add_pauth_properties(obj);
    }
#elif defined(CONFIG_HVF)
    ARMCPU *cpu = ARM_CPU(obj);
    hvf_arm_set_cpu_features_from_host(cpu);
    aarch64_add_pauth_properties(obj);
#else
    g_assert_not_reached();
#endif
}
732 
733 /* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
734  * otherwise, a CPU with as many features enabled as our emulation supports.
735  * The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
736  * this only needs to handle 64 bits.
737  */
738 static void aarch64_max_initfn(Object *obj)
739 {
740     ARMCPU *cpu = ARM_CPU(obj);
741     uint64_t t;
742     uint32_t u;
743 
744     if (kvm_enabled() || hvf_enabled()) {
745         /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
746         aarch64_host_initfn(obj);
747         return;
748     }
749 
750     /* '-cpu max' for TCG: we currently do this as "A57 with extra things" */
751 
752     aarch64_a57_initfn(obj);
753 
754     /*
755      * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
756      * one and try to apply errata workarounds or use impdef features we
757      * don't provide.
758      * An IMPLEMENTER field of 0 means "reserved for software use";
759      * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
760      * to see which features are present";
761      * the VARIANT, PARTNUM and REVISION fields are all implementation
762      * defined and we choose to define PARTNUM just in case guest
763      * code needs to distinguish this QEMU CPU from other software
764      * implementations, though this shouldn't be needed.
765      */
766     t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
767     t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
768     t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
769     t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
770     t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
771     cpu->midr = t;
772 
773     t = cpu->isar.id_aa64isar0;
774     t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
775     t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
776     t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
777     t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
778     t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
779     t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
780     t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
781     t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
782     t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
783     t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
784     t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
785     t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
786     t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
787     t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
788     cpu->isar.id_aa64isar0 = t;
789 
790     t = cpu->isar.id_aa64isar1;
791     t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
792     t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
793     t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
794     t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
795     t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
796     t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);
797     t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
798     t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
799     t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
800     cpu->isar.id_aa64isar1 = t;
801 
802     t = cpu->isar.id_aa64pfr0;
803     t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
804     t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
805     t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
806     t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);
807     t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);
808     cpu->isar.id_aa64pfr0 = t;
809 
810     t = cpu->isar.id_aa64pfr1;
811     t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
812     t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);
813     /*
814      * Begin with full support for MTE. This will be downgraded to MTE=0
815      * during realize if the board provides no tag memory, much like
816      * we do for EL2 with the virtualization=on property.
817      */
818     t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
819     cpu->isar.id_aa64pfr1 = t;
820 
821     t = cpu->isar.id_aa64mmfr0;
822     t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
823     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1);   /* 16k pages supported */
824     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */
825     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */
826     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2);  /*  4k stage2 supported */
827     cpu->isar.id_aa64mmfr0 = t;
828 
829     t = cpu->isar.id_aa64mmfr1;
830     t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
831     t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
832     t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
833     t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
834     t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
835     t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
836     cpu->isar.id_aa64mmfr1 = t;
837 
838     t = cpu->isar.id_aa64mmfr2;
839     t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);
840     t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */
841     t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */
842     t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
843     t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
844     t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
845     cpu->isar.id_aa64mmfr2 = t;
846 
847     t = cpu->isar.id_aa64zfr0;
848     t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
849     t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);  /* PMULL */
850     t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
851     t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);
852     t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
853     t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
854     t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
855     t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);
856     t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
857     cpu->isar.id_aa64zfr0 = t;
858 
859     /* Replicate the same data to the 32-bit id registers.  */
860     u = cpu->isar.id_isar5;
861     u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
862     u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
863     u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
864     u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
865     u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
866     u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
867     cpu->isar.id_isar5 = u;
868 
869     u = cpu->isar.id_isar6;
870     u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1);
871     u = FIELD_DP32(u, ID_ISAR6, DP, 1);
872     u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
873     u = FIELD_DP32(u, ID_ISAR6, SB, 1);
874     u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
875     u = FIELD_DP32(u, ID_ISAR6, BF16, 1);
876     u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
877     cpu->isar.id_isar6 = u;
878 
879     u = cpu->isar.id_pfr0;
880     u = FIELD_DP32(u, ID_PFR0, DIT, 1);
881     cpu->isar.id_pfr0 = u;
882 
883     u = cpu->isar.id_pfr2;
884     u = FIELD_DP32(u, ID_PFR2, SSBS, 1);
885     cpu->isar.id_pfr2 = u;
886 
887     u = cpu->isar.id_mmfr3;
888     u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
889     cpu->isar.id_mmfr3 = u;
890 
891     u = cpu->isar.id_mmfr4;
892     u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
893     u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
894     u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
895     u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
896     cpu->isar.id_mmfr4 = u;
897 
898     t = cpu->isar.id_aa64dfr0;
899     t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
900     cpu->isar.id_aa64dfr0 = t;
901 
902     u = cpu->isar.id_dfr0;
903     u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
904     cpu->isar.id_dfr0 = u;
905 
906     u = cpu->isar.mvfr1;
907     u = FIELD_DP32(u, MVFR1, FPHP, 3);      /* v8.2-FP16 */
908     u = FIELD_DP32(u, MVFR1, SIMDHP, 2);    /* v8.2-FP16 */
909     cpu->isar.mvfr1 = u;
910 
911 #ifdef CONFIG_USER_ONLY
912     /*
913      * For usermode -cpu max we can use a larger and more efficient DCZ
914      * blocksize since we don't have to follow what the hardware does.
915      */
916     cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
917     cpu->dcz_blocksize = 7; /*  512 bytes */
918 #endif
919 
920     bitmap_fill(cpu->sve_vq_supported, ARM_MAX_VQ);
921 
922     aarch64_add_pauth_properties(obj);
923     aarch64_add_sve_properties(obj);
924     object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
925                         cpu_max_set_sve_max_vq, NULL, NULL);
926     qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
927 }
928 
/*
 * Instance init for the Fujitsu A64FX CPU model.
 *
 * Unlike the 'max' CPU above, all ID registers here are set to fixed
 * hex values matching the real part, so keep them byte-exact; changing
 * any bit changes the feature set the guest sees.
 */
static void aarch64_a64fx_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,a64fx";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* MIDR: implementer field 0x46 is 'F' (Fujitsu) */
    cpu->midr = 0x461f0010;
    cpu->revidr = 0x00000000;
    cpu->ctr = 0x86668006;
    cpu->reset_sctlr = 0x30000180;
    cpu->isar.id_aa64pfr0 =   0x0000000101111111; /* No RAS Extensions */
    cpu->isar.id_aa64pfr1 = 0x0000000000000000;
    cpu->isar.id_aa64dfr0 = 0x0000000010305408;
    cpu->isar.id_aa64dfr1 = 0x0000000000000000;
    cpu->id_aa64afr0 = 0x0000000000000000;
    cpu->id_aa64afr1 = 0x0000000000000000;
    cpu->isar.id_aa64mmfr0 = 0x0000000000001122;
    cpu->isar.id_aa64mmfr1 = 0x0000000011212100;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011;
    cpu->isar.id_aa64isar0 = 0x0000000010211120;
    cpu->isar.id_aa64isar1 = 0x0000000000010001;
    cpu->isar.id_aa64zfr0 = 0x0000000000000000;
    cpu->clidr = 0x0000000080000023;
    cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */
    cpu->dcz_blocksize = 6; /* 256 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;

    /* The A64FX supports only 128, 256 and 512 bit vector lengths */
    aarch64_add_sve_properties(obj);
    bitmap_zero(cpu->sve_vq_supported, ARM_MAX_VQ);
    set_bit(0, cpu->sve_vq_supported); /* 128bit */
    set_bit(1, cpu->sve_vq_supported); /* 256bit */
    set_bit(3, cpu->sve_vq_supported); /* 512bit */

    /* TODO:  Add A64FX specific HPC extension registers */
}
975 
/*
 * Table of AArch64 CPU models; each entry is registered as a QOM type
 * by aarch64_cpu_register_types() below.
 */
static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a57",         .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53",         .initfn = aarch64_a53_initfn },
    { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
    { .name = "a64fx",              .initfn = aarch64_a64fx_initfn },
    { .name = "max",                .initfn = aarch64_max_initfn },
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    /* 'host' (mirror the host CPU) only makes sense with an accelerator */
    { .name = "host",               .initfn = aarch64_host_initfn },
#endif
};
986 
987 static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
988 {
989     ARMCPU *cpu = ARM_CPU(obj);
990 
991     return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
992 }
993 
994 static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
995 {
996     ARMCPU *cpu = ARM_CPU(obj);
997 
998     /* At this time, this property is only allowed if KVM is enabled.  This
999      * restriction allows us to avoid fixing up functionality that assumes a
1000      * uniform execution state like do_interrupt.
1001      */
1002     if (value == false) {
1003         if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
1004             error_setg(errp, "'aarch64' feature cannot be disabled "
1005                              "unless KVM is enabled and 32-bit EL1 "
1006                              "is supported");
1007             return;
1008         }
1009         unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
1010     } else {
1011         set_feature(&cpu->env, ARM_FEATURE_AARCH64);
1012     }
1013 }
1014 
/* Instance finalizer: nothing to release for AArch64 CPU objects. */
static void aarch64_cpu_finalizefn(Object *obj)
{
}
1018 
/*
 * Architecture name reported to the gdbstub.
 * Returns a heap copy; presumably the gdbstub caller frees it — see the
 * CPUClass::gdb_arch_name contract.
 */
static gchar *aarch64_gdb_arch_name(CPUState *cs)
{
    return g_strdup("aarch64");
}
1023 
1024 static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
1025 {
1026     CPUClass *cc = CPU_CLASS(oc);
1027 
1028     cc->gdb_read_register = aarch64_cpu_gdb_read_register;
1029     cc->gdb_write_register = aarch64_cpu_gdb_write_register;
1030     cc->gdb_num_core_regs = 34;
1031     cc->gdb_core_xml_file = "aarch64-core.xml";
1032     cc->gdb_arch_name = aarch64_gdb_arch_name;
1033 
1034     object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
1035                                    aarch64_cpu_set_aarch64);
1036     object_class_property_set_description(oc, "aarch64",
1037                                           "Set on/off to enable/disable aarch64 "
1038                                           "execution state ");
1039 }
1040 
/*
 * Common instance init for every registered AArch64 CPU type: run the
 * model-specific initfn stashed in the class, then the generic ARM CPU
 * post-init (property registration etc.).
 */
static void aarch64_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}
1048 
/*
 * Default class_init for registered CPU models: stash the ARMCPUInfo
 * (passed as class_data) so instance init can find the model's initfn.
 */
static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}
1055 
1056 void aarch64_cpu_register(const ARMCPUInfo *info)
1057 {
1058     TypeInfo type_info = {
1059         .parent = TYPE_AARCH64_CPU,
1060         .instance_size = sizeof(ARMCPU),
1061         .instance_init = aarch64_cpu_instance_init,
1062         .class_size = sizeof(ARMCPUClass),
1063         .class_init = info->class_init ?: cpu_register_class_init,
1064         .class_data = (void *)info,
1065     };
1066 
1067     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
1068     type_register(&type_info);
1069     g_free((void *)type_info.name);
1070 }
1071 
/* Abstract base type that all concrete AArch64 CPU models derive from. */
static const TypeInfo aarch64_cpu_type_info = {
    .name = TYPE_AARCH64_CPU,
    .parent = TYPE_ARM_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_finalize = aarch64_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(AArch64CPUClass),
    .class_init = aarch64_cpu_class_init,
};
1081 
1082 static void aarch64_cpu_register_types(void)
1083 {
1084     size_t i;
1085 
1086     type_register_static(&aarch64_cpu_type_info);
1087 
1088     for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
1089         aarch64_cpu_register(&aarch64_cpus[i]);
1090     }
1091 }
1092 
/* Run the type registration at QEMU module-init time. */
type_init(aarch64_cpu_register_types)
1094