/*
 * QEMU ARM CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/log.h"
#include "exec/page-vary.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#include "hw/boards.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "sysemu/hw_accel.h"
#include "kvm_arm.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
#include "cpregs.h"

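/*
 * Generic-code hook (CPUClass::set_pc) for setting the PC. On AArch32
 * the low bit of the new value selects the Thumb execution state,
 * following the BX/BLX interworking convention, so it is folded into
 * env->thumb rather than stored in R15 itself. A caller might, for
 * example, do (hypothetical entry point variables):
 *
 *     cpu_set_pc(cs, entry_addr | (entry_is_thumb ? 1 : 0));
 */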
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        env->pc = value;
        env->thumb = false;
    } else {
        env->regs[15] = value & ~1;
        env->thumb = value & 1;
    }
}

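/*
 * The matching CPUClass::get_pc hook. Note that for AArch32 this
 * returns R15 as-is, without re-encoding the Thumb state bit.
 */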
static vaddr arm_cpu_get_pc(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        return env->pc;
    } else {
        return env->regs[15];
    }
}

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
                                 const TranslationBlock *tb)
{
    /* The program counter is always up to date with TARGET_TB_PCREL. */
    if (!TARGET_TB_PCREL) {
        CPUARMState *env = cs->env_ptr;
        /*
         * It's OK to look at env for the current mode here, because it's
         * never possible for an AArch64 TB to chain to an AArch32 TB.
         */
        if (is_a64(env)) {
            env->pc = tb_pc(tb);
        } else {
            env->regs[15] = tb_pc(tb);
        }
    }
}
#endif /* CONFIG_TCG */

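/*
 * A CPU has work (i.e. should leave the halted state) only if it is
 * powered on per PSCI and one of the interrupt lines we model is
 * pending; per-exception masking (see arm_excp_unmasked() below) is
 * only applied later, when the interrupt is actually taken.
 */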
static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
         | CPU_INTERRUPT_EXITTB);
}

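/*
 * EL change hooks let other subsystems (for instance the GICv3 CPU
 * interface) observe exception-level transitions: "pre" hooks run
 * before the EL changes, the plain hooks after. A minimal sketch of a
 * registration, assuming a hypothetical callback my_el_change_cb:
 *
 *     arm_register_el_change_hook(cpu, my_el_change_cb, my_opaque);
 */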
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}

void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}

static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}

static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}

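/*
 * Reset flow: run the parent class reset, zero the CPU state up to
 * end_reset_fields (fields placed after that marker survive reset),
 * run every coprocessor register's reset hook, then re-run the hooks
 * purely to assert that reset is idempotent.
 */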
static void arm_cpu_reset(DeviceState *dev)
{
    CPUState *s = CPU(dev);
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    acc->parent_reset(dev);

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = true;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTR_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys.  */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Trap on btype=3 for PACIxSP. */
        env->cp15.sctlr_el[1] |= SCTLR_BT0;
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR_EL1, FPEN, 3);
        /* and to the SVE instructions, with default vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, ZEN, 3);
            env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
        }
        /* and for SME instructions, with default vector length, and TPIDR2 */
        if (cpu_isar_feature(aa64_sme, cpu)) {
            env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, SMEN, 3);
            env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
            if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
                env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
                                                 SMCR, FA64, 1);
            }
        }
        /*
         * Enable 48-bit address space (TODO: take reserved_va into account).
         * Enable TBI0 but not TBI1.
         * Note that this must match useronly_clean_ptr.
         */
        env->cp15.tcr_el[1] = 5 | (1ULL << 37);

        /* Enable MTE */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            /* Enable tag access, but leave TCF0 as No Effect (0). */
            env->cp15.sctlr_el[1] |= SCTLR_ATA0;
            /*
             * Exclude all tags, so that tag 0 is always used.
             * This corresponds to Linux current->thread.gcr_incl = 0.
             *
             * Set RRND, so that helper_irg() will generate a seed later.
             * Here in cpu_reset(), the crypto subsystem has not yet been
             * initialized.
             */
            env->cp15.gcr_el1 = 0x1ffff;
        }
        /*
         * Disable access to SCXTNUM_EL0 from CSV2_1p2.
         * This is not yet exposed from the Linux kernel in any way.
         */
        env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }

        /* Sample rvbar at reset.  */
        env->cp15.rvbar = cpu->rvbar_prop;
        env->pc = env->cp15.rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP10, 3);
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP11, 3);
#endif
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else

    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif

    if (arm_feature(env, ARM_FEATURE_M)) {
#ifndef CONFIG_USER_ONLY
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;
#endif

        if (cpu_isar_feature(aa32_lob, cpu)) {
            /*
             * LTPSIZE is constant 4 if MVE not implemented, and resets
             * to an UNKNOWN value if MVE is implemented. We choose to
             * always reset to 4.
             */
            env->v7m.ltpsize = 4;
            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
            /*
             * Set NSACR to indicate "NS access permitted to everything";
             * this avoids having to have all the tests of it being
             * conditional on ARM_FEATURE_M_SECURITY. Note also that from
             * v8.1M the guest-visible value of NSACR in a CPU without the
             * Security Extension is 0xcff.
             */
            env->v7m.nsacr = 0xcff;
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
            env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
        }

#ifndef CONFIG_USER_ONLY
        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr_for_as(s->as, vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        qemu_log_mask(CPU_LOG_INT,
                      "Loaded reset SP 0x%x PC 0x%x from vector table\n",
                      initial_msp, initial_pc);

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
#else
        /*
         * For user mode we run non-secure and with access to the FPU.
         * The FPU context is active (ie does not need further setup)
         * and is owned by non-secure.
         */
        env->v7m.secure = false;
        env->v7m.nsacr = 0xcff;
        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
        env->v7m.fpccr[M_REG_S] &=
            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
#endif
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }
        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);
    arm_rebuild_hflags(env);
}

#ifndef CONFIG_USER_ONLY

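/*
 * Return true if the given exception can be taken right now: either it
 * is not masked by PSTATE.{A,I,F}, or the masking is overridden
 * because the exception targets a higher EL which the current state
 * is not permitted to mask.
 */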
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el,
                                     unsigned int cur_el, bool secure,
                                     uint64_t hcr_el2)
{
    CPUARMState *env = cs->env_ptr;
    bool pstate_unmasked;
    bool unmasked = false;

    /*
     * Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when FIQ virtualization is in effect.  */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when IRQ virtualization is in effect.  */
            return false;
        }
        return !(env->daif & PSTATE_I);
    case EXCP_VSERR:
        if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
            /* VSErrs are only taken when SError virtualization is in effect.  */
            return false;
        }
        return !(env->daif & PSTATE_A);
    default:
        g_assert_not_reached();
    }

    /*
     * Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /*
             * 64-bit masking rules are simple: exceptions to EL3
             * can't be masked, and exceptions to EL2 can only be
             * masked from Secure state. The HCR and SCR settings
             * don't affect the masking logic, only the interrupt routing.
             */
            if (target_el == 3 || !secure || (env->cp15.scr_el3 & SCR_EEL2)) {
                unmasked = true;
            }
        } else {
            /*
             * The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /*
                 * If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these is set then we fall back
                 * to the CPSR.F setting; otherwise we further assess the state
                 * below.
                 */
                hcr = hcr_el2 & HCR_FMO;
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /*
                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure, but only if FIQs are routed solely to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /*
                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = hcr_el2 & HCR_IMO;
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = true;
            }
        }
    }

    /*
     * The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}

static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cs->env_ptr;
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint32_t target_el;
    uint32_t excp_idx;

    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VSERR) {
        excp_idx = EXCP_VSERR;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            /* Taking a virtual abort clears HCR_EL2.VSE */
            env->cp15.hcr_el2 &= ~HCR_VSE;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
            goto found;
        }
    }
    return false;

 found:
    cs->exception_index = excp_idx;
    env->exception.target_el = target_el;
    cc->tcg_ops->do_interrupt(cs);
    return true;
}
#endif /* !CONFIG_USER_ONLY */

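/*
 * The three update functions below recompute the effective level of a
 * virtual interrupt whenever one of its inputs may have changed; they
 * are called from writes to HCR_EL2 and (for VIRQ/VFIQ) from
 * arm_cpu_set_irq() when the GIC input line toggles. Only level
 * changes are propagated to cpu_interrupt()/cpu_reset_interrupt().
 */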
void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}

void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}

void arm_cpu_update_vserr(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = env->cp15.hcr_el2 & HCR_VSE;

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
        }
    }
}

#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    if (!arm_feature(env, ARM_FEATURE_EL2) &&
        (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) {
        /*
         * The GIC might tell us about VIRQ and VFIQ state, but if we don't
         * have EL2 support we don't care. (Unless the guest is doing something
         * silly this will only be calls saying "level is still 0".)
         */
        return;
    }

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t linestate_bit;
    int irq_id;

    switch (irq) {
    case ARM_CPU_IRQ:
        irq_id = KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        irq_id = KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }
    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
#endif
}

static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}

#endif

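/*
 * Configure the Capstone disassembler to match the CPU's current
 * state: A64 vs A32/T32 instruction set, M-profile and v8 variants,
 * and the endianness of the instruction stream (including SCTLR.B
 * BE32 mode, where instructions are fetched word-swapped).
 */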
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#if TARGET_BIG_ENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}

#ifdef TARGET_AARCH64

static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;
    bool sve;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                         (i + 2) % 3 ? " " : "\n");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                 psr,
                 psr & PSTATE_N ? 'N' : '-',
                 psr & PSTATE_Z ? 'Z' : '-',
                 psr & PSTATE_C ? 'C' : '-',
                 psr & PSTATE_V ? 'V' : '-',
                 ns_status,
                 el,
                 psr & PSTATE_SP ? 'h' : 't');

    if (cpu_isar_feature(aa64_sme, cpu)) {
        qemu_fprintf(f, "  SVCR=%08" PRIx64 " %c%c",
                     env->svcr,
                     (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
                     (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
    }
    if (cpu_isar_feature(aa64_bti, cpu)) {
        qemu_fprintf(f, "  BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
    }
    if (!(flags & CPU_DUMP_FPU)) {
        qemu_fprintf(f, "\n");
        return;
    }
    if (fp_exception_el(env, el) != 0) {
        qemu_fprintf(f, "    FPU disabled\n");
        return;
    }
    qemu_fprintf(f, "     FPCR=%08x FPSR=%08x\n",
                 vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
        sve = sme_exception_el(env, el) == 0;
    } else if (cpu_isar_feature(aa64_sve, cpu)) {
        sve = sve_exception_el(env, el) == 0;
    } else {
        sve = false;
    }

    if (sve) {
        int j, zcr_len = sve_vqm1_for_el(env, el);

        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                qemu_fprintf(f, "FFR=");
                /* It's last, so end the line.  */
                eol = true;
            } else {
                qemu_fprintf(f, "P%02d=", i);
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* More than one quadword per predicate.  */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
                             env->vfp.pregs[i].p[j],
                             j ? ":" : eol ? "\n" : " ");
            }
        }

        for (i = 0; i < 32; i++) {
            if (zcr_len == 0) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                             i, env->vfp.zregs[i].d[1],
                             env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            } else if (zcr_len == 1) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
                             ":%016" PRIx64 ":%016" PRIx64 "\n",
                             i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
                             env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
            } else {
                for (j = zcr_len; j >= 0; j--) {
                    bool odd = (zcr_len - j) % 2 != 0;
                    if (j == zcr_len) {
                        qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
                    } else if (!odd) {
                        if (j > 0) {
                            qemu_fprintf(f, "   [%x-%x]=", j, j - 1);
                        } else {
                            qemu_fprintf(f, "     [%x]=", j);
                        }
                    }
                    qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                 env->vfp.zregs[i].d[j * 2 + 1],
                                 env->vfp.zregs[i].d[j * 2],
                                 odd || j == 0 ? "\n" : ":");
                }
            }
        }
    } else {
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                         i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }
}

#else

static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    g_assert_not_reached();
}

#endif

static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            numvfpregs = 32;
        } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            numvfpregs = 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
        if (cpu_isar_feature(aa32_mve, cpu)) {
            qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
        }
    }
}

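/*
 * Map a linear CPU index to MPIDR affinity fields, with clustersz CPUs
 * per cluster. Worked example: idx = 5 with clustersz = 4 gives
 * Aff1 = 5 / 4 = 1 and Aff0 = 5 % 4 = 1, i.e. an affinity of 0x101.
 */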
uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}

static void arm_cpu_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu_set_cpustate_pointers(cpu);
    cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                         NULL, g_free);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifdef CONFIG_USER_ONLY
# ifdef TARGET_AARCH64
    /*
     * The linux kernel defaults to 512-bit for SVE, and 256-bit for SME.
     * These values were chosen to fit within the default signal frame.
     * See documentation for /proc/sys/abi/{sve,sme}_default_vector_length,
     * and our corresponding cpu property.
     */
    cpu->sve_default_vq = 4;
    cpu->sme_default_vq = 2;
# endif
#else
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled() || hvf_enabled()) {
        /* TCG and HVF implement PSCI 1.1 */
        cpu->psci_version = QEMU_PSCI_VERSION_1_1;
    }
}

static Property arm_cpu_gt_cntfrq_property =
            DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz,
                               NANOSECONDS_PER_SECOND / GTIMER_SCALE);

static Property arm_cpu_reset_cbar_property =
            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);

static Property arm_cpu_reset_hivecs_property =
            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

#ifndef CONFIG_USER_ONLY
static Property arm_cpu_has_el2_property =
            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

static Property arm_cpu_has_el3_property =
            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
#endif

static Property arm_cpu_cfgend_property =
            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

static Property arm_cpu_has_vfp_property =
            DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);

static Property arm_cpu_has_neon_property =
            DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);

static Property arm_cpu_has_dsp_property =
            DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);

static Property arm_cpu_has_mpu_property =
            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

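/*
 * The properties above are user-visible QOM properties and can be set
 * from the command line via -cpu; an illustrative (not exhaustive)
 * invocation might be:
 *
 *     qemu-system-arm -cpu cortex-m4,vfp=off,dsp=off ...
 */
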
/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 * the right value for that particular CPU type, and we don't want
 * to override that with an incorrect constant value.
 */
static Property arm_cpu_pmsav7_dregion_property =
            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                           pmsav7_dregion,
                                           qdev_prop_uint32, uint32_t);

static bool arm_get_pmu(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    return cpu->has_pmu;
}

static void arm_set_pmu(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (value) {
        if (kvm_enabled() && !kvm_arm_pmu_supported()) {
            error_setg(errp, "'pmu' feature not supported by KVM on this host");
            return;
        }
        set_feature(&cpu->env, ARM_FEATURE_PMU);
    } else {
        unset_feature(&cpu->env, ARM_FEATURE_PMU);
    }
    cpu->has_pmu = value;
}

unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
    /*
     * The exact approach to calculating guest ticks is:
     *
     *     muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
     *              NANOSECONDS_PER_SECOND);
     *
     * We don't do that. Rather we intentionally use integer division
     * truncation below and in the caller for the conversion of host monotonic
     * time to guest ticks to provide the exact inverse for the semantics of
     * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
     * it loses precision when representing frequencies where
     * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq_hz) > 0` holds. Failing to
     * provide an exact inverse leads to scheduling timers with negative
     * periods, which in turn leads to sticky behaviour in the guest.
     *
     * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
     * cannot become zero.
     */
    return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
      NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
}
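/*
 * Worked example: the default CNTFRQ property value is
 * NANOSECONDS_PER_SECOND / GTIMER_SCALE = 1e9 / 16 = 62.5 MHz, for
 * which this returns a period of 1e9 / 62500000 = 16 ns per tick.
 */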

void arm_cpu_post_init(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* M profile implies PMSA. We have to do this here rather than
     * in realize with the other feature-implication checks because
     * we look at the PMSA bit to see if we should add some properties.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        set_feature(&cpu->env, ARM_FEATURE_PMSA);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
    }

    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        object_property_add_uint64_ptr(obj, "rvbar",
                                       &cpu->rvbar_prop,
                                       OBJ_PROP_FLAG_READWRITE);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
        /* Add the has_el3 state CPU property only if EL3 is allowed.  This will
         * prevent "has_el3" from existing on CPUs which cannot support EL3.
         */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property);

        object_property_add_link(obj, "secure-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->secure_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property);
    }
#endif

    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
        cpu->has_pmu = true;
        object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu);
    }

    /*
     * Allow user to turn off VFP and Neon support, but only for TCG --
     * KVM does not currently allow us to lie to the guest about its
     * ID/feature registers, so the guest always sees what the host has.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_fp_simd, cpu)
        : cpu_isar_feature(aa32_vfp, cpu)) {
        cpu->has_vfp = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
        cpu->has_neon = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
        arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            qdev_property_add_static(DEVICE(obj),
                                     &arm_cpu_pmsav7_dregion_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
        object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
        /*
         * M profile: initial value of the Secure VTOR. We can't just use
         * a simple DEFINE_PROP_UINT32 for this because we want to permit
         * the property to be set after realize.
         */
        object_property_add_uint32_ptr(obj, "init-svtor",
                                       &cpu->init_svtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        /*
         * Initial value of the NS VTOR (for cores without the Security
         * extension, this is the only VTOR)
         */
        object_property_add_uint32_ptr(obj, "init-nsvtor",
                                       &cpu->init_nsvtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }

    /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
    object_property_add_uint32_ptr(obj, "psci-conduit",
                                   &cpu->psci_conduit,
                                   OBJ_PROP_FLAG_READWRITE);

    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);

    if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
        qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
    }

    if (kvm_enabled()) {
        kvm_arm_add_vcpu_properties(obj);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
        cpu_isar_feature(aa64_mte, cpu)) {
        object_property_add_link(obj, "tag-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->tag_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);

        if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
            object_property_add_link(obj, "secure-tag-memory",
                                     TYPE_MEMORY_REGION,
                                     (Object **)&cpu->secure_tag_memory,
                                     qdev_prop_allow_set_link_before_realize,
                                     OBJ_PROP_LINK_STRONG);
        }
    }
#endif
}

static void arm_cpu_finalizefn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMELChangeHook *hook, *next;

    g_hash_table_destroy(cpu->cp_regs);

    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->pmu_timer) {
        timer_free(cpu->pmu_timer);
    }
#endif
}

void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifdef TARGET_AARCH64
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        arm_cpu_sve_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_sme_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_pauth_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_lpa2_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
#endif

    if (kvm_enabled()) {
        kvm_arm_steal_time_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    ARMCPU *cpu = ARM_CPU(dev);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
    CPUARMState *env = &cpu->env;
    int pagebits;
    Error *local_err = NULL;
    bool no_aa32 = false;

1505     /* If we needed to query the host kernel for the CPU features
1506      * then it's possible that the probe failed in the initfn, but
1507      * this is the first point where we can report it.
1508      */
1509     if (cpu->host_cpu_probe_failed) {
1510         if (!kvm_enabled() && !hvf_enabled()) {
1511             error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
1512         } else {
1513             error_setg(errp, "Failed to retrieve host CPU features");
1514         }
1515         return;
1516     }
1517 
1518 #ifndef CONFIG_USER_ONLY
1519     /* The NVIC and M-profile CPU are two halves of a single piece of
1520      * hardware; trying to use one without the other is a command-line
1521      * error and will result in segfaults if not caught here.
1522      */
1523     if (arm_feature(env, ARM_FEATURE_M)) {
1524         if (!env->nvic) {
1525             error_setg(errp, "This board cannot be used with Cortex-M CPUs");
1526             return;
1527         }
1528     } else {
1529         if (env->nvic) {
1530             error_setg(errp, "This board can only be used with Cortex-M CPUs");
1531             return;
1532         }
1533     }
1534 
1535     if (!tcg_enabled() && !qtest_enabled()) {
1536         /*
1537          * We assume that no accelerator except TCG (and the "not really an
1538          * accelerator" qtest) can handle these features, because Arm hardware
1539          * virtualization can't virtualize them.
1540          *
1541          * Catch all the cases which might cause us to create more than one
1542          * address space for the CPU (otherwise we will assert() later in
1543          * cpu_address_space_init()).
1544          */
1545         if (arm_feature(env, ARM_FEATURE_M)) {
1546             error_setg(errp,
1547                        "Cannot enable %s when using an M-profile guest CPU",
1548                        current_accel_name());
1549             return;
1550         }
1551         if (cpu->has_el3) {
1552             error_setg(errp,
1553                        "Cannot enable %s when guest CPU has EL3 enabled",
1554                        current_accel_name());
1555             return;
1556         }
1557         if (cpu->tag_memory) {
1558             error_setg(errp,
1559                        "Cannot enable %s when guest CPUs has MTE enabled",
1560                        current_accel_name());
1561             return;
1562         }
1563     }
1564 
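         /*
          * Create the system timers, scaled by the generic timer's counter
          * period when one is present, else by the default GTIMER_SCALE.
          */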
1565     {
1566         uint64_t scale;
1567 
1568         if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
1569             if (!cpu->gt_cntfrq_hz) {
1570                 error_setg(errp, "Invalid CNTFRQ: %"PRId64"Hz",
1571                            cpu->gt_cntfrq_hz);
1572                 return;
1573             }
1574             scale = gt_cntfrq_period_ns(cpu);
1575         } else {
1576             scale = GTIMER_SCALE;
1577         }
1578 
1579         cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1580                                                arm_gt_ptimer_cb, cpu);
1581         cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1582                                                arm_gt_vtimer_cb, cpu);
1583         cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1584                                               arm_gt_htimer_cb, cpu);
1585         cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1586                                               arm_gt_stimer_cb, cpu);
1587         cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1588                                                   arm_gt_hvtimer_cb, cpu);
1589     }
1590 #endif
1591 
1592     cpu_exec_realizefn(cs, &local_err);
1593     if (local_err != NULL) {
1594         error_propagate(errp, local_err);
1595         return;
1596     }
1597 
1598     arm_cpu_finalize_features(cpu, &local_err);
1599     if (local_err != NULL) {
1600         error_propagate(errp, local_err);
1601         return;
1602     }
1603 
1604     if (arm_feature(env, ARM_FEATURE_AARCH64) &&
1605         cpu->has_vfp != cpu->has_neon) {
1606         /*
1607          * This is an architectural requirement for AArch64; AArch32 is
1608          * more flexible and permits VFP-no-Neon and Neon-no-VFP.
1609          */
1610         error_setg(errp,
1611                    "AArch64 CPUs must have both VFP and Neon or neither");
1612         return;
1613     }
1614 
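         /*
          * VFP disabled: mask out the ID register fields that would
          * otherwise advertise floating-point support to the guest.
          */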
1615     if (!cpu->has_vfp) {
1616         uint64_t t;
1617         uint32_t u;
1618 
1619         t = cpu->isar.id_aa64isar1;
1620         t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
1621         cpu->isar.id_aa64isar1 = t;
1622 
1623         t = cpu->isar.id_aa64pfr0;
1624         t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
1625         cpu->isar.id_aa64pfr0 = t;
1626 
1627         u = cpu->isar.id_isar6;
1628         u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
1629         u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
1630         cpu->isar.id_isar6 = u;
1631 
1632         u = cpu->isar.mvfr0;
1633         u = FIELD_DP32(u, MVFR0, FPSP, 0);
1634         u = FIELD_DP32(u, MVFR0, FPDP, 0);
1635         u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
1636         u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
1637         u = FIELD_DP32(u, MVFR0, FPROUND, 0);
1638         if (!arm_feature(env, ARM_FEATURE_M)) {
1639             u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
1640             u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
1641         }
1642         cpu->isar.mvfr0 = u;
1643 
1644         u = cpu->isar.mvfr1;
1645         u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
1646         u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
1647         u = FIELD_DP32(u, MVFR1, FPHP, 0);
1648         if (arm_feature(env, ARM_FEATURE_M)) {
1649             u = FIELD_DP32(u, MVFR1, FP16, 0);
1650         }
1651         cpu->isar.mvfr1 = u;
1652 
1653         u = cpu->isar.mvfr2;
1654         u = FIELD_DP32(u, MVFR2, FPMISC, 0);
1655         cpu->isar.mvfr2 = u;
1656     }
1657 
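         /*
          * Neon disabled: clear the feature bit plus the Advanced SIMD
          * and crypto fields in the ID registers.
          */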
1658     if (!cpu->has_neon) {
1659         uint64_t t;
1660         uint32_t u;
1661 
1662         unset_feature(env, ARM_FEATURE_NEON);
1663 
1664         t = cpu->isar.id_aa64isar0;
1665         t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
1666         t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
1667         t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
1668         t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 0);
1669         t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
1670         t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
1671         t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
1672         cpu->isar.id_aa64isar0 = t;
1673 
1674         t = cpu->isar.id_aa64isar1;
1675         t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
1676         t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
1677         t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
1678         cpu->isar.id_aa64isar1 = t;
1679 
1680         t = cpu->isar.id_aa64pfr0;
1681         t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
1682         cpu->isar.id_aa64pfr0 = t;
1683 
1684         u = cpu->isar.id_isar5;
1685         u = FIELD_DP32(u, ID_ISAR5, AES, 0);
1686         u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
1687         u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
1688         u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
1689         u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
1690         cpu->isar.id_isar5 = u;
1691 
1692         u = cpu->isar.id_isar6;
1693         u = FIELD_DP32(u, ID_ISAR6, DP, 0);
1694         u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
1695         u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
1696         u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
1697         cpu->isar.id_isar6 = u;
1698 
1699         if (!arm_feature(env, ARM_FEATURE_M)) {
1700             u = cpu->isar.mvfr1;
1701             u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
1702             u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
1703             u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
1704             u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
1705             cpu->isar.mvfr1 = u;
1706 
1707             u = cpu->isar.mvfr2;
1708             u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
1709             cpu->isar.mvfr2 = u;
1710         }
1711     }
1712 
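         /*
          * Some ID register fields cover features common to VFP and Neon;
          * these are cleared only when both are disabled.
          */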
1713     if (!cpu->has_neon && !cpu->has_vfp) {
1714         uint64_t t;
1715         uint32_t u;
1716 
1717         t = cpu->isar.id_aa64isar0;
1718         t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
1719         cpu->isar.id_aa64isar0 = t;
1720 
1721         t = cpu->isar.id_aa64isar1;
1722         t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
1723         cpu->isar.id_aa64isar1 = t;
1724 
1725         u = cpu->isar.mvfr0;
1726         u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
1727         cpu->isar.mvfr0 = u;
1728 
1729         /* Despite the name, this field covers both VFP and Neon */
1730         u = cpu->isar.mvfr1;
1731         u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
1732         cpu->isar.mvfr1 = u;
1733     }
1734 
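         /*
          * M-profile without DSP: drop the Thumb DSP feature and wind the
          * affected ID_ISAR fields back to their non-DSP values.
          */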
1735     if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
1736         uint32_t u;
1737 
1738         unset_feature(env, ARM_FEATURE_THUMB_DSP);
1739 
1740         u = cpu->isar.id_isar1;
1741         u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
1742         cpu->isar.id_isar1 = u;
1743 
1744         u = cpu->isar.id_isar2;
1745         u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
1746         u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
1747         cpu->isar.id_isar2 = u;
1748 
1749         u = cpu->isar.id_isar3;
1750         u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
1751         u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
1752         cpu->isar.id_isar3 = u;
1753     }
1754 
1755     /* Some features automatically imply others: */
1756     if (arm_feature(env, ARM_FEATURE_V8)) {
1757         if (arm_feature(env, ARM_FEATURE_M)) {
1758             set_feature(env, ARM_FEATURE_V7);
1759         } else {
1760             set_feature(env, ARM_FEATURE_V7VE);
1761         }
1762     }
1763 
1764     /*
1765      * There exist AArch64 CPUs without AArch32 support.  When KVM
1766      * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
1767      * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
1768      * As a general principle, we also do not make ID register
1769      * consistency checks anywhere unless using TCG, because only
1770      * for TCG would a consistency-check failure be a QEMU bug.
1771      */
1772     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1773         no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
1774     }
1775 
1776     if (arm_feature(env, ARM_FEATURE_V7VE)) {
1777         /* v7 Virtualization Extensions. In real hardware this implies
1778          * EL2 and also the presence of the Security Extensions.
1779          * For QEMU, for backwards-compatibility we implement some
1780          * CPUs or CPU configs which have no actual EL2 or EL3 but do
1781          * include the various other features that V7VE implies.
1782          * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
1783          * Security Extensions is ARM_FEATURE_EL3.
1784          */
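             /* V7VE implies the ARM-state divide instructions. */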
1785         assert(!tcg_enabled() || no_aa32 ||
1786                cpu_isar_feature(aa32_arm_div, cpu));
1787         set_feature(env, ARM_FEATURE_LPAE);
1788         set_feature(env, ARM_FEATURE_V7);
1789     }
1790     if (arm_feature(env, ARM_FEATURE_V7)) {
1791         set_feature(env, ARM_FEATURE_VAPA);
1792         set_feature(env, ARM_FEATURE_THUMB2);
1793         set_feature(env, ARM_FEATURE_MPIDR);
1794         if (!arm_feature(env, ARM_FEATURE_M)) {
1795             set_feature(env, ARM_FEATURE_V6K);
1796         } else {
1797             set_feature(env, ARM_FEATURE_V6);
1798         }
1799 
1800         /* Always define VBAR for V7 CPUs even if it doesn't exist in
1801          * non-EL3 configs. This is needed by some legacy boards.
1802          */
1803         set_feature(env, ARM_FEATURE_VBAR);
1804     }
1805     if (arm_feature(env, ARM_FEATURE_V6K)) {
1806         set_feature(env, ARM_FEATURE_V6);
1807         set_feature(env, ARM_FEATURE_MVFR);
1808     }
1809     if (arm_feature(env, ARM_FEATURE_V6)) {
1810         set_feature(env, ARM_FEATURE_V5);
1811         if (!arm_feature(env, ARM_FEATURE_M)) {
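                 /* v6 non-M-profile cores always have (trivial) Jazelle. */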
1812             assert(!tcg_enabled() || no_aa32 ||
1813                    cpu_isar_feature(aa32_jazelle, cpu));
1814             set_feature(env, ARM_FEATURE_AUXCR);
1815         }
1816     }
1817     if (arm_feature(env, ARM_FEATURE_V5)) {
1818         set_feature(env, ARM_FEATURE_V4T);
1819     }
1820     if (arm_feature(env, ARM_FEATURE_LPAE)) {
1821         set_feature(env, ARM_FEATURE_V7MP);
1822     }
1823     if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
1824         set_feature(env, ARM_FEATURE_CBAR);
1825     }
1826     if (arm_feature(env, ARM_FEATURE_THUMB2) &&
1827         !arm_feature(env, ARM_FEATURE_M)) {
1828         set_feature(env, ARM_FEATURE_THUMB_DSP);
1829     }
1830 
1831     /*
1832      * We rely on no XScale CPU having VFP so we can use the same bits in the
1833      * TB flags field for VECSTRIDE and XSCALE_CPAR.
1834      */
1835     assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) ||
1836            !cpu_isar_feature(aa32_vfp_simd, cpu) ||
1837            !arm_feature(env, ARM_FEATURE_XSCALE));
1838 
1839     if (arm_feature(env, ARM_FEATURE_V7) &&
1840         !arm_feature(env, ARM_FEATURE_M) &&
1841         !arm_feature(env, ARM_FEATURE_PMSA)) {
1842         /* v7VMSA drops support for the old ARMv5 tiny pages, so we
1843          * can use 4K pages.
1844          */
1845         pagebits = 12;
1846     } else {
1847         /* For CPUs which might have tiny 1K pages, or which have an
1848          * MPU and might have small region sizes, stick with 1K pages.
1849          */
1850         pagebits = 10;
1851     }
1852     if (!set_preferred_target_page_bits(pagebits)) {
1853     /* This can only ever happen when hotplugging a CPU, or if the
1854      * board code incorrectly creates a CPU type which it promised,
1855      * via minimum_page_size, that it would not create.
1856      */
1857         error_setg(errp, "This CPU requires a smaller page size than the "
1858                    "system is using");
1859         return;
1860     }
1861 
1862     /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
1863      * We don't support setting cluster ID ([16..23]) (known as Aff2
1864      * in later ARM ARM versions), or any of the higher affinity level fields,
1865      * so these bits are always RAZ (read as zero).
1866      */
1867     if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
1868         cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
1869                                                ARM_DEFAULT_CPUS_PER_CLUSTER);
1870     }
1871 
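         /* Hivecs: SCTLR.V selects the high exception vectors. */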
1872     if (cpu->reset_hivecs) {
1873         cpu->reset_sctlr |= SCTLR_V;
1874     }
1875 
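         /*
          * cfgend requests big-endian at reset: SCTLR.EE on v7 and later,
          * the legacy BE-32 SCTLR.B bit on older cores.
          */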
1876     if (cpu->cfgend) {
1877         if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1878             cpu->reset_sctlr |= SCTLR_EE;
1879         } else {
1880             cpu->reset_sctlr |= SCTLR_B;
1881         }
1882     }
1883 
1884     if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
1885         /* If the has_el3 CPU property is disabled then we need to
1886          * disable the EL3 feature.
1887          */
1888         unset_feature(env, ARM_FEATURE_EL3);
1889 
1890         /*
1891          * Disable the security extension feature bits in the processor
1892          * feature registers as well.
1893          */
1894         cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
1895         cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
1896         cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
1897                                            ID_AA64PFR0, EL3, 0);
1898     }
1899 
1900     if (!cpu->has_el2) {
1901         unset_feature(env, ARM_FEATURE_EL2);
1902     }
1903 
1904     if (!cpu->has_pmu) {
1905         unset_feature(env, ARM_FEATURE_PMU);
1906     }
1907     if (arm_feature(env, ARM_FEATURE_PMU)) {
1908         pmu_init(cpu);
1909 
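             /* Under KVM the in-kernel PMU updates the counters itself. */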
1910         if (!kvm_enabled()) {
1911             arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, NULL);
1912             arm_register_el_change_hook(cpu, &pmu_post_el_change, NULL);
1913         }
1914 
1915 #ifndef CONFIG_USER_ONLY
1916         cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
1917                 cpu);
1918 #endif
1919     } else {
1920         cpu->isar.id_aa64dfr0 =
1921             FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
1922         cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
1923         cpu->pmceid0 = 0;
1924         cpu->pmceid1 = 0;
1925     }
1926 
1927     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1928         /*
1929          * Disable the hypervisor feature bits in the processor feature
1930          * registers if we don't have EL2.
1931          */
1932         cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
1933                                            ID_AA64PFR0, EL2, 0);
1934         cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
1935                                        ID_PFR1, VIRTUALIZATION, 0);
1936     }
1937 
1938 #ifndef CONFIG_USER_ONLY
1939     if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
1940         /*
1941          * Disable the MTE feature bits if we do not have tag-memory
1942          * provided by the machine.
1943          */
1944         cpu->isar.id_aa64pfr1 =
1945             FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
1946     }
1947 #endif
1948 
1949     if (tcg_enabled()) {
1950         /*
1951          * Don't report the Statistical Profiling Extension in the ID
1952          * registers, because TCG doesn't implement it yet (not even a
1953          * minimal stub version) and guests will fall over when they
1954          * try to access the non-existent system registers for it.
1955          */
1956         cpu->isar.id_aa64dfr0 =
1957             FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
1958     }
1959 
1960     /* MPU can be configured out of a PMSA CPU either by setting has-mpu
1961      * to false or by setting pmsav7-dregion to 0.
1962      */
1963     if (!cpu->has_mpu) {
1964         cpu->pmsav7_dregion = 0;
1965     }
1966     if (cpu->pmsav7_dregion == 0) {
1967         cpu->has_mpu = false;
1968     }
1969 
1970     if (arm_feature(env, ARM_FEATURE_PMSA) &&
1971         arm_feature(env, ARM_FEATURE_V7)) {
1972         uint32_t nr = cpu->pmsav7_dregion;
1973 
1974         if (nr > 0xff) {
1975             error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
1976             return;
1977         }
1978 
1979         if (nr) {
1980             if (arm_feature(env, ARM_FEATURE_V8)) {
1981                 /* PMSAv8 */
1982                 env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
1983                 env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
1984                 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1985                     env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
1986                     env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
1987                 }
1988             } else {
1989                 env->pmsav7.drbar = g_new0(uint32_t, nr);
1990                 env->pmsav7.drsr = g_new0(uint32_t, nr);
1991                 env->pmsav7.dracr = g_new0(uint32_t, nr);
1992             }
1993         }
1994     }
1995 
1996     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1997         uint32_t nr = cpu->sau_sregion;
1998 
1999         if (nr > 0xff) {
2000             error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
2001             return;
2002         }
2003 
2004         if (nr) {
2005             env->sau.rbar = g_new0(uint32_t, nr);
2006             env->sau.rlar = g_new0(uint32_t, nr);
2007         }
2008     }
2009 
2010     if (arm_feature(env, ARM_FEATURE_EL3)) {
2011         set_feature(env, ARM_FEATURE_VBAR);
2012     }
2013 
2014     register_cp_regs_for_features(cpu);
2015     arm_cpu_register_gdb_regs_for_features(cpu);
2016 
2017     init_cpreg_list(cpu);
2018 
2019 #ifndef CONFIG_USER_ONLY
2020     MachineState *ms = MACHINE(qdev_get_machine());
2021     unsigned int smp_cpus = ms->smp.cpus;
2022     bool has_secure = cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY);
2023 
2024     /*
2025      * We must set cs->num_ases to the final value before
2026      * the first call to cpu_address_space_init.
2027      */
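         /* Tag memory occupies the extra TagNS (and TagS) address spaces. */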
2028     if (cpu->tag_memory != NULL) {
2029         cs->num_ases = 3 + has_secure;
2030     } else {
2031         cs->num_ases = 1 + has_secure;
2032     }
2033 
2034     if (has_secure) {
2035         if (!cpu->secure_memory) {
2036             cpu->secure_memory = cs->memory;
2037         }
2038         cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
2039                                cpu->secure_memory);
2040     }
2041 
2042     if (cpu->tag_memory != NULL) {
2043         cpu_address_space_init(cs, ARMASIdx_TagNS, "cpu-tag-memory",
2044                                cpu->tag_memory);
2045         if (has_secure) {
2046             cpu_address_space_init(cs, ARMASIdx_TagS, "cpu-tag-memory",
2047                                    cpu->secure_tag_memory);
2048         }
2049     }
2050 
2051     cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
2052 
2053     /* No core_count specified; default to smp_cpus. */
2054     if (cpu->core_count == -1) {
2055         cpu->core_count = smp_cpus;
2056     }
2057 #endif
2058 
2059     if (tcg_enabled()) {
2060         int dcz_blocklen = 4 << cpu->dcz_blocksize;
2061 
2062         /*
2063          * We only support DCZ blocklen that fits on one page.
2064          *
2065          * Architecturally this is always true.  However, TARGET_PAGE_SIZE
2066          * is variable and, for compatibility with -machine virt-2.7,
2067          * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
2068          * But even then, while the largest architectural DCZ blocklen
2069          * is 2KiB, no CPU actually uses such a large blocklen.
2070          */
2071         assert(dcz_blocklen <= TARGET_PAGE_SIZE);
2072 
2073         /*
2074          * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
2075          * both nibbles of each byte storing tag data may be written at once.
2076          * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
2077          */
2078         if (cpu_isar_feature(aa64_mte, cpu)) {
2079             assert(dcz_blocklen >= 2 * TAG_GRANULE);
2080         }
2081     }
2082 
2083     qemu_init_vcpu(cs);
2084     cpu_reset(cs);
2085 
2086     acc->parent_realize(dev, errp);
2087 }
2088 
2089 static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
2090 {
2091     ObjectClass *oc;
2092     char *typename;
2093     char **cpuname;
2094     const char *cpunamestr;
2095 
2096     cpuname = g_strsplit(cpu_model, ",", 1);
2097     cpunamestr = cpuname[0];
2098 #ifdef CONFIG_USER_ONLY
2099     /* For backwards compatibility, usermode emulation allows "-cpu any",
2100      * which has the same semantics as "-cpu max".
2101      */
2102     if (!strcmp(cpunamestr, "any")) {
2103         cpunamestr = "max";
2104     }
2105 #endif
2106     typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
2107     oc = object_class_by_name(typename);
2108     g_strfreev(cpuname);
2109     g_free(typename);
2110     if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
2111         object_class_is_abstract(oc)) {
2112         return NULL;
2113     }
2114     return oc;
2115 }
2116 
2117 static Property arm_cpu_properties[] = {
2118     DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
2119     DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
2120                         mp_affinity, ARM64_AFFINITY_INVALID),
2121     DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
2122     DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
2123     DEFINE_PROP_END_OF_LIST()
2124 };
2125 
2126 static gchar *arm_gdb_arch_name(CPUState *cs)
2127 {
2128     ARMCPU *cpu = ARM_CPU(cs);
2129     CPUARMState *env = &cpu->env;
2130 
2131     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2132         return g_strdup("iwmmxt");
2133     }
2134     return g_strdup("arm");
2135 }
2136 
2137 #ifndef CONFIG_USER_ONLY
2138 #include "hw/core/sysemu-cpu-ops.h"
2139 
2140 static const struct SysemuCPUOps arm_sysemu_ops = {
2141     .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
2142     .asidx_from_attrs = arm_asidx_from_attrs,
2143     .write_elf32_note = arm_cpu_write_elf32_note,
2144     .write_elf64_note = arm_cpu_write_elf64_note,
2145     .virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
2146     .legacy_vmsd = &vmstate_arm_cpu,
2147 };
2148 #endif
2149 
2150 #ifdef CONFIG_TCG
2151 static const struct TCGCPUOps arm_tcg_ops = {
2152     .initialize = arm_translate_init,
2153     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
2154     .debug_excp_handler = arm_debug_excp_handler,
2155 
2156 #ifdef CONFIG_USER_ONLY
2157     .record_sigsegv = arm_cpu_record_sigsegv,
2158     .record_sigbus = arm_cpu_record_sigbus,
2159 #else
2160     .tlb_fill = arm_cpu_tlb_fill,
2161     .cpu_exec_interrupt = arm_cpu_exec_interrupt,
2162     .do_interrupt = arm_cpu_do_interrupt,
2163     .do_transaction_failed = arm_cpu_do_transaction_failed,
2164     .do_unaligned_access = arm_cpu_do_unaligned_access,
2165     .adjust_watchpoint_address = arm_adjust_watchpoint_address,
2166     .debug_check_watchpoint = arm_debug_check_watchpoint,
2167     .debug_check_breakpoint = arm_debug_check_breakpoint,
2168 #endif /* !CONFIG_USER_ONLY */
2169 };
2170 #endif /* CONFIG_TCG */
2171 
2172 static void arm_cpu_class_init(ObjectClass *oc, void *data)
2173 {
2174     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2175     CPUClass *cc = CPU_CLASS(acc);
2176     DeviceClass *dc = DEVICE_CLASS(oc);
2177 
2178     device_class_set_parent_realize(dc, arm_cpu_realizefn,
2179                                     &acc->parent_realize);
2180 
2181     device_class_set_props(dc, arm_cpu_properties);
2182     device_class_set_parent_reset(dc, arm_cpu_reset, &acc->parent_reset);
2183 
2184     cc->class_by_name = arm_cpu_class_by_name;
2185     cc->has_work = arm_cpu_has_work;
2186     cc->dump_state = arm_cpu_dump_state;
2187     cc->set_pc = arm_cpu_set_pc;
2188     cc->get_pc = arm_cpu_get_pc;
2189     cc->gdb_read_register = arm_cpu_gdb_read_register;
2190     cc->gdb_write_register = arm_cpu_gdb_write_register;
2191 #ifndef CONFIG_USER_ONLY
2192     cc->sysemu_ops = &arm_sysemu_ops;
2193 #endif
2194     cc->gdb_num_core_regs = 26;
2195     cc->gdb_core_xml_file = "arm-core.xml";
2196     cc->gdb_arch_name = arm_gdb_arch_name;
2197     cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
2198     cc->gdb_stop_before_watchpoint = true;
2199     cc->disas_set_info = arm_disas_set_info;
2200 
2201 #ifdef CONFIG_TCG
2202     cc->tcg_ops = &arm_tcg_ops;
2203 #endif /* CONFIG_TCG */
2204 }
2205 
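     /* Run the model-specific initfn, then the common post-init code. */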
2206 static void arm_cpu_instance_init(Object *obj)
2207 {
2208     ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
2209 
2210     acc->info->initfn(obj);
2211     arm_cpu_post_init(obj);
2212 }
2213 
2214 static void cpu_register_class_init(ObjectClass *oc, void *data)
2215 {
2216     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2217 
2218     acc->info = data;
2219 }
2220 
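     /*
      * Register a QOM type for one CPU model.  Each entry becomes a
      * subclass of TYPE_ARM_CPU named "<model>-arm-cpu"; a (purely
      * illustrative) entry such as
      *
      *     static const ARMCPUInfo illustrative_cpus[] = {
      *         { .name = "cortex-a15", .initfn = cortex_a15_initfn },
      *     };
      *
      * would register the type "cortex-a15-arm-cpu".
      */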
2221 void arm_cpu_register(const ARMCPUInfo *info)
2222 {
2223     TypeInfo type_info = {
2224         .parent = TYPE_ARM_CPU,
2225         .instance_size = sizeof(ARMCPU),
2226         .instance_align = __alignof__(ARMCPU),
2227         .instance_init = arm_cpu_instance_init,
2228         .class_size = sizeof(ARMCPUClass),
2229         .class_init = info->class_init ?: cpu_register_class_init,
2230         .class_data = (void *)info,
2231     };
2232 
2233     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
2234     type_register(&type_info);
2235     g_free((void *)type_info.name);
2236 }
2237 
2238 static const TypeInfo arm_cpu_type_info = {
2239     .name = TYPE_ARM_CPU,
2240     .parent = TYPE_CPU,
2241     .instance_size = sizeof(ARMCPU),
2242     .instance_align = __alignof__(ARMCPU),
2243     .instance_init = arm_cpu_initfn,
2244     .instance_finalize = arm_cpu_finalizefn,
2245     .abstract = true,
2246     .class_size = sizeof(ARMCPUClass),
2247     .class_init = arm_cpu_class_init,
2248 };
2249 
2250 static void arm_cpu_register_types(void)
2251 {
2252     type_register_static(&arm_cpu_type_info);
2253 }
2254 
2255 type_init(arm_cpu_register_types)
2256