xref: /openbmc/qemu/target/arm/cpu.c (revision 200280af0e19bfaeb9431eb0ee1ee2d8bf8d3a0a)
1 /*
2  * QEMU ARM CPU
3  *
4  * Copyright (c) 2012 SUSE LINUX Products GmbH
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "target/arm/idau.h"
23 #include "qemu/error-report.h"
24 #include "qapi/error.h"
25 #include "cpu.h"
26 #include "internals.h"
27 #include "qemu-common.h"
28 #include "exec/exec-all.h"
29 #include "hw/qdev-properties.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #include "hw/loader.h"
32 #endif
33 #include "hw/arm/arm.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/hw_accel.h"
36 #include "kvm_arm.h"
37 #include "disas/capstone.h"
38 #include "fpu/softfloat.h"
39 
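/*
 * Set the PC (used as the generic set_pc hook, e.g. by the gdbstub and
 * loaders). In AArch32 the low bit of the address selects Thumb state,
 * following the usual interworking convention, and is stripped before the
 * value reaches r15; in AArch64 the value is written to the PC unchanged.
 */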
40 static void arm_cpu_set_pc(CPUState *cs, vaddr value)
41 {
42     ARMCPU *cpu = ARM_CPU(cs);
43     CPUARMState *env = &cpu->env;
44 
45     if (is_a64(env)) {
46         env->pc = value;
47         env->thumb = 0;
48     } else {
49         env->regs[15] = value & ~1;
50         env->thumb = value & 1;
51     }
52 }
53 
54 static void arm_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
55 {
56     ARMCPU *cpu = ARM_CPU(cs);
57     CPUARMState *env = &cpu->env;
58 
59     /*
60      * It's OK to look at env for the current mode here, because it's
61      * never possible for an AArch64 TB to chain to an AArch32 TB.
62      */
63     if (is_a64(env)) {
64         env->pc = tb->pc;
65     } else {
66         env->regs[15] = tb->pc;
67     }
68 }
69 
70 static bool arm_cpu_has_work(CPUState *cs)
71 {
72     ARMCPU *cpu = ARM_CPU(cs);
73 
74     return (cpu->power_state != PSCI_OFF)
75         && cs->interrupt_request &
76         (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
77          | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
78          | CPU_INTERRUPT_EXITTB);
79 }
80 
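/*
 * The two hook lists below let other code observe exception level changes;
 * the PMU hooks registered from arm_cpu_realizefn() are one user. Entries
 * are heap-allocated here and released in arm_cpu_finalizefn().
 */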
81 void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
82                                  void *opaque)
83 {
84     ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);
85 
86     entry->hook = hook;
87     entry->opaque = opaque;
88 
89     QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
90 }
91 
92 void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
93                                  void *opaque)
94 {
95     ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);
96 
97     entry->hook = hook;
98     entry->opaque = opaque;
99 
100     QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
101 }
102 
103 static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
104 {
105     /* Reset a single ARMCPRegInfo register */
106     ARMCPRegInfo *ri = value;
107     ARMCPU *cpu = opaque;
108 
109     if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
110         return;
111     }
112 
113     if (ri->resetfn) {
114         ri->resetfn(&cpu->env, ri);
115         return;
116     }
117 
118     /* A zero offset is never possible as it would be regs[0]
119      * so we use it to indicate that reset is being handled elsewhere.
120      * This is basically only used for fields in non-core coprocessors
121      * (like the pxa2xx ones).
122      */
123     if (!ri->fieldoffset) {
124         return;
125     }
126 
127     if (cpreg_field_is_64bit(ri)) {
128         CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
129     } else {
130         CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
131     }
132 }
133 
134 static void cp_reg_check_reset(gpointer key, gpointer value,  gpointer opaque)
135 {
136     /* Purely an assertion check: we've already done reset once,
137      * so now check that running the reset for the cpreg doesn't
138      * change its value. This traps bugs where two different cpregs
139      * both try to reset the same state field but to different values.
140      */
141     ARMCPRegInfo *ri = value;
142     ARMCPU *cpu = opaque;
143     uint64_t oldvalue, newvalue;
144 
145     if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
146         return;
147     }
148 
149     oldvalue = read_raw_cp_reg(&cpu->env, ri);
150     cp_reg_reset(key, value, opaque);
151     newvalue = read_raw_cp_reg(&cpu->env, ri);
152     assert(oldvalue == newvalue);
153 }
154 
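/*
 * Note on the reset sequence below: the memset() clears CPUARMState only up
 * to end_reset_fields, so configuration placed after that marker in cpu.h
 * (such as the feature bits) deliberately survives reset. The cp_regs hash
 * table is then walked twice: once to reset every coprocessor register and
 * once more, in cp_reg_check_reset(), purely as an idempotency assertion.
 */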
155 /* CPUClass::reset() */
156 static void arm_cpu_reset(CPUState *s)
157 {
158     ARMCPU *cpu = ARM_CPU(s);
159     ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
160     CPUARMState *env = &cpu->env;
161 
162     acc->parent_reset(s);
163 
164     memset(env, 0, offsetof(CPUARMState, end_reset_fields));
165 
166     g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
167     g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
168 
169     env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
170     env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
171     env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
172     env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;
173 
174     cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
175     s->halted = cpu->start_powered_off;
176 
177     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
178         env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
179     }
180 
181     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
182         /* 64 bit CPUs always start in 64 bit mode */
183         env->aarch64 = 1;
184 #if defined(CONFIG_USER_ONLY)
185         env->pstate = PSTATE_MODE_EL0t;
186         /* Userspace expects access to DC ZVA, CTR_EL0 and the cache ops */
187         env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
188         /* Enable all PAC keys.  */
189         env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
190                                   SCTLR_EnDA | SCTLR_EnDB);
191         /* Enable all PAC instructions */
192         env->cp15.hcr_el2 |= HCR_API;
193         env->cp15.scr_el3 |= SCR_API;
194         /* and to the FP/Neon instructions */
195         env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
196         /* and to the SVE instructions */
197         env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
198         env->cp15.cptr_el[3] |= CPTR_EZ;
199         /* with maximum vector length */
200         env->vfp.zcr_el[1] = cpu->sve_max_vq - 1;
201         env->vfp.zcr_el[2] = env->vfp.zcr_el[1];
202         env->vfp.zcr_el[3] = env->vfp.zcr_el[1];
203 #else
204         /* Reset into the highest available EL */
205         if (arm_feature(env, ARM_FEATURE_EL3)) {
206             env->pstate = PSTATE_MODE_EL3h;
207         } else if (arm_feature(env, ARM_FEATURE_EL2)) {
208             env->pstate = PSTATE_MODE_EL2h;
209         } else {
210             env->pstate = PSTATE_MODE_EL1h;
211         }
212         env->pc = cpu->rvbar;
213 #endif
214     } else {
215 #if defined(CONFIG_USER_ONLY)
216         /* Userspace expects access to cp10 and cp11 for FP/Neon */
217         env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
218 #endif
219     }
220 
221 #if defined(CONFIG_USER_ONLY)
222     env->uncached_cpsr = ARM_CPU_MODE_USR;
223     /* For user mode we must enable access to coprocessors */
224     env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
225     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
226         env->cp15.c15_cpar = 3;
227     } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
228         env->cp15.c15_cpar = 1;
229     }
230 #else
231 
232     /*
233      * If the highest available EL is EL2, AArch32 will start in Hyp
234      * mode; otherwise it starts in SVC. Note that if we start in
235      * AArch64 then these values in the uncached_cpsr will be ignored.
236      */
237     if (arm_feature(env, ARM_FEATURE_EL2) &&
238         !arm_feature(env, ARM_FEATURE_EL3)) {
239         env->uncached_cpsr = ARM_CPU_MODE_HYP;
240     } else {
241         env->uncached_cpsr = ARM_CPU_MODE_SVC;
242     }
243     env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
244 
245     if (arm_feature(env, ARM_FEATURE_M)) {
246         uint32_t initial_msp; /* Loaded from 0x0 */
247         uint32_t initial_pc; /* Loaded from 0x4 */
248         uint8_t *rom;
249         uint32_t vecbase;
250 
251         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
252             env->v7m.secure = true;
253         } else {
254             /* This bit resets to 0 if security is supported, but 1 if
255              * it is not. The bit is not present in v7M, but we set it
256              * here so we can avoid having to make checks on it conditional
257              * on ARM_FEATURE_V8 (we don't let the guest see the bit).
258              */
259             env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
260         }
261 
262         /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
263          * that it resets to 1, so QEMU always does that rather than making
264          * it dependent on CPU model. In v8M it is RES1.
265          */
266         env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
267         env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
268         if (arm_feature(env, ARM_FEATURE_V8)) {
269             /* in v8M the NONBASETHRDENA bit [0] is RES1 */
270             env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
271             env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
272         }
273         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
274             env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
275             env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
276         }
277 
278         /* Unlike A/R profile, M profile defines the reset LR value */
279         env->regs[14] = 0xffffffff;
280 
281         env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
282 
283         /* Load the initial SP and PC from offset 0 and 4 in the vector table */
284         vecbase = env->v7m.vecbase[env->v7m.secure];
285         rom = rom_ptr(vecbase, 8);
286         if (rom) {
287             /* Address zero is covered by ROM which hasn't yet been
288              * copied into physical memory.
289              */
290             initial_msp = ldl_p(rom);
291             initial_pc = ldl_p(rom + 4);
292         } else {
293             /* Address zero not covered by a ROM blob, or the ROM blob
294              * is in non-modifiable memory and this is a second reset after
295              * it got copied into memory. In the latter case, rom_ptr
296              * will return a NULL pointer and we should use ldl_phys instead.
297              */
298             initial_msp = ldl_phys(s->as, vecbase);
299             initial_pc = ldl_phys(s->as, vecbase + 4);
300         }
301 
302         env->regs[13] = initial_msp & 0xFFFFFFFC;
303         env->regs[15] = initial_pc & ~1;
304         env->thumb = initial_pc & 1;
305     }
306 
307     /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
308      * executing as AArch32 then check if highvecs are enabled and
309      * adjust the PC accordingly.
310      */
311     if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
312         env->regs[15] = 0xFFFF0000;
313     }
314 
315     /* M profile requires that reset clears the exclusive monitor;
316      * A profile does not, but clearing it makes more sense than having it
317      * set with an exclusive access on address zero.
318      */
319     arm_clear_exclusive(env);
320 
321     env->vfp.xregs[ARM_VFP_FPEXC] = 0;
322 #endif
323 
324     if (arm_feature(env, ARM_FEATURE_PMSA)) {
325         if (cpu->pmsav7_dregion > 0) {
326             if (arm_feature(env, ARM_FEATURE_V8)) {
327                 memset(env->pmsav8.rbar[M_REG_NS], 0,
328                        sizeof(*env->pmsav8.rbar[M_REG_NS])
329                        * cpu->pmsav7_dregion);
330                 memset(env->pmsav8.rlar[M_REG_NS], 0,
331                        sizeof(*env->pmsav8.rlar[M_REG_NS])
332                        * cpu->pmsav7_dregion);
333                 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
334                     memset(env->pmsav8.rbar[M_REG_S], 0,
335                            sizeof(*env->pmsav8.rbar[M_REG_S])
336                            * cpu->pmsav7_dregion);
337                     memset(env->pmsav8.rlar[M_REG_S], 0,
338                            sizeof(*env->pmsav8.rlar[M_REG_S])
339                            * cpu->pmsav7_dregion);
340                 }
341             } else if (arm_feature(env, ARM_FEATURE_V7)) {
342                 memset(env->pmsav7.drbar, 0,
343                        sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
344                 memset(env->pmsav7.drsr, 0,
345                        sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
346                 memset(env->pmsav7.dracr, 0,
347                        sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
348             }
349         }
350         env->pmsav7.rnr[M_REG_NS] = 0;
351         env->pmsav7.rnr[M_REG_S] = 0;
352         env->pmsav8.mair0[M_REG_NS] = 0;
353         env->pmsav8.mair0[M_REG_S] = 0;
354         env->pmsav8.mair1[M_REG_NS] = 0;
355         env->pmsav8.mair1[M_REG_S] = 0;
356     }
357 
358     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
359         if (cpu->sau_sregion > 0) {
360             memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
361             memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
362         }
363         env->sau.rnr = 0;
364         /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
365          * the Cortex-M33 does.
366          */
367         env->sau.ctrl = 0;
368     }
369 
370     set_flush_to_zero(1, &env->vfp.standard_fp_status);
371     set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
372     set_default_nan_mode(1, &env->vfp.standard_fp_status);
373     set_float_detect_tininess(float_tininess_before_rounding,
374                               &env->vfp.fp_status);
375     set_float_detect_tininess(float_tininess_before_rounding,
376                               &env->vfp.standard_fp_status);
377     set_float_detect_tininess(float_tininess_before_rounding,
378                               &env->vfp.fp_status_f16);
379 #ifndef CONFIG_USER_ONLY
380     if (kvm_enabled()) {
381         kvm_arm_reset_vcpu(cpu);
382     }
383 #endif
384 
385     hw_breakpoint_update_all(cpu);
386     hw_watchpoint_update_all(cpu);
387 }
388 
389 bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
390 {
391     CPUClass *cc = CPU_GET_CLASS(cs);
392     CPUARMState *env = cs->env_ptr;
393     uint32_t cur_el = arm_current_el(env);
394     bool secure = arm_is_secure(env);
395     uint32_t target_el;
396     uint32_t excp_idx;
397     bool ret = false;
398 
399     if (interrupt_request & CPU_INTERRUPT_FIQ) {
400         excp_idx = EXCP_FIQ;
401         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
402         if (arm_excp_unmasked(cs, excp_idx, target_el)) {
403             cs->exception_index = excp_idx;
404             env->exception.target_el = target_el;
405             cc->do_interrupt(cs);
406             ret = true;
407         }
408     }
409     if (interrupt_request & CPU_INTERRUPT_HARD) {
410         excp_idx = EXCP_IRQ;
411         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
412         if (arm_excp_unmasked(cs, excp_idx, target_el)) {
413             cs->exception_index = excp_idx;
414             env->exception.target_el = target_el;
415             cc->do_interrupt(cs);
416             ret = true;
417         }
418     }
419     if (interrupt_request & CPU_INTERRUPT_VIRQ) {
420         excp_idx = EXCP_VIRQ;
421         target_el = 1;
422         if (arm_excp_unmasked(cs, excp_idx, target_el)) {
423             cs->exception_index = excp_idx;
424             env->exception.target_el = target_el;
425             cc->do_interrupt(cs);
426             ret = true;
427         }
428     }
429     if (interrupt_request & CPU_INTERRUPT_VFIQ) {
430         excp_idx = EXCP_VFIQ;
431         target_el = 1;
432         if (arm_excp_unmasked(cs, excp_idx, target_el)) {
433             cs->exception_index = excp_idx;
434             env->exception.target_el = target_el;
435             cc->do_interrupt(cs);
436             ret = true;
437         }
438     }
439 
440     return ret;
441 }
442 
443 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
444 static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
445 {
446     CPUClass *cc = CPU_GET_CLASS(cs);
447     ARMCPU *cpu = ARM_CPU(cs);
448     CPUARMState *env = &cpu->env;
449     bool ret = false;
450 
451     /* ARMv7-M interrupt masking works differently from the A and R profiles.
452      * There is no FIQ/IRQ distinction. Instead of I and F bits
453      * masking FIQ and IRQ interrupts, an exception is taken only
454      * if it is higher priority than the current execution priority
455      * (which depends on state like BASEPRI, FAULTMASK and the
456      * currently active exception).
457      */
458     if (interrupt_request & CPU_INTERRUPT_HARD
459         && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
460         cs->exception_index = EXCP_IRQ;
461         cc->do_interrupt(cs);
462         ret = true;
463     }
464     return ret;
465 }
466 #endif
467 
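/*
 * The VIRQ/VFIQ level is recomputed from arm_cpu_set_irq() below when the
 * GIC drives the virtual interrupt lines; the HCR_EL2 write paths (in the
 * cp register handlers outside this file) are also expected to call these
 * helpers so that a hypervisor setting HCR_EL2.VI/VF raises the interrupt.
 */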
468 void arm_cpu_update_virq(ARMCPU *cpu)
469 {
470     /*
471      * Update the interrupt level for VIRQ, which is the logical OR of
472      * the HCR_EL2.VI bit and the input line level from the GIC.
473      */
474     CPUARMState *env = &cpu->env;
475     CPUState *cs = CPU(cpu);
476 
477     bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
478         (env->irq_line_state & CPU_INTERRUPT_VIRQ);
479 
480     if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
481         if (new_state) {
482             cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
483         } else {
484             cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
485         }
486     }
487 }
488 
489 void arm_cpu_update_vfiq(ARMCPU *cpu)
490 {
491     /*
492      * Update the interrupt level for VFIQ, which is the logical OR of
493      * the HCR_EL2.VF bit and the input line level from the GIC.
494      */
495     CPUARMState *env = &cpu->env;
496     CPUState *cs = CPU(cpu);
497 
498     bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
499         (env->irq_line_state & CPU_INTERRUPT_VFIQ);
500 
501     if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
502         if (new_state) {
503             cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
504         } else {
505             cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
506         }
507     }
508 }
509 
510 #ifndef CONFIG_USER_ONLY
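/*
 * Handler for the CPU's four inbound interrupt lines (IRQ, FIQ, VIRQ, VFIQ)
 * created in arm_cpu_initfn() with qdev_init_gpio_in(). A board or GIC
 * model would typically wire them up along the lines of this hypothetical
 * sketch (names assumed for illustration):
 *
 *     sysbus_connect_irq(SYS_BUS_DEVICE(gicdev), n,
 *                        qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ));
 *
 * VIRQ and VFIQ are only meaningful when the CPU has EL2, which the asserts
 * below enforce.
 */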
511 static void arm_cpu_set_irq(void *opaque, int irq, int level)
512 {
513     ARMCPU *cpu = opaque;
514     CPUARMState *env = &cpu->env;
515     CPUState *cs = CPU(cpu);
516     static const int mask[] = {
517         [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
518         [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
519         [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
520         [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
521     };
522 
523     if (level) {
524         env->irq_line_state |= mask[irq];
525     } else {
526         env->irq_line_state &= ~mask[irq];
527     }
528 
529     switch (irq) {
530     case ARM_CPU_VIRQ:
531         assert(arm_feature(env, ARM_FEATURE_EL2));
532         arm_cpu_update_virq(cpu);
533         break;
534     case ARM_CPU_VFIQ:
535         assert(arm_feature(env, ARM_FEATURE_EL2));
536         arm_cpu_update_vfiq(cpu);
537         break;
538     case ARM_CPU_IRQ:
539     case ARM_CPU_FIQ:
540         if (level) {
541             cpu_interrupt(cs, mask[irq]);
542         } else {
543             cpu_reset_interrupt(cs, mask[irq]);
544         }
545         break;
546     default:
547         g_assert_not_reached();
548     }
549 }
550 
551 static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
552 {
553 #ifdef CONFIG_KVM
554     ARMCPU *cpu = opaque;
555     CPUARMState *env = &cpu->env;
556     CPUState *cs = CPU(cpu);
557     int kvm_irq = KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT;
558     uint32_t linestate_bit;
559 
560     switch (irq) {
561     case ARM_CPU_IRQ:
562         kvm_irq |= KVM_ARM_IRQ_CPU_IRQ;
563         linestate_bit = CPU_INTERRUPT_HARD;
564         break;
565     case ARM_CPU_FIQ:
566         kvm_irq |= KVM_ARM_IRQ_CPU_FIQ;
567         linestate_bit = CPU_INTERRUPT_FIQ;
568         break;
569     default:
570         g_assert_not_reached();
571     }
572 
573     if (level) {
574         env->irq_line_state |= linestate_bit;
575     } else {
576         env->irq_line_state &= ~linestate_bit;
577     }
578 
579     kvm_irq |= cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT;
580     kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
581 #endif
582 }
583 
584 static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
585 {
586     ARMCPU *cpu = ARM_CPU(cs);
587     CPUARMState *env = &cpu->env;
588 
589     cpu_synchronize_state(cs);
590     return arm_cpu_data_is_big_endian(env);
591 }
592 
593 #endif
594 
595 static inline void set_feature(CPUARMState *env, int feature)
596 {
597     env->features |= 1ULL << feature;
598 }
599 
600 static inline void unset_feature(CPUARMState *env, int feature)
601 {
602     env->features &= ~(1ULL << feature);
603 }
604 
605 static int
606 print_insn_thumb1(bfd_vma pc, disassemble_info *info)
607 {
608     return print_insn_arm(pc | 1, info);
609 }
610 
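/*
 * Configure disassembly for the current state: pick the bfd print_insn
 * fallback and the Capstone arch/mode/instruction-size fields for A64, A32
 * or Thumb, then fix up the reported endianness, including the BE32 case
 * signalled via INSN_ARM_BE32.
 */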
611 static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
612 {
613     ARMCPU *ac = ARM_CPU(cpu);
614     CPUARMState *env = &ac->env;
615     bool sctlr_b;
616 
617     if (is_a64(env)) {
618         /* We might not be compiled with the A64 disassembler
619          * because it needs a C++ compiler. Leave print_insn
620          * unset in this case to use the caller default behaviour.
621          */
622 #if defined(CONFIG_ARM_A64_DIS)
623         info->print_insn = print_insn_arm_a64;
624 #endif
625         info->cap_arch = CS_ARCH_ARM64;
626         info->cap_insn_unit = 4;
627         info->cap_insn_split = 4;
628     } else {
629         int cap_mode;
630         if (env->thumb) {
631             info->print_insn = print_insn_thumb1;
632             info->cap_insn_unit = 2;
633             info->cap_insn_split = 4;
634             cap_mode = CS_MODE_THUMB;
635         } else {
636             info->print_insn = print_insn_arm;
637             info->cap_insn_unit = 4;
638             info->cap_insn_split = 4;
639             cap_mode = CS_MODE_ARM;
640         }
641         if (arm_feature(env, ARM_FEATURE_V8)) {
642             cap_mode |= CS_MODE_V8;
643         }
644         if (arm_feature(env, ARM_FEATURE_M)) {
645             cap_mode |= CS_MODE_MCLASS;
646         }
647         info->cap_arch = CS_ARCH_ARM;
648         info->cap_mode = cap_mode;
649     }
650 
651     sctlr_b = arm_sctlr_b(env);
652     if (bswap_code(sctlr_b)) {
653 #ifdef TARGET_WORDS_BIGENDIAN
654         info->endian = BFD_ENDIAN_LITTLE;
655 #else
656         info->endian = BFD_ENDIAN_BIG;
657 #endif
658     }
659     info->flags &= ~INSN_ARM_BE32;
660 #ifndef CONFIG_USER_ONLY
661     if (sctlr_b) {
662         info->flags |= INSN_ARM_BE32;
663     }
664 #endif
665 }
666 
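/*
 * Build an MPIDR-style affinity value from a linear CPU index: Aff0 is the
 * position within a cluster and Aff1 the cluster number. For example, with
 * a cluster size of 8, index 10 yields Aff1 = 1, Aff0 = 2.
 */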
667 uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
668 {
669     uint32_t Aff1 = idx / clustersz;
670     uint32_t Aff0 = idx % clustersz;
671     return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
672 }
673 
674 static void cpreg_hashtable_data_destroy(gpointer data)
675 {
676     /*
677      * Destroy function for cpu->cp_regs hashtable data entries.
678      * We must free the name string because it was g_strdup()ed in
679      * add_cpreg_to_hashtable(). It's OK to cast away the 'const'
680      * from r->name because we know we definitely allocated it.
681      */
682     ARMCPRegInfo *r = data;
683 
684     g_free((void *)r->name);
685     g_free(r);
686 }
687 
688 static void arm_cpu_initfn(Object *obj)
689 {
690     CPUState *cs = CPU(obj);
691     ARMCPU *cpu = ARM_CPU(obj);
692 
693     cs->env_ptr = &cpu->env;
694     cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
695                                          g_free, cpreg_hashtable_data_destroy);
696 
697     QLIST_INIT(&cpu->pre_el_change_hooks);
698     QLIST_INIT(&cpu->el_change_hooks);
699 
700 #ifndef CONFIG_USER_ONLY
701     /* Our inbound IRQ and FIQ lines */
702     if (kvm_enabled()) {
703         /* VIRQ and VFIQ are unused with KVM but we add them to maintain
704          * the same interface as non-KVM CPUs.
705          */
706         qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
707     } else {
708         qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
709     }
710 
711     qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
712                        ARRAY_SIZE(cpu->gt_timer_outputs));
713 
714     qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
715                              "gicv3-maintenance-interrupt", 1);
716     qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
717                              "pmu-interrupt", 1);
718 #endif
719 
720     /* DTB consumers generally don't in fact care what the 'compatible'
721      * string is, so always provide some string and trust that a hypothetical
722      * picky DTB consumer will also provide a helpful error message.
723      */
724     cpu->dtb_compatible = "qemu,unknown";
725     cpu->psci_version = 1; /* By default assume PSCI v0.1 */
726     cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
727 
728     if (tcg_enabled()) {
729         cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
730     }
731 }
732 
733 static Property arm_cpu_reset_cbar_property =
734             DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
735 
736 static Property arm_cpu_reset_hivecs_property =
737             DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
738 
739 static Property arm_cpu_rvbar_property =
740             DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
741 
742 static Property arm_cpu_has_el2_property =
743             DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
744 
745 static Property arm_cpu_has_el3_property =
746             DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
747 
748 static Property arm_cpu_cfgend_property =
749             DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
750 
751 /* use property name "pmu" to match other archs and virt tools */
752 static Property arm_cpu_has_pmu_property =
753             DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
754 
755 static Property arm_cpu_has_mpu_property =
756             DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
757 
758 /* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
759  * because the CPU initfn will have already set cpu->pmsav7_dregion to
760  * the right value for that particular CPU type, and we don't want
761  * to override that with an incorrect constant value.
762  */
763 static Property arm_cpu_pmsav7_dregion_property =
764             DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
765                                            pmsav7_dregion,
766                                            qdev_prop_uint32, uint32_t);
767 
768 /* M profile: initial value of the Secure VTOR */
769 static Property arm_cpu_initsvtor_property =
770             DEFINE_PROP_UINT32("init-svtor", ARMCPU, init_svtor, 0);
771 
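/*
 * Add the optional, feature-dependent QOM properties. Board or machine code
 * can then adjust them before realize; a minimal, purely illustrative
 * example for a CPU that should not expose EL3 might look like:
 *
 *     qdev_prop_set_bit(DEVICE(cpu), "has_el3", false);
 *
 * followed by realizing the CPU as usual.
 */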
772 void arm_cpu_post_init(Object *obj)
773 {
774     ARMCPU *cpu = ARM_CPU(obj);
775 
776     /* M profile implies PMSA. We have to do this here rather than
777      * in realize with the other feature-implication checks because
778      * we look at the PMSA bit to see if we should add some properties.
779      */
780     if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
781         set_feature(&cpu->env, ARM_FEATURE_PMSA);
782     }
783 
784     if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
785         arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
786         qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
787                                  &error_abort);
788     }
789 
790     if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
791         qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
792                                  &error_abort);
793     }
794 
795     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
796         qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property,
797                                  &error_abort);
798     }
799 
800     if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
801         /* Add the has_el3 state CPU property only if EL3 is allowed.  This will
802          * prevent "has_el3" from existing on CPUs which cannot support EL3.
803          */
804         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
805                                  &error_abort);
806 
807 #ifndef CONFIG_USER_ONLY
808         object_property_add_link(obj, "secure-memory",
809                                  TYPE_MEMORY_REGION,
810                                  (Object **)&cpu->secure_memory,
811                                  qdev_prop_allow_set_link_before_realize,
812                                  OBJ_PROP_LINK_STRONG,
813                                  &error_abort);
814 #endif
815     }
816 
817     if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
818         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property,
819                                  &error_abort);
820     }
821 
822     if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
823         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
824                                  &error_abort);
825     }
826 
827     if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
828         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
829                                  &error_abort);
830         if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
831             qdev_property_add_static(DEVICE(obj),
832                                      &arm_cpu_pmsav7_dregion_property,
833                                      &error_abort);
834         }
835     }
836 
837     if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
838         object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
839                                  qdev_prop_allow_set_link_before_realize,
840                                  OBJ_PROP_LINK_STRONG,
841                                  &error_abort);
842         qdev_property_add_static(DEVICE(obj), &arm_cpu_initsvtor_property,
843                                  &error_abort);
844     }
845 
846     qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property,
847                              &error_abort);
848 }
849 
850 static void arm_cpu_finalizefn(Object *obj)
851 {
852     ARMCPU *cpu = ARM_CPU(obj);
853     ARMELChangeHook *hook, *next;
854 
855     g_hash_table_destroy(cpu->cp_regs);
856 
857     QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
858         QLIST_REMOVE(hook, node);
859         g_free(hook);
860     }
861     QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
862         QLIST_REMOVE(hook, node);
863         g_free(hook);
864     }
865 #ifndef CONFIG_USER_ONLY
866     if (cpu->pmu_timer) {
867         timer_del(cpu->pmu_timer);
868         timer_deinit(cpu->pmu_timer);
869         timer_free(cpu->pmu_timer);
870     }
871 #endif
872 }
873 
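/*
 * Realize: report any deferred KVM host-probe failure, validate the
 * NVIC/M-profile pairing, create the generic timers, expand the feature
 * implication chain, choose the target page size, allocate the MPU/SAU
 * region arrays, register coprocessor and gdb registers, set up the
 * (secure) address spaces, and finally reset the CPU before chaining to
 * the parent realize.
 */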
874 static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
875 {
876     CPUState *cs = CPU(dev);
877     ARMCPU *cpu = ARM_CPU(dev);
878     ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
879     CPUARMState *env = &cpu->env;
880     int pagebits;
881     Error *local_err = NULL;
882     bool no_aa32 = false;
883 
884     /* If we needed to query the host kernel for the CPU features
885      * then it's possible that the probe might have failed in the initfn, but
886      * this is the first point where we can report it.
887      */
888     if (cpu->host_cpu_probe_failed) {
889         if (!kvm_enabled()) {
890             error_setg(errp, "The 'host' CPU type can only be used with KVM");
891         } else {
892             error_setg(errp, "Failed to retrieve host CPU features");
893         }
894         return;
895     }
896 
897 #ifndef CONFIG_USER_ONLY
898     /* The NVIC and M-profile CPU are two halves of a single piece of
899      * hardware; trying to use one without the other is a command line
900      * error and will result in segfaults if not caught here.
901      */
902     if (arm_feature(env, ARM_FEATURE_M)) {
903         if (!env->nvic) {
904             error_setg(errp, "This board cannot be used with Cortex-M CPUs");
905             return;
906         }
907     } else {
908         if (env->nvic) {
909             error_setg(errp, "This board can only be used with Cortex-M CPUs");
910             return;
911         }
912     }
913 
914     cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
915                                            arm_gt_ptimer_cb, cpu);
916     cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
917                                            arm_gt_vtimer_cb, cpu);
918     cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
919                                           arm_gt_htimer_cb, cpu);
920     cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
921                                           arm_gt_stimer_cb, cpu);
922 #endif
923 
924     cpu_exec_realizefn(cs, &local_err);
925     if (local_err != NULL) {
926         error_propagate(errp, local_err);
927         return;
928     }
929 
930     /* Some features automatically imply others: */
931     if (arm_feature(env, ARM_FEATURE_V8)) {
932         if (arm_feature(env, ARM_FEATURE_M)) {
933             set_feature(env, ARM_FEATURE_V7);
934         } else {
935             set_feature(env, ARM_FEATURE_V7VE);
936         }
937     }
938 
939     /*
940      * There exist AArch64 cpus without AArch32 support.  When KVM
941      * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
942      * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
943      */
944     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
945         no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
946     }
947 
948     if (arm_feature(env, ARM_FEATURE_V7VE)) {
949         /* v7 Virtualization Extensions. In real hardware this implies
950          * EL2 and also the presence of the Security Extensions.
951          * For QEMU, for backwards-compatibility we implement some
952          * CPUs or CPU configs which have no actual EL2 or EL3 but do
953          * include the various other features that V7VE implies.
954          * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
955          * Security Extensions is ARM_FEATURE_EL3.
956          */
957         assert(no_aa32 || cpu_isar_feature(arm_div, cpu));
958         set_feature(env, ARM_FEATURE_LPAE);
959         set_feature(env, ARM_FEATURE_V7);
960     }
961     if (arm_feature(env, ARM_FEATURE_V7)) {
962         set_feature(env, ARM_FEATURE_VAPA);
963         set_feature(env, ARM_FEATURE_THUMB2);
964         set_feature(env, ARM_FEATURE_MPIDR);
965         if (!arm_feature(env, ARM_FEATURE_M)) {
966             set_feature(env, ARM_FEATURE_V6K);
967         } else {
968             set_feature(env, ARM_FEATURE_V6);
969         }
970 
971         /* Always define VBAR for V7 CPUs even if it doesn't exist in
972          * non-EL3 configs. This is needed by some legacy boards.
973          */
974         set_feature(env, ARM_FEATURE_VBAR);
975     }
976     if (arm_feature(env, ARM_FEATURE_V6K)) {
977         set_feature(env, ARM_FEATURE_V6);
978         set_feature(env, ARM_FEATURE_MVFR);
979     }
980     if (arm_feature(env, ARM_FEATURE_V6)) {
981         set_feature(env, ARM_FEATURE_V5);
982         if (!arm_feature(env, ARM_FEATURE_M)) {
983             assert(no_aa32 || cpu_isar_feature(jazelle, cpu));
984             set_feature(env, ARM_FEATURE_AUXCR);
985         }
986     }
987     if (arm_feature(env, ARM_FEATURE_V5)) {
988         set_feature(env, ARM_FEATURE_V4T);
989     }
990     if (arm_feature(env, ARM_FEATURE_VFP4)) {
991         set_feature(env, ARM_FEATURE_VFP3);
992         set_feature(env, ARM_FEATURE_VFP_FP16);
993     }
994     if (arm_feature(env, ARM_FEATURE_VFP3)) {
995         set_feature(env, ARM_FEATURE_VFP);
996     }
997     if (arm_feature(env, ARM_FEATURE_LPAE)) {
998         set_feature(env, ARM_FEATURE_V7MP);
999         set_feature(env, ARM_FEATURE_PXN);
1000     }
1001     if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
1002         set_feature(env, ARM_FEATURE_CBAR);
1003     }
1004     if (arm_feature(env, ARM_FEATURE_THUMB2) &&
1005         !arm_feature(env, ARM_FEATURE_M)) {
1006         set_feature(env, ARM_FEATURE_THUMB_DSP);
1007     }
1008 
1009     if (arm_feature(env, ARM_FEATURE_V7) &&
1010         !arm_feature(env, ARM_FEATURE_M) &&
1011         !arm_feature(env, ARM_FEATURE_PMSA)) {
1012         /* v7VMSA drops support for the old ARMv5 tiny pages, so we
1013          * can use 4K pages.
1014          */
1015         pagebits = 12;
1016     } else {
1017         /* For CPUs which might have tiny 1K pages, or which have an
1018          * MPU and might have small region sizes, stick with 1K pages.
1019          */
1020         pagebits = 10;
1021     }
1022     if (!set_preferred_target_page_bits(pagebits)) {
1023         /* This can only ever happen for hotplugging a CPU, or if
1024          * the board code incorrectly creates a CPU type which it
1025          * promised via minimum_page_size that it would not create.
1026          */
1027         error_setg(errp, "This CPU requires a smaller page size than the "
1028                    "system is using");
1029         return;
1030     }
1031 
1032     /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
1033      * We don't support setting cluster ID ([16..23]) (known as Aff2
1034      * in later ARM ARM versions), or any of the higher affinity level fields,
1035      * so these bits always RAZ.
1036      */
1037     if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
1038         cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
1039                                                ARM_DEFAULT_CPUS_PER_CLUSTER);
1040     }
1041 
1042     if (cpu->reset_hivecs) {
1043         cpu->reset_sctlr |= (1 << 13);
1044     }
1045 
1046     if (cpu->cfgend) {
1047         if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1048             cpu->reset_sctlr |= SCTLR_EE;
1049         } else {
1050             cpu->reset_sctlr |= SCTLR_B;
1051         }
1052     }
1053 
1054     if (!cpu->has_el3) {
1055         /* If the has_el3 CPU property is disabled then we need to disable the
1056          * feature.
1057          */
1058         unset_feature(env, ARM_FEATURE_EL3);
1059 
1060         /* Disable the security extension feature bits in the processor feature
1061          * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
1062          */
1063         cpu->id_pfr1 &= ~0xf0;
1064         cpu->isar.id_aa64pfr0 &= ~0xf000;
1065     }
1066 
1067     if (!cpu->has_el2) {
1068         unset_feature(env, ARM_FEATURE_EL2);
1069     }
1070 
1071     if (!cpu->has_pmu) {
1072         unset_feature(env, ARM_FEATURE_PMU);
1073     }
1074     if (arm_feature(env, ARM_FEATURE_PMU)) {
1075         pmu_init(cpu);
1076 
1077         if (!kvm_enabled()) {
1078             arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
1079             arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
1080         }
1081 
1082 #ifndef CONFIG_USER_ONLY
1083         cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
1084                 cpu);
1085 #endif
1086     } else {
1087         cpu->id_aa64dfr0 &= ~0xf00;
1088         cpu->pmceid0 = 0;
1089         cpu->pmceid1 = 0;
1090     }
1091 
1092     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1093         /* Disable the hypervisor feature bits in the processor feature
1094          * registers if we don't have EL2. These are id_pfr1[15:12] and
1095          * id_aa64pfr0_el1[11:8].
1096          */
1097         cpu->isar.id_aa64pfr0 &= ~0xf00;
1098         cpu->id_pfr1 &= ~0xf000;
1099     }
1100 
1101     /* MPU can be configured out of a PMSA CPU either by setting has-mpu
1102      * to false or by setting pmsav7-dregion to 0.
1103      */
1104     if (!cpu->has_mpu) {
1105         cpu->pmsav7_dregion = 0;
1106     }
1107     if (cpu->pmsav7_dregion == 0) {
1108         cpu->has_mpu = false;
1109     }
1110 
1111     if (arm_feature(env, ARM_FEATURE_PMSA) &&
1112         arm_feature(env, ARM_FEATURE_V7)) {
1113         uint32_t nr = cpu->pmsav7_dregion;
1114 
1115         if (nr > 0xff) {
1116             error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
1117             return;
1118         }
1119 
1120         if (nr) {
1121             if (arm_feature(env, ARM_FEATURE_V8)) {
1122                 /* PMSAv8 */
1123                 env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
1124                 env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
1125                 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1126                     env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
1127                     env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
1128                 }
1129             } else {
1130                 env->pmsav7.drbar = g_new0(uint32_t, nr);
1131                 env->pmsav7.drsr = g_new0(uint32_t, nr);
1132                 env->pmsav7.dracr = g_new0(uint32_t, nr);
1133             }
1134         }
1135     }
1136 
1137     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1138         uint32_t nr = cpu->sau_sregion;
1139 
1140         if (nr > 0xff) {
1141             error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
1142             return;
1143         }
1144 
1145         if (nr) {
1146             env->sau.rbar = g_new0(uint32_t, nr);
1147             env->sau.rlar = g_new0(uint32_t, nr);
1148         }
1149     }
1150 
1151     if (arm_feature(env, ARM_FEATURE_EL3)) {
1152         set_feature(env, ARM_FEATURE_VBAR);
1153     }
1154 
1155     register_cp_regs_for_features(cpu);
1156     arm_cpu_register_gdb_regs_for_features(cpu);
1157 
1158     init_cpreg_list(cpu);
1159 
1160 #ifndef CONFIG_USER_ONLY
1161     if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1162         cs->num_ases = 2;
1163 
1164         if (!cpu->secure_memory) {
1165             cpu->secure_memory = cs->memory;
1166         }
1167         cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
1168                                cpu->secure_memory);
1169     } else {
1170         cs->num_ases = 1;
1171     }
1172     cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
1173 
1174     /* No core_count specified, default to smp_cpus. */
1175     if (cpu->core_count == -1) {
1176         cpu->core_count = smp_cpus;
1177     }
1178 #endif
1179 
1180     qemu_init_vcpu(cs);
1181     cpu_reset(cs);
1182 
1183     acc->parent_realize(dev, errp);
1184 }
1185 
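/*
 * Translate a -cpu model name into a QOM class. The name is interpolated
 * into ARM_CPU_TYPE_NAME(), so e.g. "cortex-a15" resolves to the
 * "cortex-a15-arm-cpu" type; abstract classes and non-ARM classes are
 * rejected.
 */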
1186 static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
1187 {
1188     ObjectClass *oc;
1189     char *typename;
1190     char **cpuname;
1191     const char *cpunamestr;
1192 
1193     cpuname = g_strsplit(cpu_model, ",", 1);
1194     cpunamestr = cpuname[0];
1195 #ifdef CONFIG_USER_ONLY
1196     /* For backwards compatibility usermode emulation allows "-cpu any",
1197      * which has the same semantics as "-cpu max".
1198      */
1199     if (!strcmp(cpunamestr, "any")) {
1200         cpunamestr = "max";
1201     }
1202 #endif
1203     typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
1204     oc = object_class_by_name(typename);
1205     g_strfreev(cpuname);
1206     g_free(typename);
1207     if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
1208         object_class_is_abstract(oc)) {
1209         return NULL;
1210     }
1211     return oc;
1212 }
1213 
1214 /* CPU models. These are not needed for the AArch64 linux-user build. */
1215 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
1216 
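/*
 * The per-model initfns below mostly just set feature bits, the MIDR and
 * the ID/feature register values of the corresponding silicon; everything
 * derived from those (implied features, MPU/SAU arrays, coprocessor
 * registers) is filled in later by arm_cpu_realizefn().
 */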
1217 static void arm926_initfn(Object *obj)
1218 {
1219     ARMCPU *cpu = ARM_CPU(obj);
1220 
1221     cpu->dtb_compatible = "arm,arm926";
1222     set_feature(&cpu->env, ARM_FEATURE_V5);
1223     set_feature(&cpu->env, ARM_FEATURE_VFP);
1224     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1225     set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
1226     cpu->midr = 0x41069265;
1227     cpu->reset_fpsid = 0x41011090;
1228     cpu->ctr = 0x1dd20d2;
1229     cpu->reset_sctlr = 0x00090078;
1230 
1231     /*
1232      * ARMv5 does not have the ID_ISAR registers, but we can still
1233      * set the field to indicate Jazelle support within QEMU.
1234      */
1235     cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
1236 }
1237 
1238 static void arm946_initfn(Object *obj)
1239 {
1240     ARMCPU *cpu = ARM_CPU(obj);
1241 
1242     cpu->dtb_compatible = "arm,arm946";
1243     set_feature(&cpu->env, ARM_FEATURE_V5);
1244     set_feature(&cpu->env, ARM_FEATURE_PMSA);
1245     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1246     cpu->midr = 0x41059461;
1247     cpu->ctr = 0x0f004006;
1248     cpu->reset_sctlr = 0x00000078;
1249 }
1250 
1251 static void arm1026_initfn(Object *obj)
1252 {
1253     ARMCPU *cpu = ARM_CPU(obj);
1254 
1255     cpu->dtb_compatible = "arm,arm1026";
1256     set_feature(&cpu->env, ARM_FEATURE_V5);
1257     set_feature(&cpu->env, ARM_FEATURE_VFP);
1258     set_feature(&cpu->env, ARM_FEATURE_AUXCR);
1259     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1260     set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
1261     cpu->midr = 0x4106a262;
1262     cpu->reset_fpsid = 0x410110a0;
1263     cpu->ctr = 0x1dd20d2;
1264     cpu->reset_sctlr = 0x00090078;
1265     cpu->reset_auxcr = 1;
1266 
1267     /*
1268      * ARMv5 does not have the ID_ISAR registers, but we can still
1269      * set the field to indicate Jazelle support within QEMU.
1270      */
1271     cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
1272 
1273     {
1274         /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
1275         ARMCPRegInfo ifar = {
1276             .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1277             .access = PL1_RW,
1278             .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
1279             .resetvalue = 0
1280         };
1281         define_one_arm_cp_reg(cpu, &ifar);
1282     }
1283 }
1284 
1285 static void arm1136_r2_initfn(Object *obj)
1286 {
1287     ARMCPU *cpu = ARM_CPU(obj);
1288     /* What QEMU calls "arm1136_r2" is actually the 1136 r0p2, i.e. an
1289      * older core than plain "arm1136". In particular this does not
1290      * have the v6K features.
1291      * These ID register values are correct for 1136 but may be wrong
1292      * for 1136_r2 (in particular r0p2 does not actually implement most
1293      * of the ID registers).
1294      */
1295 
1296     cpu->dtb_compatible = "arm,arm1136";
1297     set_feature(&cpu->env, ARM_FEATURE_V6);
1298     set_feature(&cpu->env, ARM_FEATURE_VFP);
1299     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1300     set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1301     set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1302     cpu->midr = 0x4107b362;
1303     cpu->reset_fpsid = 0x410120b4;
1304     cpu->isar.mvfr0 = 0x11111111;
1305     cpu->isar.mvfr1 = 0x00000000;
1306     cpu->ctr = 0x1dd20d2;
1307     cpu->reset_sctlr = 0x00050078;
1308     cpu->id_pfr0 = 0x111;
1309     cpu->id_pfr1 = 0x1;
1310     cpu->id_dfr0 = 0x2;
1311     cpu->id_afr0 = 0x3;
1312     cpu->id_mmfr0 = 0x01130003;
1313     cpu->id_mmfr1 = 0x10030302;
1314     cpu->id_mmfr2 = 0x01222110;
1315     cpu->isar.id_isar0 = 0x00140011;
1316     cpu->isar.id_isar1 = 0x12002111;
1317     cpu->isar.id_isar2 = 0x11231111;
1318     cpu->isar.id_isar3 = 0x01102131;
1319     cpu->isar.id_isar4 = 0x141;
1320     cpu->reset_auxcr = 7;
1321 }
1322 
1323 static void arm1136_initfn(Object *obj)
1324 {
1325     ARMCPU *cpu = ARM_CPU(obj);
1326 
1327     cpu->dtb_compatible = "arm,arm1136";
1328     set_feature(&cpu->env, ARM_FEATURE_V6K);
1329     set_feature(&cpu->env, ARM_FEATURE_V6);
1330     set_feature(&cpu->env, ARM_FEATURE_VFP);
1331     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1332     set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1333     set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1334     cpu->midr = 0x4117b363;
1335     cpu->reset_fpsid = 0x410120b4;
1336     cpu->isar.mvfr0 = 0x11111111;
1337     cpu->isar.mvfr1 = 0x00000000;
1338     cpu->ctr = 0x1dd20d2;
1339     cpu->reset_sctlr = 0x00050078;
1340     cpu->id_pfr0 = 0x111;
1341     cpu->id_pfr1 = 0x1;
1342     cpu->id_dfr0 = 0x2;
1343     cpu->id_afr0 = 0x3;
1344     cpu->id_mmfr0 = 0x01130003;
1345     cpu->id_mmfr1 = 0x10030302;
1346     cpu->id_mmfr2 = 0x01222110;
1347     cpu->isar.id_isar0 = 0x00140011;
1348     cpu->isar.id_isar1 = 0x12002111;
1349     cpu->isar.id_isar2 = 0x11231111;
1350     cpu->isar.id_isar3 = 0x01102131;
1351     cpu->isar.id_isar4 = 0x141;
1352     cpu->reset_auxcr = 7;
1353 }
1354 
1355 static void arm1176_initfn(Object *obj)
1356 {
1357     ARMCPU *cpu = ARM_CPU(obj);
1358 
1359     cpu->dtb_compatible = "arm,arm1176";
1360     set_feature(&cpu->env, ARM_FEATURE_V6K);
1361     set_feature(&cpu->env, ARM_FEATURE_VFP);
1362     set_feature(&cpu->env, ARM_FEATURE_VAPA);
1363     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1364     set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1365     set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1366     set_feature(&cpu->env, ARM_FEATURE_EL3);
1367     cpu->midr = 0x410fb767;
1368     cpu->reset_fpsid = 0x410120b5;
1369     cpu->isar.mvfr0 = 0x11111111;
1370     cpu->isar.mvfr1 = 0x00000000;
1371     cpu->ctr = 0x1dd20d2;
1372     cpu->reset_sctlr = 0x00050078;
1373     cpu->id_pfr0 = 0x111;
1374     cpu->id_pfr1 = 0x11;
1375     cpu->id_dfr0 = 0x33;
1376     cpu->id_afr0 = 0;
1377     cpu->id_mmfr0 = 0x01130003;
1378     cpu->id_mmfr1 = 0x10030302;
1379     cpu->id_mmfr2 = 0x01222100;
1380     cpu->isar.id_isar0 = 0x0140011;
1381     cpu->isar.id_isar1 = 0x12002111;
1382     cpu->isar.id_isar2 = 0x11231121;
1383     cpu->isar.id_isar3 = 0x01102131;
1384     cpu->isar.id_isar4 = 0x01141;
1385     cpu->reset_auxcr = 7;
1386 }
1387 
1388 static void arm11mpcore_initfn(Object *obj)
1389 {
1390     ARMCPU *cpu = ARM_CPU(obj);
1391 
1392     cpu->dtb_compatible = "arm,arm11mpcore";
1393     set_feature(&cpu->env, ARM_FEATURE_V6K);
1394     set_feature(&cpu->env, ARM_FEATURE_VFP);
1395     set_feature(&cpu->env, ARM_FEATURE_VAPA);
1396     set_feature(&cpu->env, ARM_FEATURE_MPIDR);
1397     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1398     cpu->midr = 0x410fb022;
1399     cpu->reset_fpsid = 0x410120b4;
1400     cpu->isar.mvfr0 = 0x11111111;
1401     cpu->isar.mvfr1 = 0x00000000;
1402     cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
1403     cpu->id_pfr0 = 0x111;
1404     cpu->id_pfr1 = 0x1;
1405     cpu->id_dfr0 = 0;
1406     cpu->id_afr0 = 0x2;
1407     cpu->id_mmfr0 = 0x01100103;
1408     cpu->id_mmfr1 = 0x10020302;
1409     cpu->id_mmfr2 = 0x01222000;
1410     cpu->isar.id_isar0 = 0x00100011;
1411     cpu->isar.id_isar1 = 0x12002111;
1412     cpu->isar.id_isar2 = 0x11221011;
1413     cpu->isar.id_isar3 = 0x01102131;
1414     cpu->isar.id_isar4 = 0x141;
1415     cpu->reset_auxcr = 1;
1416 }
1417 
1418 static void cortex_m0_initfn(Object *obj)
1419 {
1420     ARMCPU *cpu = ARM_CPU(obj);
1421     set_feature(&cpu->env, ARM_FEATURE_V6);
1422     set_feature(&cpu->env, ARM_FEATURE_M);
1423 
1424     cpu->midr = 0x410cc200;
1425 }
1426 
1427 static void cortex_m3_initfn(Object *obj)
1428 {
1429     ARMCPU *cpu = ARM_CPU(obj);
1430     set_feature(&cpu->env, ARM_FEATURE_V7);
1431     set_feature(&cpu->env, ARM_FEATURE_M);
1432     set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
1433     cpu->midr = 0x410fc231;
1434     cpu->pmsav7_dregion = 8;
1435     cpu->id_pfr0 = 0x00000030;
1436     cpu->id_pfr1 = 0x00000200;
1437     cpu->id_dfr0 = 0x00100000;
1438     cpu->id_afr0 = 0x00000000;
1439     cpu->id_mmfr0 = 0x00000030;
1440     cpu->id_mmfr1 = 0x00000000;
1441     cpu->id_mmfr2 = 0x00000000;
1442     cpu->id_mmfr3 = 0x00000000;
1443     cpu->isar.id_isar0 = 0x01141110;
1444     cpu->isar.id_isar1 = 0x02111000;
1445     cpu->isar.id_isar2 = 0x21112231;
1446     cpu->isar.id_isar3 = 0x01111110;
1447     cpu->isar.id_isar4 = 0x01310102;
1448     cpu->isar.id_isar5 = 0x00000000;
1449     cpu->isar.id_isar6 = 0x00000000;
1450 }
1451 
1452 static void cortex_m4_initfn(Object *obj)
1453 {
1454     ARMCPU *cpu = ARM_CPU(obj);
1455 
1456     set_feature(&cpu->env, ARM_FEATURE_V7);
1457     set_feature(&cpu->env, ARM_FEATURE_M);
1458     set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
1459     set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
1460     cpu->midr = 0x410fc240; /* r0p0 */
1461     cpu->pmsav7_dregion = 8;
1462     cpu->id_pfr0 = 0x00000030;
1463     cpu->id_pfr1 = 0x00000200;
1464     cpu->id_dfr0 = 0x00100000;
1465     cpu->id_afr0 = 0x00000000;
1466     cpu->id_mmfr0 = 0x00000030;
1467     cpu->id_mmfr1 = 0x00000000;
1468     cpu->id_mmfr2 = 0x00000000;
1469     cpu->id_mmfr3 = 0x00000000;
1470     cpu->isar.id_isar0 = 0x01141110;
1471     cpu->isar.id_isar1 = 0x02111000;
1472     cpu->isar.id_isar2 = 0x21112231;
1473     cpu->isar.id_isar3 = 0x01111110;
1474     cpu->isar.id_isar4 = 0x01310102;
1475     cpu->isar.id_isar5 = 0x00000000;
1476     cpu->isar.id_isar6 = 0x00000000;
1477 }
1478 
1479 static void cortex_m33_initfn(Object *obj)
1480 {
1481     ARMCPU *cpu = ARM_CPU(obj);
1482 
1483     set_feature(&cpu->env, ARM_FEATURE_V8);
1484     set_feature(&cpu->env, ARM_FEATURE_M);
1485     set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
1486     set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
1487     set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
1488     cpu->midr = 0x410fd213; /* r0p3 */
1489     cpu->pmsav7_dregion = 16;
1490     cpu->sau_sregion = 8;
1491     cpu->id_pfr0 = 0x00000030;
1492     cpu->id_pfr1 = 0x00000210;
1493     cpu->id_dfr0 = 0x00200000;
1494     cpu->id_afr0 = 0x00000000;
1495     cpu->id_mmfr0 = 0x00101F40;
1496     cpu->id_mmfr1 = 0x00000000;
1497     cpu->id_mmfr2 = 0x01000000;
1498     cpu->id_mmfr3 = 0x00000000;
1499     cpu->isar.id_isar0 = 0x01101110;
1500     cpu->isar.id_isar1 = 0x02212000;
1501     cpu->isar.id_isar2 = 0x20232232;
1502     cpu->isar.id_isar3 = 0x01111131;
1503     cpu->isar.id_isar4 = 0x01310132;
1504     cpu->isar.id_isar5 = 0x00000000;
1505     cpu->isar.id_isar6 = 0x00000000;
1506     cpu->clidr = 0x00000000;
1507     cpu->ctr = 0x8000c000;
1508 }
1509 
1510 static void arm_v7m_class_init(ObjectClass *oc, void *data)
1511 {
1512     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
1513     CPUClass *cc = CPU_CLASS(oc);
1514 
1515     acc->info = data;
1516 #ifndef CONFIG_USER_ONLY
1517     cc->do_interrupt = arm_v7m_cpu_do_interrupt;
1518 #endif
1519 
1520     cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
1521 }
1522 
1523 static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
1524     /* Dummy the TCM region regs for the moment */
1525     { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
1526       .access = PL1_RW, .type = ARM_CP_CONST },
1527     { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
1528       .access = PL1_RW, .type = ARM_CP_CONST },
1529     { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
1530       .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
1531     REGINFO_SENTINEL
1532 };
1533 
1534 static void cortex_r5_initfn(Object *obj)
1535 {
1536     ARMCPU *cpu = ARM_CPU(obj);
1537 
1538     set_feature(&cpu->env, ARM_FEATURE_V7);
1539     set_feature(&cpu->env, ARM_FEATURE_V7MP);
1540     set_feature(&cpu->env, ARM_FEATURE_PMSA);
1541     cpu->midr = 0x411fc153; /* r1p3 */
1542     cpu->id_pfr0 = 0x0131;
1543     cpu->id_pfr1 = 0x001;
1544     cpu->id_dfr0 = 0x010400;
1545     cpu->id_afr0 = 0x0;
1546     cpu->id_mmfr0 = 0x0210030;
1547     cpu->id_mmfr1 = 0x00000000;
1548     cpu->id_mmfr2 = 0x01200000;
1549     cpu->id_mmfr3 = 0x0211;
1550     cpu->isar.id_isar0 = 0x02101111;
1551     cpu->isar.id_isar1 = 0x13112111;
1552     cpu->isar.id_isar2 = 0x21232141;
1553     cpu->isar.id_isar3 = 0x01112131;
1554     cpu->isar.id_isar4 = 0x0010142;
1555     cpu->isar.id_isar5 = 0x0;
1556     cpu->isar.id_isar6 = 0x0;
1557     cpu->mp_is_up = true;
1558     cpu->pmsav7_dregion = 16;
1559     define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
1560 }
1561 
1562 static void cortex_r5f_initfn(Object *obj)
1563 {
1564     ARMCPU *cpu = ARM_CPU(obj);
1565 
1566     cortex_r5_initfn(obj);
1567     set_feature(&cpu->env, ARM_FEATURE_VFP3);
1568 }
1569 
1570 static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
1571     { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
1572       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1573     { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
1574       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1575     REGINFO_SENTINEL
1576 };
1577 
1578 static void cortex_a8_initfn(Object *obj)
1579 {
1580     ARMCPU *cpu = ARM_CPU(obj);
1581 
1582     cpu->dtb_compatible = "arm,cortex-a8";
1583     set_feature(&cpu->env, ARM_FEATURE_V7);
1584     set_feature(&cpu->env, ARM_FEATURE_VFP3);
1585     set_feature(&cpu->env, ARM_FEATURE_NEON);
1586     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1587     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1588     set_feature(&cpu->env, ARM_FEATURE_EL3);
1589     cpu->midr = 0x410fc080;
1590     cpu->reset_fpsid = 0x410330c0;
1591     cpu->isar.mvfr0 = 0x11110222;
1592     cpu->isar.mvfr1 = 0x00011111;
1593     cpu->ctr = 0x82048004;
1594     cpu->reset_sctlr = 0x00c50078;
1595     cpu->id_pfr0 = 0x1031;
1596     cpu->id_pfr1 = 0x11;
1597     cpu->id_dfr0 = 0x400;
1598     cpu->id_afr0 = 0;
1599     cpu->id_mmfr0 = 0x31100003;
1600     cpu->id_mmfr1 = 0x20000000;
1601     cpu->id_mmfr2 = 0x01202000;
1602     cpu->id_mmfr3 = 0x11;
1603     cpu->isar.id_isar0 = 0x00101111;
1604     cpu->isar.id_isar1 = 0x12112111;
1605     cpu->isar.id_isar2 = 0x21232031;
1606     cpu->isar.id_isar3 = 0x11112131;
1607     cpu->isar.id_isar4 = 0x00111142;
1608     cpu->dbgdidr = 0x15141000;
1609     cpu->clidr = (1 << 27) | (2 << 24) | 3;
1610     cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
1611     cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
1612     cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
1613     cpu->reset_auxcr = 2;
1614     define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
1615 }
1616 
1617 static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
1618     /* power_control should be set to maximum latency. Again,
1619      * default to 0 and let it be set by a private hook.
1620      */
1621     { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
1622       .access = PL1_RW, .resetvalue = 0,
1623       .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
1624     { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
1625       .access = PL1_RW, .resetvalue = 0,
1626       .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
1627     { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
1628       .access = PL1_RW, .resetvalue = 0,
1629       .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
1630     { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
1631       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1632     /* TLB lockdown control */
1633     { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
1634       .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
1635     { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
1636       .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
1637     { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
1638       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1639     { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
1640       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1641     { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
1642       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1643     REGINFO_SENTINEL
1644 };
1645 
1646 static void cortex_a9_initfn(Object *obj)
1647 {
1648     ARMCPU *cpu = ARM_CPU(obj);
1649 
1650     cpu->dtb_compatible = "arm,cortex-a9";
1651     set_feature(&cpu->env, ARM_FEATURE_V7);
1652     set_feature(&cpu->env, ARM_FEATURE_VFP3);
1653     set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
1654     set_feature(&cpu->env, ARM_FEATURE_NEON);
1655     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1656     set_feature(&cpu->env, ARM_FEATURE_EL3);
1657     /* Note that the A9 supports the MP extensions even for
1658      * A9UP and single-core A9MP (which are distinct, and both valid,
1659      * configurations; we don't model A9UP).
1660      */
1661     set_feature(&cpu->env, ARM_FEATURE_V7MP);
1662     set_feature(&cpu->env, ARM_FEATURE_CBAR);
1663     cpu->midr = 0x410fc090;
1664     cpu->reset_fpsid = 0x41033090;
1665     cpu->isar.mvfr0 = 0x11110222;
1666     cpu->isar.mvfr1 = 0x01111111;
1667     cpu->ctr = 0x80038003;
1668     cpu->reset_sctlr = 0x00c50078;
1669     cpu->id_pfr0 = 0x1031;
1670     cpu->id_pfr1 = 0x11;
1671     cpu->id_dfr0 = 0x000;
1672     cpu->id_afr0 = 0;
1673     cpu->id_mmfr0 = 0x00100103;
1674     cpu->id_mmfr1 = 0x20000000;
1675     cpu->id_mmfr2 = 0x01230000;
1676     cpu->id_mmfr3 = 0x00002111;
1677     cpu->isar.id_isar0 = 0x00101111;
1678     cpu->isar.id_isar1 = 0x13112111;
1679     cpu->isar.id_isar2 = 0x21232041;
1680     cpu->isar.id_isar3 = 0x11112131;
1681     cpu->isar.id_isar4 = 0x00111142;
1682     cpu->dbgdidr = 0x35141000;
1683     cpu->clidr = (1 << 27) | (1 << 24) | 3;
1684     cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
1685     cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
1686     define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
1687 }
1688 
1689 #ifndef CONFIG_USER_ONLY
1690 static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1691 {
1692     /* Linux wants the number of processors from here.
1693      * Might as well set the interrupt-controller bit too.
1694      */
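    /* CPU count minus one in bits [25:24]; bit 23 = interrupt controller. */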
1695     return ((smp_cpus - 1) << 24) | (1 << 23);
1696 }
1697 #endif
1698 
1699 static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
1700 #ifndef CONFIG_USER_ONLY
1701     { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
1702       .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
1703       .writefn = arm_cp_write_ignore, },
1704 #endif
1705     { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
1706       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1707     REGINFO_SENTINEL
1708 };
1709 
1710 static void cortex_a7_initfn(Object *obj)
1711 {
1712     ARMCPU *cpu = ARM_CPU(obj);
1713 
1714     cpu->dtb_compatible = "arm,cortex-a7";
1715     set_feature(&cpu->env, ARM_FEATURE_V7VE);
1716     set_feature(&cpu->env, ARM_FEATURE_VFP4);
1717     set_feature(&cpu->env, ARM_FEATURE_NEON);
1718     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1719     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
1720     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1721     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
1722     set_feature(&cpu->env, ARM_FEATURE_EL2);
1723     set_feature(&cpu->env, ARM_FEATURE_EL3);
1724     cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
1725     cpu->midr = 0x410fc075;
1726     cpu->reset_fpsid = 0x41023075;
1727     cpu->isar.mvfr0 = 0x10110222;
1728     cpu->isar.mvfr1 = 0x11111111;
1729     cpu->ctr = 0x84448003;
1730     cpu->reset_sctlr = 0x00c50078;
1731     cpu->id_pfr0 = 0x00001131;
1732     cpu->id_pfr1 = 0x00011011;
1733     cpu->id_dfr0 = 0x02010555;
1734     cpu->id_afr0 = 0x00000000;
1735     cpu->id_mmfr0 = 0x10101105;
1736     cpu->id_mmfr1 = 0x40000000;
1737     cpu->id_mmfr2 = 0x01240000;
1738     cpu->id_mmfr3 = 0x02102211;
1739     /* The Cortex-A7 MPCore r0p5 TRM, page 4-4, gives 0x01101110, but
1740      * table 4-41 gives 0x02101110, which includes the ARM div insns.
1741      */
1742     cpu->isar.id_isar0 = 0x02101110;
1743     cpu->isar.id_isar1 = 0x13112111;
1744     cpu->isar.id_isar2 = 0x21232041;
1745     cpu->isar.id_isar3 = 0x11112131;
1746     cpu->isar.id_isar4 = 0x10011142;
1747     cpu->dbgdidr = 0x3515f005;
1748     cpu->clidr = 0x0a200023;
1749     cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
1750     cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
1751     cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
1752     define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
1753 }
1754 
1755 static void cortex_a15_initfn(Object *obj)
1756 {
1757     ARMCPU *cpu = ARM_CPU(obj);
1758 
1759     cpu->dtb_compatible = "arm,cortex-a15";
1760     set_feature(&cpu->env, ARM_FEATURE_V7VE);
1761     set_feature(&cpu->env, ARM_FEATURE_VFP4);
1762     set_feature(&cpu->env, ARM_FEATURE_NEON);
1763     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1764     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
1765     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1766     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
1767     set_feature(&cpu->env, ARM_FEATURE_EL2);
1768     set_feature(&cpu->env, ARM_FEATURE_EL3);
1769     cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
1770     cpu->midr = 0x412fc0f1;
1771     cpu->reset_fpsid = 0x410430f0;
1772     cpu->isar.mvfr0 = 0x10110222;
1773     cpu->isar.mvfr1 = 0x11111111;
1774     cpu->ctr = 0x8444c004;
1775     cpu->reset_sctlr = 0x00c50078;
1776     cpu->id_pfr0 = 0x00001131;
1777     cpu->id_pfr1 = 0x00011011;
1778     cpu->id_dfr0 = 0x02010555;
1779     cpu->id_afr0 = 0x00000000;
1780     cpu->id_mmfr0 = 0x10201105;
1781     cpu->id_mmfr1 = 0x20000000;
1782     cpu->id_mmfr2 = 0x01240000;
1783     cpu->id_mmfr3 = 0x02102211;
1784     cpu->isar.id_isar0 = 0x02101110;
1785     cpu->isar.id_isar1 = 0x13112111;
1786     cpu->isar.id_isar2 = 0x21232041;
1787     cpu->isar.id_isar3 = 0x11112131;
1788     cpu->isar.id_isar4 = 0x10011142;
1789     cpu->dbgdidr = 0x3515f021;
1790     cpu->clidr = 0x0a200023;
1791     cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
1792     cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
1793     cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
1794     define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
1795 }
1796 
1797 static void ti925t_initfn(Object *obj)
1798 {
1799     ARMCPU *cpu = ARM_CPU(obj);
1800     set_feature(&cpu->env, ARM_FEATURE_V4T);
1801     set_feature(&cpu->env, ARM_FEATURE_OMAPCP);
1802     cpu->midr = ARM_CPUID_TI925T;
1803     cpu->ctr = 0x5109149;
1804     cpu->reset_sctlr = 0x00000070;
1805 }
1806 
1807 static void sa1100_initfn(Object *obj)
1808 {
1809     ARMCPU *cpu = ARM_CPU(obj);
1810 
1811     cpu->dtb_compatible = "intel,sa1100";
1812     set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
1813     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1814     cpu->midr = 0x4401A11B;
1815     cpu->reset_sctlr = 0x00000070;
1816 }
1817 
1818 static void sa1110_initfn(Object *obj)
1819 {
1820     ARMCPU *cpu = ARM_CPU(obj);
1821     set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
1822     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1823     cpu->midr = 0x6901B119;
1824     cpu->reset_sctlr = 0x00000070;
1825 }
1826 
1827 static void pxa250_initfn(Object *obj)
1828 {
1829     ARMCPU *cpu = ARM_CPU(obj);
1830 
1831     cpu->dtb_compatible = "marvell,xscale";
1832     set_feature(&cpu->env, ARM_FEATURE_V5);
1833     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1834     cpu->midr = 0x69052100;
1835     cpu->ctr = 0xd172172;
1836     cpu->reset_sctlr = 0x00000078;
1837 }
1838 
1839 static void pxa255_initfn(Object *obj)
1840 {
1841     ARMCPU *cpu = ARM_CPU(obj);
1842 
1843     cpu->dtb_compatible = "marvell,xscale";
1844     set_feature(&cpu->env, ARM_FEATURE_V5);
1845     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1846     cpu->midr = 0x69052d00;
1847     cpu->ctr = 0xd172172;
1848     cpu->reset_sctlr = 0x00000078;
1849 }
1850 
1851 static void pxa260_initfn(Object *obj)
1852 {
1853     ARMCPU *cpu = ARM_CPU(obj);
1854 
1855     cpu->dtb_compatible = "marvell,xscale";
1856     set_feature(&cpu->env, ARM_FEATURE_V5);
1857     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1858     cpu->midr = 0x69052903;
1859     cpu->ctr = 0xd172172;
1860     cpu->reset_sctlr = 0x00000078;
1861 }
1862 
1863 static void pxa261_initfn(Object *obj)
1864 {
1865     ARMCPU *cpu = ARM_CPU(obj);
1866 
1867     cpu->dtb_compatible = "marvell,xscale";
1868     set_feature(&cpu->env, ARM_FEATURE_V5);
1869     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1870     cpu->midr = 0x69052d05;
1871     cpu->ctr = 0xd172172;
1872     cpu->reset_sctlr = 0x00000078;
1873 }
1874 
1875 static void pxa262_initfn(Object *obj)
1876 {
1877     ARMCPU *cpu = ARM_CPU(obj);
1878 
1879     cpu->dtb_compatible = "marvell,xscale";
1880     set_feature(&cpu->env, ARM_FEATURE_V5);
1881     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1882     cpu->midr = 0x69052d06;
1883     cpu->ctr = 0xd172172;
1884     cpu->reset_sctlr = 0x00000078;
1885 }
1886 
1887 static void pxa270a0_initfn(Object *obj)
1888 {
1889     ARMCPU *cpu = ARM_CPU(obj);
1890 
1891     cpu->dtb_compatible = "marvell,xscale";
1892     set_feature(&cpu->env, ARM_FEATURE_V5);
1893     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1894     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1895     cpu->midr = 0x69054110;
1896     cpu->ctr = 0xd172172;
1897     cpu->reset_sctlr = 0x00000078;
1898 }
1899 
1900 static void pxa270a1_initfn(Object *obj)
1901 {
1902     ARMCPU *cpu = ARM_CPU(obj);
1903 
1904     cpu->dtb_compatible = "marvell,xscale";
1905     set_feature(&cpu->env, ARM_FEATURE_V5);
1906     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1907     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1908     cpu->midr = 0x69054111;
1909     cpu->ctr = 0xd172172;
1910     cpu->reset_sctlr = 0x00000078;
1911 }
1912 
1913 static void pxa270b0_initfn(Object *obj)
1914 {
1915     ARMCPU *cpu = ARM_CPU(obj);
1916 
1917     cpu->dtb_compatible = "marvell,xscale";
1918     set_feature(&cpu->env, ARM_FEATURE_V5);
1919     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1920     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1921     cpu->midr = 0x69054112;
1922     cpu->ctr = 0xd172172;
1923     cpu->reset_sctlr = 0x00000078;
1924 }
1925 
1926 static void pxa270b1_initfn(Object *obj)
1927 {
1928     ARMCPU *cpu = ARM_CPU(obj);
1929 
1930     cpu->dtb_compatible = "marvell,xscale";
1931     set_feature(&cpu->env, ARM_FEATURE_V5);
1932     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1933     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1934     cpu->midr = 0x69054113;
1935     cpu->ctr = 0xd172172;
1936     cpu->reset_sctlr = 0x00000078;
1937 }
1938 
1939 static void pxa270c0_initfn(Object *obj)
1940 {
1941     ARMCPU *cpu = ARM_CPU(obj);
1942 
1943     cpu->dtb_compatible = "marvell,xscale";
1944     set_feature(&cpu->env, ARM_FEATURE_V5);
1945     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1946     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1947     cpu->midr = 0x69054114;
1948     cpu->ctr = 0xd172172;
1949     cpu->reset_sctlr = 0x00000078;
1950 }
1951 
1952 static void pxa270c5_initfn(Object *obj)
1953 {
1954     ARMCPU *cpu = ARM_CPU(obj);
1955 
1956     cpu->dtb_compatible = "marvell,xscale";
1957     set_feature(&cpu->env, ARM_FEATURE_V5);
1958     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1959     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1960     cpu->midr = 0x69054117;
1961     cpu->ctr = 0xd172172;
1962     cpu->reset_sctlr = 0x00000078;
1963 }
1964 
1965 #ifndef TARGET_AARCH64
1966 /* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
1967  * otherwise, a CPU with as many features enabled as our emulation supports.
1968  * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
1969  * this only needs to handle 32 bits.
1970  */
1971 static void arm_max_initfn(Object *obj)
1972 {
1973     ARMCPU *cpu = ARM_CPU(obj);
1974 
1975     if (kvm_enabled()) {
1976         kvm_arm_set_cpu_features_from_host(cpu);
1977     } else {
1978         cortex_a15_initfn(obj);
1979 #ifdef CONFIG_USER_ONLY
1980         /* We don't set these in system emulation mode for the moment,
1981          * since we don't correctly set (all of) the ID registers to
1982          * advertise them.
1983          */
1984         set_feature(&cpu->env, ARM_FEATURE_V8);
1985         {
1986             uint32_t t;
1987 
1988             t = cpu->isar.id_isar5;
1989             t = FIELD_DP32(t, ID_ISAR5, AES, 2);
1990             t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
1991             t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
1992             t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
1993             t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
1994             t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
1995             cpu->isar.id_isar5 = t;
1996 
1997             t = cpu->isar.id_isar6;
1998             t = FIELD_DP32(t, ID_ISAR6, DP, 1);
1999             cpu->isar.id_isar6 = t;
2000 
2001             t = cpu->id_mmfr4;
2002             t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
2003             cpu->id_mmfr4 = t;
2004         }
2005 #endif
2006     }
2007 }
2008 #endif
2009 
2010 #endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */
2011 
2012 struct ARMCPUInfo {
2013     const char *name;
2014     void (*initfn)(Object *obj);
2015     void (*class_init)(ObjectClass *oc, void *data);
2016 };
2017 
2018 static const ARMCPUInfo arm_cpus[] = {
2019 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
2020     { .name = "arm926",      .initfn = arm926_initfn },
2021     { .name = "arm946",      .initfn = arm946_initfn },
2022     { .name = "arm1026",     .initfn = arm1026_initfn },
2023     /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
2024      * older core than plain "arm1136". In particular this does not
2025      * have the v6K features.
2026      */
2027     { .name = "arm1136-r2",  .initfn = arm1136_r2_initfn },
2028     { .name = "arm1136",     .initfn = arm1136_initfn },
2029     { .name = "arm1176",     .initfn = arm1176_initfn },
2030     { .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
2031     { .name = "cortex-m0",   .initfn = cortex_m0_initfn,
2032                              .class_init = arm_v7m_class_init },
2033     { .name = "cortex-m3",   .initfn = cortex_m3_initfn,
2034                              .class_init = arm_v7m_class_init },
2035     { .name = "cortex-m4",   .initfn = cortex_m4_initfn,
2036                              .class_init = arm_v7m_class_init },
2037     { .name = "cortex-m33",  .initfn = cortex_m33_initfn,
2038                              .class_init = arm_v7m_class_init },
2039     { .name = "cortex-r5",   .initfn = cortex_r5_initfn },
2040     { .name = "cortex-r5f",  .initfn = cortex_r5f_initfn },
2041     { .name = "cortex-a7",   .initfn = cortex_a7_initfn },
2042     { .name = "cortex-a8",   .initfn = cortex_a8_initfn },
2043     { .name = "cortex-a9",   .initfn = cortex_a9_initfn },
2044     { .name = "cortex-a15",  .initfn = cortex_a15_initfn },
2045     { .name = "ti925t",      .initfn = ti925t_initfn },
2046     { .name = "sa1100",      .initfn = sa1100_initfn },
2047     { .name = "sa1110",      .initfn = sa1110_initfn },
2048     { .name = "pxa250",      .initfn = pxa250_initfn },
2049     { .name = "pxa255",      .initfn = pxa255_initfn },
2050     { .name = "pxa260",      .initfn = pxa260_initfn },
2051     { .name = "pxa261",      .initfn = pxa261_initfn },
2052     { .name = "pxa262",      .initfn = pxa262_initfn },
2053     /* "pxa270" is an alias for "pxa270-a0" */
2054     { .name = "pxa270",      .initfn = pxa270a0_initfn },
2055     { .name = "pxa270-a0",   .initfn = pxa270a0_initfn },
2056     { .name = "pxa270-a1",   .initfn = pxa270a1_initfn },
2057     { .name = "pxa270-b0",   .initfn = pxa270b0_initfn },
2058     { .name = "pxa270-b1",   .initfn = pxa270b1_initfn },
2059     { .name = "pxa270-c0",   .initfn = pxa270c0_initfn },
2060     { .name = "pxa270-c5",   .initfn = pxa270c5_initfn },
2061 #ifndef TARGET_AARCH64
2062     { .name = "max",         .initfn = arm_max_initfn },
2063 #endif
2064 #ifdef CONFIG_USER_ONLY
2065     { .name = "any",         .initfn = arm_max_initfn },
2066 #endif
2067 #endif
2068     { .name = NULL }
2069 };
2070 
2071 static Property arm_cpu_properties[] = {
2072     DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
2073     DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
2074     DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
2075     DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
2076                         mp_affinity, ARM64_AFFINITY_INVALID),
2077     DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
2078     DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
2079     DEFINE_PROP_END_OF_LIST()
2080 };
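
/*
 * A minimal sketch of how board code might set one of these properties
 * before realizing the CPU (illustrative only; 'cpu' is assumed to be an
 * already-created ARMCPU *):
 *
 *   object_property_set_bool(OBJECT(cpu), true, "start-powered-off",
 *                            &error_abort);
 */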
2081 
2082 #ifdef CONFIG_USER_ONLY
2083 static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
2084                                     int rw, int mmu_idx)
2085 {
2086     ARMCPU *cpu = ARM_CPU(cs);
2087     CPUARMState *env = &cpu->env;
2088 
2089     env->exception.vaddress = address;
2090     if (rw == 2) {
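    /* Access type 2 is an instruction fetch; 0/1 are data accesses. */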
2091         cs->exception_index = EXCP_PREFETCH_ABORT;
2092     } else {
2093         cs->exception_index = EXCP_DATA_ABORT;
2094     }
2095     return 1;
2096 }
2097 #endif
2098 
2099 static gchar *arm_gdb_arch_name(CPUState *cs)
2100 {
2101     ARMCPU *cpu = ARM_CPU(cs);
2102     CPUARMState *env = &cpu->env;
2103 
2104     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2105         return g_strdup("iwmmxt");
2106     }
2107     return g_strdup("arm");
2108 }
2109 
2110 static void arm_cpu_class_init(ObjectClass *oc, void *data)
2111 {
2112     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2113     CPUClass *cc = CPU_CLASS(acc);
2114     DeviceClass *dc = DEVICE_CLASS(oc);
2115 
2116     device_class_set_parent_realize(dc, arm_cpu_realizefn,
2117                                     &acc->parent_realize);
2118     dc->props = arm_cpu_properties;
2119 
2120     acc->parent_reset = cc->reset;
2121     cc->reset = arm_cpu_reset;
2122 
2123     cc->class_by_name = arm_cpu_class_by_name;
2124     cc->has_work = arm_cpu_has_work;
2125     cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
2126     cc->dump_state = arm_cpu_dump_state;
2127     cc->set_pc = arm_cpu_set_pc;
2128     cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
2129     cc->gdb_read_register = arm_cpu_gdb_read_register;
2130     cc->gdb_write_register = arm_cpu_gdb_write_register;
2131 #ifdef CONFIG_USER_ONLY
2132     cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
2133 #else
2134     cc->do_interrupt = arm_cpu_do_interrupt;
2135     cc->do_unaligned_access = arm_cpu_do_unaligned_access;
2136     cc->do_transaction_failed = arm_cpu_do_transaction_failed;
2137     cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
2138     cc->asidx_from_attrs = arm_asidx_from_attrs;
2139     cc->vmsd = &vmstate_arm_cpu;
2140     cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
2141     cc->write_elf64_note = arm_cpu_write_elf64_note;
2142     cc->write_elf32_note = arm_cpu_write_elf32_note;
2143 #endif
2144     cc->gdb_num_core_regs = 26;
2145     cc->gdb_core_xml_file = "arm-core.xml";
2146     cc->gdb_arch_name = arm_gdb_arch_name;
2147     cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
2148     cc->gdb_stop_before_watchpoint = true;
2149     cc->debug_excp_handler = arm_debug_excp_handler;
2150     cc->debug_check_watchpoint = arm_debug_check_watchpoint;
2151 #if !defined(CONFIG_USER_ONLY)
2152     cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
2153 #endif
2154 
2155     cc->disas_set_info = arm_disas_set_info;
2156 #ifdef CONFIG_TCG
2157     cc->tcg_initialize = arm_translate_init;
2158 #endif
2159 }
2160 
2161 #ifdef CONFIG_KVM
2162 static void arm_host_initfn(Object *obj)
2163 {
2164     ARMCPU *cpu = ARM_CPU(obj);
2165 
2166     kvm_arm_set_cpu_features_from_host(cpu);
2167     arm_cpu_post_init(obj);
2168 }
2169 
2170 static const TypeInfo host_arm_cpu_type_info = {
2171     .name = TYPE_ARM_HOST_CPU,
2172 #ifdef TARGET_AARCH64
2173     .parent = TYPE_AARCH64_CPU,
2174 #else
2175     .parent = TYPE_ARM_CPU,
2176 #endif
2177     .instance_init = arm_host_initfn,
2178 };
2179 
2180 #endif
2181 
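/*
 * Shared instance_init for the CPU types registered via cpu_register():
 * run the per-CPU initfn recorded in the class, then arm_cpu_post_init(),
 * which sets up the optional CPU properties.
 */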
2182 static void arm_cpu_instance_init(Object *obj)
2183 {
2184     ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
2185 
2186     acc->info->initfn(obj);
2187     arm_cpu_post_init(obj);
2188 }
2189 
2190 static void cpu_register_class_init(ObjectClass *oc, void *data)
2191 {
2192     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2193 
2194     acc->info = data;
2195 }
2196 
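/*
 * Register one arm_cpus[] entry as a QOM type; the type name is the table
 * name with "-" TYPE_ARM_CPU appended, so "cortex-a15", for example,
 * becomes the "cortex-a15-arm-cpu" type.
 */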
2197 static void cpu_register(const ARMCPUInfo *info)
2198 {
2199     TypeInfo type_info = {
2200         .parent = TYPE_ARM_CPU,
2201         .instance_size = sizeof(ARMCPU),
2202         .instance_init = arm_cpu_instance_init,
2203         .class_size = sizeof(ARMCPUClass),
2204         .class_init = info->class_init ?: cpu_register_class_init,
2205         .class_data = (void *)info,
2206     };
2207 
2208     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
2209     type_register(&type_info);
2210     g_free((void *)type_info.name);
2211 }
2212 
2213 static const TypeInfo arm_cpu_type_info = {
2214     .name = TYPE_ARM_CPU,
2215     .parent = TYPE_CPU,
2216     .instance_size = sizeof(ARMCPU),
2217     .instance_init = arm_cpu_initfn,
2218     .instance_finalize = arm_cpu_finalizefn,
2219     .abstract = true,
2220     .class_size = sizeof(ARMCPUClass),
2221     .class_init = arm_cpu_class_init,
2222 };
2223 
2224 static const TypeInfo idau_interface_type_info = {
2225     .name = TYPE_IDAU_INTERFACE,
2226     .parent = TYPE_INTERFACE,
2227     .class_size = sizeof(IDAUInterfaceClass),
2228 };
2229 
2230 static void arm_cpu_register_types(void)
2231 {
2232     const ARMCPUInfo *info = arm_cpus;
2233 
2234     type_register_static(&arm_cpu_type_info);
2235     type_register_static(&idau_interface_type_info);
2236 
2237     while (info->name) {
2238         cpu_register(info);
2239         info++;
2240     }
2241 
2242 #ifdef CONFIG_KVM
2243     type_register_static(&host_arm_cpu_type_info);
2244 #endif
2245 }
2246 
2247 type_init(arm_cpu_register_types)
2248