/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cputlb.h"
#include "exec/translation-block.h"
#include "hw/irq.h"
#include "system/cpu-timers.h"
#include "exec/icount.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "accel/tcg/probe.h"
#include "accel/tcg/getpc.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"

#define HELPER_H "tcg/helper.h"
#include "exec/helper-proto.h.inc"

static void switch_mode(CPUARMState *env, int mode);

uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

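/* Return a pointer to the CPU state field that backs this register. */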
static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /*
     * Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

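/* g_list_foreach callback: record one raw-accessible cpreg's index. */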
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

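/* g_list_foreach callback: count the registers that need raw access. */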
static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

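/* Sort comparator: order cpreg list keys by their 64-bit KVM register ID. */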
static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

bool arm_pan_enabled(CPUARMState *env)
{
    if (is_a64(env)) {
        if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
            return false;
        }
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_UNDEFINED;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     *
     * For AArch32 this is only used for TLBIALLNSNH and VTTBR
     * writes, so only needs to apply to NS PL1&0, not S PL1&0.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /*
     * Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint64_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);
    uint64_t changed;

    /*
     * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
     * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
     * Instead, choose the format based on the mode of EL3.
     */
    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;      /* RES1 */
        valid_mask &= ~SCR_NET;        /* RES0 */

        if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu)) {
            value |= SCR_RW;           /* RAO/WI */
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        } else if (cpu_isar_feature(aa64_rme, cpu)) {
            /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
            value |= SCR_NS;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= SCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_doublefault, cpu)) {
            valid_mask |= SCR_EASE | SCR_NMEA;
        }
        if (cpu_isar_feature(aa64_sme, cpu)) {
            valid_mask |= SCR_ENTP2;
        }
        if (cpu_isar_feature(aa64_hcx, cpu)) {
            valid_mask |= SCR_HXEN;
        }
        if (cpu_isar_feature(aa64_fgt, cpu)) {
            valid_mask |= SCR_FGTEN;
        }
        if (cpu_isar_feature(aa64_rme, cpu)) {
            valid_mask |= SCR_NSE | SCR_GPF;
        }
        if (cpu_isar_feature(aa64_ecv, cpu)) {
            valid_mask |= SCR_ECVEN;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
        if (cpu_isar_feature(aa32_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /*
         * On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    changed = env->cp15.scr_el3 ^ value;
    env->cp15.scr_el3 = value;

    /*
     * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
     * we must invalidate all TLBs below EL3.
     */
    if (changed & (SCR_NS | SCR_NSE)) {
        tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
                                           ARMMMUIdxBit_E20_0 |
                                           ARMMMUIdxBit_E10_1 |
                                           ARMMMUIdxBit_E20_2 |
                                           ARMMMUIdxBit_E10_1_PAN |
                                           ARMMMUIdxBit_E20_2_PAN |
                                           ARMMMUIdxBit_E2));
    }
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}

static CPAccessResult access_tid4(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

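/* Only the low 4 bits of CSSELR (Level, InD) are writable. */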
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

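/*
 * Compute the ISR_EL1 value from the pending interrupt lines, honouring
 * HCR_EL2 interrupt routing (IMO/FMO/AMO) when running at EL1.
 */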
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
        if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
            ret |= ISR_IS;
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }

        if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
            ret |= ISR_IS;
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
        if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
            ret |= ISR_FS;
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    if (hcr_el2 & HCR_AMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
            ret |= CPSR_A;
        }
    }

    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_tid4,
      .fgt = FGT_CCSIDR_EL1,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_tid4,
      .fgt = FGT_CSSELR_EL1,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /*
     * Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .fgt = FGT_AIDR_EL1,
      .resetvalue = 0 },
    /*
     * Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AFSR0_EL1,
      .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AFSR1_EL1,
      .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_MAIR_EL1,
      .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /*
     * For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /*
     * MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .fgt = FGT_ISR_EL1,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
};

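/* Only TEECR.XED (bit 0) is implemented; all other bits are reserved. */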
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
     * at all, so we don't need to check whether we're v8A.
     */
    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TTEE)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP_EL1;
    }
    return teecr_access(env, ri, isread);
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write, .accessfn = teecr_access },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fgt = FGT_TPIDR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fgt = FGT_TPIDR_EL0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fgt = FGT_TPIDRRO_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .fgt = FGT_TPIDRRO_EL0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fgt = FGT_TPIDR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
};

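/* Reset CNTFRQ to the timer frequency configured for this CPU. */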
static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /*
     * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP_EL1;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_UNDEFINED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_UNDEFINED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */
    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        if (has_el2 && timeridx == GTIMER_VIRT) {
            if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */

    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        if (has_el2 && timeridx == GTIMER_VIRT) {
            if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /*
     * The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_UNDEFINED;
        }
        if (arm_is_el2_enabled(env)) {
            return CP_ACCESS_UNDEFINED;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_UNDEFINED;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static CPAccessResult gt_sel2timer_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /*
     * The AArch64 register views of the secure EL2 timers are mostly
     * accessible from EL3 and EL2, although they can also be trapped to EL2
     * from EL1 depending on nested virt config.
     */
    switch (arm_current_el(env)) {
    case 0: /* UNDEFINED */
        return CP_ACCESS_UNDEFINED;
    case 1:
        if (!arm_is_secure(env)) {
            /* UNDEFINED */
            return CP_ACCESS_UNDEFINED;
        } else if (arm_hcr_el2_eff(env) & HCR_NV) {
            /* AArch64.SystemAccessTrap(EL2, 0x18) */
            return CP_ACCESS_TRAP_EL2;
        }
        /* UNDEFINED */
        return CP_ACCESS_UNDEFINED;
    case 2:
        if (!arm_is_secure(env)) {
            /* UNDEFINED */
            return CP_ACCESS_UNDEFINED;
        }
        return CP_ACCESS_OK;
    case 3:
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_OK;
        } else {
            return CP_ACCESS_UNDEFINED;
        }
    default:
        g_assert_not_reached();
    }
}

uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}

static void gt_update_irq(ARMCPU *cpu, int timeridx)
{
    CPUARMState *env = &cpu->env;
    uint64_t cnthctl = env->cp15.cnthctl_el2;
    ARMSecuritySpace ss = arm_security_space(env);
    /* ISTATUS && !IMASK */
    int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;

    /*
     * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
     * It is RES0 in Secure and NonSecure state.
     */
    if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
        ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) ||
         (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) {
        irqstate = 0;
    }

    qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    trace_arm_gt_update_irq(timeridx, irqstate);
}

void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
{
    /*
     * Changing security state between Root and Secure/NonSecure, which may
     * happen when switching EL, can change the effective value of CNTHCTL_EL2
     * mask bits. Update the IRQ state accordingly.
     */
    gt_update_irq(cpu, GTIMER_VIRT);
    gt_update_irq(cpu, GTIMER_PHYS);
}

static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
{
    if ((env->cp15.scr_el3 & SCR_ECVEN) &&
        FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        return env->cp15.cntpoff_el2;
    }
    return 0;
}

static uint64_t gt_indirect_access_timer_offset(CPUARMState *env, int timeridx)
{
    /*
     * Return the timer offset to use for indirect accesses to the timer.
     * This is the Offset value as defined in D12.2.4.1 "Operation of the
     * CompareValue views of the timers".
     *
     * The condition here is not always the same as the condition for
     * whether to apply an offset register when doing a direct read of
     * the counter sysreg; those conditions are described in the
     * access pseudocode for each counter register.
     */
    switch (timeridx) {
    case GTIMER_PHYS:
        return gt_phys_raw_cnt_offset(env);
    case GTIMER_VIRT:
        return env->cp15.cntvoff_el2;
    case GTIMER_HYP:
    case GTIMER_SEC:
    case GTIMER_HYPVIRT:
    case GTIMER_S_EL2_PHYS:
    case GTIMER_S_EL2_VIRT:
        return 0;
    default:
        g_assert_not_reached();
    }
}

uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx)
{
    /*
     * Return the timer offset to use for direct accesses to the
     * counter registers CNTPCT and CNTVCT, and for direct accesses
     * to the CNT*_TVAL registers.
     *
     * This isn't exactly the same as the indirect-access offset,
     * because here we also care about what EL the register access
     * is being made from.
     *
     * This corresponds to the access pseudocode for the registers.
     */
    uint64_t hcr;

    switch (timeridx) {
    case GTIMER_PHYS:
        if (arm_current_el(env) >= 2) {
            return 0;
        }
        return gt_phys_raw_cnt_offset(env);
    case GTIMER_VIRT:
        switch (arm_current_el(env)) {
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if (hcr & HCR_E2H) {
                return 0;
            }
            break;
        case 0:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                return 0;
            }
            break;
        }
        return env->cp15.cntvoff_el2;
    case GTIMER_HYP:
    case GTIMER_SEC:
    case GTIMER_HYPVIRT:
    case GTIMER_S_EL2_PHYS:
    case GTIMER_S_EL2_VIRT:
        return 0;
    default:
        g_assert_not_reached();
    }
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /*
         * Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx);
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        if (istatus) {
            /*
             * Next transition is when (count - offset) rolls back over to 0.
             * If offset > count then this is when count == offset;
             * if offset <= count then this is when count == offset + 2^64
             * For the latter case we set nexttick to an "as far in future
             * as possible" value and let the code below handle it.
             */
            if (offset > count) {
                nexttick = offset;
            } else {
                nexttick = UINT64_MAX;
            }
        } else {
            /*
             * Next transition is when (count - offset) == cval, i.e.
             * when count == (cval + offset).
             * If that would overflow, then again we set up the next interrupt
             * for "as far in the future as possible" for the code below.
             */
            if (uadd64_overflow(gt->cval, offset, &nexttick)) {
                nexttick = UINT64_MAX;
            }
        }
        /*
         * Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
    gt_update_irq(cpu, timeridx);
}

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_PHYS);
    return gt_get_countervalue(env) - offset;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
    return gt_get_countervalue(env) - offset;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t do_tval_read(CPUARMState *env, int timeridx, uint64_t offset)
{
    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = gt_direct_access_timer_offset(env, timeridx);

    return do_tval_read(env, timeridx, offset);
}

static void do_tval_write(CPUARMState *env, int timeridx, uint64_t value,
                          uint64_t offset)
{
    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = gt_direct_access_timer_offset(env, timeridx);

    do_tval_write(env, timeridx, value, offset);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /*
         * IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        trace_arm_gt_imask_toggle(timeridx);
        gt_update_irq(cpu, timeridx);
    }
}

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

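/*
 * With HCR_EL2.E2H set, accesses to the EL0 physical timer registers
 * from the EL2&0 regime are redirected to the EL2 physical timer.
 */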
static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

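/* Likewise, EL0 virtual timer accesses redirect to the EL2 virtual timer. */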
gt_virt_redir_timeridx(CPUARMState * env)1604 static int gt_virt_redir_timeridx(CPUARMState *env)
1605 {
1606 switch (arm_mmu_idx(env)) {
1607 case ARMMMUIdx_E20_0:
1608 case ARMMMUIdx_E20_2:
1609 case ARMMMUIdx_E20_2_PAN:
1610 return GTIMER_HYPVIRT;
1611 default:
1612 return GTIMER_VIRT;
1613 }
1614 }
1615
gt_phys_redir_cval_read(CPUARMState * env,const ARMCPRegInfo * ri)1616 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
1617 const ARMCPRegInfo *ri)
1618 {
1619 int timeridx = gt_phys_redir_timeridx(env);
1620 return env->cp15.c14_timer[timeridx].cval;
1621 }
1622
gt_phys_redir_cval_write(CPUARMState * env,const ARMCPRegInfo * ri,uint64_t value)1623 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1624 uint64_t value)
1625 {
1626 int timeridx = gt_phys_redir_timeridx(env);
1627 gt_cval_write(env, ri, timeridx, value);
1628 }
1629
gt_phys_redir_tval_read(CPUARMState * env,const ARMCPRegInfo * ri)1630 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
1631 const ARMCPRegInfo *ri)
1632 {
1633 int timeridx = gt_phys_redir_timeridx(env);
1634 return gt_tval_read(env, ri, timeridx);
1635 }
1636
gt_phys_redir_tval_write(CPUARMState * env,const ARMCPRegInfo * ri,uint64_t value)1637 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1638 uint64_t value)
1639 {
1640 int timeridx = gt_phys_redir_timeridx(env);
1641 gt_tval_write(env, ri, timeridx, value);
1642 }
1643
gt_phys_redir_ctl_read(CPUARMState * env,const ARMCPRegInfo * ri)1644 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
1645 const ARMCPRegInfo *ri)
1646 {
1647 int timeridx = gt_phys_redir_timeridx(env);
1648 return env->cp15.c14_timer[timeridx].ctl;
1649 }
1650
gt_phys_redir_ctl_write(CPUARMState * env,const ARMCPRegInfo * ri,uint64_t value)1651 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1652 uint64_t value)
1653 {
1654 int timeridx = gt_phys_redir_timeridx(env);
1655 gt_ctl_write(env, ri, timeridx, value);
1656 }
1657
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * This is CNTV_TVAL_EL02; unlike the underlying CNTV_TVAL_EL0
     * we always apply CNTVOFF_EL2. Special case that here rather
     * than going into the generic gt_tval_read() and then having
     * to re-detect that it's this register.
     * Note that the accessfn/perms mean we know we're at EL2 or EL3 here.
     */
    return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Similarly for writes to CNTV_TVAL_EL02 */
    do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.cnthctl_el2;
    uint32_t valid_mask =
        R_CNTHCTL_EL0PCTEN_E2H1_MASK |
        R_CNTHCTL_EL0VCTEN_E2H1_MASK |
        R_CNTHCTL_EVNTEN_MASK |
        R_CNTHCTL_EVNTDIR_MASK |
        R_CNTHCTL_EVNTI_MASK |
        R_CNTHCTL_EL0VTEN_MASK |
        R_CNTHCTL_EL0PTEN_MASK |
        R_CNTHCTL_EL1PCTEN_E2H1_MASK |
        R_CNTHCTL_EL1PTEN_MASK;

    if (cpu_isar_feature(aa64_rme, cpu)) {
        valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK;
    }
    if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
        valid_mask |=
            R_CNTHCTL_EL1TVT_MASK |
            R_CNTHCTL_EL1TVCT_MASK |
            R_CNTHCTL_EL1NVPCT_MASK |
            R_CNTHCTL_EL1NVVCT_MASK |
            R_CNTHCTL_EVNTIS_MASK;
    }
    if (cpu_isar_feature(aa64_ecv, cpu)) {
        valid_mask |= R_CNTHCTL_ECV_MASK;
    }

    /* Clear RES0 bits */
    value &= valid_mask;

    raw_write(env, ri, value);

    if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) {
        gt_update_irq(cpu, GTIMER_VIRT);
    } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) {
        gt_update_irq(cpu, GTIMER_PHYS);
    }
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_pel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_S_EL2_PHYS);
}

static void gt_sec_pel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static uint64_t gt_sec_pel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_S_EL2_PHYS);
}

static void gt_sec_pel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static void gt_sec_pel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static void gt_sec_vel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_S_EL2_VIRT);
}

static void gt_sec_vel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static uint64_t gt_sec_vel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_S_EL2_VIRT);
}

static void gt_sec_vel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static void gt_sec_vel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_sel2timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_S_EL2_PHYS);
}

void arm_gt_sel2vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_S_EL2_VIRT);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /*
     * Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32-bit down-counting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /*
     * Secure timer -- this is actually restricted to EL3 and,
     * configurably, to Secure EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
};

/*
 * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which
 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
 * so our implementations here are identical to the normal registers.
 */
static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
    { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
};

static CPAccessResult gt_cntpoff_access(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ECVEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntpoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_PHYS);
}

static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
    .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
    .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
    .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
    .nv2_redirect_offset = 0x1a8,
    .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
};
#else

/*
 * In user-mode most of the generic timer registers are inaccessible;
 * however, modern kernels (4.12+) allow access to cntvct_el0.
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
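    /*
     * For example, with CNTFRQ at 62.5 MHz, gt_cntfrq_period_ns() would
     * be 16, so the counter below advances once per 16 ns of host clock.
     */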
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
};

/*
 * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it also
 * is exposed to userspace by Linux.
 */
static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
    { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
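
/*
 * Worked example (illustrative): with region 0 AP = 0b01 and region 1
 * AP = 0b10, the basic encoding packs two bits per region (0x9) while
 * the extended encoding spaces them four bits apart (0x21), so
 * simple_mpu_ap_bits(0x21) == 0x9 and extended_mpu_ap_bits(0x9) == 0x21.
 */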
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
}

static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
}

static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
}

static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
}

static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Ignore writes that would select a region that is not implemented;
     * this is architecturally UNPREDICTABLE.
     */
    if (value >= cpu->pmsav7_dregion) {
        return;
    }

    env->pmsav7.rnr[M_REG_NS] = value;
}

static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
}

static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pmsav8.hprbar[env->pmsav8.hprselr];
}

static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
}

static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pmsav8.hprlar[env->pmsav8.hprselr];
}

static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    uint32_t n;
    uint32_t bit;
    ARMCPU *cpu = env_archcpu(env);

    /* Ignore writes to unimplemented regions */
    int rmax = MIN(cpu->pmsav8r_hdregion, 32);
    value &= MAKE_64BIT_MASK(0, rmax);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */

    /* Register alias is only valid for first 32 indexes */
    for (n = 0; n < rmax; ++n) {
        bit = extract32(value, n, 1);
        env->pmsav8.hprlar[n] = deposit32(env->pmsav8.hprlar[n], 0, 1, bit);
    }
}
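
/*
 * For instance, writing 0x5 to HPRENR sets the EN bit (bit 0) of
 * HPRLAR0 and HPRLAR2 and clears it in the other implemented regions.
 */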

static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t n;
    uint32_t result = 0x0;
    ARMCPU *cpu = env_archcpu(env);

    /* Register alias is only valid for first 32 indexes */
    for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
        if (env->pmsav8.hprlar[n] & 0x1) {
            result |= (0x1 << n);
        }
    }
    return result;
}

static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Ignore writes that would select a region that is not implemented;
     * this is architecturally UNPREDICTABLE.
     */
    if (value >= cpu->pmsav8r_hdregion) {
        return;
    }

    env->pmsav8.hprselr = value;
}

static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
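    /*
     * Reassemble the region index from the encoding fields: bit 4 from
     * opc0<0>, bits [3:1] from crm<2:0> and bit 0 from opc2<2>, since
     * the PRBAR<n>/PRLAR<n> (and HPRBAR<n>/HPRLAR<n>) registers are
     * spread across several crm/opc2 values.
     */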
    uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
                    (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */

    if (ri->opc1 & 4) {
        if (index >= cpu->pmsav8r_hdregion) {
            return;
        }
        if (ri->opc2 & 0x1) {
            env->pmsav8.hprlar[index] = value;
        } else {
            env->pmsav8.hprbar[index] = value;
        }
    } else {
        if (index >= cpu->pmsav7_dregion) {
            return;
        }
        if (ri->opc2 & 0x1) {
            env->pmsav8.rlar[M_REG_NS][index] = value;
        } else {
            env->pmsav8.rbar[M_REG_NS][index] = value;
        }
    }
}

static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
                    (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);

    if (ri->opc1 & 4) {
        if (index >= cpu->pmsav8r_hdregion) {
            return 0x0;
        }
        if (ri->opc2 & 0x1) {
            return env->pmsav8.hprlar[index];
        } else {
            return env->pmsav8.hprbar[index];
        }
    } else {
        if (index >= cpu->pmsav7_dregion) {
            return 0x0;
        }
        if (ri->opc2 & 0x1) {
            return env->pmsav8.rlar[M_REG_NS][index];
        } else {
            return env->pmsav8.rbar[M_REG_NS][index];
        }
    }
}

static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
    { .name = "PRBAR",
      .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .accessfn = access_tvm_trvm,
      .readfn = prbar_read, .writefn = prbar_write },
    { .name = "PRLAR",
      .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .accessfn = access_tvm_trvm,
      .readfn = prlar_read, .writefn = prlar_write },
    { .name = "PRSELR", .resetvalue = 0,
      .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = prselr_write,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
    { .name = "HPRBAR", .resetvalue = 0,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_NO_RAW,
      .readfn = hprbar_read, .writefn = hprbar_write },
    { .name = "HPRLAR",
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_NO_RAW,
      .readfn = hprlar_read, .writefn = hprlar_write },
    { .name = "HPRSELR", .resetvalue = 0,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .writefn = hprselr_write,
      .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
    { .name = "HPRENR",
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_NO_RAW,
      .readfn = hprenr_read, .writefn = hprenr_write },
};

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /*
     * Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
};

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /*
             * Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using the Long-descriptor translation table format.
             */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /*
             * In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    raw_write(env, ri, value);
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
     * Flush if that might be changing. Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        uint16_t mask = ARMMMUIdxBit_E20_2 |
                        ARMMMUIdxBit_E20_2_PAN |
                        ARMMMUIdxBit_E20_0;
        tlb_flush_by_mmuidx(env_cpu(env), mask);
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change of the VMID in VTTBR invalidates the stage 2 and the
     * combined stage 1&2 TLBs (EL10_1 and EL10_0).
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
    }
    raw_write(env, ri, value);
}

static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_FAR_EL1,
      .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_ESR_EL1,
      .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_TTBR0_EL1,
      .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
      .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_TTBR1_EL1,
      .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
      .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_TCR_EL1,
      .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
      .writefn = vmsa_tcr_el12_write,
      .raw_writefn = raw_write,
      .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
};

/*
 * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * the QEMU TLBs or adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .accessfn = access_tvm_trvm,
    .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = {
        offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
        offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
    },
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /*
     * TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /*
     * XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /*
     * RAZ/WI the whole crn=15 space when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /*
     * The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
};

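/*
 * When EL2 is enabled, EL1 reads of MIDR and MPIDR are satisfied from
 * VPIDR_EL2 and VMPIDR_EL2, so a hypervisor can present a different
 * CPU identity to its guest.
 */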
midr_read(CPUARMState * env,const ARMCPRegInfo * ri)3015 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3016 {
3017 unsigned int cur_el = arm_current_el(env);
3018
3019 if (arm_is_el2_enabled(env) && cur_el == 1) {
3020 return env->cp15.vpidr_el2;
3021 }
3022 return raw_read(env, ri);
3023 }
3024
mpidr_read_val(CPUARMState * env)3025 static uint64_t mpidr_read_val(CPUARMState *env)
3026 {
3027 ARMCPU *cpu = env_archcpu(env);
3028 uint64_t mpidr = cpu->mp_affinity;
3029
3030 if (arm_feature(env, ARM_FEATURE_V7MP)) {
3031 mpidr |= (1U << 31);
3032 /*
3033 * Cores which are uniprocessor (non-coherent)
3034 * but still implement the MP extensions set
3035 * bit 30. (For instance, Cortex-R5).
3036 */
3037 if (cpu->mp_is_up) {
3038 mpidr |= (1u << 30);
3039 }
3040 }
3041 return mpidr;
3042 }
3043
mpidr_read(CPUARMState * env,const ARMCPRegInfo * ri)3044 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3045 {
3046 unsigned int cur_el = arm_current_el(env);
3047
3048 if (arm_is_el2_enabled(env) && cur_el == 1) {
3049 return env->cp15.vmpidr_el2;
3050 }
3051 return mpidr_read_val(env);
3052 }

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AMAIR_EL1,
      .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns) } },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP_EL1;
    }
    return CP_ACCESS_OK;
}
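
/*
 * Example of the check above (illustrative): an MRS or MSR of the DAIF
 * register at EL0 traps to EL1 unless SCTLR_EL1.UMA grants userspace
 * access to the interrupt masks; at EL1 and above the access always
 * succeeds.
 */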

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};

static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_DIT;
}

static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
}

static const ARMCPRegInfo dit_reginfo = {
    .name = "DIT", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_dit_read, .writefn = aa64_dit_write
};

static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SSBS;
}

static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
}

static const ARMCPRegInfo ssbs_reginfo = {
    .name = "SSBS", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
};

static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Coherency or Persistence... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
{
    /* Cache invalidate/clean to Point of Unification... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. */
        if (arm_hcr_el2_eff(env) & hcrflags) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
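
/*
 * Trap routing example (illustrative): a DC CVAU issued at EL0 with
 * SCTLR_EL1.UCI clear traps to EL1; with UCI set it falls through to
 * the EL1 case, so the same instruction instead traps to EL2 when
 * HCR_EL2.TPU or TOCU (the hcrflags for that op) is set.
 */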

static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
}

static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
}

static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL1;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
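
/*
 * Worked example (illustrative): dcz_blocksize is log2 of the DC ZVA
 * block size in words, so a CPU with a 64-byte block has
 * dcz_blocksize == 4 and DCZID_EL0 reads as 0x4 when ZVA is usable,
 * or 0x14 (DZP, bit 4, set) when aa64_zva_access() would trap.
 */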

static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /*
         * Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    /* ??? Lots of these bits are not implemented. */

    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
        if (ri->opc1 == 6) { /* SCTLR_EL3 */
            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
        } else {
            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
                       SCTLR_ATA0 | SCTLR_ATA);
        }
    }

    if (raw_read(env, ri) == value) {
        /*
         * Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);

    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}
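
/*
 * Illustrative example of the MTE masking above: on a CPU without
 * FEAT_MTE, an AArch64 write to SCTLR_EL1 with ATA (bit 43) set stores
 * the value with ITFSB/TCF0/TCF/ATA0/ATA cleared; and if the masked
 * value matches the current register contents, the TLB flush is
 * skipped entirely.
 */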

static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * Some MDCR_EL3 bits affect whether PMU counters are running:
     * if we are trying to change any of those then we must
     * bracket this update with PMU start/finish calls.
     */
    bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;

    if (pmu_op) {
        pmu_op_start(env);
    }
    env->cp15.mdcr_el3 = value;
    if (pmu_op) {
        pmu_op_finish(env);
    }
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
    mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
}

static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * Some MDCR_EL2 bits affect whether PMU counters are running:
     * if we are trying to change any of those then we must
     * bracket this update with PMU start/finish calls.
     */
    bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;

    if (pmu_op) {
        pmu_op_start(env);
    }
    env->cp15.mdcr_el2 = value;
    if (pmu_op) {
        pmu_op_finish(env);
    }
}

static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);

        if (hcr_nv == (HCR_NV | HCR_NV1)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

#ifdef CONFIG_USER_ONLY
/*
 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
 * code to get around W^X restrictions, where one region is writable and the
 * other is executable.
 *
 * Since the executable region is never written to we cannot detect code
 * changes when running in user mode, and rely on the emulated JIT telling us
 * that the code has changed by executing this instruction.
 */
static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t icache_line_mask, start_address, end_address;
    const ARMCPU *cpu;

    cpu = env_archcpu(env);

    icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
    start_address = value & ~icache_line_mask;
    end_address = value | icache_line_mask;

    mmap_lock();

    tb_invalidate_phys_range(env_cpu(env), start_address, end_address);

    mmap_unlock();
}
#endif
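
/*
 * Worked example (illustrative): CTR_EL0.IminLine (bits [3:0]) holds
 * log2 of the icache line size in 4-byte words, so IminLine == 4 gives
 * a line mask of (4 << 4) - 1 == 63.  An IC IVAU with address 0x12345
 * then invalidates any TBs overlapping [0x12340, 0x1237f].
 */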

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /*
     * Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .fgt = FGT_DCZID_EL0,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
      .fgt = FGT_DCZVA,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /*
     * Instruction cache ops. All of these except `IC IVAU` NOP because we
     * don't emulate caches.
     */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .fgt = FGT_ICIALLUIS,
      .accessfn = access_ticab },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .fgt = FGT_ICIALLU,
      .accessfn = access_tocu },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W,
      .fgt = FGT_ICIVAU,
      .accessfn = access_tocu,
#ifdef CONFIG_USER_ONLY
      .type = ARM_CP_NO_RAW,
      .writefn = ic_ivau_write
#else
      .type = ARM_CP_NOP
#endif
    },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .fgt = FGT_DCIVAC,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .fgt = FGT_DCISW,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .fgt = FGT_DCCVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .fgt = FGT_DCCSW,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .fgt = FGT_DCCVAU,
      .accessfn = access_tocu },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .fgt = FGT_DCCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .fgt = FGT_DCCISW,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fgt = FGT_PAR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_nv1,
      .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_nv1,
      .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /*
     * We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .nv2_redirect_offset = 0x240,
      .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW,
      .writefn = mdcr_el3_write,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
};

/* These are present only when EL1 supports AArch32 */
static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
};

static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= HCR_TERR | HCR_TEA;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= HCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_fwb, cpu)) {
            valid_mask |= HCR_FWB;
        }
        if (cpu_isar_feature(aa64_rme, cpu)) {
            valid_mask |= HCR_GPF;
        }
        if (cpu_isar_feature(aa64_nv, cpu)) {
            valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
        }
        if (cpu_isar_feature(aa64_nv2, cpu)) {
            valid_mask |= HCR_NV2;
        }
    }

    if (cpu_isar_feature(any_evt, cpu)) {
        valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
    } else if (cpu_isar_feature(any_half_evt, cpu)) {
        valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* RW is RAO/WI if EL1 is AArch64 only */
    if (!cpu_isar_feature(aa64_aa32_el1, cpu)) {
        value |= HCR_RW;
    }

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     * HCR_FWB changes the interpretation of stage2 descriptor bits
     * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
     */
    if ((env->cp15.hcr_el2 ^ value) &
        (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the BQL, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or
     * VFNMI, it is never possible for it to be taken immediately
     * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
     * at EL0 or EL1, and HCR can only be written at EL2.
     */
    g_assert(bql_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
    arm_cpu_update_vserr(cpu);
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        arm_cpu_update_vinmi(cpu);
        arm_cpu_update_vfnmi(cpu);
    }
}
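
/*
 * Example of the RES0 masking above (illustrative): on an ARMv8.0 CPU
 * without FEAT_VH, valid_mask covers bits [33:0] only, so a write that
 * sets HCR_E2H (bit 34) is silently dropped by "value &= valid_mask";
 * on a CPU with aa64_vh the same write sticks and enables E2H
 * redirection.
 */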

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}

static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* hcr_write will set the RES1 bits on an AArch64-only CPU */
    hcr_write(env, ri, 0);
}

/*
 * Return the effective value of HCR_EL2, at the given security state.
 * Bits that are not included here:
 * RW (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
{
    uint64_t ret = env->cp15.hcr_el2;

    assert(space != ARMSS_Root);

    if (!arm_is_el2_enabled_secstate(env, space)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state". This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2". With lots of enumeration
         * on a per-field basis. In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
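
/*
 * TGE example (illustrative): with HCR_EL2.TGE set and E2H clear, the
 * effective value reads FMO/IMO/AMO as 1 (physical interrupts target
 * EL2) while VF/VI/VSE and the various EL1 trap controls read as 0,
 * regardless of what was actually written to the register.
 */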

uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return 0;
    }
    return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
}

/*
 * Corresponds to ARM pseudocode function ELIsInHost().
 */
bool el_is_in_host(CPUARMState *env, int el)
{
    uint64_t mask;

    /*
     * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
     * Perform the simplest bit tests first, and validate EL2 afterward.
     */
    if (el & 1) {
        return false; /* EL1 or EL3 */
    }

    /*
     * Note that hcr_write() checks isar_feature_aa64_vh(),
     * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
     */
    mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
    if ((env->cp15.hcr_el2 & mask) != mask) {
        return false;
    }

    /* TGE and/or E2H set: double check those bits are currently legal. */
    return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
}

static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t valid_mask = 0;

    /* FEAT_MOPS adds MSCEn and MCE2 */
    if (cpu_isar_feature(aa64_mops, cpu)) {
        valid_mask |= HCRX_MSCEN | HCRX_MCE2;
    }

    /* FEAT_NMI adds TALLINT, VINMI and VFNMI */
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
    }
    /* FEAT_CMOW adds CMOW */
    if (cpu_isar_feature(aa64_cmow, cpu)) {
        valid_mask |= HCRX_CMOW;
    }
    /* FEAT_XS adds FGTnXS, FnXS */
    if (cpu_isar_feature(aa64_xs, cpu)) {
        valid_mask |= HCRX_FGTNXS | HCRX_FNXS;
    }

    /* Clear RES0 bits. */
    env->cp15.hcrx_el2 = value & valid_mask;

    /*
     * Updates to VINMI and VFNMI require us to update the status of
     * virtual NMIs, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the BQL, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCRX pends a VINMI or VFNMI it is never
     * possible for it to be taken immediately, because VINMI and
     * VFNMI are masked unless running at EL0 or EL1, and HCRX
     * can only be written at EL2.
     */
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        g_assert(bql_locked());
        arm_cpu_update_vinmi(cpu);
        arm_cpu_update_vfnmi(cpu);
    }
}

static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 2
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo hcrx_el2_reginfo = {
    .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
    .type = ARM_CP_IO,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
    .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
    .nv2_redirect_offset = 0xa0,
    .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
};

/* Return the effective value of HCRX_EL2. */
uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
    /*
     * The bits in this register behave as 0 for all purposes other than
     * direct reads of the register if SCR_EL3.HXEn is 0.
     * If EL2 is not enabled in the current security state, then the
     * bit may behave as if 0, or as if 1, depending on the bit.
     * For the moment, we treat the EL2-disabled case as taking
     * priority over the HXEn-disabled case. This is true for the only
     * bit for a feature which we implement where the answer is different
     * for the two cases (MSCEn for FEAT_MOPS).
     * This may need to be revisited for future bits.
     */
    if (!arm_is_el2_enabled(env)) {
        uint64_t hcrx = 0;
        if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
            /* MSCEn behaves as 1 if EL2 is not enabled */
            hcrx |= HCRX_MSCEN;
        }
        return hcrx;
    }
    if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return 0;
    }
    return env->cp15.hcrx_el2;
}

static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
        value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
    }
    return value;
}
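
/*
 * Illustrative reading of the pair above: with an AArch32 EL3 and
 * NSACR.CP10 == 0, Non-secure writes to HCPTR leave TCP11/TCP10
 * unchanged and reads deliver those two bits as 1, i.e. the bits are
 * RAO/WI from the Non-secure side.
 */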

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .nv2_redirect_offset = 0x78,
      .resetfn = hcr_reset,
      .writefn = hcr_write, .raw_writefn = raw_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      .nv2_redirect_offset = 0x40,
      /* no .writefn needed as this can't cause an ASID change */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write, .raw_writefn = raw_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
      .nv2_redirect_offset = 0x20,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .nv2_redirect_offset = 0x90,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /*
       * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
      .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .nv2_redirect_offset = 0x60,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .nv2_redirect_offset = 0x80,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
};

static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_UNDEFINED;
}

static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = sel2_access,
      .nv2_redirect_offset = 0x30,
      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
      .access = PL2_RW, .accessfn = sel2_access,
      .nv2_redirect_offset = 0x48,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
#ifndef CONFIG_USER_ONLY
    /* Secure EL2 Physical Timer */
    { .name = "CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .readfn = gt_sec_pel2_tval_read,
      .writefn = gt_sec_pel2_tval_write,
      .resetfn = gt_sec_pel2_timer_reset,
    },
    { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_pel2_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].cval),
      .writefn = gt_sec_pel2_cval_write, .raw_writefn = raw_write,
    },
    /* Secure EL2 Virtual Timer */
    { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .readfn = gt_sec_vel2_tval_read,
      .writefn = gt_sec_vel2_tval_write,
      .resetfn = gt_sec_vel2_timer_reset,
    },
    { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_vel2_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].cval),
      .writefn = gt_sec_vel2_cval_write, .raw_writefn = raw_write,
    },
#endif
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3 or EL2.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_UNDEFINED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write, .raw_writefn = raw_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change */
      .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access */
        return CP_ACCESS_OK;
    }
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access with NVx == 101 */
        if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return e2h_access(env, ri, isread);
}

static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access with NVx == 101 */
        if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return e2h_access(env, ri, isread);
}

/* Test if system register redirection is to occur in the current state. */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}

static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPReadFn *readfn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register. */
        ri = ri->opaque;
        readfn = ri->readfn;
    } else {
        readfn = ri->orig_readfn;
    }
    if (readfn == NULL) {
        readfn = raw_read;
    }
    return readfn(env, ri);
}

static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    CPWriteFn *writefn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register. */
        ri = ri->opaque;
        writefn = ri->writefn;
    } else {
        writefn = ri->orig_writefn;
    }
    if (writefn == NULL) {
        writefn = raw_write;
    }
    writefn(env, ri, value);
}
4465
el2_e2h_e12_read(CPUARMState * env,const ARMCPRegInfo * ri)4466 static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri)
4467 {
4468 /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
4469 return ri->orig_readfn(env, ri->opaque);
4470 }
4471
el2_e2h_e12_write(CPUARMState * env,const ARMCPRegInfo * ri,uint64_t value)4472 static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri,
4473 uint64_t value)
4474 {
4475 /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
4476 return ri->orig_writefn(env, ri->opaque, value);
4477 }
4478
el2_e2h_e12_access(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)4479 static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
4480 const ARMCPRegInfo *ri,
4481 bool isread)
4482 {
4483 if (arm_current_el(env) == 1) {
4484 /*
4485 * This must be a FEAT_NV access (will either trap or redirect
4486 * to memory). None of the registers with _EL12 aliases want to
4487 * apply their trap controls for this kind of access, so don't
4488 * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
4489 */
4490 return CP_ACCESS_OK;
4491 }
4492 /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
4493 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
4494 return CP_ACCESS_UNDEFINED;
4495 }
4496 if (ri->orig_accessfn) {
4497 return ri->orig_accessfn(env, ri->opaque, isread);
4498 }
4499 return CP_ACCESS_OK;
4500 }
4501
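/*
 * For FEAT_VHE, create the FOO_EL12 aliases for the registers in the
 * table below and rewire the FOO_EL1 definitions so that, when E2H
 * redirection applies, they access the FOO_EL2 state instead.
 */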
static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
{
    struct E2HAlias {
        uint32_t src_key, dst_key, new_key;
        const char *src_name, *dst_name, *new_name;
        bool (*feature)(const ARMISARegisters *id);
    };

#define K(op0, op1, crn, crm, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

    static const struct E2HAlias aliases[] = {
        { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
        { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
          "CPACR", "CPTR_EL2", "CPACR_EL12" },
        { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
        { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
        { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
        { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
        { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
        { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
        { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
        { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
        { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
          "VBAR", "VBAR_EL2", "VBAR_EL12" },
        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },

        /*
         * Note that redirection of ZCR is mentioned in the description
         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
         * not in the summary table.
         */
        { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
        { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6),
          "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },

        { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },

        { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
          "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
          isar_feature_aa64_scxtnum },

        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
    };
#undef K

    size_t i;

    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
        const struct E2HAlias *a = &aliases[i];
        ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
        bool ok;

        if (a->feature && !a->feature(&cpu->isar)) {
            continue;
        }

        src_reg = g_hash_table_lookup(cpu->cp_regs,
                                      (gpointer)(uintptr_t)a->src_key);
        dst_reg = g_hash_table_lookup(cpu->cp_regs,
                                      (gpointer)(uintptr_t)a->dst_key);
        g_assert(src_reg != NULL);
        g_assert(dst_reg != NULL);

        /* Cross-compare names to detect typos in the keys. */
        g_assert(strcmp(src_reg->name, a->src_name) == 0);
        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);

        /* None of the core system registers use opaque; we will. */
        g_assert(src_reg->opaque == NULL);

        /* Create alias before redirection so we dup the right data. */
        new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));

        new_reg->name = a->new_name;
        new_reg->type |= ARM_CP_ALIAS;
        /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
        new_reg->access &= PL2_RW | PL3_RW;
        /* The new_reg op fields are as per new_key, not the target reg */
        new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
            >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
        new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
            >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
        new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
            >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
        new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
            >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
        new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
            >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
        new_reg->opaque = src_reg;
        new_reg->orig_readfn = src_reg->readfn ?: raw_read;
        new_reg->orig_writefn = src_reg->writefn ?: raw_write;
        new_reg->orig_accessfn = src_reg->accessfn;
        if (!new_reg->raw_readfn) {
            new_reg->raw_readfn = raw_read;
        }
        if (!new_reg->raw_writefn) {
            new_reg->raw_writefn = raw_write;
        }
        new_reg->readfn = el2_e2h_e12_read;
        new_reg->writefn = el2_e2h_e12_write;
        new_reg->accessfn = el2_e2h_e12_access;

        /*
         * If the _EL1 register is redirected to memory by FEAT_NV2,
         * then it shares the offset with the _EL12 register,
         * and which one is redirected depends on HCR_EL2.NV1.
         */
        if (new_reg->nv2_redirect_offset) {
            assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
            new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
            new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
        }

        ok = g_hash_table_insert(cpu->cp_regs,
                                 (gpointer)(uintptr_t)a->new_key, new_reg);
        g_assert(ok);

        src_reg->opaque = dst_reg;
        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
        if (!src_reg->raw_readfn) {
            src_reg->raw_readfn = raw_read;
        }
        if (!src_reg->raw_writefn) {
            src_reg->raw_writefn = raw_write;
        }
        src_reg->readfn = el2_e2h_read;
        src_reg->writefn = el2_e2h_write;
    }
}
#endif

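/*
 * CTR_EL0 is readable from EL0 only when the relevant SCTLR.UCT bit
 * permits it (SCTLR_EL2 in the HCR_EL2.{E2H,TGE} == {1,1} regime,
 * SCTLR_EL1 otherwise), and HCR_EL2.TID2 traps EL0/EL1 reads to EL2.
 */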
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL1;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

/*
 * Check for traps to RAS registers, which are controlled
 * by HCR_EL2.TERR and SCR_EL3.TERR.
 */
static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

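/*
 * DISR_EL1 accesses are redirected to VDISR_EL2 while virtual SError
 * injection is in effect (HCR_EL2.AMO below EL2), and are RAZ/WI when
 * SCR_EL3.EA routes SErrors to EL3.
 */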
static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
        return env->cp15.vdisr_el2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
        return 0; /* RAZ/WI */
    }
    return env->cp15.disr_el1;
}

static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
        env->cp15.vdisr_el2 = val;
        return;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
        return; /* RAZ/WI */
    }
    env->cp15.disr_el1 = val;
}

/*
 * Minimal RAS implementation with no Error Records.
 * Which means that all of the Error Record registers:
 *   ERXADDR_EL1
 *   ERXCTLR_EL1
 *   ERXFR_EL1
 *   ERXMISC0_EL1
 *   ERXMISC1_EL1
 *   ERXMISC2_EL1
 *   ERXMISC3_EL1
 *   ERXPFGCDN_EL1 (RASv1p1)
 *   ERXPFGCTL_EL1 (RASv1p1)
 *   ERXPFGF_EL1 (RASv1p1)
 *   ERXSTATUS_EL1
 * and
 *   ERRSELR_EL1
 * may generate UNDEFINED, which is the effect we get by not
 * listing them at all.
 *
 * These registers have fine-grained trap bits, but UNDEF-to-EL1
 * is higher priority than FGT-to-EL2 so we do not need to list them
 * in order to check for an FGT.
 */
static const ARMCPRegInfo minimal_ras_reginfo[] = {
    { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
      .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
    { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL1_R, .accessfn = access_terr,
      .fgt = FGT_ERRIDR_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
      .nv2_redirect_offset = 0x500,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
    { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
      .nv2_redirect_offset = 0x508,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
};

/*
 * Return the exception level to which exceptions should be taken
 * via SVEAccessTrap. This excludes the check for whether the exception
 * should be routed through AArch64.AdvSIMDFPAccessTrap. That can easily
 * be found by testing 0 < fp_exception_el < sve_exception_el.
 *
 * C.f. the ARM pseudocode function CheckSVEEnabled. Note that the
 * pseudocode does *not* separate out the FP trap checks, but has them
 * all in one function.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1 && !el_is_in_host(env, el)) {
        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            return 1;
        }
    }

    if (el <= 2 && arm_is_el2_enabled(env)) {
        /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
        if (env->cp15.hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
            case 1:
                if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3. Since EZ is negative we must check for EL3. */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
        return 3;
    }
#endif
    return 0;
}

/*
 * Return the exception level to which exceptions should be taken for SME.
 * C.f. the ARM pseudocode function CheckSMEAccess.
 */
int sme_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1 && !el_is_in_host(env, el)) {
        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            return 1;
        }
    }

    if (el <= 2 && arm_is_el2_enabled(env)) {
        /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
        if (env->cp15.hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
            case 1:
                if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3. Since ESM is negative we must check for EL3. */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
        return 3;
    }
#endif
    return 0;
}

/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t *cr = env->vfp.zcr_el;
    uint32_t map = cpu->sve_vq.map;
    uint32_t len = ARM_MAX_VQ - 1;

    if (sm) {
        cr = env->vfp.smcr_el;
        map = cpu->sme_vq.map;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        len = MIN(len, 0xf & (uint32_t)cr[1]);
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        len = MIN(len, 0xf & (uint32_t)cr[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        len = MIN(len, 0xf & (uint32_t)cr[3]);
    }

    map &= MAKE_64BIT_MASK(0, len + 1);
    if (map != 0) {
        return 31 - clz32(map);
    }

    /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
    assert(sm);
    return ctz32(cpu->sme_vq.map);
}

uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
{
    return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
}

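/*
 * Writing ZCR can shrink the effective vector length; when it does we
 * must immediately truncate the SVE register state to the new length
 * (see the comment above aarch64_sve_narrow_vq).
 */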
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_vqm1_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI. */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_vqm1_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

static const ARMCPRegInfo zcr_reginfo[] = {
    { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
      .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
      .access = PL1_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
      .writefn = zcr_write, .raw_writefn = raw_write },
    { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
      .writefn = zcr_write, .raw_writefn = raw_write },
    { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
      .writefn = zcr_write, .raw_writefn = raw_write },
};

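/*
 * TPIDR2_EL0 accesses from EL0 trap to EL1 unless the applicable
 * SCTLR.EnTP2 bit is set, and below EL3 they trap to EL3 unless
 * SCR_EL3.EnTP2 is set.
 */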
static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnTP2)) {
            return CP_ACCESS_TRAP_EL1;
        }
    }
    /* TODO: FEAT_FGT */
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ENTP2)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */
    if (arm_current_el(env) == 2
        && arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_current_el(env) < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* ResetSVEState */
static void arm_reset_sve_state(CPUARMState *env)
{
    memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
    /* Recall that FFR is stored as pregs[16]. */
    memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
    vfp_set_fpsr(env, 0x0800009f);
}

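/*
 * Update SVCR, applying the architectural side effects of changing
 * PSTATE.SM (ResetSVEState) and PSTATE.ZA (zeroing ZA storage), then
 * rebuild the cached hflags which encode both bits.
 */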
void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
{
    uint64_t change = (env->svcr ^ new) & mask;

    if (change == 0) {
        return;
    }
    env->svcr ^= change;

    if (change & R_SVCR_SM_MASK) {
        arm_reset_sve_state(env);
    }

    /*
     * ResetSMEState.
     *
     * SetPSTATE_ZA zeros on enable and disable. We can zero this only
     * on enable: while disabled, the storage is inaccessible and the
     * value does not matter. We're not saving the storage in vmstate
     * when disabled either.
     */
    if (change & new & R_SVCR_ZA_MASK) {
        memset(&env->za_state, 0, sizeof(env->za_state));
    }

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }
}

static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    aarch64_set_svcr(env, value, -1);
}

static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_vqm1_for_el(env, cur_el);
    uint64_t valid_mask = R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
    int new_len;

    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
    if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
        valid_mask |= R_SMCR_EZT0_MASK;
    }
    value &= valid_mask;
    raw_write(env, ri, value);

    /*
     * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
     * when SVL is widened (old values kept, or zeros). Choose to keep the
     * current values for simplicity. But for QEMU internals, we must still
     * apply the narrower SVL to the Zregs and Pregs -- see the comment
     * above aarch64_sve_narrow_vq.
     */
    new_len = sve_vqm1_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

static const ARMCPRegInfo sme_reginfo[] = {
    { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
      .access = PL0_RW, .accessfn = access_tpidr2,
      .fgt = FGT_NTPIDR2_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
    { .name = "SVCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, svcr),
      .writefn = svcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
      .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
      .access = PL1_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
      .access = PL2_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
      .access = PL3_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
      .access = PL1_R, .accessfn = access_aa64_tid1,
      /*
       * IMPLEMENTOR = 0 (software)
       * REVISION = 0 (implementation defined)
       * SMPS = 0 (no streaming execution priority in QEMU)
       * AFFINITY = 0 (streaming sve mode not shared with other PEs)
       */
      .type = ARM_CP_CONST, .resetvalue = 0, },
    /*
     * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES0.
     */
    { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_smpri,
      .fgt = FGT_NSMPRI_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
      .nv2_redirect_offset = 0x1f8,
      .access = PL2_RW, .accessfn = access_smprimap,
      .type = ARM_CP_CONST, .resetvalue = 0 },
};

static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* L0GPTSZ is RO; other bits not mentioned are RES0. */
    uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
        R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
        R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;

    env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
}

static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
                                     env_archcpu(env)->reset_l0gptsz);
}

static const ARMCPRegInfo rme_reginfo[] = {
    { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
      .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
      .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
    { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
    { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
    { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NOP },
};

static const ARMCPRegInfo rme_mte_reginfo[] = {
    { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NOP },
};

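/*
 * FEAT_NMI: PSTATE.ALLINT is kept in env->pstate; the MSR/MRS accessors
 * below mask it in and out, and writes from EL1 can be trapped to EL2
 * via HCRX_EL2.TALLINT.
 */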
static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
}

static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_ALLINT;
}

static CPAccessResult aa64_allint_access(CPUARMState *env,
                                         const ARMCPRegInfo *ri, bool isread)
{
    if (!isread && arm_current_el(env) == 1 &&
        (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo nmi_reginfo[] = {
    { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = aa64_allint_access,
      .fieldoffset = offsetof(CPUARMState, pstate),
      .writefn = aa64_allint_write, .readfn = aa64_allint_read,
      .resetfn = arm_cp_reset_ignore },
};

#ifndef CONFIG_USER_ONLY
/*
 * We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR1 and ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif

/*
 * Shared logic between LORID and the rest of the LOR* registers.
 * Secure state exclusion has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env,
                                    const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* UNDEF if SCR_EL3.NS == 0 */
        return CP_ACCESS_UNDEFINED;
    }
    return access_lor_ns(env, ri, isread);
}

/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORSA_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LOREA_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORN_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORC_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lor_ns,
      .fgt = FGT_LORID_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
};

static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_is_el2_enabled(env) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APDAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APDAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APDBKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APDBKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APGAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APGAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APIAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APIAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APIBKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APIBKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
};

static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest. There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}

/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
};

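/*
 * DC CVAP/CVADP: probe the guest VA for readability, then, in system
 * mode, persist the containing cache line's worth of host memory via
 * memory_region_writeback(). DminLine from CTR_EL0 gives the line size.
 */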
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
#ifdef CONFIG_TCG
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = arm_env_mmu_index(env);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {
#ifndef CONFIG_USER_ONLY

        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_writeback(mr, offset, dline_size);
        }
#endif /* CONFIG_USER_ONLY */
    }
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .fgt = FGT_DCCVAP,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .fgt = FGT_DCCVADP,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};

static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    CPAccessResult nv1 = access_nv1(env, ri, isread);

    if (nv1 != CP_ACCESS_OK) {
        return nv1;
    }
    return access_mte(env, ri, isread);
}

static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /*
     * TFSR_EL2: similar to generic access_mte(), but we need to
     * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
     * if NV2 is enabled then we will redirect this to TFSR_EL1
     * after doing the HCR and SCR ATA traps; otherwise this will
     * be a trap to EL2 and the HCR/SCR traps do not apply.
     */
    int el = arm_current_el(env);

    if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
        return CP_ACCESS_OK;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

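/*
 * PSTATE.TCO lives in env->pstate, so like ALLINT it is accessed via
 * explicit read/write functions rather than a fieldoffset.
 */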
static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_TCO;
}

static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
}

static const ARMCPRegInfo mte_reginfo[] = {
    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tfsr_el1,
      .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tfsr_el2,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL1_W,
      .fgt = FGT_DCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
      .fgt = FGT_DCISW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL1_W,
      .fgt = FGT_DCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
      .fgt = FGT_DCISW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
      .fgt = FGT_DCCSW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
      .fgt = FGT_DCCSW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
      .fgt = FGT_DCCISW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
      .fgt = FGT_DCCISW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
};

static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_CONST, .access = PL0_RW, },
};

static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVAP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVAP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVADP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVADP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
      .access = PL0_W, .type = ARM_CP_DC_GVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
      .fgt = FGT_DCZVA,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
      .fgt = FGT_DCZVA,
#endif
    },
};

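/*
 * SCXTNUM_ELx accesses can be denied in several ways: SCTLR_ELx.TSCXT
 * traps EL0 accesses to SCXTNUM_EL0, HCR_EL2.EnSCXT gates accesses
 * below EL2, and SCR_EL3.EnSCXT gates accesses below EL3.
 */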
static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    int el = arm_current_el(env);

    if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
        if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
            if (hcr & HCR_TGE) {
                return CP_ACCESS_TRAP_EL2;
            }
            return CP_ACCESS_TRAP_EL1;
        }
    } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_scxtnum_el1(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    CPAccessResult nv1 = access_nv1(env, ri, isread);

    if (nv1 != CP_ACCESS_OK) {
        return nv1;
    }
    return access_scxtnum(env, ri, isread);
}

static const ARMCPRegInfo scxtnum_reginfo[] = {
    { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL0_RW, .accessfn = access_scxtnum,
      .fgt = FGT_SCXTNUM_EL0,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
    { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL1_RW, .accessfn = access_scxtnum_el1,
      .fgt = FGT_SCXTNUM_EL1,
      .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
    { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL2_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
    { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
};

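/*
 * The FEAT_FGT trap-control registers are EL2 state; EL2 accesses to
 * them trap to EL3 unless SCR_EL3.FGTEN is set.
 */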
static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 2 &&
        arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo fgt_reginfo[] = {
    { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .nv2_redirect_offset = 0x1b8,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
    { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
      .nv2_redirect_offset = 0x1c0,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
    { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
      .nv2_redirect_offset = 0x1d0,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
    { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
      .nv2_redirect_offset = 0x1d8,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
    { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
      .nv2_redirect_offset = 0x1c8,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
};

static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /*
     * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
     * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
     * about the RESS bits at the top -- we choose the "generate an EL2
     * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
     * the ptw.c code detect the resulting invalid address).
     */
    env->cp15.vncr_el2 = value & ~0xfffULL;
}

static const ARMCPRegInfo nv2_reginfo[] = {
    { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .writefn = vncr_write,
      .nv2_redirect_offset = 0xb0,
      .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
};

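/*
 * FEAT_SPECRES: the CFP/DVP/CPP RCTX prediction-restriction ops are
 * NOPs in QEMU, but EL0 use still honours SCTLR_EL1.EnRCTX, and EL1
 * use is trapped by HCR_EL2.NV for FEAT_NV.
 */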
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP_EL1;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .fgt = FGT_CFPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .fgt = FGT_DVPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .fgt = FGT_CPPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .fgt = FGT_CFPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .fgt = FGT_DVPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .fgt = FGT_CPPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
};

static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_tid4,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
};

static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_joscr_jmcr(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    /*
     * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
     * in v7A, not in v8A.
     */
    if (!arm_feature(env, ARM_FEATURE_V8) &&
        arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TJDBX)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
};

static const ARMCPRegInfo contextidr_el2 = {
    .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
    .access = PL2_RW,
    .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
};

static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el1nvpct,
      .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el1nvvct,
      .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
      .access = PL2_RW, .accessfn = access_el1nvpct,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = access_el1nvvct,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
};

/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and ACTLR_EL2[63:32].
 * They exist only if the ID_MMFR4.AC2 field is non-zero: that is
 * never the case for ARMv7, optional in ARMv8, and mandatory from
 * ARMv8.2 up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
};

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    ARMISARegisters *isar = &cpu->isar;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        define_tlb_insn_regs(cpu);
        define_at_insn_regs(cpu);
    }
#endif

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_PFR0)},
            /*
             * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = GET_IDREG(isar, ID_PFR1),
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_DFR0)},
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_AFR0)},
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_MMFR0)},
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_MMFR1)},
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_MMFR2)},
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_MMFR3)},
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_ISAR0)},
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_ISAR1)},
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_ISAR2)},
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_ISAR3) },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_ISAR4) },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_ISAR5) },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_MMFR4)},
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = GET_IDREG(isar, ID_ISAR6) },
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_tid4,
            .fgt = FGT_CLIDR_EL1,
            .resetvalue = GET_IDREG(isar, CLIDR)
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * v8 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all be RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         * ID registers which are AArch64 views of the AArch32 ID registers
         * which already existed in v6 and v7 are handled elsewhere,
         * in v6_idregs[].
         */
        int i;
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
             * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = GET_IDREG(isar, ID_AA64PFR0)
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64PFR1)},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)},
            { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)},
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64DFR0) },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64DFR1) },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64AFR0) },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64AFR1) },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)},
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)},
            { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)},
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)},
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) },
            { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            /*
             * "0, c0, c3, {0,1,2}" are the encodings corresponding to
             * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
             * as RAZ, since it is in the "reserved for future ID
             * registers, RAZ" part of the AArch32 encoding space.
             */
            { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            /*
             * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
             * they're also RAZ for AArch64, and in v8 are gradually
             * being filled with AArch64-view-of-AArch32-ID-register
             * for new ID registers.
             */
            { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_PFR2)},
            { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_DFR1)},
            { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = GET_IDREG(isar, ID_MMFR5)},
            { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = R_ID_AA64PFR0_FP_MASK |
                               R_ID_AA64PFR0_ADVSIMD_MASK |
                               R_ID_AA64PFR0_SVE_MASK |
                               R_ID_AA64PFR0_DIT_MASK,
              .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
                            (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = R_ID_AA64PFR1_BT_MASK |
                               R_ID_AA64PFR1_SSBS_MASK |
                               R_ID_AA64PFR1_MTE_MASK |
                               R_ID_AA64PFR1_SME_MASK },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1",
              .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
                               R_ID_AA64ZFR0_AES_MASK |
                               R_ID_AA64ZFR0_BITPERM_MASK |
                               R_ID_AA64ZFR0_BFLOAT16_MASK |
                               R_ID_AA64ZFR0_B16B16_MASK |
                               R_ID_AA64ZFR0_SHA3_MASK |
                               R_ID_AA64ZFR0_SM4_MASK |
                               R_ID_AA64ZFR0_I8MM_MASK |
                               R_ID_AA64ZFR0_F32MM_MASK |
                               R_ID_AA64ZFR0_F64MM_MASK },
            { .name = "ID_AA64SMFR0_EL1",
              .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
                               R_ID_AA64SMFR0_BI32I32_MASK |
                               R_ID_AA64SMFR0_B16F32_MASK |
                               R_ID_AA64SMFR0_F16F32_MASK |
                               R_ID_AA64SMFR0_I8I32_MASK |
                               R_ID_AA64SMFR0_F16F16_MASK |
                               R_ID_AA64SMFR0_B16B16_MASK |
                               R_ID_AA64SMFR0_I16I32_MASK |
                               R_ID_AA64SMFR0_F64F64_MASK |
                               R_ID_AA64SMFR0_I16I64_MASK |
                               R_ID_AA64SMFR0_SMEVER_MASK |
                               R_ID_AA64SMFR0_FA64_MASK },
            { .name = "ID_AA64MMFR0_EL1",
              .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
              .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
                            (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
            { .name = "ID_AA64MMFR1_EL1",
              .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
            { .name = "ID_AA64MMFR2_EL1",
              .exported_bits = R_ID_AA64MMFR2_AT_MASK },
            { .name = "ID_AA64MMFR3_EL1",
              .exported_bits = 0 },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = R_ID_AA64ISAR0_AES_MASK |
                               R_ID_AA64ISAR0_SHA1_MASK |
                               R_ID_AA64ISAR0_SHA2_MASK |
                               R_ID_AA64ISAR0_CRC32_MASK |
                               R_ID_AA64ISAR0_ATOMIC_MASK |
                               R_ID_AA64ISAR0_RDM_MASK |
                               R_ID_AA64ISAR0_SHA3_MASK |
                               R_ID_AA64ISAR0_SM3_MASK |
                               R_ID_AA64ISAR0_SM4_MASK |
                               R_ID_AA64ISAR0_DP_MASK |
                               R_ID_AA64ISAR0_FHM_MASK |
                               R_ID_AA64ISAR0_TS_MASK |
                               R_ID_AA64ISAR0_RNDR_MASK },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
                               R_ID_AA64ISAR1_APA_MASK |
                               R_ID_AA64ISAR1_API_MASK |
                               R_ID_AA64ISAR1_JSCVT_MASK |
                               R_ID_AA64ISAR1_FCMA_MASK |
                               R_ID_AA64ISAR1_LRCPC_MASK |
                               R_ID_AA64ISAR1_GPA_MASK |
                               R_ID_AA64ISAR1_GPI_MASK |
                               R_ID_AA64ISAR1_FRINTTS_MASK |
                               R_ID_AA64ISAR1_SB_MASK |
                               R_ID_AA64ISAR1_BF16_MASK |
                               R_ID_AA64ISAR1_DGH_MASK |
                               R_ID_AA64ISAR1_I8MM_MASK },
            { .name = "ID_AA64ISAR2_EL1",
              .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
                               R_ID_AA64ISAR2_RPRES_MASK |
                               R_ID_AA64ISAR2_GPA3_MASK |
                               R_ID_AA64ISAR2_APA3_MASK |
                               R_ID_AA64ISAR2_MOPS_MASK |
                               R_ID_AA64ISAR2_BC_MASK |
                               R_ID_AA64ISAR2_RPRFM_MASK |
                               R_ID_AA64ISAR2_CSSC_MASK },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /*
         * RVBAR_EL1 and RMR_EL1 are only implemented if EL1 is the highest EL.
         * TODO: for RMR, a write with bit 1 set should do something with
         * cpu_reset(). In the meantime, since "the bit is strictly a
         * request", we stay within spec by simply ignoring writes.
         */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo el1_reset_regs[] = {
                { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                  .access = PL1_R,
                  .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
                { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
                  .access = PL1_RW, .type = ARM_CP_CONST,
                  .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
            };
            define_arm_cp_regs(cpu, el1_reset_regs);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
        if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
            define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
        }

        for (i = 4; i < 16; i++) {
            /*
             * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
             * For pre-v8 cores there are RAZ patterns for these in
             * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
             * v8 extends the "must RAZ" part of the ID register space
             * to also cover c0, 0, c{8-15}, {0-7}.
             * These are STATE_AA32 because in the AArch64 sysreg space
             * c4-c7 is where the AArch64 ID registers live (and we've
             * already defined those in v8_idregs[]), and c8-c15 are not
             * "must RAZ" for AArch64.
             */
            g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
            ARMCPRegInfo v8_aa32_raz_idregs = {
                .name = name,
                .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
                .access = PL1_R, .type = ARM_CP_CONST,
                .accessfn = access_aa64_tid3,
                .resetvalue = 0 };
            define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
        }
    }

    /*
     * Register the base EL2 cpregs.
     * Pre v8, these registers are implemented only as part of the
     * Virtualization Extensions (EL2 present). Beginning with v8,
     * if EL2 is missing but EL3 is enabled, mostly these become
     * RES0 from EL3, with some specific exceptions.
     */
    if (arm_feature(env, ARM_FEATURE_EL2)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && arm_feature(env, ARM_FEATURE_V8))) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .nv2_redirect_offset = 0x88,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .resetvalue = vmpidr_def,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .nv2_redirect_offset = 0x50,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
        };
        /*
         * The only field of MDCR_EL2 that has a defined architectural reset
         * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
         */
        ARMCPRegInfo mdcr_el2 = {
            .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
            .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
            .writefn = mdcr_el2_write,
            .access = PL2_RW, .resetvalue = pmu_num_counters(env),
            .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
        };
        define_one_arm_cp_reg(cpu, &mdcr_el2);
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
        }
        /*
         * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
         * See commentary near RMR_EL1.
         */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            static const ARMCPRegInfo el2_reset_regs[] = {
                { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                  .access = PL2_R,
                  .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
                { .name = "RVBAR", .type = ARM_CP_ALIAS,
                  .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                  .access = PL2_R,
                  .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
                { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
                  .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
            };
            define_arm_cp_regs(cpu, el2_reset_regs);
        }
    }

    /* Register the base EL3 cpregs. */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .access = PL3_R,
              .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
            { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
              .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
            { .name = "RMR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /*
     * The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
        define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ecv, cpu)) {
        define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
    }
#endif
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        ARMCPRegInfo vapa_cp_reginfo[] = {
            { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .resetvalue = 0,
              .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                                     offsetoflow32(CPUARMState, cp15.par_ns) },
              .writefn = par_write},
        };

        /*
         * When LPAE exists this 32-bit PAR register is an alias of the
         * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
         */
        if (arm_feature(env, ARM_FEATURE_LPAE)) {
            vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
        }
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /*
     * Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /*
             * Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fgt = FGT_MIDR_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .fgt = FGT_REVIDR_EL1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
        };
        ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
            .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .resetvalue = cpu->midr
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .fgt = FGT_CTR_EL0,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R,
            .accessfn = access_aa32_tid1,
            .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        /* HMPUIR is specific to PMSA V8 */
        ARMCPRegInfo id_hmpuir_reginfo = {
            .name = "HMPUIR",
            .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
            .access = PL2_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav8r_hdregion
        };
        static const ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = R_MIDR_EL1_REVISION_MASK |
                               R_MIDR_EL1_PARTNUM_MASK |
                               R_MIDR_EL1_ARCHITECTURE_MASK |
                               R_MIDR_EL1_VARIANT_MASK |
                               R_MIDR_EL1_IMPLEMENTER_MASK },
            { .name = "REVIDR_EL1" },
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            size_t i;
            /*
             * Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
                id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
            }
            for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
                id_cp_reginfo[i].access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
            if (!arm_feature(env, ARM_FEATURE_PMSA)) {
                define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
            }
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
                   arm_feature(env, ARM_FEATURE_V8)) {
            uint32_t i = 0;
            char *tmp_string;

            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
            define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
            define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);

            /* Register alias is only valid for first 32 indexes */
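            /*
             * The alias encoding packs the region number n as
             * crm = 0b1000 | n<3:1>, opc1<0> = n<4> and opc2<2> = n<0>,
             * with opc2<0> selecting PRLAR rather than PRBAR. For
             * example, n = 5 gives PRBAR5 at c6, c10, opc1 = 0,
             * opc2 = 4, and PRLAR5 at the same place with opc2 = 5.
             */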
            for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
                uint8_t crm = 0b1000 | extract32(i, 1, 3);
                uint8_t opc1 = extract32(i, 4, 1);
                uint8_t opc2 = extract32(i, 0, 1) << 2;

                tmp_string = g_strdup_printf("PRBAR%u", i);
                ARMCPRegInfo tmp_prbarn_reginfo = {
                    .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL1_RW, .resetvalue = 0,
                    .accessfn = access_tvm_trvm,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
                g_free(tmp_string);

                opc2 = extract32(i, 0, 1) << 2 | 0x1;
                tmp_string = g_strdup_printf("PRLAR%u", i);
                ARMCPRegInfo tmp_prlarn_reginfo = {
                    .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL1_RW, .resetvalue = 0,
                    .accessfn = access_tvm_trvm,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
                g_free(tmp_string);
            }

            /* Register alias is only valid for first 32 indexes */
            for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
                uint8_t crm = 0b1000 | extract32(i, 1, 3);
                uint8_t opc1 = 0b100 | extract32(i, 4, 1);
                uint8_t opc2 = extract32(i, 0, 1) << 2;

                tmp_string = g_strdup_printf("HPRBAR%u", i);
                ARMCPRegInfo tmp_hprbarn_reginfo = {
                    .name = tmp_string,
                    .type = ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL2_RW, .resetvalue = 0,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
                g_free(tmp_string);

                opc2 = extract32(i, 0, 1) << 2 | 0x1;
                tmp_string = g_strdup_printf("HPRLAR%u", i);
                ARMCPRegInfo tmp_hprlarn_reginfo = {
                    .name = tmp_string,
                    .type = ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL2_RW, .resetvalue = 0,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
                g_free(tmp_string);
            }
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .fgt = FGT_MPIDR_EL1,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_tacr,
              .nv2_redirect_offset = 0x118,
              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         * (1) older 32-bit only cores have a simple 32-bit CBAR
         * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *     32-bit register visible to AArch32 at a different encoding
         *     to the "flavour 1" register and with the bits rearranged to
         *     be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
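            /*
             * For example, a reset_cbar of 0x0000000480000000 yields
             * cbar32 == 0x80000004: address bits [31:18] stay in place
             * and bits [43:32] land in the low 12 bits of the 32-bit
             * view.
             */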
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }

    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        static const ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .accessfn = access_nv1,
              .fgt = FGT_VBAR_EL1,
              .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW, .accessfn = access_tvm_trvm,
            .fgt = FGT_SCTLR_EL1,
            .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /*
             * Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);

        if (arm_feature(env, ARM_FEATURE_PMSA) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo vsctlr = {
                .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
                .access = PL2_RW, .resetvalue = 0x0,
                .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
            };
            define_one_arm_cp_reg(cpu, &vsctlr);
        }
    }

    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }

    if (cpu_isar_feature(aa64_dit, cpu)) {
        define_one_arm_cp_reg(cpu, &dit_reginfo);
    }
    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        define_one_arm_cp_reg(cpu, &ssbs_reginfo);
    }
    if (cpu_isar_feature(any_ras, cpu)) {
        define_arm_cp_regs(cpu, minimal_ras_reginfo);
    }

    if (cpu_isar_feature(aa64_vh, cpu) ||
        cpu_isar_feature(aa64_debugv8p2, cpu)) {
        define_one_arm_cp_reg(cpu, &contextidr_el2);
    }
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_arm_cp_regs(cpu, zcr_reginfo);
    }

    if (cpu_isar_feature(aa64_hcx, cpu)) {
        define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
    }

    if (cpu_isar_feature(aa64_sme, cpu)) {
        define_arm_cp_regs(cpu, sme_reginfo);
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        ARMCPRegInfo gmid_reginfo = {
            .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
            .access = PL1_R, .accessfn = access_aa64_tid5,
            .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
        };
        define_one_arm_cp_reg(cpu, &gmid_reginfo);
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }

    if (cpu_isar_feature(aa64_scxtnum, cpu)) {
        define_arm_cp_regs(cpu, scxtnum_reginfo);
    }

    if (cpu_isar_feature(aa64_fgt, cpu)) {
        define_arm_cp_regs(cpu, fgt_reginfo);
    }

    if (cpu_isar_feature(aa64_rme, cpu)) {
        define_arm_cp_regs(cpu, rme_reginfo);
        if (cpu_isar_feature(aa64_mte, cpu)) {
            define_arm_cp_regs(cpu, rme_mte_reginfo);
        }
    }

    if (cpu_isar_feature(aa64_nv2, cpu)) {
        define_arm_cp_regs(cpu, nv2_reginfo);
    }

    if (cpu_isar_feature(aa64_nmi, cpu)) {
        define_arm_cp_regs(cpu, nmi_reginfo);
    }

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

    define_pm_cpregs(cpu);

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}

/*
 * Private utility function for define_one_arm_cp_reg_with_opaque():
 * add a single reginfo struct to the hash table.
 */
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, CPState state,
                                   CPSecureState secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    CPUARMState *env = &cpu->env;
    uint32_t key;
    ARMCPRegInfo *r2;
    bool is64 = r->type & ARM_CP_64BIT;
    bool ns = secstate & ARM_CP_SECSTATE_NS;
    int cp = r->cp;
    size_t name_len;
    bool make_const;

    switch (state) {
    case ARM_CP_STATE_AA32:
        /* We assume it is a cp15 register if the .cp field is left unset. */
        if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
            cp = 15;
        }
        key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
        break;
    case ARM_CP_STATE_AA64:
        /*
         * To allow abbreviation of ARMCPRegInfo definitions, we treat
         * cp == 0 as equivalent to the value for "standard guest-visible
         * sysreg". STATE_BOTH definitions are also always "standard sysreg"
         * in their AArch64 view (the .cp value may be non-zero for the
         * benefit of the AArch32 view).
         */
        if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            cp = CP_REG_ARM64_SYSREG_CP;
        }
        key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Overriding of an existing definition must be explicitly requested. */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
        if (oldreg) {
            assert(oldreg->type & ARM_CP_OVERRIDE);
        }
    }

    /*
     * Eliminate registers that are not present because the EL is missing.
     * Doing this here makes it easier to put all registers for a given
     * feature into the same ARMCPRegInfo array and define them all at once.
     */
    make_const = false;
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        /*
         * An EL2 register without EL2 but with EL3 is (usually) RES0.
         * See rule RJFFP in section D1.1.3 of DDI0487H.a.
         */
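        /*
         * CPAccessRights keeps one R/W bit pair per exception level
         * (PL0 lowest), so the lowest bit set in r->access identifies
         * the least privileged EL that has any access to the register.
         */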
        int min_el = ctz32(r->access) / 2;
        if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
            if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
                return;
            }
            make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
        }
    } else {
        CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
                                 ? PL2_RW : PL1_RW);
        if ((r->access & max_el) == 0) {
            return;
        }
    }

    /* Combine cpreg and name into one allocation. */
    name_len = strlen(name) + 1;
    r2 = g_malloc(sizeof(*r2) + name_len);
    *r2 = *r;
    r2->name = memcpy(r2 + 1, name, name_len);
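    /*
     * r2->name points into the same allocation as r2 itself, so the
     * name's lifetime matches the reginfo and needs no separate free.
     */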

    /*
     * Update fields to match the instantiation, overwriting wildcards
     * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
     */
    r2->cp = cp;
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    r2->state = state;
    r2->secure = secstate;
    if (opaque) {
        r2->opaque = opaque;
    }

    if (make_const) {
        /* This should not have been a very special register to begin with. */
        int old_special = r2->type & ARM_CP_SPECIAL_MASK;
        assert(old_special == 0 || old_special == ARM_CP_NOP);
        /*
         * Set the special function to CONST, retaining the other flags.
         * This is important for e.g. ARM_CP_SVE so that we still
         * take the SVE trap if CPTR_EL3.EZ == 0.
         */
        r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
        /*
         * Usually, these registers become RES0, but there are a few
         * special cases like VPIDR_EL2 which have a constant non-zero
         * value with writes ignored.
         */
        if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
            r2->resetvalue = 0;
        }
        /*
         * ARM_CP_CONST has precedence, so removing the callbacks and
         * offsets is not strictly necessary, but it is potentially
         * less confusing to debug later.
         */
        r2->readfn = NULL;
        r2->writefn = NULL;
        r2->raw_readfn = NULL;
        r2->raw_writefn = NULL;
        r2->resetfn = NULL;
        r2->fieldoffset = 0;
        r2->bank_fieldoffsets[0] = 0;
        r2->bank_fieldoffsets[1] = 0;
    } else {
        bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];

        if (isbanked) {
            /*
             * Register is banked (using both entries in array).
             * Overwriting fieldoffset as the array is only used to define
             * banked registers but later only fieldoffset is used.
             */
            r2->fieldoffset = r->bank_fieldoffsets[ns];
        }
        if (state == ARM_CP_STATE_AA32) {
            if (isbanked) {
                /*
                 * If the register is banked then we don't need to migrate or
                 * reset the 32-bit instance in certain cases:
                 *
                 * 1) If the register has both 32-bit and 64-bit instances
                 *    then we can count on the 64-bit instance taking care
                 *    of the non-secure bank.
                 * 2) If ARMv8 is enabled then we can count on a 64-bit
                 *    version taking care of the secure bank. This requires
                 *    that separate 32 and 64-bit definitions are provided.
                 */
                if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                    (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
                    r2->type |= ARM_CP_ALIAS;
                }
            } else if ((secstate != r->secure) && !ns) {
                /*
                 * The register is not banked so we only want to allow
                 * migration of the non-secure instance.
                 */
                r2->type |= ARM_CP_ALIAS;
            }

7412 if (HOST_BIG_ENDIAN &&
7413 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
7414 r2->fieldoffset += sizeof(uint32_t);
7415 }
7416 }
7417 }
7418
7419 /*
7420 * By convention, for wildcarded registers only the first
7421 * entry is used for migration; the others are marked as
7422 * ALIAS so we don't try to transfer the register
7423 * multiple times. Special registers (ie NOP/WFI) are
7424 * never migratable and not even raw-accessible.
7425 */
7426 if (r2->type & ARM_CP_SPECIAL_MASK) {
7427 r2->type |= ARM_CP_NO_RAW;
7428 }
7429 if (((r->crm == CP_ANY) && crm != 0) ||
7430 ((r->opc1 == CP_ANY) && opc1 != 0) ||
7431 ((r->opc2 == CP_ANY) && opc2 != 0)) {
7432 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
7433 }
7434
7435 /*
7436 * Check that raw accesses are either forbidden or handled. Note that
7437 * we can't assert this earlier because the setup of fieldoffset for
7438 * banked registers has to be done first.
7439 */
7440 if (!(r2->type & ARM_CP_NO_RAW)) {
7441 assert(!raw_accessors_invalid(r2));
7442 }
7443
7444 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
7445 }
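
/*
 * For illustration (a sketch, not a real definition): a single
 * wildcarded entry such as
 *
 *   { .name = "FOO", .cp = 15, .crn = 9, .crm = CP_ANY,
 *     .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, ... }
 *
 * is expanded by the loops in define_one_arm_cp_reg_with_opaque()
 * below into one hashtable entry per (crm, opc1, opc2) combination;
 * only the crm=0/opc1=0/opc2=0 instance stays migratable, and the
 * rest are marked ARM_CP_ALIAS | ARM_CP_NO_GDB above.
 */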
7446
7447
7448 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
7449 const ARMCPRegInfo *r, void *opaque)
7450 {
7451 /*
7452 * Define implementations of coprocessor registers.
7453 * We store these in a hashtable because typically
7454 * there are fewer than 150 registers in a space which
7455 * is 16*16*16*8*8 = 262144 in size.
7456 * Wildcarding is supported for the crm, opc1 and opc2 fields.
7457 * If a register is defined twice then the second definition is
7458 * used, so this can be used to define some generic registers and
7459 * then override them with implementation specific variations.
7460 * At least one of the original and the second definition should
7461 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
7462 * against accidental use.
7463 *
7464 * The state field defines whether the register is to be
7465 * visible in the AArch32 or AArch64 execution state. If the
7466 * state is set to ARM_CP_STATE_BOTH then we synthesise a
7467 * reginfo structure for the AArch32 view, which sees the lower
7468 * 32 bits of the 64 bit register.
7469 *
7470 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
7471 * be wildcarded. AArch64 registers are always considered to be 64
7472 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
7473 * the register, if any.
7474 */
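
/*
 * A hypothetical ARM_CP_STATE_BOTH definition using the abbreviations
 * described above (FOO_EL1 and cp15.foo_el1 are illustrative names,
 * not real registers):
 *
 *   { .name = "FOO_EL1", .state = ARM_CP_STATE_BOTH,
 *     .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 9, .opc2 = 0,
 *     .access = PL1_RW,
 *     .fieldoffset = offsetof(CPUARMState, cp15.foo_el1) },
 *
 * leaves .cp as 0 and so is registered as a cp15 register in its
 * AArch32 view and as a standard sysreg in its AArch64 view.
 */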
7475 int crm, opc1, opc2;
7476 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
7477 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
7478 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
7479 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
7480 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
7481 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
7482 CPState state;
7483
7484 /* 64 bit registers have only CRm and Opc1 fields */
7485 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
7486 /* op0 only exists in the AArch64 encodings */
7487 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
7488 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
7489 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
7490 /*
7491 * This API is only for Arm's system coprocessors (14 and 15) or
7492 * (M-profile or v7A-and-earlier only) for implementation defined
7493 * coprocessors in the range 0..7. Our decode assumes this, since
7494 * 8..13 can be used for other insns including VFP and Neon. See
7495 * valid_cp() in translate.c. Assert here that we haven't tried
7496 * to use an invalid coprocessor number.
7497 */
7498 switch (r->state) {
7499 case ARM_CP_STATE_BOTH:
7500 /* 0 has a special meaning, but otherwise the same rules as AA32. */
7501 if (r->cp == 0) {
7502 break;
7503 }
7504 /* fall through */
7505 case ARM_CP_STATE_AA32:
7506 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
7507 !arm_feature(&cpu->env, ARM_FEATURE_M)) {
7508 assert(r->cp >= 14 && r->cp <= 15);
7509 } else {
7510 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
7511 }
7512 break;
7513 case ARM_CP_STATE_AA64:
7514 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
7515 break;
7516 default:
7517 g_assert_not_reached();
7518 }
7519 /*
7520 * The AArch64 pseudocode CheckSystemAccess() specifies that op1
7521 * encodes a minimum access level for the register. We roll this
7522 * runtime check into our general permission check code, so check
7523 * here that the reginfo's specified permissions are strict enough
7524 * to encompass the generic architectural permission check.
7525 */
7526 if (r->state != ARM_CP_STATE_AA32) {
7527 CPAccessRights mask;
7528 switch (r->opc1) {
7529 case 0:
7530 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
7531 mask = PL0U_R | PL1_RW;
7532 break;
7533 case 1: case 2:
7534 /* min_EL EL1 */
7535 mask = PL1_RW;
7536 break;
7537 case 3:
7538 /* min_EL EL0 */
7539 mask = PL0_RW;
7540 break;
7541 case 4:
7542 case 5:
7543 /* min_EL EL2 */
7544 mask = PL2_RW;
7545 break;
7546 case 6:
7547 /* min_EL EL3 */
7548 mask = PL3_RW;
7549 break;
7550 case 7:
7551 /* min_EL EL1, secure mode only (we don't check the latter) */
7552 mask = PL1_RW;
7553 break;
7554 default:
7555 /* broken reginfo with out-of-range opc1 */
7556 g_assert_not_reached();
7557 }
7558 /* assert our permissions are not too lax (stricter is fine) */
7559 assert((r->access & ~mask) == 0);
7560 }
7561
7562 /*
7563 * Check that the register definition has enough info to handle
7564 * reads and writes if they are permitted.
7565 */
7566 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
7567 if (r->access & PL3_R) {
7568 assert((r->fieldoffset ||
7569 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7570 r->readfn);
7571 }
7572 if (r->access & PL3_W) {
7573 assert((r->fieldoffset ||
7574 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7575 r->writefn);
7576 }
7577 }
7578
7579 for (crm = crmmin; crm <= crmmax; crm++) {
7580 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
7581 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
7582 for (state = ARM_CP_STATE_AA32;
7583 state <= ARM_CP_STATE_AA64; state++) {
7584 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
7585 continue;
7586 }
7587 if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
7588 cpu_isar_feature(aa64_xs, cpu)) {
7589 /*
7590 * This is a TLBI insn which has an NXS variant. The
7591 * NXS variant is at the same encoding except that
7592 * crn is +1, and has the same behaviour except for
7593 * fine-grained trapping. Add the NXS insn here and
7594 * then fall through to add the normal register.
7595 * add_cpreg_to_hashtable() copies the cpreg struct
7596 * and name that it is passed, so it's OK to use
7597 * a local struct here.
7598 */
7599 ARMCPRegInfo nxs_ri = *r;
7600 g_autofree char *name = g_strdup_printf("%sNXS", r->name);
7601
7602 assert(state == ARM_CP_STATE_AA64);
7603 assert(nxs_ri.crn < 0xf);
7604 nxs_ri.crn++;
7605 if (nxs_ri.fgt) {
7606 nxs_ri.fgt |= R_FGT_NXS_MASK;
7607 }
7608 add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state,
7609 ARM_CP_SECSTATE_NS,
7610 crm, opc1, opc2, name);
7611 }
7612 if (state == ARM_CP_STATE_AA32) {
7613 /*
7614 * Under AArch32 CP registers can be common
7615 * (same for secure and non-secure world) or banked.
7616 */
7617 char *name;
7618
7619 switch (r->secure) {
7620 case ARM_CP_SECSTATE_S:
7621 case ARM_CP_SECSTATE_NS:
7622 add_cpreg_to_hashtable(cpu, r, opaque, state,
7623 r->secure, crm, opc1, opc2,
7624 r->name);
7625 break;
7626 case ARM_CP_SECSTATE_BOTH:
7627 name = g_strdup_printf("%s_S", r->name);
7628 add_cpreg_to_hashtable(cpu, r, opaque, state,
7629 ARM_CP_SECSTATE_S,
7630 crm, opc1, opc2, name);
7631 g_free(name);
7632 add_cpreg_to_hashtable(cpu, r, opaque, state,
7633 ARM_CP_SECSTATE_NS,
7634 crm, opc1, opc2, r->name);
7635 break;
7636 default:
7637 g_assert_not_reached();
7638 }
7639 } else {
7640 /*
7641 * AArch64 registers get mapped to non-secure instance
7642 * of AArch32
7643 */
7644 add_cpreg_to_hashtable(cpu, r, opaque, state,
7645 ARM_CP_SECSTATE_NS,
7646 crm, opc1, opc2, r->name);
7647 }
7648 }
7649 }
7650 }
7651 }
7652 }
7653
7654 /* Define a whole list of registers */
7655 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
7656 void *opaque, size_t len)
7657 {
7658 size_t i;
7659 for (i = 0; i < len; ++i) {
7660 define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
7661 }
7662 }
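
/*
 * Callers normally go through the define_arm_cp_regs() and
 * define_one_arm_cp_reg() wrappers in cpregs.h, which supply a NULL
 * opaque and use ARRAY_SIZE() for the length; a minimal sketch
 * (my_reginfo is an illustrative name):
 *
 *   static const ARMCPRegInfo my_reginfo[] = { ... };
 *   define_arm_cp_regs(cpu, my_reginfo);
 */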
7663
7664 /*
7665 * Modify ARMCPRegInfo for access from userspace.
7666 *
7667 * This is a data-driven modification directed by
7668 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
7669 * user-space cannot alter any values and dynamic values pertaining to
7670 * execution state are hidden from user space view anyway.
7671 */
7672 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
7673 const ARMCPRegUserSpaceInfo *mods,
7674 size_t mods_len)
7675 {
7676 for (size_t mi = 0; mi < mods_len; ++mi) {
7677 const ARMCPRegUserSpaceInfo *m = mods + mi;
7678 GPatternSpec *pat = NULL;
7679
7680 if (m->is_glob) {
7681 pat = g_pattern_spec_new(m->name);
7682 }
7683 for (size_t ri = 0; ri < regs_len; ++ri) {
7684 ARMCPRegInfo *r = regs + ri;
7685
7686 if (pat && g_pattern_match_string(pat, r->name)) {
7687 r->type = ARM_CP_CONST;
7688 r->access = PL0U_R;
7689 r->resetvalue = 0;
7690 /* continue */
7691 } else if (strcmp(r->name, m->name) == 0) {
7692 r->type = ARM_CP_CONST;
7693 r->access = PL0U_R;
7694 r->resetvalue &= m->exported_bits;
7695 r->resetvalue |= m->fixed_bits;
7696 break;
7697 }
7698 }
7699 if (pat) {
7700 g_pattern_spec_free(pat);
7701 }
7702 }
7703 }
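
/*
 * A minimal sketch of a mods table (the entries are illustrative,
 * not the real user-mode tables), as passed via the
 * modify_arm_cp_regs() wrapper:
 *
 *   static const ARMCPRegUserSpaceInfo example_users[] = {
 *       { .name = "ID_AA64PFR0_EL1",
 *         .exported_bits = R_ID_AA64PFR0_FP_MASK },
 *       { .name = "ID_AA64*", .is_glob = true },
 *   };
 *
 * A glob entry zeroes every register it matches; an exact-name entry
 * keeps only .exported_bits of the reset value, ORs in .fixed_bits,
 * and then stops searching the register list for that entry.
 */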
7704
7705 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
7706 {
7707 return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
7708 }
7709
7710 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
7711 uint64_t value)
7712 {
7713 /* Helper coprocessor write function for write-ignore registers */
7714 }
7715
7716 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
7717 {
7718 /* Helper coprocessor read function for read-as-zero registers */
7719 return 0;
7720 }
7721
7722 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
7723 {
7724 /* Helper coprocessor reset function for do-nothing-on-reset registers */
7725 }
7726
7727 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
7728 {
7729 /*
7730 * Return true if it is not valid for us to switch to
7731 * this CPU mode (ie all the UNPREDICTABLE cases in
7732 * the ARM ARM CPSRWriteByInstr pseudocode).
7733 */
7734
7735 /* Changes to or from Hyp via MSR and CPS are illegal. */
7736 if (write_type == CPSRWriteByInstr &&
7737 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
7738 mode == ARM_CPU_MODE_HYP)) {
7739 return 1;
7740 }
7741
7742 switch (mode) {
7743 case ARM_CPU_MODE_USR:
7744 return 0;
7745 case ARM_CPU_MODE_SYS:
7746 case ARM_CPU_MODE_SVC:
7747 case ARM_CPU_MODE_ABT:
7748 case ARM_CPU_MODE_UND:
7749 case ARM_CPU_MODE_IRQ:
7750 case ARM_CPU_MODE_FIQ:
7751 /*
7752 * Note that we don't implement the IMPDEF NSACR.RFR which in v7
7753 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
7754 */
7755 /*
7756 * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
7757 * and CPS are treated as illegal mode changes.
7758 */
7759 if (write_type == CPSRWriteByInstr &&
7760 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
7761 (arm_hcr_el2_eff(env) & HCR_TGE)) {
7762 return 1;
7763 }
7764 return 0;
7765 case ARM_CPU_MODE_HYP:
7766 return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
7767 case ARM_CPU_MODE_MON:
7768 return arm_current_el(env) < 3;
7769 default:
7770 return 1;
7771 }
7772 }
7773
7774 uint32_t cpsr_read(CPUARMState *env)
7775 {
7776 int ZF;
7777 ZF = (env->ZF == 0);
7778 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
7779 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
7780 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
7781 | ((env->condexec_bits & 0xfc) << 8)
7782 | (env->GE << 16) | (env->daif & CPSR_AIF);
7783 }
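
/*
 * For reference, the AArch32 CPSR layout assembled above (derived
 * from the shifts in cpsr_read()):
 *   N=31 Z=30 C=29 V=28 Q=27 IT[1:0]=26:25 GE=19:16 IT[7:2]=15:10
 *   E=9 A=8 I=7 F=6 T=5 M=4:0
 * where A/I/F live in env->daif and E and M stay in uncached_cpsr.
 */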
7784
7785 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
7786 CPSRWriteType write_type)
7787 {
7788 uint32_t changed_daif;
7789 bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
7790 (mask & (CPSR_M | CPSR_E | CPSR_IL));
7791
7792 if (mask & CPSR_NZCV) {
7793 env->ZF = (~val) & CPSR_Z;
7794 env->NF = val;
7795 env->CF = (val >> 29) & 1;
7796 env->VF = (val << 3) & 0x80000000;
7797 }
7798 if (mask & CPSR_Q) {
7799 env->QF = ((val & CPSR_Q) != 0);
7800 }
7801 if (mask & CPSR_T) {
7802 env->thumb = ((val & CPSR_T) != 0);
7803 }
7804 if (mask & CPSR_IT_0_1) {
7805 env->condexec_bits &= ~3;
7806 env->condexec_bits |= (val >> 25) & 3;
7807 }
7808 if (mask & CPSR_IT_2_7) {
7809 env->condexec_bits &= 3;
7810 env->condexec_bits |= (val >> 8) & 0xfc;
7811 }
7812 if (mask & CPSR_GE) {
7813 env->GE = (val >> 16) & 0xf;
7814 }
7815
7816 /*
7817 * In a V7 implementation that includes the security extensions but does
7818 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
7819 * whether non-secure software is allowed to change the CPSR_F and CPSR_A
7820 * bits respectively.
7821 *
7822 * In a V8 implementation, it is permitted for privileged software to
7823 * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
7824 */
7825 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
7826 arm_feature(env, ARM_FEATURE_EL3) &&
7827 !arm_feature(env, ARM_FEATURE_EL2) &&
7828 !arm_is_secure(env)) {
7829
7830 changed_daif = (env->daif ^ val) & mask;
7831
7832 if (changed_daif & CPSR_A) {
7833 /*
7834 * Check to see if we are allowed to change the masking of async
7835 * abort exceptions from a non-secure state.
7836 */
7837 if (!(env->cp15.scr_el3 & SCR_AW)) {
7838 qemu_log_mask(LOG_GUEST_ERROR,
7839 "Ignoring attempt to switch CPSR_A flag from "
7840 "non-secure world with SCR.AW bit clear\n");
7841 mask &= ~CPSR_A;
7842 }
7843 }
7844
7845 if (changed_daif & CPSR_F) {
7846 /*
7847 * Check to see if we are allowed to change the masking of FIQ
7848 * exceptions from a non-secure state.
7849 */
7850 if (!(env->cp15.scr_el3 & SCR_FW)) {
7851 qemu_log_mask(LOG_GUEST_ERROR,
7852 "Ignoring attempt to switch CPSR_F flag from "
7853 "non-secure world with SCR.FW bit clear\n");
7854 mask &= ~CPSR_F;
7855 }
7856
7857 /*
7858 * Check whether non-maskable FIQ (NMFI) support is enabled.
7859 * If this bit is set software is not allowed to mask
7860 * FIQs, but is allowed to set CPSR_F to 0.
7861 */
7862 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
7863 (val & CPSR_F)) {
7864 qemu_log_mask(LOG_GUEST_ERROR,
7865 "Ignoring attempt to enable CPSR_F flag "
7866 "(non-maskable FIQ [NMFI] support enabled)\n");
7867 mask &= ~CPSR_F;
7868 }
7869 }
7870 }
7871
7872 env->daif &= ~(CPSR_AIF & mask);
7873 env->daif |= val & CPSR_AIF & mask;
7874
7875 if (write_type != CPSRWriteRaw &&
7876 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
7877 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
7878 /*
7879 * Note that we can only get here in USR mode if this is a
7880 * gdb stub write; for this case we follow the architectural
7881 * behaviour for guest writes in USR mode of ignoring an attempt
7882 * to switch mode. (Those are caught by translate.c for writes
7883 * triggered by guest instructions.)
7884 */
7885 mask &= ~CPSR_M;
7886 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
7887 /*
7888 * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
7889 * v7, and has defined behaviour in v8:
7890 * + leave CPSR.M untouched
7891 * + allow changes to the other CPSR fields
7892 * + set PSTATE.IL
7893 * For user changes via the GDB stub, we don't set PSTATE.IL,
7894 * as this would be unnecessarily harsh for a user error.
7895 */
7896 mask &= ~CPSR_M;
7897 if (write_type != CPSRWriteByGDBStub &&
7898 arm_feature(env, ARM_FEATURE_V8)) {
7899 mask |= CPSR_IL;
7900 val |= CPSR_IL;
7901 }
7902 qemu_log_mask(LOG_GUEST_ERROR,
7903 "Illegal AArch32 mode switch attempt from %s to %s\n",
7904 aarch32_mode_name(env->uncached_cpsr),
7905 aarch32_mode_name(val));
7906 } else {
7907 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
7908 write_type == CPSRWriteExceptionReturn ?
7909 "Exception return from AArch32" :
7910 "AArch32 mode switch from",
7911 aarch32_mode_name(env->uncached_cpsr),
7912 aarch32_mode_name(val), env->regs[15]);
7913 switch_mode(env, val & CPSR_M);
7914 }
7915 }
7916 mask &= ~CACHED_CPSR_BITS;
7917 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
7918 if (tcg_enabled() && rebuild_hflags) {
7919 arm_rebuild_hflags(env);
7920 }
7921 }
7922
7923 #ifdef CONFIG_USER_ONLY
7924
7925 static void switch_mode(CPUARMState *env, int mode)
7926 {
7927 ARMCPU *cpu = env_archcpu(env);
7928
7929 if (mode != ARM_CPU_MODE_USR) {
7930 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
7931 }
7932 }
7933
7934 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7935 uint32_t cur_el, bool secure)
7936 {
7937 return 1;
7938 }
7939
7940 void aarch64_sync_64_to_32(CPUARMState *env)
7941 {
7942 g_assert_not_reached();
7943 }
7944
7945 #else
7946
7947 static void switch_mode(CPUARMState *env, int mode)
7948 {
7949 int old_mode;
7950 int i;
7951
7952 old_mode = env->uncached_cpsr & CPSR_M;
7953 if (mode == old_mode) {
7954 return;
7955 }
7956
7957 if (old_mode == ARM_CPU_MODE_FIQ) {
7958 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
7959 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
7960 } else if (mode == ARM_CPU_MODE_FIQ) {
7961 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
7962 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
7963 }
7964
7965 i = bank_number(old_mode);
7966 env->banked_r13[i] = env->regs[13];
7967 env->banked_spsr[i] = env->spsr;
7968
7969 i = bank_number(mode);
7970 env->regs[13] = env->banked_r13[i];
7971 env->spsr = env->banked_spsr[i];
7972
7973 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
7974 env->regs[14] = env->banked_r14[r14_bank_number(mode)];
7975 }
7976
7977 /*
7978 * Physical Interrupt Target EL Lookup Table
7979 *
7980 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
7981 *
7982 * The below multi-dimensional table is used for looking up the target
7983 * exception level given numerous condition criteria. Specifically, the
7984 * target EL is based on SCR and HCR routing controls as well as the
7985 * currently executing EL and secure state.
7986 *
7987 * Dimensions:
7988 * target_el_table[2][2][2][2][2][4]
7989 * | | | | | +--- Current EL
7990 * | | | | +------ Non-secure(0)/Secure(1)
7991 * | | | +--------- HCR mask override
7992 * | | +------------ SCR exec state control
7993 * | +--------------- SCR mask override
7994 * +------------------ 32-bit(0)/64-bit(1) EL3
7995 *
7996 * The table values are as such:
7997 * 0-3 = EL0-EL3
7998 * -1 = Cannot occur
7999 *
8000 * The ARM ARM target EL table includes entries indicating that an "exception
8001 * is not taken". The two cases where this is applicable are:
8002 * 1) An exception is taken from EL3 but the SCR does not have the exception
8003 * routed to EL3.
8004 * 2) An exception is taken from EL2 but the HCR does not have the exception
8005 * routed to EL2.
8006 * In these two cases, the below table contains a target of EL1. This value is
8007 * returned as it is expected that the consumer of the table data will check
8008 * for "target EL >= current EL" to ensure the exception is not taken.
8009 *
8010 * SCR HCR
8011 * 64 EA AMO From
8012 * BIT IRQ IMO Non-secure Secure
8013 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
8014 */
8015 static const int8_t target_el_table[2][2][2][2][2][4] = {
8016 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
8017 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
8018 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
8019 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
8020 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
8021 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
8022 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
8023 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
8024 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
8025 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
8026 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
8027 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
8028 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
8029 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
8030 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
8031 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
8032 };
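
/*
 * Worked example of the lookup below: a physical IRQ taken from
 * non-secure EL0 with AArch64 EL3 (is64=1), SCR_EL3.IRQ=0 (scr=0),
 * SCR_EL3.RW=1 (rw=1) and HCR_EL2.IMO=1 (hcr=1) selects the
 * "1 0 1 1" row above, non-secure columns:
 *   target_el_table[1][0][1][1][0][0] == 2
 * i.e. the interrupt is routed to EL2.
 */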
8033
8034 /*
8035 * Determine the target EL for physical exceptions
8036 */
8037 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
8038 uint32_t cur_el, bool secure)
8039 {
8040 CPUARMState *env = cpu_env(cs);
8041 bool rw;
8042 bool scr;
8043 bool hcr;
8044 int target_el;
8045 /* Is the highest EL AArch64? */
8046 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
8047 uint64_t hcr_el2;
8048
8049 if (arm_feature(env, ARM_FEATURE_EL3)) {
8050 rw = arm_scr_rw_eff(env);
8051 } else {
8052 /*
8053 * Either EL2 is the highest EL (and so the EL2 register width
8054 * is given by is64); or there is no EL2 or EL3, in which case
8055 * the value of 'rw' does not affect the table lookup anyway.
8056 */
8057 rw = is64;
8058 }
8059
8060 hcr_el2 = arm_hcr_el2_eff(env);
8061 switch (excp_idx) {
8062 case EXCP_IRQ:
8063 case EXCP_NMI:
8064 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
8065 hcr = hcr_el2 & HCR_IMO;
8066 break;
8067 case EXCP_FIQ:
8068 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
8069 hcr = hcr_el2 & HCR_FMO;
8070 break;
8071 default:
8072 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
8073 hcr = hcr_el2 & HCR_AMO;
8074 break;
8075 };
8076
8077 /*
8078 * For these purposes, TGE and AMO/IMO/FMO both force the
8079 * interrupt to EL2. Fold TGE into the bit extracted above.
8080 */
8081 hcr |= (hcr_el2 & HCR_TGE) != 0;
8082
8083 /* Perform a table-lookup for the target EL given the current state */
8084 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
8085
8086 assert(target_el > 0);
8087
8088 return target_el;
8089 }
8090
8091 void arm_log_exception(CPUState *cs)
8092 {
8093 int idx = cs->exception_index;
8094
8095 if (qemu_loglevel_mask(CPU_LOG_INT)) {
8096 const char *exc = NULL;
8097 static const char * const excnames[] = {
8098 [EXCP_UDEF] = "Undefined Instruction",
8099 [EXCP_SWI] = "SVC",
8100 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
8101 [EXCP_DATA_ABORT] = "Data Abort",
8102 [EXCP_IRQ] = "IRQ",
8103 [EXCP_FIQ] = "FIQ",
8104 [EXCP_BKPT] = "Breakpoint",
8105 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
8106 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
8107 [EXCP_HVC] = "Hypervisor Call",
8108 [EXCP_HYP_TRAP] = "Hypervisor Trap",
8109 [EXCP_SMC] = "Secure Monitor Call",
8110 [EXCP_VIRQ] = "Virtual IRQ",
8111 [EXCP_VFIQ] = "Virtual FIQ",
8112 [EXCP_SEMIHOST] = "Semihosting call",
8113 [EXCP_NOCP] = "v7M NOCP UsageFault",
8114 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
8115 [EXCP_STKOF] = "v8M STKOF UsageFault",
8116 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
8117 [EXCP_LSERR] = "v8M LSERR UsageFault",
8118 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
8119 [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
8120 [EXCP_VSERR] = "Virtual SERR",
8121 [EXCP_GPC] = "Granule Protection Check",
8122 [EXCP_NMI] = "NMI",
8123 [EXCP_VINMI] = "Virtual IRQ NMI",
8124 [EXCP_VFNMI] = "Virtual FIQ NMI",
8125 [EXCP_MON_TRAP] = "Monitor Trap",
8126 };
8127
8128 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
8129 exc = excnames[idx];
8130 }
8131 if (!exc) {
8132 exc = "unknown";
8133 }
8134 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
8135 idx, exc, cs->cpu_index);
8136 }
8137 }
8138
8139 /*
8140 * Function used to synchronize QEMU's AArch64 register set with AArch32
8141 * register set. This is necessary when switching between AArch32 and AArch64
8142 * execution state.
8143 */
8144 void aarch64_sync_32_to_64(CPUARMState *env)
8145 {
8146 int i;
8147 uint32_t mode = env->uncached_cpsr & CPSR_M;
8148
8149 /* We can blanket copy R[0:7] to X[0:7] */
8150 for (i = 0; i < 8; i++) {
8151 env->xregs[i] = env->regs[i];
8152 }
8153
8154 /*
8155 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
8156 * Otherwise, they come from the banked user regs.
8157 */
8158 if (mode == ARM_CPU_MODE_FIQ) {
8159 for (i = 8; i < 13; i++) {
8160 env->xregs[i] = env->usr_regs[i - 8];
8161 }
8162 } else {
8163 for (i = 8; i < 13; i++) {
8164 env->xregs[i] = env->regs[i];
8165 }
8166 }
8167
8168 /*
8169 * Registers x13-x23 are the various mode SP and LR registers. Registers
8170 * r13 and r14 are only copied if we are in that mode, otherwise we copy
8171 * from the mode banked register.
8172 */
8173 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8174 env->xregs[13] = env->regs[13];
8175 env->xregs[14] = env->regs[14];
8176 } else {
8177 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
8178 /* HYP is an exception in that it is copied from r14 */
8179 if (mode == ARM_CPU_MODE_HYP) {
8180 env->xregs[14] = env->regs[14];
8181 } else {
8182 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
8183 }
8184 }
8185
8186 if (mode == ARM_CPU_MODE_HYP) {
8187 env->xregs[15] = env->regs[13];
8188 } else {
8189 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
8190 }
8191
8192 if (mode == ARM_CPU_MODE_IRQ) {
8193 env->xregs[16] = env->regs[14];
8194 env->xregs[17] = env->regs[13];
8195 } else {
8196 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
8197 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
8198 }
8199
8200 if (mode == ARM_CPU_MODE_SVC) {
8201 env->xregs[18] = env->regs[14];
8202 env->xregs[19] = env->regs[13];
8203 } else {
8204 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
8205 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
8206 }
8207
8208 if (mode == ARM_CPU_MODE_ABT) {
8209 env->xregs[20] = env->regs[14];
8210 env->xregs[21] = env->regs[13];
8211 } else {
8212 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
8213 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
8214 }
8215
8216 if (mode == ARM_CPU_MODE_UND) {
8217 env->xregs[22] = env->regs[14];
8218 env->xregs[23] = env->regs[13];
8219 } else {
8220 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
8221 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
8222 }
8223
8224 /*
8225 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
8226 * mode, then we can copy from r8-r14. Otherwise, we copy from the
8227 * FIQ bank for r8-r14.
8228 */
8229 if (mode == ARM_CPU_MODE_FIQ) {
8230 for (i = 24; i < 31; i++) {
8231 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
8232 }
8233 } else {
8234 for (i = 24; i < 29; i++) {
8235 env->xregs[i] = env->fiq_regs[i - 24];
8236 }
8237 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
8238 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
8239 }
8240
8241 env->pc = env->regs[15];
8242 }
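
/*
 * Summary of the fixed mapping implemented above and reversed by
 * aarch64_sync_64_to_32() below:
 *   x13=SP_usr  x14=LR_usr  x15=SP_hyp
 *   x16=LR_irq  x17=SP_irq  x18=LR_svc  x19=SP_svc
 *   x20=LR_abt  x21=SP_abt  x22=LR_und  x23=SP_und
 *   x24..x28=r8_fiq..r12_fiq  x29=SP_fiq  x30=LR_fiq
 */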
8243
8244 /*
8245 * Function used to synchronize QEMU's AArch32 register set with AArch64
8246 * register set. This is necessary when switching between AArch32 and AArch64
8247 * execution state.
8248 */
8249 void aarch64_sync_64_to_32(CPUARMState *env)
8250 {
8251 int i;
8252 uint32_t mode = env->uncached_cpsr & CPSR_M;
8253
8254 /* We can blanket copy X[0:7] to R[0:7] */
8255 for (i = 0; i < 8; i++) {
8256 env->regs[i] = env->xregs[i];
8257 }
8258
8259 /*
8260 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
8261 * Otherwise, we copy x8-x12 into the banked user regs.
8262 */
8263 if (mode == ARM_CPU_MODE_FIQ) {
8264 for (i = 8; i < 13; i++) {
8265 env->usr_regs[i - 8] = env->xregs[i];
8266 }
8267 } else {
8268 for (i = 8; i < 13; i++) {
8269 env->regs[i] = env->xregs[i];
8270 }
8271 }
8272
8273 /*
8274 * Registers r13 & r14 depend on the current mode.
8275 * If we are in a given mode, we copy the corresponding x registers to r13
8276 * and r14. Otherwise, we copy the x register to the banked r13 and r14
8277 * for the mode.
8278 */
8279 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8280 env->regs[13] = env->xregs[13];
8281 env->regs[14] = env->xregs[14];
8282 } else {
8283 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
8284
8285 /*
8286 * HYP is an exception in that it does not have its own banked r14 but
8287 * shares the USR r14
8288 */
8289 if (mode == ARM_CPU_MODE_HYP) {
8290 env->regs[14] = env->xregs[14];
8291 } else {
8292 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
8293 }
8294 }
8295
8296 if (mode == ARM_CPU_MODE_HYP) {
8297 env->regs[13] = env->xregs[15];
8298 } else {
8299 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
8300 }
8301
8302 if (mode == ARM_CPU_MODE_IRQ) {
8303 env->regs[14] = env->xregs[16];
8304 env->regs[13] = env->xregs[17];
8305 } else {
8306 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
8307 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
8308 }
8309
8310 if (mode == ARM_CPU_MODE_SVC) {
8311 env->regs[14] = env->xregs[18];
8312 env->regs[13] = env->xregs[19];
8313 } else {
8314 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
8315 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
8316 }
8317
8318 if (mode == ARM_CPU_MODE_ABT) {
8319 env->regs[14] = env->xregs[20];
8320 env->regs[13] = env->xregs[21];
8321 } else {
8322 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
8323 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
8324 }
8325
8326 if (mode == ARM_CPU_MODE_UND) {
8327 env->regs[14] = env->xregs[22];
8328 env->regs[13] = env->xregs[23];
8329 } else {
8330 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
8331 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
8332 }
8333
8334 /*
8335 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
8336 * mode, then we can copy to r8-r14. Otherwise, we copy to the
8337 * FIQ bank for r8-r14.
8338 */
8339 if (mode == ARM_CPU_MODE_FIQ) {
8340 for (i = 24; i < 31; i++) {
8341 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
8342 }
8343 } else {
8344 for (i = 24; i < 29; i++) {
8345 env->fiq_regs[i - 24] = env->xregs[i];
8346 }
8347 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
8348 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
8349 }
8350
8351 env->regs[15] = env->pc;
8352 }
8353
8354 static void take_aarch32_exception(CPUARMState *env, int new_mode,
8355 uint32_t mask, uint32_t offset,
8356 uint32_t newpc)
8357 {
8358 int new_el;
8359
8360 /* Change the CPU state so as to actually take the exception. */
8361 switch_mode(env, new_mode);
8362
8363 /*
8364 * For exceptions taken to AArch32 we must clear the SS bit in both
8365 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
8366 */
8367 env->pstate &= ~PSTATE_SS;
8368 env->spsr = cpsr_read(env);
8369 /* Clear IT bits. */
8370 env->condexec_bits = 0;
8371 /* Switch to the new mode, and to the correct instruction set. */
8372 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8373
8374 /* This must be after mode switching. */
8375 new_el = arm_current_el(env);
8376
8377 /* Set new mode endianness */
8378 env->uncached_cpsr &= ~CPSR_E;
8379 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
8380 env->uncached_cpsr |= CPSR_E;
8381 }
8382 /* J and IL must always be cleared for exception entry */
8383 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
8384 env->daif |= mask;
8385
8386 if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
8387 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
8388 env->uncached_cpsr |= CPSR_SSBS;
8389 } else {
8390 env->uncached_cpsr &= ~CPSR_SSBS;
8391 }
8392 }
8393
8394 if (new_mode == ARM_CPU_MODE_HYP) {
8395 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
8396 env->elr_el[2] = env->regs[15];
8397 } else {
8398 /* CPSR.PAN is normally preserved unless... */
8399 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
8400 switch (new_el) {
8401 case 3:
8402 if (!arm_is_secure_below_el3(env)) {
8403 /* ... the target is EL3, from non-secure state. */
8404 env->uncached_cpsr &= ~CPSR_PAN;
8405 break;
8406 }
8407 /* ... the target is EL3, from secure state ... */
8408 /* fall through */
8409 case 1:
8410 /* ... the target is EL1 and SCTLR.SPAN is 0. */
8411 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
8412 env->uncached_cpsr |= CPSR_PAN;
8413 }
8414 break;
8415 }
8416 }
8417 /*
8418 * This is a lie, as there was no c1_sys on V4T/V5; but who cares,
8419 * we should just guard the Thumb mode setting on V4.
8420 */
8421 if (arm_feature(env, ARM_FEATURE_V4T)) {
8422 env->thumb =
8423 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8424 }
8425 env->regs[14] = env->regs[15] + offset;
8426 }
8427 env->regs[15] = newpc;
8428
8429 if (tcg_enabled()) {
8430 arm_rebuild_hflags(env);
8431 }
8432 }
8433
8434 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
8435 {
8436 /*
8437 * Handle exception entry to Hyp mode; this is sufficiently
8438 * different to entry to other AArch32 modes that we handle it
8439 * separately here.
8440 *
8441 * The vector table entry used is always the 0x14 Hyp mode entry point,
8442 * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
8443 * The offset applied to the preferred return address is always zero
8444 * (see DDI0487C.a section G1.12.3).
8445 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
8446 */
8447 uint32_t addr, mask;
8448 ARMCPU *cpu = ARM_CPU(cs);
8449 CPUARMState *env = &cpu->env;
8450
8451 switch (cs->exception_index) {
8452 case EXCP_UDEF:
8453 addr = 0x04;
8454 break;
8455 case EXCP_SWI:
8456 addr = 0x08;
8457 break;
8458 case EXCP_BKPT:
8459 /* Fall through to prefetch abort. */
8460 case EXCP_PREFETCH_ABORT:
8461 env->cp15.ifar_s = env->exception.vaddress;
8462 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
8463 (uint32_t)env->exception.vaddress);
8464 addr = 0x0c;
8465 break;
8466 case EXCP_DATA_ABORT:
8467 env->cp15.dfar_s = env->exception.vaddress;
8468 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
8469 (uint32_t)env->exception.vaddress);
8470 addr = 0x10;
8471 break;
8472 case EXCP_IRQ:
8473 addr = 0x18;
8474 break;
8475 case EXCP_FIQ:
8476 addr = 0x1c;
8477 break;
8478 case EXCP_HVC:
8479 addr = 0x08;
8480 break;
8481 case EXCP_HYP_TRAP:
8482 addr = 0x14;
8483 break;
8484 default:
8485 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8486 }
8487
8488 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
8489 if (!arm_feature(env, ARM_FEATURE_V8)) {
8490 /*
8491 * QEMU syndrome values are v8-style. v7 has the IL bit
8492 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
8493 * If this is a v7 CPU, squash the IL bit in those cases.
8494 */
8495 if (cs->exception_index == EXCP_PREFETCH_ABORT ||
8496 (cs->exception_index == EXCP_DATA_ABORT &&
8497 !(env->exception.syndrome & ARM_EL_ISV)) ||
8498 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
8499 env->exception.syndrome &= ~ARM_EL_IL;
8500 }
8501 }
8502 env->cp15.esr_el[2] = env->exception.syndrome;
8503 }
8504
8505 if (arm_current_el(env) != 2 && addr < 0x14) {
8506 addr = 0x14;
8507 }
8508
8509 mask = 0;
8510 if (!(env->cp15.scr_el3 & SCR_EA)) {
8511 mask |= CPSR_A;
8512 }
8513 if (!(env->cp15.scr_el3 & SCR_IRQ)) {
8514 mask |= CPSR_I;
8515 }
8516 if (!(env->cp15.scr_el3 & SCR_FIQ)) {
8517 mask |= CPSR_F;
8518 }
8519
8520 addr += env->cp15.hvbar;
8521
8522 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
8523 }
8524
8525 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
8526 {
8527 ARMCPU *cpu = ARM_CPU(cs);
8528 CPUARMState *env = &cpu->env;
8529 uint32_t addr;
8530 uint32_t mask;
8531 int new_mode;
8532 uint32_t offset;
8533 uint32_t moe;
8534
8535 /* If this is a debug exception we must update the DBGDSCR.MOE bits */
8536 switch (syn_get_ec(env->exception.syndrome)) {
8537 case EC_BREAKPOINT:
8538 case EC_BREAKPOINT_SAME_EL:
8539 moe = 1;
8540 break;
8541 case EC_WATCHPOINT:
8542 case EC_WATCHPOINT_SAME_EL:
8543 moe = 10;
8544 break;
8545 case EC_AA32_BKPT:
8546 moe = 3;
8547 break;
8548 case EC_VECTORCATCH:
8549 moe = 5;
8550 break;
8551 default:
8552 moe = 0;
8553 break;
8554 }
8555
8556 if (moe) {
8557 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
8558 }
8559
8560 if (env->exception.target_el == 2) {
8561 /* Debug exceptions are reported differently on AArch32 */
8562 switch (syn_get_ec(env->exception.syndrome)) {
8563 case EC_BREAKPOINT:
8564 case EC_BREAKPOINT_SAME_EL:
8565 case EC_AA32_BKPT:
8566 case EC_VECTORCATCH:
8567 env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
8568 0, 0, 0x22);
8569 break;
8570 case EC_WATCHPOINT:
8571 env->exception.syndrome = syn_set_ec(env->exception.syndrome,
8572 EC_DATAABORT);
8573 break;
8574 case EC_WATCHPOINT_SAME_EL:
8575 env->exception.syndrome = syn_set_ec(env->exception.syndrome,
8576 EC_DATAABORT_SAME_EL);
8577 break;
8578 }
8579 arm_cpu_do_interrupt_aarch32_hyp(cs);
8580 return;
8581 }
8582
8583 switch (cs->exception_index) {
8584 case EXCP_UDEF:
8585 new_mode = ARM_CPU_MODE_UND;
8586 addr = 0x04;
8587 mask = CPSR_I;
8588 if (env->thumb) {
8589 offset = 2;
8590 } else {
8591 offset = 4;
8592 }
8593 break;
8594 case EXCP_SWI:
8595 new_mode = ARM_CPU_MODE_SVC;
8596 addr = 0x08;
8597 mask = CPSR_I;
8598 /* The PC already points to the next instruction. */
8599 offset = 0;
8600 break;
8601 case EXCP_BKPT:
8602 /* Fall through to prefetch abort. */
8603 case EXCP_PREFETCH_ABORT:
8604 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
8605 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
8606 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
8607 env->exception.fsr, (uint32_t)env->exception.vaddress);
8608 new_mode = ARM_CPU_MODE_ABT;
8609 addr = 0x0c;
8610 mask = CPSR_A | CPSR_I;
8611 offset = 4;
8612 break;
8613 case EXCP_DATA_ABORT:
8614 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
8615 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
8616 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
8617 env->exception.fsr,
8618 (uint32_t)env->exception.vaddress);
8619 new_mode = ARM_CPU_MODE_ABT;
8620 addr = 0x10;
8621 mask = CPSR_A | CPSR_I;
8622 offset = 8;
8623 break;
8624 case EXCP_IRQ:
8625 new_mode = ARM_CPU_MODE_IRQ;
8626 addr = 0x18;
8627 /* Disable IRQ and imprecise data aborts. */
8628 mask = CPSR_A | CPSR_I;
8629 offset = 4;
8630 if (env->cp15.scr_el3 & SCR_IRQ) {
8631 /* IRQ routed to monitor mode */
8632 new_mode = ARM_CPU_MODE_MON;
8633 mask |= CPSR_F;
8634 }
8635 break;
8636 case EXCP_FIQ:
8637 new_mode = ARM_CPU_MODE_FIQ;
8638 addr = 0x1c;
8639 /* Disable FIQ, IRQ and imprecise data aborts. */
8640 mask = CPSR_A | CPSR_I | CPSR_F;
8641 if (env->cp15.scr_el3 & SCR_FIQ) {
8642 /* FIQ routed to monitor mode */
8643 new_mode = ARM_CPU_MODE_MON;
8644 }
8645 offset = 4;
8646 break;
8647 case EXCP_VIRQ:
8648 new_mode = ARM_CPU_MODE_IRQ;
8649 addr = 0x18;
8650 /* Disable IRQ and imprecise data aborts. */
8651 mask = CPSR_A | CPSR_I;
8652 offset = 4;
8653 break;
8654 case EXCP_VFIQ:
8655 new_mode = ARM_CPU_MODE_FIQ;
8656 addr = 0x1c;
8657 /* Disable FIQ, IRQ and imprecise data aborts. */
8658 mask = CPSR_A | CPSR_I | CPSR_F;
8659 offset = 4;
8660 break;
8661 case EXCP_VSERR:
8662 {
8663 /*
8664 * Note that this is reported as a data abort, but the DFAR
8665 * has an UNKNOWN value. Construct the SError syndrome from
8666 * AET and ExT fields.
8667 */
8668 ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
8669
8670 if (extended_addresses_enabled(env)) {
8671 env->exception.fsr = arm_fi_to_lfsc(&fi);
8672 } else {
8673 env->exception.fsr = arm_fi_to_sfsc(&fi);
8674 }
8675 env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
8676 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
8677 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
8678 env->exception.fsr);
8679
8680 new_mode = ARM_CPU_MODE_ABT;
8681 addr = 0x10;
8682 mask = CPSR_A | CPSR_I;
8683 offset = 8;
8684 }
8685 break;
8686 case EXCP_SMC:
8687 new_mode = ARM_CPU_MODE_MON;
8688 addr = 0x08;
8689 mask = CPSR_A | CPSR_I | CPSR_F;
8690 offset = 0;
8691 break;
8692 case EXCP_MON_TRAP:
8693 new_mode = ARM_CPU_MODE_MON;
8694 addr = 0x04;
8695 mask = CPSR_A | CPSR_I | CPSR_F;
8696 if (env->thumb) {
8697 offset = 2;
8698 } else {
8699 offset = 4;
8700 }
8701 break;
8702 default:
8703 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8704 return; /* Never happens. Keep compiler happy. */
8705 }
8706
8707 if (new_mode == ARM_CPU_MODE_MON) {
8708 addr += env->cp15.mvbar;
8709 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
8710 /* High vectors. When enabled, base address cannot be remapped. */
8711 addr += 0xffff0000;
8712 } else {
8713 /*
8714 * ARM v7 architectures provide a vector base address register to remap
8715 * the interrupt vector table.
8716 * This register is only honoured in non-monitor mode, and is banked.
8717 * Note: only bits 31:5 are valid.
8718 */
8719 addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
8720 }
8721
8722 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
8723 env->cp15.scr_el3 &= ~SCR_NS;
8724 }
8725
8726 take_aarch32_exception(env, new_mode, mask, offset, addr);
8727 }
8728
8729 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
8730 {
8731 /*
8732 * Return the register number of the AArch64 view of the AArch32
8733 * register @aarch32_reg. The CPUARMState CPSR is assumed to still
8734 * be that of the AArch32 mode the exception came from.
8735 */
8736 int mode = env->uncached_cpsr & CPSR_M;
8737
8738 switch (aarch32_reg) {
8739 case 0 ... 7:
8740 return aarch32_reg;
8741 case 8 ... 12:
8742 return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
8743 case 13:
8744 switch (mode) {
8745 case ARM_CPU_MODE_USR:
8746 case ARM_CPU_MODE_SYS:
8747 return 13;
8748 case ARM_CPU_MODE_HYP:
8749 return 15;
8750 case ARM_CPU_MODE_IRQ:
8751 return 17;
8752 case ARM_CPU_MODE_SVC:
8753 return 19;
8754 case ARM_CPU_MODE_ABT:
8755 return 21;
8756 case ARM_CPU_MODE_UND:
8757 return 23;
8758 case ARM_CPU_MODE_FIQ:
8759 return 29;
8760 default:
8761 g_assert_not_reached();
8762 }
8763 case 14:
8764 switch (mode) {
8765 case ARM_CPU_MODE_USR:
8766 case ARM_CPU_MODE_SYS:
8767 case ARM_CPU_MODE_HYP:
8768 return 14;
8769 case ARM_CPU_MODE_IRQ:
8770 return 16;
8771 case ARM_CPU_MODE_SVC:
8772 return 18;
8773 case ARM_CPU_MODE_ABT:
8774 return 20;
8775 case ARM_CPU_MODE_UND:
8776 return 22;
8777 case ARM_CPU_MODE_FIQ:
8778 return 30;
8779 default:
8780 g_assert_not_reached();
8781 }
8782 case 15:
8783 return 31;
8784 default:
8785 g_assert_not_reached();
8786 }
8787 }
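
/*
 * For example, with the CPSR still indicating SVC mode, AArch32 r13
 * (SP_svc) maps to the AArch64 view x19 and r14 (LR_svc) to x18,
 * matching the layout used by aarch64_sync_32_to_64().
 */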
8788
8789 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
8790 {
8791 uint32_t ret = cpsr_read(env);
8792
8793 /* Move DIT to the correct location for SPSR_ELx */
8794 if (ret & CPSR_DIT) {
8795 ret &= ~CPSR_DIT;
8796 ret |= PSTATE_DIT;
8797 }
8798 /* Merge PSTATE.SS into SPSR_ELx */
8799 ret |= env->pstate & PSTATE_SS;
8800
8801 return ret;
8802 }
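
/*
 * (The move is needed because CPSR.DIT is bit 21 in the AArch32
 * format while the SPSR_ELx/PSTATE layout has DIT at bit 24.)
 */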
8803
8804 static bool syndrome_is_sync_extabt(uint32_t syndrome)
8805 {
8806 /* Return true if this syndrome value is a synchronous external abort */
8807 switch (syn_get_ec(syndrome)) {
8808 case EC_INSNABORT:
8809 case EC_INSNABORT_SAME_EL:
8810 case EC_DATAABORT:
8811 case EC_DATAABORT_SAME_EL:
8812 /* Look at fault status code for all the synchronous ext abort cases */
8813 switch (syndrome & 0x3f) {
8814 case 0x10:
8815 case 0x13:
8816 case 0x14:
8817 case 0x15:
8818 case 0x16:
8819 case 0x17:
8820 return true;
8821 default:
8822 return false;
8823 }
8824 default:
8825 return false;
8826 }
8827 }
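
/*
 * The fault status codes tested above are, per the Arm ARM FSC
 * encoding: 0x10 synchronous external abort not on a translation
 * table walk, 0x13 on a level -1 walk (FEAT_LPA2), and 0x14..0x17
 * on a level 0..3 walk.
 */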
8828
8829 /* Handle exception entry to a target EL which is using AArch64 */
8830 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
8831 {
8832 ARMCPU *cpu = ARM_CPU(cs);
8833 CPUARMState *env = &cpu->env;
8834 unsigned int new_el = env->exception.target_el;
8835 vaddr addr = env->cp15.vbar_el[new_el];
8836 unsigned int new_mode = aarch64_pstate_mode(new_el, true);
8837 unsigned int old_mode;
8838 unsigned int cur_el = arm_current_el(env);
8839 int rt;
8840
8841 if (tcg_enabled()) {
8842 /*
8843 * Note that new_el can never be 0. If cur_el is 0, then
8844 * el0_a64 is is_a64(), else el0_a64 is ignored.
8845 */
8846 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
8847 }
8848
8849 if (cur_el < new_el) {
8850 /*
8851 * Entry vector offset depends on whether the implemented EL
8852 * immediately lower than the target level is using AArch32 or AArch64
8853 */
8854 bool is_aa64;
8855 uint64_t hcr;
8856
8857 switch (new_el) {
8858 case 3:
8859 is_aa64 = arm_scr_rw_eff(env);
8860 break;
8861 case 2:
8862 hcr = arm_hcr_el2_eff(env);
8863 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
8864 is_aa64 = (hcr & HCR_RW) != 0;
8865 break;
8866 }
8867 /* fall through */
8868 case 1:
8869 is_aa64 = is_a64(env);
8870 break;
8871 default:
8872 g_assert_not_reached();
8873 }
8874
8875 if (is_aa64) {
8876 addr += 0x400;
8877 } else {
8878 addr += 0x600;
8879 }
8880 } else if (pstate_read(env) & PSTATE_SP) {
8881 addr += 0x200;
8882 }
8883
8884 switch (cs->exception_index) {
8885 case EXCP_GPC:
8886 qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
8887 env->cp15.mfar_el3);
8888 /* fall through */
8889 case EXCP_PREFETCH_ABORT:
8890 case EXCP_DATA_ABORT:
8891 /*
8892 * FEAT_DoubleFault allows synchronous external aborts taken to EL3
8893 * to be taken to the SError vector entrypoint.
8894 */
8895 if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
8896 syndrome_is_sync_extabt(env->exception.syndrome)) {
8897 addr += 0x180;
8898 }
8899 env->cp15.far_el[new_el] = env->exception.vaddress;
8900 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
8901 env->cp15.far_el[new_el]);
8902 /* fall through */
8903 case EXCP_BKPT:
8904 case EXCP_UDEF:
8905 case EXCP_SWI:
8906 case EXCP_HVC:
8907 case EXCP_HYP_TRAP:
8908 case EXCP_SMC:
8909 switch (syn_get_ec(env->exception.syndrome)) {
8910 case EC_ADVSIMDFPACCESSTRAP:
8911 /*
8912 * QEMU internal FP/SIMD syndromes from AArch32 include the
8913 * TA and coproc fields which are only exposed if the exception
8914 * is taken to AArch32 Hyp mode. Mask them out to get a valid
8915 * AArch64 format syndrome.
8916 */
8917 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
8918 break;
8919 case EC_CP14RTTRAP:
8920 case EC_CP15RTTRAP:
8921 case EC_CP14DTTRAP:
8922 /*
8923 * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
8924 * the raw register field from the insn; when taking this to
8925 * AArch64 we must convert it to the AArch64 view of the register
8926 * number. Notice that we read a 4-bit AArch32 register number and
8927 * write back a 5-bit AArch64 one.
8928 */
8929 rt = extract32(env->exception.syndrome, 5, 4);
8930 rt = aarch64_regnum(env, rt);
8931 env->exception.syndrome = deposit32(env->exception.syndrome,
8932 5, 5, rt);
8933 break;
8934 case EC_CP15RRTTRAP:
8935 case EC_CP14RRTTRAP:
8936 /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
8937 rt = extract32(env->exception.syndrome, 5, 4);
8938 rt = aarch64_regnum(env, rt);
8939 env->exception.syndrome = deposit32(env->exception.syndrome,
8940 5, 5, rt);
8941 rt = extract32(env->exception.syndrome, 10, 4);
8942 rt = aarch64_regnum(env, rt);
8943 env->exception.syndrome = deposit32(env->exception.syndrome,
8944 10, 5, rt);
8945 break;
8946 }
8947 env->cp15.esr_el[new_el] = env->exception.syndrome;
8948 break;
8949 case EXCP_IRQ:
8950 case EXCP_VIRQ:
8951 case EXCP_NMI:
8952 case EXCP_VINMI:
8953 addr += 0x80;
8954 break;
8955 case EXCP_FIQ:
8956 case EXCP_VFIQ:
8957 case EXCP_VFNMI:
8958 addr += 0x100;
8959 break;
8960 case EXCP_VSERR:
8961 addr += 0x180;
8962 /* Construct the SError syndrome from IDS and ISS fields. */
8963 env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
8964 env->cp15.esr_el[new_el] = env->exception.syndrome;
8965 break;
8966 default:
8967 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8968 }
8969
8970 if (is_a64(env)) {
8971 old_mode = pstate_read(env);
8972 aarch64_save_sp(env, arm_current_el(env));
8973 env->elr_el[new_el] = env->pc;
8974
8975 if (cur_el == 1 && new_el == 1) {
8976 uint64_t hcr = arm_hcr_el2_eff(env);
8977 if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
8978 (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
8979 /*
8980 * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
8981 * by setting M[3:2] to 0b10.
8982 * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
8983 * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
8984 */
8985 old_mode = deposit32(old_mode, 2, 2, 2);
8986 }
8987 }
8988 } else {
8989 old_mode = cpsr_read_for_spsr_elx(env);
8990 env->elr_el[new_el] = env->regs[15];
8991
8992 aarch64_sync_32_to_64(env);
8993
8994 env->condexec_bits = 0;
8995 }
8996 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
8997
8998 qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
8999 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
9000 env->elr_el[new_el]);
9001
9002 if (cpu_isar_feature(aa64_pan, cpu)) {
9003 /* The value of PSTATE.PAN is normally preserved, except when ... */
9004 new_mode |= old_mode & PSTATE_PAN;
9005 switch (new_el) {
9006 case 2:
9007 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
9008 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
9009 != (HCR_E2H | HCR_TGE)) {
9010 break;
9011 }
9012 /* fall through */
9013 case 1:
9014 /* ... the target is EL1 ... */
9015 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
9016 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
9017 new_mode |= PSTATE_PAN;
9018 }
9019 break;
9020 }
9021 }
9022 if (cpu_isar_feature(aa64_mte, cpu)) {
9023 new_mode |= PSTATE_TCO;
9024 }
9025
9026 if (cpu_isar_feature(aa64_ssbs, cpu)) {
9027 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
9028 new_mode |= PSTATE_SSBS;
9029 } else {
9030 new_mode &= ~PSTATE_SSBS;
9031 }
9032 }
9033
9034 if (cpu_isar_feature(aa64_nmi, cpu)) {
9035 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
9036 new_mode |= PSTATE_ALLINT;
9037 } else {
9038 new_mode &= ~PSTATE_ALLINT;
9039 }
9040 }
9041
9042 pstate_write(env, PSTATE_DAIF | new_mode);
9043 env->aarch64 = true;
9044 aarch64_restore_sp(env, new_el);
9045
9046 if (tcg_enabled()) {
9047 helper_rebuild_hflags_a64(env, new_el);
9048 }
9049
9050 env->pc = addr;
9051
9052 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
9053 new_el, env->pc, pstate_read(env));
9054 }
9055
9056 /*
9057 * Do semihosting call and set the appropriate return value. All the
9058 * permission and validity checks have been done at translate time.
9059 *
9060 * We only see semihosting exceptions in TCG, as they are not
9061 * trapped to the hypervisor in KVM.
9062 */
9063 #ifdef CONFIG_TCG
9064 static void tcg_handle_semihosting(CPUState *cs)
9065 {
9066 ARMCPU *cpu = ARM_CPU(cs);
9067 CPUARMState *env = &cpu->env;
9068
9069 if (is_a64(env)) {
9070 qemu_log_mask(CPU_LOG_INT,
9071 "...handling as semihosting call 0x%" PRIx64 "\n",
9072 env->xregs[0]);
9073 do_common_semihosting(cs);
9074 env->pc += 4;
9075 } else {
9076 qemu_log_mask(CPU_LOG_INT,
9077 "...handling as semihosting call 0x%x\n",
9078 env->regs[0]);
9079 do_common_semihosting(cs);
9080 env->regs[15] += env->thumb ? 2 : 4;
9081 }
9082 }
9083 #endif
9084
9085 /*
9086 * Handle a CPU exception for A and R profile CPUs.
9087 * Do any appropriate logging, handle PSCI calls, and then hand off
9088 * to the AArch64-entry or AArch32-entry function depending on the
9089 * target exception level's register width.
9090 *
9091 * Note: this is used for both TCG (as the do_interrupt tcg op),
9092 * and KVM to re-inject guest debug exceptions, and to
9093 * inject a Synchronous-External-Abort.
9094 */
9095 void arm_cpu_do_interrupt(CPUState *cs)
9096 {
9097 ARMCPU *cpu = ARM_CPU(cs);
9098 CPUARMState *env = &cpu->env;
9099 unsigned int new_el = env->exception.target_el;
9100
9101 assert(!arm_feature(env, ARM_FEATURE_M));
9102
9103 arm_log_exception(cs);
9104 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
9105 new_el);
9106 if (qemu_loglevel_mask(CPU_LOG_INT)
9107 && !excp_is_internal(cs->exception_index)) {
9108 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
9109 syn_get_ec(env->exception.syndrome),
9110 env->exception.syndrome);
9111 }
9112
9113 if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
9114 arm_handle_psci_call(cpu);
9115 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
9116 return;
9117 }
9118
9119 /*
9120 * Semihosting semantics depend on the register width of the code
9121 * that caused the exception, not the target exception level, so
9122 * must be handled here.
9123 */
9124 #ifdef CONFIG_TCG
9125 if (cs->exception_index == EXCP_SEMIHOST) {
9126 tcg_handle_semihosting(cs);
9127 return;
9128 }
9129 #endif
9130
9131 /*
9132 * Hooks may change global state so BQL should be held, also the
9133 * BQL needs to be held for any modification of
9134 * cs->interrupt_request.
9135 */
9136 g_assert(bql_locked());
9137
9138 arm_call_pre_el_change_hook(cpu);
9139
9140 assert(!excp_is_internal(cs->exception_index));
9141 if (arm_el_is_aa64(env, new_el)) {
9142 arm_cpu_do_interrupt_aarch64(cs);
9143 } else {
9144 arm_cpu_do_interrupt_aarch32(cs);
9145 }
9146
9147 arm_call_el_change_hook(cpu);
9148
9149 if (!kvm_enabled()) {
9150 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
9151 }
9152 }
9153 #endif /* !CONFIG_USER_ONLY */

uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        switch (mmu_idx) {
        case ARMMMUIdx_E20_0:
            el = 2;
            break;
        case ARMMMUIdx_E30_0:
            el = 3;
            break;
        default:
            el = 1;
            break;
        }
    }
    return env->cp15.sctlr_el[el];
}

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (regime_is_stage2(mmu_idx)) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits. */
        return extract32(tcr, 20, 1) * 3;
    }
}
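
/*
 * Worked example for aa64_va_parameter_tbi(): TCR_EL1 with TBI0 == 1
 * (bit 37) and TBI1 == 0 (bit 38) returns 0b01, i.e. only the lower
 * VA range ignores the top address byte.
 */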

int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (regime_is_stage2(mmu_idx)) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits. */
        return extract32(tcr, 29, 1) * 3;
    }
}

int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits. */
        return extract32(tcr, 30, 1) * 3;
    }
}
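
/*
 * Note on the "* 3" trick used above: multiplying a single 0/1 bit by
 * 3 produces 0b00 or 0b11, so single-range regimes report the same
 * value for both halves and callers can index with the bit-55 select
 * uniformly.
 */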

static ARMGranuleSize tg0_to_gran_size(int tg)
{
    switch (tg) {
    case 0:
        return Gran4K;
    case 1:
        return Gran64K;
    case 2:
        return Gran16K;
    default:
        return GranInvalid;
    }
}

static ARMGranuleSize tg1_to_gran_size(int tg)
{
    switch (tg) {
    case 1:
        return Gran16K;
    case 2:
        return Gran4K;
    case 3:
        return Gran64K;
    default:
        return GranInvalid;
    }
}
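
/*
 * NB: TG0 and TG1 use different encodings, as the two functions above
 * show: TG0 is 0/1/2 = 4K/64K/16K while TG1 is 1/2/3 = 16K/4K/64K, so
 * the lookup tables cannot be shared.
 */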

static inline bool have4k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
                  : cpu_isar_feature(aa64_tgran4, cpu);
}

static inline bool have16k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
                  : cpu_isar_feature(aa64_tgran16, cpu);
}

static inline bool have64k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
                  : cpu_isar_feature(aa64_tgran64, cpu);
}

static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
                                         bool stage2)
{
    switch (gran) {
    case Gran4K:
        if (have4k(cpu, stage2)) {
            return gran;
        }
        break;
    case Gran16K:
        if (have16k(cpu, stage2)) {
            return gran;
        }
        break;
    case Gran64K:
        if (have64k(cpu, stage2)) {
            return gran;
        }
        break;
    case GranInvalid:
        break;
    }
    /*
     * If the guest selects a granule size that isn't implemented,
     * the architecture requires that we behave as if it selected one
     * that is (with an IMPDEF choice of which one to pick). We choose
     * to implement the smallest supported granule size.
     */
    if (have4k(cpu, stage2)) {
        return Gran4K;
    }
    if (have16k(cpu, stage2)) {
        return Gran16K;
    }
    assert(have64k(cpu, stage2));
    return Gran64K;
}
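
/*
 * For example, a guest that programs TG0 for 16K pages on a CPU
 * advertising only 4K and 64K support is treated as if it had asked
 * for 4K, the smallest granule this implementation supports.
 */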

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    bool epd, hpd, tsz_oob, ds, ha, hd;
    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
    ARMGranuleSize gran;
    ARMCPU *cpu = env_archcpu(env);
    bool stage2 = regime_is_stage2(mmu_idx);

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
        if (stage2) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
        sh = extract32(tcr, 12, 2);
        ps = extract32(tcr, 16, 3);
        ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
        hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
        ds = extract64(tcr, 32, 1);
    } else {
        bool e0pd;

        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            gran = tg0_to_gran_size(extract32(tcr, 14, 2));
            epd = extract32(tcr, 7, 1);
            sh = extract32(tcr, 12, 2);
            hpd = extract64(tcr, 41, 1);
            e0pd = extract64(tcr, 55, 1);
        } else {
            tsz = extract32(tcr, 16, 6);
            gran = tg1_to_gran_size(extract32(tcr, 30, 2));
            epd = extract32(tcr, 23, 1);
            sh = extract32(tcr, 28, 2);
            hpd = extract64(tcr, 42, 1);
            e0pd = extract64(tcr, 56, 1);
        }
        ps = extract64(tcr, 32, 3);
        ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
        hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
        ds = extract64(tcr, 59, 1);

        if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
            regime_is_user(env, mmu_idx)) {
            epd = true;
        }
    }

    gran = sanitize_gran_size(cpu, gran, stage2);

    if (cpu_isar_feature(aa64_st, cpu)) {
        max_tsz = 48 - (gran == Gran64K);
    } else {
        max_tsz = 39;
    }
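
    /*
     * Worked example (assuming the aa64_st test above corresponds to
     * FEAT_TTST, small translation tables): max_tsz == 48 allows a
     * minimum region size of 2^(64 - 48) == 64KiB; without it the
     * limit of 39 gives 2^25 == 32MiB.
     */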

    /*
     * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
     * adjust the effective value of DS, as documented.
     */
    min_tsz = 16;
    if (gran == Gran64K) {
        if (cpu_isar_feature(aa64_lva, cpu)) {
            min_tsz = 12;
        }
        ds = false;
    } else if (ds) {
        if (regime_is_stage2(mmu_idx)) {
            if (gran == Gran16K) {
                ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
            }
        } else {
            if (gran == Gran16K) {
                ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
            }
        }
        if (ds) {
            min_tsz = 12;
        }
    }
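
    /*
     * In other words: min_tsz == 12 permits a 2^(64 - 12) == 52-bit
     * input address, reachable either via FEAT_LVA with 64K pages or
     * via FEAT_LPA2 (effective DS == 1) with 4K/16K pages.
     */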

    if (stage2 && el1_is_aa32) {
        /*
         * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
         * are loosened: a configured IPA of 40 bits is permitted even if
         * the implemented PA is less than that (and so a 40 bit IPA would
         * fault for an AArch64 EL1). See R_DTLMN.
         */
        min_tsz = MIN(min_tsz, 24);
    }

    if (tsz > max_tsz) {
        tsz = max_tsz;
        tsz_oob = true;
    } else if (tsz < min_tsz) {
        tsz = min_tsz;
        tsz_oob = true;
    } else {
        tsz_oob = false;
    }

    /* Present TBI as a composite with TBID. */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;
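
    /*
     * E.g. with TBI1 == 1 and TBID1 == 1, an upper-range data access
     * (data == true) still sees tbi == 1, but an instruction fetch
     * (data == false) sees tbi == 0: TBID restricts top-byte-ignore
     * to data accesses only.
     */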

    return (ARMVAParameters) {
        .tsz = tsz,
        .ps = ps,
        .sh = sh,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .tsz_oob = tsz_oob,
        .ds = ds,
        .ha = ha,
        .hd = ha && hd,
        .gran = gran,
    };
}

/*
 * Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2;

    /*
     * CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible.
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    /*
     * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);

        switch (fpen) {
        case 1:
            if (cur_el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            /* Trap from Secure PL0 or PL1 to Secure PL1. */
            if (!arm_el_is_aa64(env, 3)
                && (cur_el == 3 || arm_is_secure_below_el3(env))) {
                return 3;
            }
            if (cur_el <= 1) {
                return 1;
            }
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /*
     * CPTR_EL2 is present in v7VE or v8, and changes format
     * with HCR_EL2.E2H (regardless of TGE).
     */
    if (cur_el <= 2) {
        if (hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
            case 1:
                if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else if (arm_is_el2_enabled(env)) {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3 : present in v8 */
    if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
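
/*
 * Typical usage (illustrative): a caller passes the current EL, as in
 * fp_exception_el(env, arm_current_el(env)); a nonzero result means FP
 * instructions must trap to that EL rather than execute.
 */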

/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    default:
        g_assert_not_reached();
    }
}
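
/*
 * NB: for M-profile the privilege level is encoded directly in the
 * ARM_MMU_IDX_M_PRIV field of the index, which is why no switch is
 * needed in that path above.
 */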

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    ARMMMUIdx idx;
    uint64_t hcr;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost. */
    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            idx = ARMMMUIdx_E20_0;
        } else if (arm_is_secure_below_el3(env) &&
                   !arm_el_is_aa64(env, 3)) {
            idx = ARMMMUIdx_E30_0;
        } else {
            idx = ARMMMUIdx_E10_0;
        }
        break;
    case 1:
        if (arm_pan_enabled(env)) {
            idx = ARMMMUIdx_E10_1_PAN;
        } else {
            idx = ARMMMUIdx_E10_1;
        }
        break;
    case 2:
        /* Note that TGE does not apply at EL2. */
        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            if (arm_pan_enabled(env)) {
                idx = ARMMMUIdx_E20_2_PAN;
            } else {
                idx = ARMMMUIdx_E20_2;
            }
        } else {
            idx = ARMMMUIdx_E2;
        }
        break;
    case 3:
        if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) {
            return ARMMMUIdx_E30_3_PAN;
        }
        return ARMMMUIdx_E3;
    default:
        g_assert_not_reached();
    }

    return idx;
}
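
/*
 * For example, EL0 with HCR_EL2.{E2H,TGE} == {1,1} runs under the
 * EL2&0 regime and so gets ARMMMUIdx_E20_0, whereas plain EL0 under
 * an EL1 OS uses ARMMMUIdx_E10_0.
 */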

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers. The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs. */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr. */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
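
/*
 * Worked example: each uint64_t of a predicate holds 16 bits per VQ,
 * i.e. four VQ's worth. For vq == 3, pmask == ~(-1ULL << 48) keeps the
 * low 48 bits of p[0], and the loop then clears the remaining words
 * entirely (the 17 registers are the 16 pregs plus FFR).
 */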

static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
{
    int exc_el;

    if (sm) {
        exc_el = sme_exception_el(env, el);
    } else {
        exc_el = sve_exception_el(env, el);
    }
    if (exc_el) {
        return 0; /* disabled */
    }
    return sve_vqm1_for_el_sm(env, el, sm);
}
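
/*
 * NB: the return value is VQ minus one, so 0 doubles as both
 * "disabled" and the minimum 128-bit vector length; that is
 * sufficient for the narrowing comparison in aarch64_sve_change_el()
 * below.
 */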

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64, sm;

    /* Nothing to do if no SVE. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL. */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;

    /*
     * Both AArch64.TakeException and AArch64.ExceptionReturn
     * invoke ResetSVEState when taking an exception from, or
     * returning to, AArch32 state when PSTATE.SM is enabled.
     */
    sm = FIELD_EX64(env->svcr, SVCR, SM);
    if (old_a64 != new_a64 && sm) {
        arm_reset_sve_state(env);
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_len = new_len = 0;
    if (old_a64) {
        old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
    }
    if (new_a64) {
        new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
    }

    /* When changing vector length, clear inaccessible state. */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

#ifndef CONFIG_USER_ONLY
ARMSecuritySpace arm_security_space(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_secure_to_space(env->v7m.secure);
    }

    /*
     * If EL3 is not supported then the secure state is implementation
     * defined, in which case QEMU defaults to non-secure.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        return ARMSS_NonSecure;
    }

    /* Check for AArch64 EL3 or AArch32 Mon. */
    if (is_a64(env)) {
        if (extract32(env->pstate, 2, 2) == 3) {
            if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
                return ARMSS_Root;
            } else {
                return ARMSS_Secure;
            }
        }
    } else {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            return ARMSS_Secure;
        }
    }

    return arm_security_space_below_el3(env);
}

ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
{
    assert(!arm_feature(env, ARM_FEATURE_M));

    /*
     * If EL3 is not supported then the secure state is implementation
     * defined, in which case QEMU defaults to non-secure.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        return ARMSS_NonSecure;
    }

    /*
     * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
     * Ignoring NSE when !NS retains consistency without having to
     * modify other predicates.
     */
    if (!(env->cp15.scr_el3 & SCR_NS)) {
        return ARMSS_Secure;
    } else if (env->cp15.scr_el3 & SCR_NSE) {
        return ARMSS_Realm;
    } else {
        return ARMSS_NonSecure;
    }
}
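
/*
 * Summary of the SCR_EL3.{NSE,NS} decode above:
 *   {0,0} -> Secure, {0,1} -> NonSecure, {1,1} -> Realm,
 *   {1,0} -> Reserved (treated as Secure here, since NSE is ignored
 *   when NS is clear).
 */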
#endif /* !CONFIG_USER_ONLY */