1 /*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "trace.h"
12 #include "cpu.h"
13 #include "internals.h"
14 #include "cpu-features.h"
15 #include "exec/page-protection.h"
16 #include "exec/mmap-lock.h"
17 #include "qemu/main-loop.h"
18 #include "qemu/timer.h"
19 #include "qemu/bitops.h"
20 #include "qemu/qemu-print.h"
21 #include "exec/cputlb.h"
22 #include "exec/translation-block.h"
23 #include "hw/irq.h"
24 #include "system/cpu-timers.h"
25 #include "exec/icount.h"
26 #include "system/kvm.h"
27 #include "system/tcg.h"
28 #include "qapi/error.h"
29 #include "qemu/guest-random.h"
30 #ifdef CONFIG_TCG
31 #include "accel/tcg/probe.h"
32 #include "accel/tcg/getpc.h"
33 #include "semihosting/common-semi.h"
34 #endif
35 #include "cpregs.h"
36 #include "target/arm/gtimer.h"
37 #include "qemu/plugin.h"
38
39 #define HELPER_H "tcg/helper.h"
40 #include "exec/helper-proto.h.inc"
41
42 static void switch_mode(CPUARMState *env, int mode);
43
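/*
 * qsort() comparator for the 64-bit cpreg index arrays; used by
 * arm_init_cpreg_list() below to keep cpreg_indexes[] sorted by key.
 */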
44 int compare_u64(const void *a, const void *b)
45 {
46 if (*(uint64_t *)a > *(uint64_t *)b) {
47 return 1;
48 }
49 if (*(uint64_t *)a < *(uint64_t *)b) {
50 return -1;
51 }
52 return 0;
53 }
54
55 /*
56 * Macros which are lvalues for the field in CPUARMState for the
57 * ARMCPRegInfo *ri.
58 */
59 #define CPREG_FIELD32(env, ri) \
60 (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
61 #define CPREG_FIELD64(env, ri) \
62 (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
63
64 uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
65 {
66 assert(ri->fieldoffset);
67 switch (cpreg_field_type(ri)) {
68 case MO_64:
69 return CPREG_FIELD64(env, ri);
70 case MO_32:
71 return CPREG_FIELD32(env, ri);
72 default:
73 g_assert_not_reached();
74 }
75 }
76
77 void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
78 {
79 assert(ri->fieldoffset);
80 switch (cpreg_field_type(ri)) {
81 case MO_64:
82 CPREG_FIELD64(env, ri) = value;
83 break;
84 case MO_32:
85 CPREG_FIELD32(env, ri) = value;
86 break;
87 default:
88 g_assert_not_reached();
89 }
90 }
91
92 #undef CPREG_FIELD32
93 #undef CPREG_FIELD64
94
95 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
96 {
97 return (char *)env + ri->fieldoffset;
98 }
99
100 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
101 {
102 /* Raw read of a coprocessor register (as needed for migration, etc). */
103 if (ri->type & ARM_CP_CONST) {
104 return ri->resetvalue;
105 } else if (ri->raw_readfn) {
106 return ri->raw_readfn(env, ri);
107 } else if (ri->readfn) {
108 return ri->readfn(env, ri);
109 } else {
110 return raw_read(env, ri);
111 }
112 }
113
114 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
115 uint64_t v)
116 {
117 /*
118 * Raw write of a coprocessor register (as needed for migration, etc).
119 * Note that constant registers are treated as write-ignored; the
120 * caller should check for success by whether a readback gives the
121 * value written.
122 */
123 if (ri->type & ARM_CP_CONST) {
124 return;
125 } else if (ri->raw_writefn) {
126 ri->raw_writefn(env, ri, v);
127 } else if (ri->writefn) {
128 ri->writefn(env, ri, v);
129 } else {
130 raw_write(env, ri, v);
131 }
132 }
133
134 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
135 {
136 /*
137 * Return true if the regdef would cause an assertion if you called
138 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
139 * program bug for it not to have the NO_RAW flag).
140 * NB that returning false here doesn't necessarily mean that calling
141 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
142 * read/write access functions which are safe for raw use" from "has
143 * read/write access functions which have side effects but has forgotten
144 * to provide raw access functions".
145 * The tests here line up with the conditions in read/write_raw_cp_reg()
146 * and assertions in raw_read()/raw_write().
147 */
148 if ((ri->type & ARM_CP_CONST) ||
149 ri->fieldoffset ||
150 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
151 return false;
152 }
153 return true;
154 }
155
156 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
157 {
158 /* Write the coprocessor state from cpu->env to the (index,value) list. */
159 int i;
160 bool ok = true;
161
162 for (i = 0; i < cpu->cpreg_array_len; i++) {
163 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
164 const ARMCPRegInfo *ri;
165 uint64_t newval;
166
167 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
168 if (!ri) {
169 ok = false;
170 continue;
171 }
172 if (ri->type & ARM_CP_NO_RAW) {
173 continue;
174 }
175
176 newval = read_raw_cp_reg(&cpu->env, ri);
177 if (kvm_sync) {
178 /*
179 * Only sync if the previous list->cpustate sync succeeded.
180 * Rather than tracking the success/failure state for every
181 * item in the list, we just recheck "does the raw write we must
182 * have made in write_list_to_cpustate() read back OK" here.
183 */
184 uint64_t oldval = cpu->cpreg_values[i];
185
186 if (oldval == newval) {
187 continue;
188 }
189
190 write_raw_cp_reg(&cpu->env, ri, oldval);
191 if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
192 continue;
193 }
194
195 write_raw_cp_reg(&cpu->env, ri, newval);
196 }
197 cpu->cpreg_values[i] = newval;
198 }
199 return ok;
200 }
201
202 bool write_list_to_cpustate(ARMCPU *cpu)
203 {
204 int i;
205 bool ok = true;
206
207 for (i = 0; i < cpu->cpreg_array_len; i++) {
208 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
209 uint64_t v = cpu->cpreg_values[i];
210 const ARMCPRegInfo *ri;
211
212 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
213 if (!ri) {
214 ok = false;
215 continue;
216 }
217 if (ri->type & ARM_CP_NO_RAW) {
218 continue;
219 }
220 /*
221 * Write value and confirm it reads back as written
222 * (to catch read-only registers and partially read-only
223 * registers where the incoming migration value doesn't match)
224 */
225 write_raw_cp_reg(&cpu->env, ri, v);
226 if (read_raw_cp_reg(&cpu->env, ri) != v) {
227 ok = false;
228 }
229 }
230 return ok;
231 }
232
233 static void add_cpreg_to_list(gpointer key, gpointer value, gpointer opaque)
234 {
235 ARMCPU *cpu = opaque;
236 uint32_t regidx = (uintptr_t)key;
237 const ARMCPRegInfo *ri = value;
238
239 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
240 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
241 /* The value array need not be initialized at this point */
242 cpu->cpreg_array_len++;
243 }
244 }
245
246 static void count_cpreg(gpointer key, gpointer value, gpointer opaque)
247 {
248 ARMCPU *cpu = opaque;
249 const ARMCPRegInfo *ri = value;
250
251 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
252 cpu->cpreg_array_len++;
253 }
254 }
255
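/*
 * Building the list is a two-pass walk over the cp_regs hash table:
 * count_cpreg() sizes the arrays, add_cpreg_to_list() fills in the
 * index array, and the result is then sorted with compare_u64().
 */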
256 void arm_init_cpreg_list(ARMCPU *cpu)
257 {
258 /*
259 * Initialise the cpreg_tuples[] array based on the cp_regs hash.
260 * Note that we require cpreg_tuples[] to be sorted by key ID.
261 */
262 int arraylen;
263
264 cpu->cpreg_array_len = 0;
265 g_hash_table_foreach(cpu->cp_regs, count_cpreg, cpu);
266
267 arraylen = cpu->cpreg_array_len;
268 if (arraylen) {
269 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
270 cpu->cpreg_values = g_new(uint64_t, arraylen);
271 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
272 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
273 } else {
274 cpu->cpreg_indexes = NULL;
275 cpu->cpreg_values = NULL;
276 cpu->cpreg_vmstate_indexes = NULL;
277 cpu->cpreg_vmstate_values = NULL;
278 }
279 cpu->cpreg_vmstate_array_len = arraylen;
280 cpu->cpreg_array_len = 0;
281
282 g_hash_table_foreach(cpu->cp_regs, add_cpreg_to_list, cpu);
283
284 assert(cpu->cpreg_array_len == arraylen);
285
286 if (arraylen) {
287 qsort(cpu->cpreg_indexes, arraylen, sizeof(uint64_t), compare_u64);
288 }
289 }
290
291 bool arm_pan_enabled(CPUARMState *env)
292 {
293 if (is_a64(env)) {
294 if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
295 return false;
296 }
297 return env->pstate & PSTATE_PAN;
298 } else {
299 return env->uncached_cpsr & CPSR_PAN;
300 }
301 }
302
303 /*
304 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
305 */
306 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
307 const ARMCPRegInfo *ri,
308 bool isread)
309 {
310 if (!is_a64(env) && arm_current_el(env) == 3 &&
311 arm_is_secure_below_el3(env)) {
312 return CP_ACCESS_UNDEFINED;
313 }
314 return CP_ACCESS_OK;
315 }
316
317 /*
318 * Some secure-only AArch32 registers trap to EL3 if used from
319 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
320 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
321 * We assume that the .access field is set to PL1_RW.
322 */
323 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
324 const ARMCPRegInfo *ri,
325 bool isread)
326 {
327 if (arm_current_el(env) == 3) {
328 return CP_ACCESS_OK;
329 }
330 if (arm_is_secure_below_el3(env)) {
331 if (env->cp15.scr_el3 & SCR_EEL2) {
332 return CP_ACCESS_TRAP_EL2;
333 }
334 return CP_ACCESS_TRAP_EL3;
335 }
336 /* This will be EL1 NS and EL2 NS, which just UNDEF */
337 return CP_ACCESS_UNDEFINED;
338 }
339
340 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
341 CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
342 bool isread)
343 {
344 if (arm_current_el(env) == 1) {
345 uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
346 if (arm_hcr_el2_eff(env) & trap) {
347 return CP_ACCESS_TRAP_EL2;
348 }
349 }
350 return CP_ACCESS_OK;
351 }
352
353 /* Check for traps from EL1 due to HCR_EL2.TSW. */
354 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
355 bool isread)
356 {
357 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
358 return CP_ACCESS_TRAP_EL2;
359 }
360 return CP_ACCESS_OK;
361 }
362
363 /* Check for traps from EL1 due to HCR_EL2.TACR. */
364 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
365 bool isread)
366 {
367 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
368 return CP_ACCESS_TRAP_EL2;
369 }
370 return CP_ACCESS_OK;
371 }
372
373 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
374 {
375 ARMCPU *cpu = env_archcpu(env);
376
377 raw_write(env, ri, value);
378 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
379 }
380
381 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
382 {
383 ARMCPU *cpu = env_archcpu(env);
384
385 if (raw_read(env, ri) != value) {
386 /*
387 * Unlike real hardware the qemu TLB uses virtual addresses,
388 * not modified virtual addresses, so this causes a TLB flush.
389 */
390 tlb_flush(CPU(cpu));
391 raw_write(env, ri, value);
392 }
393 }
394
395 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
396 uint64_t value)
397 {
398 ARMCPU *cpu = env_archcpu(env);
399
400 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
401 && !extended_addresses_enabled(env)) {
402 /*
403 * For VMSA (when not using the LPAE long descriptor page table
404 * format) this register includes the ASID, so do a TLB flush.
405 * For PMSA it is purely a process ID and no action is needed.
406 */
407 tlb_flush(CPU(cpu));
408 }
409 raw_write(env, ri, value);
410 }
411
412 int alle1_tlbmask(CPUARMState *env)
413 {
414 /*
415 * Note that the 'ALL' scope must invalidate both stage 1 and
416 * stage 2 translations, whereas most other scopes only invalidate
417 * stage 1 translations.
418 *
419 * For AArch32 this is only used for TLBIALLNSNH and VTTBR
420 * writes, so only needs to apply to NS PL1&0, not S PL1&0.
421 */
422 return (ARMMMUIdxBit_E10_1 |
423 ARMMMUIdxBit_E10_1_PAN |
424 ARMMMUIdxBit_E10_1_GCS |
425 ARMMMUIdxBit_E10_0 |
426 ARMMMUIdxBit_E10_0_GCS |
427 ARMMMUIdxBit_Stage2 |
428 ARMMMUIdxBit_Stage2_S);
429 }
430
431 static const ARMCPRegInfo cp_reginfo[] = {
432 /*
433 * Define the secure and non-secure FCSE identifier CP registers
434 * separately because there is no secure bank in V8 (no _EL3). This allows
435 * the secure register to be properly reset and migrated. There is also no
436 * v8 EL1 version of the register so the non-secure instance stands alone.
437 */
438 { .name = "FCSEIDR",
439 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
440 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
441 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
442 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
443 { .name = "FCSEIDR_S",
444 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
445 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
446 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
447 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
448 /*
449 * Define the secure and non-secure context identifier CP registers
450 * separately because there is no secure bank in V8 (no _EL3). This allows
451 * the secure register to be properly reset and migrated. In the
452 * non-secure case, the 32-bit register will have reset and migration
453 * disabled during registration as it is handled by the 64-bit instance.
454 */
455 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
456 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
457 .access = PL1_RW, .accessfn = access_tvm_trvm,
458 .fgt = FGT_CONTEXTIDR_EL1,
459 .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
460 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 1),
461 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 1),
462 .secure = ARM_CP_SECSTATE_NS,
463 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
464 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
465 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
466 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
467 .access = PL1_RW, .accessfn = access_tvm_trvm,
468 .secure = ARM_CP_SECSTATE_S,
469 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
470 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
471 };
472
473 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
474 /*
475 * NB: Some of these registers exist in v8 but with more precise
476 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
477 */
478 /* MMU Domain access control / MPU write buffer control */
479 { .name = "DACR",
480 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
481 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
482 .writefn = dacr_write, .raw_writefn = raw_write,
483 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
484 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
485 /*
486 * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
487 * For v6 and v5, these mappings are overly broad.
488 */
489 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
490 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
491 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
492 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
493 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
494 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
495 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
496 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
497 /* Cache maintenance ops; some of this space may be overridden later. */
498 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
499 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
500 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
501 };
502
503 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
504 /*
505 * Not all pre-v6 cores implemented this WFI, so this is slightly
506 * over-broad.
507 */
508 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
509 .access = PL1_W, .type = ARM_CP_WFI },
510 };
511
512 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
513 /*
514 * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
515 * is UNPREDICTABLE; we choose to NOP as most implementations do).
516 */
517 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
518 .access = PL1_W, .type = ARM_CP_WFI },
519 /*
520 * L1 cache lockdown. Not architectural in v6 and earlier but in practice
521 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
522 * OMAPCP will override this space.
523 */
524 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
525 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
526 .resetvalue = 0 },
527 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
528 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
529 .resetvalue = 0 },
530 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
531 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
532 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
533 .resetvalue = 0 },
534 /*
535 * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
536 * implementing it as RAZ means the "debug architecture version" bits
537 * will read as a reserved value, which should cause Linux to not try
538 * to use the debug hardware.
539 */
540 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
541 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
542 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
543 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
544 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
545 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
546 };
547
548 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
549 uint64_t value)
550 {
551 uint32_t mask = 0;
552
553 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
554 if (!arm_feature(env, ARM_FEATURE_V8)) {
555 /*
556 * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
557 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
558 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
559 */
560 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
561 /* VFP coprocessor: cp10 & cp11 [23:20] */
562 mask |= R_CPACR_ASEDIS_MASK |
563 R_CPACR_D32DIS_MASK |
564 R_CPACR_CP11_MASK |
565 R_CPACR_CP10_MASK;
566
567 if (!arm_feature(env, ARM_FEATURE_NEON)) {
568 /* ASEDIS [31] bit is RAO/WI */
569 value |= R_CPACR_ASEDIS_MASK;
570 }
571
572 /*
573 * VFPv3 and upwards with NEON implement 32 double precision
574 * registers (D0-D31).
575 */
576 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
577 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
578 value |= R_CPACR_D32DIS_MASK;
579 }
580 }
581 value &= mask;
582 }
583
584 /*
585 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
586 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
587 */
588 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
589 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
590 mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
591 value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
592 }
593
594 env->cp15.cpacr_el1 = value;
595 }
596
597 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
598 {
599 /*
600 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
601 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
602 */
603 uint64_t value = env->cp15.cpacr_el1;
604
605 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
606 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
607 value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
608 }
609 return value;
610 }
611
612
613 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
614 {
615 /*
616 * Call cpacr_write() so that we reset with the correct RAO bits set
617 * for our CPU features.
618 */
619 cpacr_write(env, ri, 0);
620 }
621
622 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
623 bool isread)
624 {
625 if (arm_feature(env, ARM_FEATURE_V8)) {
626 /* Check if CPACR accesses are to be trapped to EL2 */
627 if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
628 FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
629 return CP_ACCESS_TRAP_EL2;
630 /* Check if CPACR accesses are to be trapped to EL3 */
631 } else if (arm_current_el(env) < 3 &&
632 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
633 return CP_ACCESS_TRAP_EL3;
634 }
635 }
636
637 return CP_ACCESS_OK;
638 }
639
640 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
641 bool isread)
642 {
643 /* Check if CPTR accesses are set to trap to EL3 */
644 if (arm_current_el(env) == 2 &&
645 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
646 return CP_ACCESS_TRAP_EL3;
647 }
648
649 return CP_ACCESS_OK;
650 }
651
652 static const ARMCPRegInfo v6_cp_reginfo[] = {
653 /* prefetch by MVA in v6, NOP in v7 */
654 { .name = "MVA_prefetch",
655 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
656 .access = PL1_W, .type = ARM_CP_NOP },
657 /*
658 * We need to break the TB after ISB to execute self-modifying code
659 * correctly and also to take any pending interrupts immediately.
660 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
661 */
662 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
663 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
664 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
665 .access = PL0_W, .type = ARM_CP_NOP },
666 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
667 .access = PL0_W, .type = ARM_CP_NOP },
668 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
669 .access = PL1_RW, .accessfn = access_tvm_trvm,
670 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
671 offsetof(CPUARMState, cp15.ifar_ns) },
672 .resetvalue = 0, },
673 /*
674 * Watchpoint Fault Address Register : should actually only be present
675 * for 1136, 1176, 11MPCore.
676 */
677 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
678 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
679 { .name = "CPACR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
680 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
681 .fgt = FGT_CPACR_EL1,
682 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 1, 2),
683 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 2),
684 .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
685 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
686 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
687 };
688
689 /*
690 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
691 * We use these to decide whether we need to wrap a write to MDCR_EL2
692 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
693 */
694 #define MDCR_EL2_PMU_ENABLE_BITS \
695 (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
696 #define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
697
698 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
699 uint64_t value)
700 {
701 /*
702 * Note that even though the AArch64 view of this register has bits
703 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
704 * architectural requirements for bits which are RES0 only in some
705 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
706 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
707 */
708 raw_write(env, ri, value & ~0x1FULL);
709 }
710
711 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
712 {
713 /* Begin with base v8.0 state. */
714 uint64_t valid_mask = 0x3fff;
715 ARMCPU *cpu = env_archcpu(env);
716 uint64_t changed;
717
718 /*
719 * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
720 * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
721 * Instead, choose the format based on the mode of EL3.
722 */
723 if (arm_el_is_aa64(env, 3)) {
724 value |= SCR_FW | SCR_AW; /* RES1 */
725 valid_mask &= ~SCR_NET; /* RES0 */
726
727 if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
728 !cpu_isar_feature(aa64_aa32_el2, cpu)) {
729 value |= SCR_RW; /* RAO/WI */
730 }
731 if (cpu_isar_feature(aa64_ras, cpu)) {
732 valid_mask |= SCR_TERR;
733 }
734 if (cpu_isar_feature(aa64_lor, cpu)) {
735 valid_mask |= SCR_TLOR;
736 }
737 if (cpu_isar_feature(aa64_pauth, cpu)) {
738 valid_mask |= SCR_API | SCR_APK;
739 }
740 if (cpu_isar_feature(aa64_sel2, cpu)) {
741 valid_mask |= SCR_EEL2;
742 } else if (cpu_isar_feature(aa64_rme, cpu)) {
743 /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
744 value |= SCR_NS;
745 }
746 if (cpu_isar_feature(aa64_mte, cpu)) {
747 valid_mask |= SCR_ATA;
748 }
749 if (cpu_isar_feature(aa64_scxtnum, cpu)) {
750 valid_mask |= SCR_ENSCXT;
751 }
752 if (cpu_isar_feature(aa64_doublefault, cpu)) {
753 valid_mask |= SCR_EASE | SCR_NMEA;
754 }
755 if (cpu_isar_feature(aa64_sme, cpu)) {
756 valid_mask |= SCR_ENTP2;
757 }
758 if (cpu_isar_feature(aa64_hcx, cpu)) {
759 valid_mask |= SCR_HXEN;
760 }
761 if (cpu_isar_feature(aa64_fgt, cpu)) {
762 valid_mask |= SCR_FGTEN;
763 }
764 if (cpu_isar_feature(aa64_rme, cpu)) {
765 valid_mask |= SCR_NSE | SCR_GPF;
766 }
767 if (cpu_isar_feature(aa64_ecv, cpu)) {
768 valid_mask |= SCR_ECVEN;
769 }
770 if (cpu_isar_feature(aa64_gcs, cpu)) {
771 valid_mask |= SCR_GCSEN;
772 }
773 if (cpu_isar_feature(aa64_tcr2, cpu)) {
774 valid_mask |= SCR_TCR2EN;
775 }
776 if (cpu_isar_feature(aa64_sctlr2, cpu)) {
777 valid_mask |= SCR_SCTLR2EN;
778 }
779 if (cpu_isar_feature(aa64_s1pie, cpu) ||
780 cpu_isar_feature(aa64_s2pie, cpu)) {
781 valid_mask |= SCR_PIEN;
782 }
783 if (cpu_isar_feature(aa64_aie, cpu)) {
784 valid_mask |= SCR_AIEN;
785 }
786 if (cpu_isar_feature(aa64_mec, cpu)) {
787 valid_mask |= SCR_MECEN;
788 }
789 } else {
790 valid_mask &= ~(SCR_RW | SCR_ST);
791 if (cpu_isar_feature(aa32_ras, cpu)) {
792 valid_mask |= SCR_TERR;
793 }
794 }
795
796 if (!arm_feature(env, ARM_FEATURE_EL2)) {
797 valid_mask &= ~SCR_HCE;
798
799 /*
800 * On ARMv7, SMD (or SCD as it is called in v7) is only
801 * supported if EL2 exists. The bit is UNK/SBZP when
802 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
803 * when EL2 is unavailable.
804 * On ARMv8, this bit is always available.
805 */
806 if (arm_feature(env, ARM_FEATURE_V7) &&
807 !arm_feature(env, ARM_FEATURE_V8)) {
808 valid_mask &= ~SCR_SMD;
809 }
810 }
811
812 /* Clear all-context RES0 bits. */
813 value &= valid_mask;
814 changed = env->cp15.scr_el3 ^ value;
815 env->cp15.scr_el3 = value;
816
817 /*
818 * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
819 * we must invalidate all TLBs below EL3.
820 */
821 if (changed & (SCR_NS | SCR_NSE)) {
822 tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
823 ARMMMUIdxBit_E10_0_GCS |
824 ARMMMUIdxBit_E20_0 |
825 ARMMMUIdxBit_E20_0_GCS |
826 ARMMMUIdxBit_E10_1 |
827 ARMMMUIdxBit_E10_1_PAN |
828 ARMMMUIdxBit_E10_1_GCS |
829 ARMMMUIdxBit_E20_2 |
830 ARMMMUIdxBit_E20_2_PAN |
831 ARMMMUIdxBit_E20_2_GCS |
832 ARMMMUIdxBit_E2 |
833 ARMMMUIdxBit_E2_GCS));
834 }
835 }
836
837 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
838 {
839 /*
840 * scr_write will set the RES1 bits on an AArch64-only CPU.
841 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
842 */
843 scr_write(env, ri, 0);
844 }
845
846 static CPAccessResult access_tid4(CPUARMState *env,
847 const ARMCPRegInfo *ri,
848 bool isread)
849 {
850 if (arm_current_el(env) == 1 &&
851 (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
852 return CP_ACCESS_TRAP_EL2;
853 }
854
855 return CP_ACCESS_OK;
856 }
857
858 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
859 {
860 ARMCPU *cpu = env_archcpu(env);
861
862 /*
863 * Acquire the CSSELR index from the bank corresponding to the CCSIDR
864 * access.
865 */
866 uint32_t index = A32_BANKED_REG_GET(env, csselr,
867 ri->secure & ARM_CP_SECSTATE_S);
868
869 return cpu->ccsidr[index];
870 }
871
872 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
873 uint64_t value)
874 {
875 raw_write(env, ri, value & 0xf);
876 }
877
878 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
879 {
880 CPUState *cs = env_cpu(env);
881 bool el1 = arm_current_el(env) == 1;
882 uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
883 uint64_t ret = 0;
884
885 if (hcr_el2 & HCR_IMO) {
886 if (cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) {
887 ret |= CPSR_I;
888 }
889 if (cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) {
890 ret |= ISR_IS;
891 ret |= CPSR_I;
892 }
893 } else {
894 if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
895 ret |= CPSR_I;
896 }
897
898 if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
899 ret |= ISR_IS;
900 ret |= CPSR_I;
901 }
902 }
903
904 if (hcr_el2 & HCR_FMO) {
905 if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) {
906 ret |= CPSR_F;
907 }
908 if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) {
909 ret |= ISR_FS;
910 ret |= CPSR_F;
911 }
912 } else {
913 if (cpu_test_interrupt(cs, CPU_INTERRUPT_FIQ)) {
914 ret |= CPSR_F;
915 }
916 }
917
918 if (hcr_el2 & HCR_AMO) {
919 if (cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) {
920 ret |= CPSR_A;
921 }
922 }
923
924 return ret;
925 }
926
927 static CPAccessResult access_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
928 bool isread)
929 {
930 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
931 return CP_ACCESS_TRAP_EL2;
932 }
933
934 return CP_ACCESS_OK;
935 }
936
937 static const ARMCPRegInfo v7_cp_reginfo[] = {
938 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
939 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
940 .access = PL1_W, .type = ARM_CP_NOP },
941 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
942 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
943 .access = PL1_R,
944 .accessfn = access_tid4,
945 .fgt = FGT_CCSIDR_EL1,
946 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
947 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
948 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
949 .access = PL1_RW,
950 .accessfn = access_tid4,
951 .fgt = FGT_CSSELR_EL1,
952 .writefn = csselr_write, .resetvalue = 0,
953 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
954 offsetof(CPUARMState, cp15.csselr_ns) } },
955 /*
956 * Auxiliary ID register: this actually has an IMPDEF value but for now
957 * just RAZ for all cores:
958 */
959 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
960 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
961 .access = PL1_R, .type = ARM_CP_CONST,
962 .accessfn = access_tid1,
963 .fgt = FGT_AIDR_EL1,
964 .resetvalue = 0 },
965 /*
966 * Auxiliary fault status registers: these also are IMPDEF, and we
967 * choose to RAZ/WI for all cores.
968 */
969 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
970 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
971 .access = PL1_RW, .accessfn = access_tvm_trvm,
972 .fgt = FGT_AFSR0_EL1,
973 .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
974 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 0),
975 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 0),
976 .type = ARM_CP_CONST, .resetvalue = 0 },
977 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
978 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
979 .access = PL1_RW, .accessfn = access_tvm_trvm,
980 .fgt = FGT_AFSR1_EL1,
981 .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
982 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 1),
983 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 1),
984 .type = ARM_CP_CONST, .resetvalue = 0 },
985 /*
986 * MAIR can just read-as-written because we don't implement caches
987 * and so don't need to care about memory attributes.
988 */
989 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
990 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
991 .access = PL1_RW, .accessfn = access_tvm_trvm,
992 .fgt = FGT_MAIR_EL1,
993 .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
994 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 0),
995 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 0),
996 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
997 .resetvalue = 0 },
998 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
999 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
1000 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
1001 .resetvalue = 0 },
1002 /*
1003 * For non-long-descriptor page tables these are PRRR and NMRR;
1004 * regardless they still act as reads-as-written for QEMU.
1005 */
1006 /*
1007 * MAIR0/1 are defined separately from their 64-bit counterpart which
1008 * allows them to assign the correct fieldoffset based on the endianness
1009 * handled in the field definitions.
1010 */
1011 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
1012 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
1013 .access = PL1_RW, .accessfn = access_tvm_trvm,
1014 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
1015 offsetof(CPUARMState, cp15.mair0_ns) },
1016 .resetfn = arm_cp_reset_ignore },
1017 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
1018 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
1019 .access = PL1_RW, .accessfn = access_tvm_trvm,
1020 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
1021 offsetof(CPUARMState, cp15.mair1_ns) },
1022 .resetfn = arm_cp_reset_ignore },
1023 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
1024 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
1025 .fgt = FGT_ISR_EL1,
1026 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
1027 };
1028
1029 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1030 uint64_t value)
1031 {
1032 value &= 1;
1033 env->teecr = value;
1034 }
1035
1036 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1037 bool isread)
1038 {
1039 /*
1040 * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
1041 * at all, so we don't need to check whether we're v8A.
1042 */
1043 if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
1044 (env->cp15.hstr_el2 & HSTR_TTEE)) {
1045 return CP_ACCESS_TRAP_EL2;
1046 }
1047 return CP_ACCESS_OK;
1048 }
1049
1050 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1051 bool isread)
1052 {
1053 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1054 return CP_ACCESS_TRAP_EL1;
1055 }
1056 return teecr_access(env, ri, isread);
1057 }
1058
1059 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
1060 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
1061 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
1062 .resetvalue = 0,
1063 .writefn = teecr_write, .accessfn = teecr_access },
1064 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
1065 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
1066 .accessfn = teehbr_access, .resetvalue = 0 },
1067 };
1068
1069 static const ARMCPRegInfo v6k_cp_reginfo[] = {
1070 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
1071 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
1072 .access = PL0_RW,
1073 .fgt = FGT_TPIDR_EL0,
1074 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
1075 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
1076 .access = PL0_RW,
1077 .fgt = FGT_TPIDR_EL0,
1078 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
1079 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
1080 .resetfn = arm_cp_reset_ignore },
1081 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
1082 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
1083 .access = PL0_R | PL1_W,
1084 .fgt = FGT_TPIDRRO_EL0,
1085 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
1086 .resetvalue = 0},
1087 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
1088 .access = PL0_R | PL1_W,
1089 .fgt = FGT_TPIDRRO_EL0,
1090 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
1091 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
1092 .resetfn = arm_cp_reset_ignore },
1093 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
1094 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
1095 .access = PL1_RW,
1096 .fgt = FGT_TPIDR_EL1,
1097 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
1098 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
1099 .access = PL1_RW,
1100 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
1101 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
1102 .resetvalue = 0 },
1103 };
1104
1105 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1106 {
1107 ARMCPU *cpu = env_archcpu(env);
1108
1109 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
1110 }
1111
1112 #ifndef CONFIG_USER_ONLY
1113
1114 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1115 bool isread)
1116 {
1117 /*
1118 * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1119 * Writable only at the highest implemented exception level.
1120 */
1121 int el = arm_current_el(env);
1122 uint64_t hcr;
1123 uint32_t cntkctl;
1124
1125 switch (el) {
1126 case 0:
1127 hcr = arm_hcr_el2_eff(env);
1128 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
1129 cntkctl = env->cp15.cnthctl_el2;
1130 } else {
1131 cntkctl = env->cp15.c14_cntkctl;
1132 }
1133 if (!extract32(cntkctl, 0, 2)) {
1134 return CP_ACCESS_TRAP_EL1;
1135 }
1136 break;
1137 case 1:
1138 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1139 arm_is_secure_below_el3(env)) {
1140 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1141 return CP_ACCESS_UNDEFINED;
1142 }
1143 break;
1144 case 2:
1145 case 3:
1146 break;
1147 }
1148
1149 if (!isread && el < arm_highest_el(env)) {
1150 return CP_ACCESS_UNDEFINED;
1151 }
1152
1153 return CP_ACCESS_OK;
1154 }
1155
1156 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1157 bool isread)
1158 {
1159 unsigned int cur_el = arm_current_el(env);
1160 bool has_el2 = arm_is_el2_enabled(env);
1161 uint64_t hcr = arm_hcr_el2_eff(env);
1162
1163 switch (cur_el) {
1164 case 0:
1165 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
1166 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
1167 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
1168 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
1169 }
1170
1171 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
1172 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1173 return CP_ACCESS_TRAP_EL1;
1174 }
1175 /* fall through */
1176 case 1:
1177 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
1178 if (has_el2 && timeridx == GTIMER_PHYS &&
1179 (hcr & HCR_E2H
1180 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
1181 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
1182 return CP_ACCESS_TRAP_EL2;
1183 }
1184 if (has_el2 && timeridx == GTIMER_VIRT) {
1185 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
1186 return CP_ACCESS_TRAP_EL2;
1187 }
1188 }
1189 break;
1190 }
1191 return CP_ACCESS_OK;
1192 }
1193
1194 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1195 bool isread)
1196 {
1197 unsigned int cur_el = arm_current_el(env);
1198 bool has_el2 = arm_is_el2_enabled(env);
1199 uint64_t hcr = arm_hcr_el2_eff(env);
1200
1201 switch (cur_el) {
1202 case 0:
1203 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
1204 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
1205 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
1206 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
1207 }
1208
1209 /*
1210 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
1211 * EL0 if EL0[PV]TEN is zero.
1212 */
1213 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1214 return CP_ACCESS_TRAP_EL1;
1215 }
1216 /* fall through */
1217
1218 case 1:
1219 if (has_el2 && timeridx == GTIMER_PHYS) {
1220 if (hcr & HCR_E2H) {
1221 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
1222 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
1223 return CP_ACCESS_TRAP_EL2;
1224 }
1225 } else {
1226 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
1227 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
1228 return CP_ACCESS_TRAP_EL2;
1229 }
1230 }
1231 }
1232 if (has_el2 && timeridx == GTIMER_VIRT) {
1233 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
1234 return CP_ACCESS_TRAP_EL2;
1235 }
1236 }
1237 break;
1238 }
1239 return CP_ACCESS_OK;
1240 }
1241
1242 static CPAccessResult gt_pct_access(CPUARMState *env,
1243 const ARMCPRegInfo *ri,
1244 bool isread)
1245 {
1246 return gt_counter_access(env, GTIMER_PHYS, isread);
1247 }
1248
1249 static CPAccessResult gt_vct_access(CPUARMState *env,
1250 const ARMCPRegInfo *ri,
1251 bool isread)
1252 {
1253 return gt_counter_access(env, GTIMER_VIRT, isread);
1254 }
1255
1256 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1257 bool isread)
1258 {
1259 return gt_timer_access(env, GTIMER_PHYS, isread);
1260 }
1261
1262 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1263 bool isread)
1264 {
1265 return gt_timer_access(env, GTIMER_VIRT, isread);
1266 }
1267
1268 static CPAccessResult gt_stimer_access(CPUARMState *env,
1269 const ARMCPRegInfo *ri,
1270 bool isread)
1271 {
1272 /*
1273 * The AArch64 register view of the secure physical timer is
1274 * always accessible from EL3, and configurably accessible from
1275 * Secure EL1.
1276 */
1277 switch (arm_current_el(env)) {
1278 case 1:
1279 if (!arm_is_secure(env)) {
1280 return CP_ACCESS_UNDEFINED;
1281 }
1282 if (arm_is_el2_enabled(env)) {
1283 return CP_ACCESS_UNDEFINED;
1284 }
1285 if (!(env->cp15.scr_el3 & SCR_ST)) {
1286 return CP_ACCESS_TRAP_EL3;
1287 }
1288 return CP_ACCESS_OK;
1289 case 0:
1290 case 2:
1291 return CP_ACCESS_UNDEFINED;
1292 case 3:
1293 return CP_ACCESS_OK;
1294 default:
1295 g_assert_not_reached();
1296 }
1297 }
1298
1299 static CPAccessResult gt_sel2timer_access(CPUARMState *env,
1300 const ARMCPRegInfo *ri,
1301 bool isread)
1302 {
1303 /*
1304 * The AArch64 register view of the secure EL2 timers is mostly
1305 * accessible from EL3 and EL2, although accesses can also be trapped
1306 * to EL2 from EL1 depending on the nested virt config.
1307 */
1308 switch (arm_current_el(env)) {
1309 case 0: /* UNDEFINED */
1310 return CP_ACCESS_UNDEFINED;
1311 case 1:
1312 if (!arm_is_secure(env)) {
1313 /* UNDEFINED */
1314 return CP_ACCESS_UNDEFINED;
1315 } else if (arm_hcr_el2_eff(env) & HCR_NV) {
1316 /* AArch64.SystemAccessTrap(EL2, 0x18) */
1317 return CP_ACCESS_TRAP_EL2;
1318 }
1319 /* UNDEFINED */
1320 return CP_ACCESS_UNDEFINED;
1321 case 2:
1322 if (!arm_is_secure(env)) {
1323 /* UNDEFINED */
1324 return CP_ACCESS_UNDEFINED;
1325 }
1326 return CP_ACCESS_OK;
1327 case 3:
1328 if (env->cp15.scr_el3 & SCR_EEL2) {
1329 return CP_ACCESS_OK;
1330 } else {
1331 return CP_ACCESS_UNDEFINED;
1332 }
1333 default:
1334 g_assert_not_reached();
1335 }
1336 }
1337
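/*
 * The generic timer counter advances at gt_cntfrq_hz ticks per second of
 * QEMU_CLOCK_VIRTUAL time; gt_cntfrq_period_ns() is the per-tick period
 * in nanoseconds, so the division below converts ns to counter ticks.
 */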
1338 uint64_t gt_get_countervalue(CPUARMState *env)
1339 {
1340 ARMCPU *cpu = env_archcpu(env);
1341
1342 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
1343 }
1344
1345 static void gt_update_irq(ARMCPU *cpu, int timeridx)
1346 {
1347 CPUARMState *env = &cpu->env;
1348 uint64_t cnthctl = env->cp15.cnthctl_el2;
1349 ARMSecuritySpace ss = arm_security_space(env);
1350 /* ISTATUS && !IMASK */
1351 int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;
1352
1353 /*
1354 * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
1355 * It is RES0 in Secure and NonSecure state.
1356 */
1357 if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
1358 ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) ||
1359 (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) {
1360 irqstate = 0;
1361 }
1362
1363 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1364 trace_arm_gt_update_irq(timeridx, irqstate);
1365 }
1366
1367 void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
1368 {
1369 /*
1370 * Changing security state between Root and Secure/NonSecure, which may
1371 * happen when switching EL, can change the effective value of CNTHCTL_EL2
1372 * mask bits. Update the IRQ state accordingly.
1373 */
1374 gt_update_irq(cpu, GTIMER_VIRT);
1375 gt_update_irq(cpu, GTIMER_PHYS);
1376 }
1377
1378 static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
1379 {
1380 if ((env->cp15.scr_el3 & SCR_ECVEN) &&
1381 FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
1382 arm_is_el2_enabled(env) &&
1383 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
1384 return env->cp15.cntpoff_el2;
1385 }
1386 return 0;
1387 }
1388
1389 static uint64_t gt_indirect_access_timer_offset(CPUARMState *env, int timeridx)
1390 {
1391 /*
1392 * Return the timer offset to use for indirect accesses to the timer.
1393 * This is the Offset value as defined in D12.2.4.1 "Operation of the
1394 * CompareValue views of the timers".
1395 *
1396 * The condition here is not always the same as the condition for
1397 * whether to apply an offset register when doing a direct read of
1398 * the counter sysreg; those conditions are described in the
1399 * access pseudocode for each counter register.
1400 */
1401 switch (timeridx) {
1402 case GTIMER_PHYS:
1403 return gt_phys_raw_cnt_offset(env);
1404 case GTIMER_VIRT:
1405 return env->cp15.cntvoff_el2;
1406 case GTIMER_HYP:
1407 case GTIMER_SEC:
1408 case GTIMER_HYPVIRT:
1409 case GTIMER_S_EL2_PHYS:
1410 case GTIMER_S_EL2_VIRT:
1411 return 0;
1412 default:
1413 g_assert_not_reached();
1414 }
1415 }
1416
1417 uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx)
1418 {
1419 /*
1420 * Return the timer offset to use for direct accesses to the
1421 * counter registers CNTPCT and CNTVCT, and for direct accesses
1422 * to the CNT*_TVAL registers.
1423 *
1424 * This isn't exactly the same as the indirect-access offset,
1425 * because here we also care about what EL the register access
1426 * is being made from.
1427 *
1428 * This corresponds to the access pseudocode for the registers.
1429 */
1430 uint64_t hcr;
1431
1432 switch (timeridx) {
1433 case GTIMER_PHYS:
1434 if (arm_current_el(env) >= 2) {
1435 return 0;
1436 }
1437 return gt_phys_raw_cnt_offset(env);
1438 case GTIMER_VIRT:
1439 switch (arm_current_el(env)) {
1440 case 2:
1441 hcr = arm_hcr_el2_eff(env);
1442 if (hcr & HCR_E2H) {
1443 return 0;
1444 }
1445 break;
1446 case 0:
1447 hcr = arm_hcr_el2_eff(env);
1448 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
1449 return 0;
1450 }
1451 break;
1452 }
1453 return env->cp15.cntvoff_el2;
1454 case GTIMER_HYP:
1455 case GTIMER_SEC:
1456 case GTIMER_HYPVIRT:
1457 case GTIMER_S_EL2_PHYS:
1458 case GTIMER_S_EL2_VIRT:
1459 return 0;
1460 default:
1461 g_assert_not_reached();
1462 }
1463 }
1464
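/*
 * Recompute ISTATUS and reprogram the backing QEMUTimer for one timer.
 * This is called from the CVAL/TVAL/CTL write handlers below whenever the
 * comparison inputs may have changed. Worked example of the unsigned
 * arithmetic: with count = 5, offset = 10 and cval = 2, (count - offset)
 * wraps to a huge value, so ISTATUS is 1 and the next transition is when
 * count reaches offset (count == 10).
 */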
1465 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1466 {
1467 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1468
1469 if (gt->ctl & 1) {
1470 /*
1471 * Timer enabled: calculate and set current ISTATUS, irq, and
1472 * reset timer to when ISTATUS next has to change
1473 */
1474 uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx);
1475 uint64_t count = gt_get_countervalue(&cpu->env);
1476 /* Note that this must be unsigned 64 bit arithmetic: */
1477 int istatus = count - offset >= gt->cval;
1478 uint64_t nexttick;
1479
1480 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1481
1482 if (istatus) {
1483 /*
1484 * Next transition is when (count - offset) rolls back over to 0.
1485 * If offset > count then this is when count == offset;
1486 * if offset <= count then this is when count == offset + 2^64
1487 * For the latter case we set nexttick to an "as far in the future
1488 * as possible" value and let the code below handle it.
1489 */
1490 if (offset > count) {
1491 nexttick = offset;
1492 } else {
1493 nexttick = UINT64_MAX;
1494 }
1495 } else {
1496 /*
1497 * Next transition is when (count - offset) == cval, i.e.
1498 * when count == (cval + offset).
1499 * If that would overflow, then again we set up the next interrupt
1500 * for "as far in the future as possible" for the code below.
1501 */
1502 if (uadd64_overflow(gt->cval, offset, &nexttick)) {
1503 nexttick = UINT64_MAX;
1504 }
1505 }
1506 /*
1507 * Note that the desired next expiry time might be beyond the
1508 * signed-64-bit range of a QEMUTimer -- in this case we just
1509 * set the timer for as far in the future as possible. When the
1510 * timer expires we will reset the timer for any remaining period.
1511 */
1512 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
1513 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
1514 } else {
1515 timer_mod(cpu->gt_timer[timeridx], nexttick);
1516 }
1517 trace_arm_gt_recalc(timeridx, nexttick);
1518 } else {
1519 /* Timer disabled: ISTATUS and timer output always clear */
1520 gt->ctl &= ~4;
1521 timer_del(cpu->gt_timer[timeridx]);
1522 trace_arm_gt_recalc_disabled(timeridx);
1523 }
1524 gt_update_irq(cpu, timeridx);
1525 }
1526
1527 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1528 int timeridx)
1529 {
1530 ARMCPU *cpu = env_archcpu(env);
1531
1532 timer_del(cpu->gt_timer[timeridx]);
1533 }
1534
1535 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1536 {
1537 uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_PHYS);
1538 return gt_get_countervalue(env) - offset;
1539 }
1540
1541 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1542 {
1543 uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
1544 return gt_get_countervalue(env) - offset;
1545 }
1546
1547 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1548 int timeridx,
1549 uint64_t value)
1550 {
1551 trace_arm_gt_cval_write(timeridx, value);
1552 env->cp15.c14_timer[timeridx].cval = value;
1553 gt_recalc_timer(env_archcpu(env), timeridx);
1554 }
1555
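/*
 * TVAL is the 32-bit "ticks remaining" view of a timer: a read returns
 * CVAL - (counter - offset) truncated to 32 bits, and a write (in
 * do_tval_write below) sets CVAL so that the timer fires roughly 'value'
 * ticks from now, treating the written value as signed.
 */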
1556 static uint64_t do_tval_read(CPUARMState *env, int timeridx, uint64_t offset)
1557 {
1558 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1559 (gt_get_countervalue(env) - offset));
1560 }
1561
1562 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1563 int timeridx)
1564 {
1565 uint64_t offset = gt_direct_access_timer_offset(env, timeridx);
1566
1567 return do_tval_read(env, timeridx, offset);
1568 }
1569
1570 static void do_tval_write(CPUARMState *env, int timeridx, uint64_t value,
1571 uint64_t offset)
1572 {
1573 trace_arm_gt_tval_write(timeridx, value);
1574 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1575 sextract64(value, 0, 32);
1576 gt_recalc_timer(env_archcpu(env), timeridx);
1577 }
1578
1579 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1580 int timeridx,
1581 uint64_t value)
1582 {
1583 uint64_t offset = gt_direct_access_timer_offset(env, timeridx);
1584
1585 do_tval_write(env, timeridx, value, offset);
1586 }
1587
1588 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1589 int timeridx,
1590 uint64_t value)
1591 {
1592 ARMCPU *cpu = env_archcpu(env);
1593 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1594
1595 trace_arm_gt_ctl_write(timeridx, value);
1596 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1597 if ((oldval ^ value) & 1) {
1598 /* Enable toggled */
1599 gt_recalc_timer(cpu, timeridx);
1600 } else if ((oldval ^ value) & 2) {
1601 /*
1602 * IMASK toggled: don't need to recalculate,
1603 * just set the interrupt line based on ISTATUS
1604 */
1605 trace_arm_gt_imask_toggle(timeridx);
1606 gt_update_irq(cpu, timeridx);
1607 }
1608 }
1609
1610 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1611 {
1612 gt_timer_reset(env, ri, GTIMER_PHYS);
1613 }
1614
1615 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1616 uint64_t value)
1617 {
1618 gt_cval_write(env, ri, GTIMER_PHYS, value);
1619 }
1620
1621 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1622 {
1623 return gt_tval_read(env, ri, GTIMER_PHYS);
1624 }
1625
1626 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1627 uint64_t value)
1628 {
1629 gt_tval_write(env, ri, GTIMER_PHYS, value);
1630 }
1631
1632 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1633 uint64_t value)
1634 {
1635 gt_ctl_write(env, ri, GTIMER_PHYS, value);
1636 }
1637
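/*
 * When running in the EL2&0 regime (HCR_EL2.E2H set), accesses to the
 * EL0 physical and virtual timers are redirected to the EL2 physical
 * and virtual timers respectively; select the backing timer from the
 * current MMU index.
 */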
1638 static int gt_phys_redir_timeridx(CPUARMState *env)
1639 {
1640 switch (arm_mmu_idx(env)) {
1641 case ARMMMUIdx_E20_0:
1642 case ARMMMUIdx_E20_2:
1643 case ARMMMUIdx_E20_2_PAN:
1644 return GTIMER_HYP;
1645 default:
1646 return GTIMER_PHYS;
1647 }
1648 }
1649
1650 static int gt_virt_redir_timeridx(CPUARMState *env)
1651 {
1652 switch (arm_mmu_idx(env)) {
1653 case ARMMMUIdx_E20_0:
1654 case ARMMMUIdx_E20_2:
1655 case ARMMMUIdx_E20_2_PAN:
1656 return GTIMER_HYPVIRT;
1657 default:
1658 return GTIMER_VIRT;
1659 }
1660 }
1661
1662 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
1663 const ARMCPRegInfo *ri)
1664 {
1665 int timeridx = gt_phys_redir_timeridx(env);
1666 return env->cp15.c14_timer[timeridx].cval;
1667 }
1668
1669 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1670 uint64_t value)
1671 {
1672 int timeridx = gt_phys_redir_timeridx(env);
1673 gt_cval_write(env, ri, timeridx, value);
1674 }
1675
1676 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
1677 const ARMCPRegInfo *ri)
1678 {
1679 int timeridx = gt_phys_redir_timeridx(env);
1680 return gt_tval_read(env, ri, timeridx);
1681 }
1682
1683 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1684 uint64_t value)
1685 {
1686 int timeridx = gt_phys_redir_timeridx(env);
1687 gt_tval_write(env, ri, timeridx, value);
1688 }
1689
1690 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
1691 const ARMCPRegInfo *ri)
1692 {
1693 int timeridx = gt_phys_redir_timeridx(env);
1694 return env->cp15.c14_timer[timeridx].ctl;
1695 }
1696
1697 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1698 uint64_t value)
1699 {
1700 int timeridx = gt_phys_redir_timeridx(env);
1701 gt_ctl_write(env, ri, timeridx, value);
1702 }
1703
1704 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1705 {
1706 gt_timer_reset(env, ri, GTIMER_VIRT);
1707 }
1708
1709 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1710 uint64_t value)
1711 {
1712 gt_cval_write(env, ri, GTIMER_VIRT, value);
1713 }
1714
1715 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1716 {
1717 /*
1718 * This is CNTV_TVAL_EL02; unlike the underlying CNTV_TVAL_EL0
1719 * we always apply CNTVOFF_EL2. Special case that here rather
1720 * than going into the generic gt_tval_read() and then having
1721 * to re-detect that it's this register.
1722 * Note that the accessfn/perms mean we know we're at EL2 or EL3 here.
1723 */
1724 return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2);
1725 }
1726
1727 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1728 uint64_t value)
1729 {
1730 /* Similarly for writes to CNTV_TVAL_EL02 */
1731 do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2);
1732 }
1733
1734 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1735 uint64_t value)
1736 {
1737 gt_ctl_write(env, ri, GTIMER_VIRT, value);
1738 }
1739
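/*
 * CNTHCTL_EL2: mask the write down to the bits that exist for the
 * implemented features (FEAT_RME adds CNTVMASK/CNTPMASK, FEAT_ECV adds
 * the EL1 trap controls and the ECV enable), then re-evaluate the timer
 * interrupt lines if a *MASK bit changed.
 */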
1740 static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1741 uint64_t value)
1742 {
1743 ARMCPU *cpu = env_archcpu(env);
1744 uint32_t oldval = env->cp15.cnthctl_el2;
1745 uint32_t valid_mask =
1746 R_CNTHCTL_EL0PCTEN_E2H1_MASK |
1747 R_CNTHCTL_EL0VCTEN_E2H1_MASK |
1748 R_CNTHCTL_EVNTEN_MASK |
1749 R_CNTHCTL_EVNTDIR_MASK |
1750 R_CNTHCTL_EVNTI_MASK |
1751 R_CNTHCTL_EL0VTEN_MASK |
1752 R_CNTHCTL_EL0PTEN_MASK |
1753 R_CNTHCTL_EL1PCTEN_E2H1_MASK |
1754 R_CNTHCTL_EL1PTEN_MASK;
1755
1756 if (cpu_isar_feature(aa64_rme, cpu)) {
1757 valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK;
1758 }
1759 if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
1760 valid_mask |=
1761 R_CNTHCTL_EL1TVT_MASK |
1762 R_CNTHCTL_EL1TVCT_MASK |
1763 R_CNTHCTL_EL1NVPCT_MASK |
1764 R_CNTHCTL_EL1NVVCT_MASK |
1765 R_CNTHCTL_EVNTIS_MASK;
1766 }
1767 if (cpu_isar_feature(aa64_ecv, cpu)) {
1768 valid_mask |= R_CNTHCTL_ECV_MASK;
1769 }
1770
1771 /* Clear RES0 bits */
1772 value &= valid_mask;
1773
1774 raw_write(env, ri, value);
1775
1776 if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) {
1777 gt_update_irq(cpu, GTIMER_VIRT);
1778 } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) {
1779 gt_update_irq(cpu, GTIMER_PHYS);
1780 }
1781 }
1782
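/*
 * Writes to CNTVOFF_EL2 move the virtual counter, so the virtual
 * timer's deadline must be recalculated.
 */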
1783 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1784 uint64_t value)
1785 {
1786 ARMCPU *cpu = env_archcpu(env);
1787
1788 trace_arm_gt_cntvoff_write(value);
1789 raw_write(env, ri, value);
1790 gt_recalc_timer(cpu, GTIMER_VIRT);
1791 }
1792
1793 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
1794 const ARMCPRegInfo *ri)
1795 {
1796 int timeridx = gt_virt_redir_timeridx(env);
1797 return env->cp15.c14_timer[timeridx].cval;
1798 }
1799
1800 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1801 uint64_t value)
1802 {
1803 int timeridx = gt_virt_redir_timeridx(env);
1804 gt_cval_write(env, ri, timeridx, value);
1805 }
1806
1807 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
1808 const ARMCPRegInfo *ri)
1809 {
1810 int timeridx = gt_virt_redir_timeridx(env);
1811 return gt_tval_read(env, ri, timeridx);
1812 }
1813
1814 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1815 uint64_t value)
1816 {
1817 int timeridx = gt_virt_redir_timeridx(env);
1818 gt_tval_write(env, ri, timeridx, value);
1819 }
1820
1821 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
1822 const ARMCPRegInfo *ri)
1823 {
1824 int timeridx = gt_virt_redir_timeridx(env);
1825 return env->cp15.c14_timer[timeridx].ctl;
1826 }
1827
1828 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1829 uint64_t value)
1830 {
1831 int timeridx = gt_virt_redir_timeridx(env);
1832 gt_ctl_write(env, ri, timeridx, value);
1833 }
1834
1835 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1836 {
1837 gt_timer_reset(env, ri, GTIMER_HYP);
1838 }
1839
1840 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1841 uint64_t value)
1842 {
1843 gt_cval_write(env, ri, GTIMER_HYP, value);
1844 }
1845
1846 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1847 {
1848 return gt_tval_read(env, ri, GTIMER_HYP);
1849 }
1850
1851 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1852 uint64_t value)
1853 {
1854 gt_tval_write(env, ri, GTIMER_HYP, value);
1855 }
1856
1857 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1858 uint64_t value)
1859 {
1860 gt_ctl_write(env, ri, GTIMER_HYP, value);
1861 }
1862
1863 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1864 {
1865 gt_timer_reset(env, ri, GTIMER_SEC);
1866 }
1867
1868 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1869 uint64_t value)
1870 {
1871 gt_cval_write(env, ri, GTIMER_SEC, value);
1872 }
1873
1874 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1875 {
1876 return gt_tval_read(env, ri, GTIMER_SEC);
1877 }
1878
1879 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1880 uint64_t value)
1881 {
1882 gt_tval_write(env, ri, GTIMER_SEC, value);
1883 }
1884
1885 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1886 uint64_t value)
1887 {
1888 gt_ctl_write(env, ri, GTIMER_SEC, value);
1889 }
1890
1891 static void gt_sec_pel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1892 {
1893 gt_timer_reset(env, ri, GTIMER_S_EL2_PHYS);
1894 }
1895
1896 static void gt_sec_pel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1897 uint64_t value)
1898 {
1899 gt_cval_write(env, ri, GTIMER_S_EL2_PHYS, value);
1900 }
1901
1902 static uint64_t gt_sec_pel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1903 {
1904 return gt_tval_read(env, ri, GTIMER_S_EL2_PHYS);
1905 }
1906
1907 static void gt_sec_pel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1908 uint64_t value)
1909 {
1910 gt_tval_write(env, ri, GTIMER_S_EL2_PHYS, value);
1911 }
1912
1913 static void gt_sec_pel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1914 uint64_t value)
1915 {
1916 gt_ctl_write(env, ri, GTIMER_S_EL2_PHYS, value);
1917 }
1918
1919 static void gt_sec_vel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1920 {
1921 gt_timer_reset(env, ri, GTIMER_S_EL2_VIRT);
1922 }
1923
1924 static void gt_sec_vel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1925 uint64_t value)
1926 {
1927 gt_cval_write(env, ri, GTIMER_S_EL2_VIRT, value);
1928 }
1929
1930 static uint64_t gt_sec_vel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1931 {
1932 return gt_tval_read(env, ri, GTIMER_S_EL2_VIRT);
1933 }
1934
1935 static void gt_sec_vel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1936 uint64_t value)
1937 {
1938 gt_tval_write(env, ri, GTIMER_S_EL2_VIRT, value);
1939 }
1940
1941 static void gt_sec_vel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1942 uint64_t value)
1943 {
1944 gt_ctl_write(env, ri, GTIMER_S_EL2_VIRT, value);
1945 }
1946
1947 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1948 {
1949 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
1950 }
1951
1952 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1953 uint64_t value)
1954 {
1955 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
1956 }
1957
1958 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1959 {
1960 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
1961 }
1962
1963 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1964 uint64_t value)
1965 {
1966 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
1967 }
1968
1969 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1970 uint64_t value)
1971 {
1972 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
1973 }
1974
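/*
 * QEMUTimer callbacks: when a deadline programmed by gt_recalc_timer()
 * expires, recompute the timer state and interrupt line.
 */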
1975 void arm_gt_ptimer_cb(void *opaque)
1976 {
1977 ARMCPU *cpu = opaque;
1978
1979 gt_recalc_timer(cpu, GTIMER_PHYS);
1980 }
1981
1982 void arm_gt_vtimer_cb(void *opaque)
1983 {
1984 ARMCPU *cpu = opaque;
1985
1986 gt_recalc_timer(cpu, GTIMER_VIRT);
1987 }
1988
1989 void arm_gt_htimer_cb(void *opaque)
1990 {
1991 ARMCPU *cpu = opaque;
1992
1993 gt_recalc_timer(cpu, GTIMER_HYP);
1994 }
1995
1996 void arm_gt_stimer_cb(void *opaque)
1997 {
1998 ARMCPU *cpu = opaque;
1999
2000 gt_recalc_timer(cpu, GTIMER_SEC);
2001 }
2002
2003 void arm_gt_sel2timer_cb(void *opaque)
2004 {
2005 ARMCPU *cpu = opaque;
2006
2007 gt_recalc_timer(cpu, GTIMER_S_EL2_PHYS);
2008 }
2009
2010 void arm_gt_sel2vtimer_cb(void *opaque)
2011 {
2012 ARMCPU *cpu = opaque;
2013
2014 gt_recalc_timer(cpu, GTIMER_S_EL2_VIRT);
2015 }
2016
2017 void arm_gt_hvtimer_cb(void *opaque)
2018 {
2019 ARMCPU *cpu = opaque;
2020
2021 gt_recalc_timer(cpu, GTIMER_HYPVIRT);
2022 }
2023
2024 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2025 /*
2026 * Note that CNTFRQ is purely reads-as-written for the benefit
2027 * of software; writing it doesn't actually change the timer frequency.
2028 * Our reset value matches the fixed frequency we implement the timer at.
2029 */
2030 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2031 .type = ARM_CP_ALIAS,
2032 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2033 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2034 },
2035 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2036 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2037 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2038 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2039 .resetfn = arm_gt_cntfrq_reset,
2040 },
2041 /* overall control: mostly access permissions */
2042 { .name = "CNTKCTL_EL1", .state = ARM_CP_STATE_BOTH,
2043 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2044 .access = PL1_RW,
2045 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 14, 1, 0),
2046 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 14, 1, 0),
2047 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2048 .resetvalue = 0,
2049 },
2050 /* per-timer control */
2051 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2052 .secure = ARM_CP_SECSTATE_NS,
2053 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2054 .accessfn = gt_ptimer_access,
2055 .fieldoffset = offsetoflow32(CPUARMState,
2056 cp15.c14_timer[GTIMER_PHYS].ctl),
2057 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2058 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
2059 },
2060 { .name = "CNTP_CTL_S",
2061 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2062 .secure = ARM_CP_SECSTATE_S,
2063 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2064 .accessfn = gt_ptimer_access,
2065 .fieldoffset = offsetoflow32(CPUARMState,
2066 cp15.c14_timer[GTIMER_SEC].ctl),
2067 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2068 },
2069 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2070 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2071 .type = ARM_CP_IO, .access = PL0_RW,
2072 .accessfn = gt_ptimer_access,
2073 .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
2074 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2075 .resetvalue = 0,
2076 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2077 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
2078 },
2079 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2080 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2081 .accessfn = gt_vtimer_access,
2082 .fieldoffset = offsetoflow32(CPUARMState,
2083 cp15.c14_timer[GTIMER_VIRT].ctl),
2084 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2085 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
2086 },
2087 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2088 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2089 .type = ARM_CP_IO, .access = PL0_RW,
2090 .accessfn = gt_vtimer_access,
2091 .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
2092 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2093 .resetvalue = 0,
2094 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2095 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
2096 },
2097 /* TimerValue views: a 32 bit downcounting view of the underlying state */
2098 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2099 .secure = ARM_CP_SECSTATE_NS,
2100 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2101 .accessfn = gt_ptimer_access,
2102 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
2103 },
2104 { .name = "CNTP_TVAL_S",
2105 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2106 .secure = ARM_CP_SECSTATE_S,
2107 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2108 .accessfn = gt_ptimer_access,
2109 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2110 },
2111 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2112 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2113 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2114 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2115 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
2116 },
2117 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2118 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2119 .accessfn = gt_vtimer_access,
2120 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
2121 },
2122 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2123 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2124 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2125 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2126 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
2127 },
2128 /* The counter itself */
2129 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2130 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2131 .accessfn = gt_pct_access,
2132 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2133 },
2134 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2135 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2136 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2137 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2138 },
2139 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2140 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2141 .accessfn = gt_vct_access,
2142 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2143 },
2144 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2145 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2146 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2147 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2148 },
2149 /* Comparison value, indicating when the timer goes off */
2150 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2151 .secure = ARM_CP_SECSTATE_NS,
2152 .access = PL0_RW,
2153 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2154 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2155 .accessfn = gt_ptimer_access,
2156 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
2157 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
2158 },
2159 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
2160 .secure = ARM_CP_SECSTATE_S,
2161 .access = PL0_RW,
2162 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2163 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2164 .accessfn = gt_ptimer_access,
2165 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2166 },
2167 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2168 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2169 .access = PL0_RW,
2170 .type = ARM_CP_IO,
2171 .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
2172 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2173 .resetvalue = 0, .accessfn = gt_ptimer_access,
2174 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
2175 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
2176 },
2177 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2178 .access = PL0_RW,
2179 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2180 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2181 .accessfn = gt_vtimer_access,
2182 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
2183 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
2184 },
2185 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2186 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2187 .access = PL0_RW,
2188 .type = ARM_CP_IO,
2189 .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
2190 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2191 .resetvalue = 0, .accessfn = gt_vtimer_access,
2192 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
2193 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
2194 },
2195 /*
2196 * Secure timer -- this is actually restricted to only EL3
2197 * and configurably Secure-EL1 via the accessfn.
2198 */
2199 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2200 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2201 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2202 .accessfn = gt_stimer_access,
2203 .readfn = gt_sec_tval_read,
2204 .writefn = gt_sec_tval_write,
2205 .resetfn = gt_sec_timer_reset,
2206 },
2207 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2208 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2209 .type = ARM_CP_IO, .access = PL1_RW,
2210 .accessfn = gt_stimer_access,
2211 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2212 .resetvalue = 0,
2213 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2214 },
2215 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2216 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2217 .type = ARM_CP_IO, .access = PL1_RW,
2218 .accessfn = gt_stimer_access,
2219 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2220 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2221 },
2222 };
2223
2224 /*
2225 * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which
2226 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
2227 * so our implementations here are identical to the normal registers.
2228 */
2229 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
2230 { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
2231 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2232 .accessfn = gt_vct_access,
2233 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2234 },
2235 { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
2236 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
2237 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2238 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2239 },
2240 { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
2241 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2242 .accessfn = gt_pct_access,
2243 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2244 },
2245 { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
2246 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5,
2247 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2248 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2249 },
2250 };
2251
2252 static CPAccessResult gt_cntpoff_access(CPUARMState *env,
2253 const ARMCPRegInfo *ri,
2254 bool isread)
2255 {
2256 if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) &&
2257 !(env->cp15.scr_el3 & SCR_ECVEN)) {
2258 return CP_ACCESS_TRAP_EL3;
2259 }
2260 return CP_ACCESS_OK;
2261 }
2262
2263 static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2264 uint64_t value)
2265 {
2266 ARMCPU *cpu = env_archcpu(env);
2267
2268 trace_arm_gt_cntpoff_write(value);
2269 raw_write(env, ri, value);
2270 gt_recalc_timer(cpu, GTIMER_PHYS);
2271 }
2272
2273 static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
2274 .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
2275 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
2276 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
2277 .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
2278 .nv2_redirect_offset = 0x1a8,
2279 .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
2280 };
2281 #else
2282
2283 /*
2284 * In user-mode most of the generic timer registers are inaccessible;
2285 * however, modern kernels (4.12+) allow access to cntvct_el0.
2286 */
2287
2288 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2289 {
2290 ARMCPU *cpu = env_archcpu(env);
2291
2292 /*
2293 * Currently we have no support for QEMUTimer in linux-user so we
2294 * can't call gt_get_countervalue(env); instead we directly
2295 * call the lower-level functions.
2296 */
2297 return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
2298 }
2299
2300 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2301 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2302 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2303 .access = PL0_R /* no PL1_RW in linux-user */,
2304 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2305 .resetfn = arm_gt_cntfrq_reset,
2306 },
2307 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2308 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2309 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2310 .readfn = gt_virt_cnt_read,
2311 },
2312 };
2313
2314 /*
2315 * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it also
2316 * is exposed to userspace by Linux.
2317 */
2318 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
2319 { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
2320 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
2321 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2322 .readfn = gt_virt_cnt_read,
2323 },
2324 };
2325
2326 #endif
2327
2328 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2329 {
2330 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2331 raw_write(env, ri, value);
2332 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2333 raw_write(env, ri, value & 0xfffff6ff);
2334 } else {
2335 raw_write(env, ri, value & 0xfffff1ff);
2336 }
2337 }
2338
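/*
 * The PMSAv5 access permission registers exist in two layouts: the "simple"
 * DATA_AP/INSN_AP view packs region n's 2-bit AP field at bits [2n+1:2n],
 * while the "extended" view places it at bits [4n+1:4n] (e.g. region 2's
 * field sits at bits [5:4] in the simple view and [9:8] in the extended
 * view). We store the extended form and convert on simple-view accesses.
 */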
2339 /* Return basic MPU access permission bits. */
2340 static uint32_t simple_mpu_ap_bits(uint32_t val)
2341 {
2342 uint32_t ret;
2343 uint32_t mask;
2344 int i;
2345 ret = 0;
2346 mask = 3;
2347 for (i = 0; i < 16; i += 2) {
2348 ret |= (val >> i) & mask;
2349 mask <<= 2;
2350 }
2351 return ret;
2352 }
2353
2354 /* Pad basic MPU access permission bits to extended format. */
2355 static uint32_t extended_mpu_ap_bits(uint32_t val)
2356 {
2357 uint32_t ret;
2358 uint32_t mask;
2359 int i;
2360 ret = 0;
2361 mask = 3;
2362 for (i = 0; i < 16; i += 2) {
2363 ret |= (val & mask) << i;
2364 mask <<= 2;
2365 }
2366 return ret;
2367 }
2368
2369 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2370 uint64_t value)
2371 {
2372 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2373 }
2374
2375 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2376 {
2377 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2378 }
2379
2380 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2381 uint64_t value)
2382 {
2383 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2384 }
2385
2386 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2387 {
2388 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2389 }
2390
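/*
 * DRBAR/DRSR/DRACR are per-region arrays: the reginfo fieldoffset holds
 * a pointer to the array (NULL if no MPU regions are configured) and
 * RGNR (pmsav7.rnr) selects which element is accessed.
 */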
2391 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2392 {
2393 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2394
2395 if (!u32p) {
2396 return 0;
2397 }
2398
2399 u32p += env->pmsav7.rnr[M_REG_NS];
2400 return *u32p;
2401 }
2402
2403 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2404 uint64_t value)
2405 {
2406 ARMCPU *cpu = env_archcpu(env);
2407 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2408
2409 if (!u32p) {
2410 return;
2411 }
2412
2413 u32p += env->pmsav7.rnr[M_REG_NS];
2414 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2415 *u32p = value;
2416 }
2417
2418 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2419 uint64_t value)
2420 {
2421 ARMCPU *cpu = env_archcpu(env);
2422 uint32_t nrgs = cpu->pmsav7_dregion;
2423
2424 if (value >= nrgs) {
2425 qemu_log_mask(LOG_GUEST_ERROR,
2426 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2427 " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
2428 return;
2429 }
2430
2431 raw_write(env, ri, value);
2432 }
2433
2434 static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2435 uint64_t value)
2436 {
2437 ARMCPU *cpu = env_archcpu(env);
2438
2439 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2440 env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
2441 }
2442
2443 static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
2444 {
2445 return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
2446 }
2447
2448 static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2449 uint64_t value)
2450 {
2451 ARMCPU *cpu = env_archcpu(env);
2452
2453 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2454 env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
2455 }
2456
2457 static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
2458 {
2459 return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
2460 }
2461
2462 static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2463 uint64_t value)
2464 {
2465 ARMCPU *cpu = env_archcpu(env);
2466
2467 /*
2468 * Ignore writes that would select an unimplemented region.
2469 * This is architecturally UNPREDICTABLE.
2470 */
2471 if (value >= cpu->pmsav7_dregion) {
2472 return;
2473 }
2474
2475 env->pmsav7.rnr[M_REG_NS] = value;
2476 }
2477
2478 static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2479 uint64_t value)
2480 {
2481 ARMCPU *cpu = env_archcpu(env);
2482
2483 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2484 env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
2485 }
2486
2487 static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
2488 {
2489 return env->pmsav8.hprbar[env->pmsav8.hprselr];
2490 }
2491
2492 static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2493 uint64_t value)
2494 {
2495 ARMCPU *cpu = env_archcpu(env);
2496
2497 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2498 env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
2499 }
2500
2501 static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
2502 {
2503 return env->pmsav8.hprlar[env->pmsav8.hprselr];
2504 }
2505
2506 static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2507 uint64_t value)
2508 {
2509 uint32_t n;
2510 uint32_t bit;
2511 ARMCPU *cpu = env_archcpu(env);
2512
2513 /* Ignore writes to unimplemented regions */
2514 int rmax = MIN(cpu->pmsav8r_hdregion, 32);
2515 value &= MAKE_64BIT_MASK(0, rmax);
2516
2517 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2518
2519 /* Register alias is only valid for first 32 indexes */
2520 for (n = 0; n < rmax; ++n) {
2521 bit = extract32(value, n, 1);
2522 env->pmsav8.hprlar[n] = deposit32(
2523 env->pmsav8.hprlar[n], 0, 1, bit);
2524 }
2525 }
2526
2527 static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2528 {
2529 uint32_t n;
2530 uint32_t result = 0x0;
2531 ARMCPU *cpu = env_archcpu(env);
2532
2533 /* Register alias is only valid for first 32 indexes */
2534 for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
2535 if (env->pmsav8.hprlar[n] & 0x1) {
2536 result |= (0x1 << n);
2537 }
2538 }
2539 return result;
2540 }
2541
2542 static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2543 uint64_t value)
2544 {
2545 ARMCPU *cpu = env_archcpu(env);
2546
2547 /*
2548 * Ignore writes that would select an unimplemented region.
2549 * This is architecturally UNPREDICTABLE.
2550 */
2551 if (value >= cpu->pmsav8r_hdregion) {
2552 return;
2553 }
2554
2555 env->pmsav8.hprselr = value;
2556 }
2557
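/*
 * PMSAv8-R PRBAR<n>/PRLAR<n> (and HPRBAR<n>/HPRLAR<n>) accessors: the
 * region index is reassembled from the encoding (bit 4 from opc0, bits
 * [3:1] from crm, bit 0 from opc2[2]); opc2 bit 0 selects the LAR vs
 * BAR register and opc1 bit 2 selects the EL2 bank.
 */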
2558 static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
2559 uint64_t value)
2560 {
2561 ARMCPU *cpu = env_archcpu(env);
2562 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
2563 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
2564
2565 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2566
2567 if (ri->opc1 & 4) {
2568 if (index >= cpu->pmsav8r_hdregion) {
2569 return;
2570 }
2571 if (ri->opc2 & 0x1) {
2572 env->pmsav8.hprlar[index] = value;
2573 } else {
2574 env->pmsav8.hprbar[index] = value;
2575 }
2576 } else {
2577 if (index >= cpu->pmsav7_dregion) {
2578 return;
2579 }
2580 if (ri->opc2 & 0x1) {
2581 env->pmsav8.rlar[M_REG_NS][index] = value;
2582 } else {
2583 env->pmsav8.rbar[M_REG_NS][index] = value;
2584 }
2585 }
2586 }
2587
2588 static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
2589 {
2590 ARMCPU *cpu = env_archcpu(env);
2591 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
2592 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
2593
2594 if (ri->opc1 & 4) {
2595 if (index >= cpu->pmsav8r_hdregion) {
2596 return 0x0;
2597 }
2598 if (ri->opc2 & 0x1) {
2599 return env->pmsav8.hprlar[index];
2600 } else {
2601 return env->pmsav8.hprbar[index];
2602 }
2603 } else {
2604 if (index >= cpu->pmsav7_dregion) {
2605 return 0x0;
2606 }
2607 if (ri->opc2 & 0x1) {
2608 return env->pmsav8.rlar[M_REG_NS][index];
2609 } else {
2610 return env->pmsav8.rbar[M_REG_NS][index];
2611 }
2612 }
2613 }
2614
2615 static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
2616 { .name = "PRBAR",
2617 .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
2618 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2619 .accessfn = access_tvm_trvm,
2620 .readfn = prbar_read, .writefn = prbar_write },
2621 { .name = "PRLAR",
2622 .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
2623 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2624 .accessfn = access_tvm_trvm,
2625 .readfn = prlar_read, .writefn = prlar_write },
2626 { .name = "PRSELR", .resetvalue = 0,
2627 .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
2628 .access = PL1_RW, .accessfn = access_tvm_trvm,
2629 .writefn = prselr_write,
2630 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
2631 { .name = "HPRBAR", .resetvalue = 0,
2632 .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
2633 .access = PL2_RW, .type = ARM_CP_NO_RAW,
2634 .readfn = hprbar_read, .writefn = hprbar_write },
2635 { .name = "HPRLAR",
2636 .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
2637 .access = PL2_RW, .type = ARM_CP_NO_RAW,
2638 .readfn = hprlar_read, .writefn = hprlar_write },
2639 { .name = "HPRSELR", .resetvalue = 0,
2640 .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
2641 .access = PL2_RW,
2642 .writefn = hprselr_write,
2643 .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
2644 { .name = "HPRENR",
2645 .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
2646 .access = PL2_RW, .type = ARM_CP_NO_RAW,
2647 .readfn = hprenr_read, .writefn = hprenr_write },
2648 };
2649
2650 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2651 /*
2652 * Reset for all these registers is handled in arm_cpu_reset(),
2653 * because the PMSAv7 is also used by M-profile CPUs, which do
2654 * not register cpregs but still need the state to be reset.
2655 */
2656 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2657 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2658 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2659 .readfn = pmsav7_read, .writefn = pmsav7_write,
2660 .resetfn = arm_cp_reset_ignore },
2661 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2662 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2663 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2664 .readfn = pmsav7_read, .writefn = pmsav7_write,
2665 .resetfn = arm_cp_reset_ignore },
2666 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2667 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2668 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2669 .readfn = pmsav7_read, .writefn = pmsav7_write,
2670 .resetfn = arm_cp_reset_ignore },
2671 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2672 .access = PL1_RW,
2673 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
2674 .writefn = pmsav7_rgnr_write,
2675 .resetfn = arm_cp_reset_ignore },
2676 };
2677
2678 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2679 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2680 .access = PL1_RW, .type = ARM_CP_ALIAS,
2681 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2682 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2683 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2684 .access = PL1_RW, .type = ARM_CP_ALIAS,
2685 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2686 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2687 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2688 .access = PL1_RW,
2689 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2690 .resetvalue = 0, },
2691 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2692 .access = PL1_RW,
2693 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2694 .resetvalue = 0, },
2695 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2696 .access = PL1_RW,
2697 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2698 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2699 .access = PL1_RW,
2700 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2701 /* Protection region base and size registers */
2702 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2703 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2704 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2705 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2706 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2707 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2708 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2709 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2710 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2711 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2712 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2713 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2714 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2715 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2716 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2717 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2718 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2719 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2720 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2721 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2722 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2723 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2724 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2725 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2726 };
2727
2728 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2729 uint64_t value)
2730 {
2731 ARMCPU *cpu = env_archcpu(env);
2732
2733 if (!arm_feature(env, ARM_FEATURE_V8)) {
2734 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2735 /*
2736 * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2737 * using Long-descriptor translation table format
2738 */
2739 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2740 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2741 /*
2742 * In an implementation that includes the Security Extensions
2743 * TTBCR has additional fields PD0 [4] and PD1 [5] for
2744 * Short-descriptor translation table format.
2745 */
2746 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2747 } else {
2748 value &= TTBCR_N;
2749 }
2750 }
2751
2752 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2753 /*
2754 * With LPAE the TTBCR could result in a change of ASID
2755 * via the TTBCR.A1 bit, so do a TLB flush.
2756 */
2757 tlb_flush(CPU(cpu));
2758 }
2759 raw_write(env, ri, value);
2760 }
2761
2762 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
2763 uint64_t value)
2764 {
2765 ARMCPU *cpu = env_archcpu(env);
2766
2767 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2768 tlb_flush(CPU(cpu));
2769 raw_write(env, ri, value);
2770 }
2771
2772 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2773 uint64_t value)
2774 {
2775 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
2776 if (cpreg_field_type(ri) == MO_64 &&
2777 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
2778 ARMCPU *cpu = env_archcpu(env);
2779 tlb_flush(CPU(cpu));
2780 }
2781 raw_write(env, ri, value);
2782 }
2783
2784 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2785 uint64_t value)
2786 {
2787 /*
2788 * If we are running with E2&0 regime, then an ASID is active.
2789 * Flush if that might be changing. Note we're not checking
2790 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
2791 * holds the active ASID, only checking the field that might.
2792 */
2793 if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
2794 (arm_hcr_el2_eff(env) & HCR_E2H)) {
2795 uint16_t mask = ARMMMUIdxBit_E20_2 |
2796 ARMMMUIdxBit_E20_2_PAN |
2797 ARMMMUIdxBit_E20_2_GCS |
2798 ARMMMUIdxBit_E20_0 |
2799 ARMMMUIdxBit_E20_0_GCS;
2800 tlb_flush_by_mmuidx(env_cpu(env), mask);
2801 }
2802 raw_write(env, ri, value);
2803 }
2804
2805 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2806 uint64_t value)
2807 {
2808 ARMCPU *cpu = env_archcpu(env);
2809 CPUState *cs = CPU(cpu);
2810
2811 /*
2812 * A change in VMID to the stage2 page table (Stage2) invalidates
2813 * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0).
2814 */
2815 if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
2816 tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
2817 }
2818 raw_write(env, ri, value);
2819 }
2820
2821 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2822 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2823 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
2824 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2825 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2826 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2827 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
2828 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2829 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2830 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2831 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
2832 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2833 offsetof(CPUARMState, cp15.dfar_ns) } },
2834 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2835 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2836 .access = PL1_RW, .accessfn = access_tvm_trvm,
2837 .fgt = FGT_FAR_EL1,
2838 .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
2839 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 6, 0, 0),
2840 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 6, 0, 0),
2841 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2842 .resetvalue = 0, },
2843 };
2844
2845 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2846 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2847 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2848 .access = PL1_RW, .accessfn = access_tvm_trvm,
2849 .fgt = FGT_ESR_EL1,
2850 .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
2851 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 2, 0),
2852 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 2, 0),
2853 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2854 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2855 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2856 .access = PL1_RW, .accessfn = access_tvm_trvm,
2857 .fgt = FGT_TTBR0_EL1,
2858 .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
2859 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 0),
2860 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 0),
2861 .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
2862 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2863 offsetof(CPUARMState, cp15.ttbr0_ns) } },
2864 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2865 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2866 .access = PL1_RW, .accessfn = access_tvm_trvm,
2867 .fgt = FGT_TTBR1_EL1,
2868 .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
2869 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 1),
2870 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 1),
2871 .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
2872 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2873 offsetof(CPUARMState, cp15.ttbr1_ns) } },
2874 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2875 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2876 .access = PL1_RW, .accessfn = access_tvm_trvm,
2877 .fgt = FGT_TCR_EL1,
2878 .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
2879 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 2),
2880 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 2),
2881 .writefn = vmsa_tcr_el12_write,
2882 .raw_writefn = raw_write,
2883 .resetvalue = 0,
2884 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2885 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2886 .access = PL1_RW, .accessfn = access_tvm_trvm,
2887 .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2888 .raw_writefn = raw_write,
2889 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2890 offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2891 };
2892
2893 /*
2894 * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
2895 * qemu tlbs nor adjusting cached masks.
2896 */
2897 static const ARMCPRegInfo ttbcr2_reginfo = {
2898 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
2899 .access = PL1_RW, .accessfn = access_tvm_trvm,
2900 .type = ARM_CP_ALIAS,
2901 .bank_fieldoffsets = {
2902 offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
2903 offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
2904 },
2905 };
2906
2907 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2908 uint64_t value)
2909 {
2910 env->cp15.c15_ticonfig = value & 0xe7;
2911 /* The OS_TYPE bit in this register changes the reported CPUID! */
2912 env->cp15.c0_cpuid = (value & (1 << 5)) ?
2913 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2914 }
2915
2916 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2917 uint64_t value)
2918 {
2919 env->cp15.c15_threadid = value & 0xffff;
2920 }
2921
2922 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2923 uint64_t value)
2924 {
2925 #ifdef CONFIG_USER_ONLY
2926 g_assert_not_reached();
2927 #else
2928 /* Wait-for-interrupt (deprecated) */
2929 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
2930 #endif
2931 }
2932
2933 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2934 uint64_t value)
2935 {
2936 /*
2937 * On OMAP there are registers indicating the max/min index of dcache lines
2938 * containing a dirty line; cache flush operations have to reset these.
2939 */
2940 env->cp15.c15_i_max = 0x000;
2941 env->cp15.c15_i_min = 0xff0;
2942 }
2943
2944 static const ARMCPRegInfo omap_cp_reginfo[] = {
2945 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2946 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2947 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2948 .resetvalue = 0, },
2949 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2950 .access = PL1_RW, .type = ARM_CP_NOP },
2951 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2952 .access = PL1_RW,
2953 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2954 .writefn = omap_ticonfig_write },
2955 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2956 .access = PL1_RW,
2957 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2958 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2959 .access = PL1_RW, .resetvalue = 0xff0,
2960 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2961 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2962 .access = PL1_RW,
2963 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2964 .writefn = omap_threadid_write },
2965 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2966 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2967 .type = ARM_CP_NO_RAW,
2968 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2969 /*
2970 * TODO: Peripheral port remap register:
2971 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2972 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2973 * when MMU is off.
2974 */
2975 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2976 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2977 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2978 .writefn = omap_cachemaint_write },
2979 { .name = "C9", .cp = 15, .crn = 9,
2980 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2981 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2982 };
2983
2984 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2985 /*
2986 * RAZ/WI the whole crn=15 space, when we don't have a more specific
2987 * implementation of this implementation-defined space.
2988 * Ideally this should eventually disappear in favour of actually
2989 * implementing the correct behaviour for all cores.
2990 */
2991 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2992 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2993 .access = PL1_RW,
2994 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2995 .resetvalue = 0 },
2996 };
2997
2998 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2999 /* Cache status: RAZ because we have no cache so it's always clean */
3000 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
3001 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3002 .resetvalue = 0 },
3003 };
3004
3005 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
3006 /* We never have a block transfer operation in progress */
3007 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
3008 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3009 .resetvalue = 0 },
3010 /* The cache ops themselves: these all NOP for QEMU */
3011 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
3012 .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3013 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
3014 .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3015 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
3016 .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3017 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
3018 .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3019 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
3020 .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3021 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
3022 .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3023 };
3024
3025 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
3026 /*
3027 * The cache test-and-clean instructions always return (1 << 30)
3028 * to indicate that there are no dirty cache lines.
3029 */
3030 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
3031 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3032 .resetvalue = (1 << 30) },
3033 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
3034 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3035 .resetvalue = (1 << 30) },
3036 };
3037
3038 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
3039 /* Ignore ReadBuffer accesses */
3040 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
3041 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3042 .access = PL1_RW, .resetvalue = 0,
3043 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
3044 };
3045
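/*
 * MIDR reads from EL1 with EL2 enabled return VPIDR_EL2, so a
 * hypervisor can present a different CPU identity to its guest.
 */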
3046 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3047 {
3048 unsigned int cur_el = arm_current_el(env);
3049
3050 if (arm_is_el2_enabled(env) && cur_el == 1) {
3051 return env->cp15.vpidr_el2;
3052 }
3053 return raw_read(env, ri);
3054 }
3055
3056 static uint64_t mpidr_read_val(CPUARMState *env)
3057 {
3058 ARMCPU *cpu = env_archcpu(env);
3059 uint64_t mpidr = cpu->mp_affinity;
3060
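    /*
     * cpu->mp_affinity supplies the MPIDR Aff{3,2,1,0} fields; the MP
     * format bit (31) and the uniprocessor bit (30) are ORed in below
     * where applicable.
     */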
3061 if (arm_feature(env, ARM_FEATURE_V7MP)) {
3062 mpidr |= (1U << 31);
3063 /*
3064 * Cores which are uniprocessor (non-coherent)
3065 * but still implement the MP extensions set
3066 * bit 30. (For instance, Cortex-R5).
3067 */
3068 if (cpu->mp_is_up) {
3069 mpidr |= (1u << 30);
3070 }
3071 }
3072 return mpidr;
3073 }
3074
3075 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3076 {
3077 unsigned int cur_el = arm_current_el(env);
3078
3079 if (arm_is_el2_enabled(env) && cur_el == 1) {
3080 return env->cp15.vmpidr_el2;
3081 }
3082 return mpidr_read_val(env);
3083 }
3084
3085 static const ARMCPRegInfo lpae_cp_reginfo[] = {
3086 /* AMAIR0 is mapped to AMAIR_EL1[31:0] */
3087 { .name = "AMAIR_EL1", .state = ARM_CP_STATE_BOTH,
3088 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
3089 .access = PL1_RW, .accessfn = access_tvm_trvm,
3090 .fgt = FGT_AMAIR_EL1,
3091 .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
3092 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 3, 0),
3093 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 3, 0),
3094 .type = ARM_CP_CONST, .resetvalue = 0 },
3095 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
3096 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
3097 .access = PL1_RW, .accessfn = access_tvm_trvm,
3098 .type = ARM_CP_CONST, .resetvalue = 0 },
3099 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
3100 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
3101 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
3102 offsetof(CPUARMState, cp15.par_ns)} },
3103 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
3104 .access = PL1_RW, .accessfn = access_tvm_trvm,
3105 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3106 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3107 offsetof(CPUARMState, cp15.ttbr0_ns) },
3108 .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
3109 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
3110 .access = PL1_RW, .accessfn = access_tvm_trvm,
3111 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3112 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3113 offsetof(CPUARMState, cp15.ttbr1_ns) },
3114 .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
3115 };
3116
3117 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3118 {
3119 return vfp_get_fpcr(env);
3120 }
3121
3122 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3123 uint64_t value)
3124 {
3125 vfp_set_fpcr(env, value);
3126 }
3127
3128 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3129 {
3130 return vfp_get_fpsr(env);
3131 }
3132
3133 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3134 uint64_t value)
3135 {
3136 vfp_set_fpsr(env, value);
3137 }
3138
3139 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3140 bool isread)
3141 {
3142 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
3143 return CP_ACCESS_TRAP_EL1;
3144 }
3145 return CP_ACCESS_OK;
3146 }
3147
3148 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3149 uint64_t value)
3150 {
3151 env->daif = value & PSTATE_DAIF;
3152 }
3153
3154 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
3155 {
3156 return env->pstate & PSTATE_PAN;
3157 }
3158
3159 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
3160 uint64_t value)
3161 {
3162 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
3163 }
3164
3165 static const ARMCPRegInfo pan_reginfo = {
3166 .name = "PAN", .state = ARM_CP_STATE_AA64,
3167 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
3168 .type = ARM_CP_NO_RAW, .access = PL1_RW,
3169 .readfn = aa64_pan_read, .writefn = aa64_pan_write
3170 };
3171
3172 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
3173 {
3174 return env->pstate & PSTATE_UAO;
3175 }
3176
3177 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
3178 uint64_t value)
3179 {
3180 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
3181 }
3182
3183 static const ARMCPRegInfo uao_reginfo = {
3184 .name = "UAO", .state = ARM_CP_STATE_AA64,
3185 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
3186 .type = ARM_CP_NO_RAW, .access = PL1_RW,
3187 .readfn = aa64_uao_read, .writefn = aa64_uao_write
3188 };
3189
3190 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
3191 {
3192 return env->pstate & PSTATE_DIT;
3193 }
3194
3195 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
3196 uint64_t value)
3197 {
3198 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
3199 }
3200
3201 static const ARMCPRegInfo dit_reginfo = {
3202 .name = "DIT", .state = ARM_CP_STATE_AA64,
3203 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
3204 .type = ARM_CP_NO_RAW, .access = PL0_RW,
3205 .readfn = aa64_dit_read, .writefn = aa64_dit_write
3206 };
3207
3208 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
3209 {
3210 return env->pstate & PSTATE_SSBS;
3211 }
3212
3213 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
3214 uint64_t value)
3215 {
3216 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
3217 }
3218
3219 static const ARMCPRegInfo ssbs_reginfo = {
3220 .name = "SSBS", .state = ARM_CP_STATE_AA64,
3221 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
3222 .type = ARM_CP_NO_RAW, .access = PL0_RW,
3223 .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
3224 };
3225
3226 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
3227 const ARMCPRegInfo *ri,
3228 bool isread)
3229 {
3230 /* Cache invalidate/clean to Point of Coherency or Persistence... */
3231 switch (arm_current_el(env)) {
3232 case 0:
3233 /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */
3234 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
3235 return CP_ACCESS_TRAP_EL1;
3236 }
3237 /* fall through */
3238 case 1:
3239 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
3240 if (arm_hcr_el2_eff(env) & HCR_TPCP) {
3241 return CP_ACCESS_TRAP_EL2;
3242 }
3243 break;
3244 }
3245 return CP_ACCESS_OK;
3246 }
3247
3248 static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
3249 {
3250 /* Cache invalidate/clean to Point of Unification... */
3251 switch (arm_current_el(env)) {
3252 case 0:
3253 /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */
3254 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
3255 return CP_ACCESS_TRAP_EL1;
3256 }
3257 /* fall through */
3258 case 1:
3259 /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. */
3260 if (arm_hcr_el2_eff(env) & hcrflags) {
3261 return CP_ACCESS_TRAP_EL2;
3262 }
3263 break;
3264 }
3265 return CP_ACCESS_OK;
3266 }
3267
3268 static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
3269 bool isread)
3270 {
3271 return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
3272 }
3273
3274 static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
3275 bool isread)
3276 {
3277 return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
3278 }
3279
3280 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3281 bool isread)
3282 {
3283 int cur_el = arm_current_el(env);
3284
3285 if (cur_el < 2) {
3286 uint64_t hcr = arm_hcr_el2_eff(env);
3287
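        /*
         * For EL0: when EL2 is the host (E2H and TGE both set),
         * SCTLR_EL2.DZE gates DC ZVA; otherwise SCTLR_EL1.DZE does,
         * with HCR_EL2.TDZ additionally able to trap the access to EL2.
         * For EL1, only the HCR_EL2.TDZ trap applies.
         */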
3288 if (cur_el == 0) {
3289 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
3290 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
3291 return CP_ACCESS_TRAP_EL2;
3292 }
3293 } else {
3294 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3295 return CP_ACCESS_TRAP_EL1;
3296 }
3297 if (hcr & HCR_TDZ) {
3298 return CP_ACCESS_TRAP_EL2;
3299 }
3300 }
3301 } else if (hcr & HCR_TDZ) {
3302 return CP_ACCESS_TRAP_EL2;
3303 }
3304 }
3305 return CP_ACCESS_OK;
3306 }
3307
3308 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3309 {
3310 ARMCPU *cpu = env_archcpu(env);
3311 int dzp_bit = 1 << 4;
3312
3313 /* DZP indicates whether DC ZVA access is allowed */
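    /*
     * For example, with a dcz_blocksize of 4 (a 2^4-word, i.e. 64-byte,
     * zeroing block) this returns 0x4 when DC ZVA is permitted and
     * 0x14 (DZP set) when it is not.
     */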
3314 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3315 dzp_bit = 0;
3316 }
3317 return cpu->dcz_blocksize | dzp_bit;
3318 }
3319
3320 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3321 bool isread)
3322 {
3323 if (!(env->pstate & PSTATE_SP)) {
3324 /*
3325 * Access to SP_EL0 is undefined if it's being used as
3326 * the stack pointer.
3327 */
3328 return CP_ACCESS_UNDEFINED;
3329 }
3330 return CP_ACCESS_OK;
3331 }
3332
3333 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3334 {
3335 return env->pstate & PSTATE_SP;
3336 }
3337
3338 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3339 {
3340 update_spsel(env, val);
3341 }
3342
3343 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3344 uint64_t value)
3345 {
3346 ARMCPU *cpu = env_archcpu(env);
3347
3348 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
3349 /* M bit is RAZ/WI for PMSA with no MPU implemented */
3350 value &= ~SCTLR_M;
3351 }
3352
3353 /* ??? Lots of these bits are not implemented. */
3354
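    /*
     * Without FEAT_MTE the tag-check and allocation-tag control bits
     * (ITFSB, TCF*, ATA*) are RES0, so mask them out of the value we
     * store.
     */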
3355 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
3356 if (ri->opc1 == 6) { /* SCTLR_EL3 */
3357 value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
3358 } else {
3359 value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
3360 SCTLR_ATA0 | SCTLR_ATA);
3361 }
3362 }
3363
3364 if (raw_read(env, ri) == value) {
3365 /*
3366 * Skip the TLB flush if nothing actually changed; Linux likes
3367 * to do a lot of pointless SCTLR writes.
3368 */
3369 return;
3370 }
3371
3372 raw_write(env, ri, value);
3373
3374 /* This may enable/disable the MMU, so do a TLB flush. */
3375 tlb_flush(CPU(cpu));
3376 }
3377
3378 static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3379 uint64_t value)
3380 {
3381 /*
3382 * Some MDCR_EL3 bits affect whether PMU counters are running:
3383 * if we are trying to change any of those then we must
3384 * bracket this update with PMU start/finish calls.
3385 */
3386 bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
3387
3388 if (pmu_op) {
3389 pmu_op_start(env);
3390 }
3391 env->cp15.mdcr_el3 = value;
3392 if (pmu_op) {
3393 pmu_op_finish(env);
3394 }
3395 }
3396
3397 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3398 uint64_t value)
3399 {
3400 /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
3401 mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
3402 }
3403
3404 static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3405 uint64_t value)
3406 {
3407 /*
3408 * Some MDCR_EL2 bits affect whether PMU counters are running:
3409 * if we are trying to change any of those then we must
3410 * bracket this update with PMU start/finish calls.
3411 */
3412 bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
3413
3414 if (pmu_op) {
3415 pmu_op_start(env);
3416 }
3417 env->cp15.mdcr_el2 = value;
3418 if (pmu_op) {
3419 pmu_op_finish(env);
3420 }
3421 }
3422
3423 static CPAccessResult access_nv1_with_nvx(uint64_t hcr_nv)
3424 {
3425 return hcr_nv == (HCR_NV | HCR_NV1) ? CP_ACCESS_TRAP_EL2 : CP_ACCESS_OK;
3426 }
3427
3428 static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
3429 bool isread)
3430 {
3431 if (arm_current_el(env) == 1) {
3432 return access_nv1_with_nvx(arm_hcr_el2_nvx_eff(env));
3433 }
3434 return CP_ACCESS_OK;
3435 }
3436
3437 static CPAccessResult access_nv1_or_exlock_el1(CPUARMState *env,
3438 const ARMCPRegInfo *ri,
3439 bool isread)
3440 {
3441 if (arm_current_el(env) == 1) {
3442 uint64_t nvx = arm_hcr_el2_nvx_eff(env);
3443
3444 if (!isread &&
3445 (env->pstate & PSTATE_EXLOCK) &&
3446 (env->cp15.gcscr_el[1] & GCSCR_EXLOCKEN) &&
3447 !(nvx & HCR_NV1)) {
3448 return CP_ACCESS_EXLOCK;
3449 }
3450 return access_nv1_with_nvx(nvx);
3451 }
3452
3453 /*
3454 * At EL2, since VHE redirection is done at translation time,
3455 * el_is_in_host is always false here, so EXLOCK does not apply.
3456 */
3457 return CP_ACCESS_OK;
3458 }
3459
3460 static CPAccessResult access_exlock_el2(CPUARMState *env,
3461 const ARMCPRegInfo *ri, bool isread)
3462 {
3463 int el = arm_current_el(env);
3464
3465 if (el == 3) {
3466 return CP_ACCESS_OK;
3467 }
3468
3469 /*
3470 * Access to the EL2 register from EL1 means NV is set, and
3471 * EXLOCK has priority over an NV1 trap to EL2.
3472 */
3473 if (!isread &&
3474 (env->pstate & PSTATE_EXLOCK) &&
3475 (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN)) {
3476 return CP_ACCESS_EXLOCK;
3477 }
3478 return CP_ACCESS_OK;
3479 }
3480
3481 static CPAccessResult access_exlock_el3(CPUARMState *env,
3482 const ARMCPRegInfo *ri, bool isread)
3483 {
3484 if (!isread &&
3485 (env->pstate & PSTATE_EXLOCK) &&
3486 (env->cp15.gcscr_el[3] & GCSCR_EXLOCKEN)) {
3487 return CP_ACCESS_EXLOCK;
3488 }
3489 return CP_ACCESS_OK;
3490 }
3491
3492 #ifdef CONFIG_USER_ONLY
3493 /*
3494 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
3495 * code to get around W^X restrictions, where one region is writable and the
3496 * other is executable.
3497 *
3498 * Since the executable region is never written to, we cannot detect code
3499 * changes when running in user mode, and rely on the emulated JIT telling us
3500 * that the code has changed by executing this instruction.
3501 */
3502 static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
3503 uint64_t value)
3504 {
3505 uint64_t icache_line_mask, start_address, end_address;
3506 const ARMCPU *cpu;
3507
3508 cpu = env_archcpu(env);
3509
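    /*
     * CTR_EL0.IminLine (bits [3:0]) is log2 of the smallest I-cache line
     * in 4-byte words, so (4 << IminLine) is the line size in bytes;
     * e.g. IminLine = 4 gives a 64-byte line and a mask of 0x3f.
     */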
3510 icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
3511 start_address = value & ~icache_line_mask;
3512 end_address = value | icache_line_mask;
3513
3514 mmap_lock();
3515
3516 tb_invalidate_phys_range(env_cpu(env), start_address, end_address);
3517
3518 mmap_unlock();
3519 }
3520 #endif
3521
3522 static const ARMCPRegInfo v8_cp_reginfo[] = {
3523 /*
3524 * Minimal set of EL0-visible registers. This will need to be expanded
3525 * significantly for system emulation of AArch64 CPUs.
3526 */
3527 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3528 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3529 .access = PL0_RW, .type = ARM_CP_NZCV },
3530 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3531 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3532 .type = ARM_CP_NO_RAW,
3533 .access = PL0_RW, .accessfn = aa64_daif_access,
3534 .fieldoffset = offsetof(CPUARMState, daif),
3535 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3536 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3537 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3538 .access = PL0_RW, .type = ARM_CP_FPU,
3539 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3540 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3541 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3542 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
3543 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3544 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3545 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3546 .access = PL0_R, .type = ARM_CP_NO_RAW,
3547 .fgt = FGT_DCZID_EL0,
3548 .readfn = aa64_dczid_read },
3549 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3550 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3551 .access = PL0_W, .type = ARM_CP_DC_ZVA,
3552 #ifndef CONFIG_USER_ONLY
3553 /* Avoid overhead of an access check that always passes in user-mode */
3554 .accessfn = aa64_zva_access,
3555 .fgt = FGT_DCZVA,
3556 #endif
3557 },
3558 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3559 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3560 .access = PL1_R, .type = ARM_CP_CURRENTEL },
3561 /*
3562 * Instruction cache ops. All of these except `IC IVAU` are NOPs because we
3563 * don't emulate caches.
3564 */
3565 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3566 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3567 .access = PL1_W, .type = ARM_CP_NOP,
3568 .fgt = FGT_ICIALLUIS,
3569 .accessfn = access_ticab },
3570 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3571 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3572 .access = PL1_W, .type = ARM_CP_NOP,
3573 .fgt = FGT_ICIALLU,
3574 .accessfn = access_tocu },
3575 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3576 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3577 .access = PL0_W,
3578 .fgt = FGT_ICIVAU,
3579 .accessfn = access_tocu,
3580 #ifdef CONFIG_USER_ONLY
3581 .type = ARM_CP_NO_RAW,
3582 .writefn = ic_ivau_write
3583 #else
3584 .type = ARM_CP_NOP
3585 #endif
3586 },
3587 /* Cache ops: all NOPs since we don't emulate caches */
3588 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3589 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3590 .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
3591 .fgt = FGT_DCIVAC,
3592 .type = ARM_CP_NOP },
3593 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3594 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3595 .fgt = FGT_DCISW,
3596 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
3597 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3598 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3599 .access = PL0_W, .type = ARM_CP_NOP,
3600 .fgt = FGT_DCCVAC,
3601 .accessfn = aa64_cacheop_poc_access },
3602 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3603 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3604 .fgt = FGT_DCCSW,
3605 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
3606 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3607 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3608 .access = PL0_W, .type = ARM_CP_NOP,
3609 .fgt = FGT_DCCVAU,
3610 .accessfn = access_tocu },
3611 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3612 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3613 .access = PL0_W, .type = ARM_CP_NOP,
3614 .fgt = FGT_DCCIVAC,
3615 .accessfn = aa64_cacheop_poc_access },
3616 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3617 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3618 .fgt = FGT_DCCISW,
3619 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
3620 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3621 .type = ARM_CP_ALIAS,
3622 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3623 .access = PL1_RW, .resetvalue = 0,
3624 .fgt = FGT_PAR_EL1,
3625 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3626 .writefn = par_write },
3627 /* 32 bit cache operations */
3628 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3629 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
3630 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3631 .type = ARM_CP_NOP, .access = PL1_W },
3632 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3633 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
3634 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3635 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
3636 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3637 .type = ARM_CP_NOP, .access = PL1_W },
3638 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3639 .type = ARM_CP_NOP, .access = PL1_W },
3640 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3641 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
3642 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3643 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
3644 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3645 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
3646 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3647 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
3648 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3649 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
3650 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3651 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
3652 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3653 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
3654 /* MMU Domain access control / MPU write buffer control */
3655 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3656 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
3657 .writefn = dacr_write, .raw_writefn = raw_write,
3658 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3659 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3660 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3661 .type = ARM_CP_ALIAS,
3662 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3663 .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
3664 .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
3665 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 1),
3666 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 1),
3667 .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3668 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3669 .type = ARM_CP_ALIAS,
3670 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3671 .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
3672 .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
3673 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 0),
3674 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 0),
3675 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3676 /*
3677 * We rely on the access checks not allowing the guest to write to the
3678 * state field when SPSel indicates that it's being used as the stack
3679 * pointer.
3680 */
3681 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3682 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3683 .access = PL1_RW, .accessfn = sp_el0_access,
3684 .type = ARM_CP_ALIAS,
3685 .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3686 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3687 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3688 .nv2_redirect_offset = 0x240,
3689 .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
3690 .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3691 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3692 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3693 .type = ARM_CP_NO_RAW,
3694 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3695 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3696 .type = ARM_CP_ALIAS,
3697 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3698 .access = PL2_RW,
3699 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3700 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3701 .type = ARM_CP_ALIAS,
3702 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3703 .access = PL2_RW,
3704 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3705 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3706 .type = ARM_CP_ALIAS,
3707 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3708 .access = PL2_RW,
3709 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3710 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3711 .type = ARM_CP_ALIAS,
3712 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3713 .access = PL2_RW,
3714 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3715 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3716 .type = ARM_CP_IO,
3717 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3718 .resetvalue = 0,
3719 .access = PL3_RW,
3720 .writefn = mdcr_el3_write,
3721 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3722 { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
3723 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3724 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3725 .writefn = sdcr_write,
3726 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3727 };
3728
3729 /* These are present only when EL1 supports AArch32 */
3730 static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
3731 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3732 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3733 .access = PL2_RW,
3734 .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
3735 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
3736 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3737 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3738 .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
3739 .writefn = dacr_write, .raw_writefn = raw_write,
3740 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3741 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3742 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3743 .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
3744 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3745 };
3746
3747 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
3748 {
3749 ARMCPU *cpu = env_archcpu(env);
3750
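    /*
     * Build up the mask of bits that are writable for this CPU's
     * feature set; anything outside valid_mask is RES0 and is cleared
     * from the value before it is stored below.
     */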
3751 if (arm_feature(env, ARM_FEATURE_V8)) {
3752 valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */
3753 } else {
3754 valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */
3755 }
3756
3757 if (arm_feature(env, ARM_FEATURE_EL3)) {
3758 valid_mask &= ~HCR_HCD;
3759 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
3760 /*
3761 * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
3762 * However, if we're using the SMC PSCI conduit then QEMU is
3763 * effectively acting like EL3 firmware and so the guest at
3764 * EL2 should retain the ability to prevent EL1 from being
3765 * able to make SMC calls into the ersatz firmware, so in
3766 * that case HCR.TSC should be read/write.
3767 */
3768 valid_mask &= ~HCR_TSC;
3769 }
3770
3771 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
3772 if (cpu_isar_feature(aa64_vh, cpu)) {
3773 valid_mask |= HCR_E2H;
3774 }
3775 if (cpu_isar_feature(aa64_ras, cpu)) {
3776 valid_mask |= HCR_TERR | HCR_TEA;
3777 }
3778 if (cpu_isar_feature(aa64_lor, cpu)) {
3779 valid_mask |= HCR_TLOR;
3780 }
3781 if (cpu_isar_feature(aa64_pauth, cpu)) {
3782 valid_mask |= HCR_API | HCR_APK;
3783 }
3784 if (cpu_isar_feature(aa64_mte, cpu)) {
3785 valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
3786 }
3787 if (cpu_isar_feature(aa64_scxtnum, cpu)) {
3788 valid_mask |= HCR_ENSCXT;
3789 }
3790 if (cpu_isar_feature(aa64_fwb, cpu)) {
3791 valid_mask |= HCR_FWB;
3792 }
3793 if (cpu_isar_feature(aa64_rme, cpu)) {
3794 valid_mask |= HCR_GPF;
3795 }
3796 if (cpu_isar_feature(aa64_nv, cpu)) {
3797 valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
3798 }
3799 if (cpu_isar_feature(aa64_nv2, cpu)) {
3800 valid_mask |= HCR_NV2;
3801 }
3802 }
3803
3804 if (cpu_isar_feature(any_evt, cpu)) {
3805 valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
3806 } else if (cpu_isar_feature(any_half_evt, cpu)) {
3807 valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
3808 }
3809
3810 /* Clear RES0 bits. */
3811 value &= valid_mask;
3812
3813 /* RW is RAO/WI if EL1 is AArch64 only */
3814 if (arm_feature(env, ARM_FEATURE_AARCH64) &&
3815 !cpu_isar_feature(aa64_aa32_el1, cpu)) {
3816 value |= HCR_RW;
3817 }
3818
3819 /*
3820 * These bits change the MMU setup:
3821 * HCR_VM enables stage 2 translation
3822 * HCR_PTW forbids certain page-table setups
3823 * HCR_DC disables stage1 and enables stage2 translation
3824 * HCR_DCT enables tagging on (disabled) stage1 translation
3825 * HCR_FWB changes the interpretation of stage2 descriptor bits
3826 * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
3827 */
3828 if ((env->cp15.hcr_el2 ^ value) &
3829 (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
3830 tlb_flush(CPU(cpu));
3831 }
3832 env->cp15.hcr_el2 = value;
3833
3834 /*
3835 * Updates to VI and VF require us to update the status of
3836 * virtual interrupts, which are the logical OR of these bits
3837 * and the state of the input lines from the GIC. (This requires
3838 * that we have the BQL, which is done by marking the
3839 * reginfo structs as ARM_CP_IO.)
3840 * Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or
3841 * VFNMI, it is never possible for it to be taken immediately
3842 * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
3843 * at EL0 or EL1, and HCR can only be written at EL2.
3844 */
3845 g_assert(bql_locked());
3846 arm_cpu_update_virq(cpu);
3847 arm_cpu_update_vfiq(cpu);
3848 arm_cpu_update_vserr(cpu);
3849 if (cpu_isar_feature(aa64_nmi, cpu)) {
3850 arm_cpu_update_vinmi(cpu);
3851 arm_cpu_update_vfnmi(cpu);
3852 }
3853 }
3854
3855 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3856 {
3857 do_hcr_write(env, value, 0);
3858 }
3859
3860 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
3861 uint64_t value)
3862 {
3863 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
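    /*
     * deposit64(old, 32, 32, value) keeps the current low word and
     * replaces bits [63:32] with the 32-bit value being written.
     */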
3864 value = deposit64(env->cp15.hcr_el2, 32, 32, value);
3865 do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
3866 }
3867
3868 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
3869 uint64_t value)
3870 {
3871 /* Handle HCR write, i.e. write to low half of HCR_EL2 */
3872 value = deposit64(env->cp15.hcr_el2, 0, 32, value);
3873 do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
3874 }
3875
3876 static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3877 {
3878 /* hcr_write will set the RES1 bits on an AArch64-only CPU */
3879 hcr_write(env, ri, 0);
3880 }
3881
3882 /*
3883 * Return the effective value of HCR_EL2, at the given security state.
3884 * Bits that are not included here:
3885 * RW (read from SCR_EL3.RW as needed)
3886 */
3887 uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
3888 {
3889 uint64_t ret = env->cp15.hcr_el2;
3890
3891 assert(space != ARMSS_Root);
3892
3893 if (!arm_is_el2_enabled_secstate(env, space)) {
3894 /*
3895 * "This register has no effect if EL2 is not enabled in the
3896 * current Security state". This is ARMv8.4-SecEL2 speak for
3897 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
3898 *
3899 * Prior to that, the language was "In an implementation that
3900 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
3901 * as if this field is 0 for all purposes other than a direct
3902 * read or write access of HCR_EL2". With lots of enumeration
3903 * on a per-field basis. In current QEMU, this condition
3904 * is arm_is_secure_below_el3.
3905 *
3906 * Since the v8.4 language applies to the entire register, and
3907 * appears to be backward compatible, use that.
3908 */
3909 return 0;
3910 }
3911
3912 /*
3913 * For a cpu that supports both aarch64 and aarch32, we can set bits
3914 * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
3915 * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
3916 */
3917 if (!arm_el_is_aa64(env, 2)) {
3918 uint64_t aa32_valid;
3919
3920 /*
3921 * These bits are up-to-date as of ARMv8.6.
3922 * For HCR, it's easiest to list just the 2 bits that are invalid.
3923 * For HCR2, list those that are valid.
3924 */
3925 aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
3926 aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
3927 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
3928 ret &= aa32_valid;
3929 }
3930
3931 if (ret & HCR_TGE) {
3932 /* These bits are up-to-date as of ARMv8.6. */
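        /*
         * With TGE set, EL1 is effectively out of the picture: most of
         * the trap and virtualization controls behave as 0, and when
         * E2H is clear the FMO/IMO/AMO routing bits behave as 1 so that
         * physical interrupts are taken to EL2.
         */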
3933 if (ret & HCR_E2H) {
3934 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
3935 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
3936 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
3937 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
3938 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
3939 HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
3940 } else {
3941 ret |= HCR_FMO | HCR_IMO | HCR_AMO;
3942 }
3943 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
3944 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
3945 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
3946 HCR_TLOR);
3947 }
3948
3949 return ret;
3950 }
3951
3952 uint64_t arm_hcr_el2_eff(CPUARMState *env)
3953 {
3954 if (arm_feature(env, ARM_FEATURE_M)) {
3955 return 0;
3956 }
3957 return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
3958 }
3959
3960 uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env)
3961 {
3962 uint64_t hcr = arm_hcr_el2_eff(env);
3963
3964 if (!(hcr & HCR_NV)) {
3965 return 0; /* CONSTRAINED UNPREDICTABLE wrt NV1 */
3966 }
3967 return hcr & (HCR_NV2 | HCR_NV1 | HCR_NV);
3968 }
3969
3970 /*
3971 * Corresponds to ARM pseudocode function ELIsInHost().
3972 */
3973 bool el_is_in_host(CPUARMState *env, int el)
3974 {
3975 uint64_t mask;
3976
3977 /*
3978 * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
3979 * Perform the simplest bit tests first, and validate EL2 afterward.
3980 */
3981 if (el & 1) {
3982 return false; /* EL1 or EL3 */
3983 }
3984
3985 /*
3986 * Note that hcr_write() checks isar_feature_aa64_vh(),
3987 * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
3988 */
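    /*
     * Here el is 0 or 2: EL0 counts as "in host" only when both E2H and
     * TGE are set, while EL2 itself needs only E2H.
     */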
3989 mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
3990 if ((env->cp15.hcr_el2 & mask) != mask) {
3991 return false;
3992 }
3993
3994 /* TGE and/or E2H set: double check those bits are currently legal. */
3995 return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
3996 }
3997
3998 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
3999 uint64_t value)
4000 {
4001 ARMCPU *cpu = env_archcpu(env);
4002 uint64_t valid_mask = 0;
4003
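    /*
     * As with HCR_EL2, only bits backed by an implemented feature are
     * writable; everything else is RES0 and is cleared below.
     */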
4004 if (cpu_isar_feature(aa64_mops, cpu)) {
4005 valid_mask |= HCRX_MSCEN | HCRX_MCE2;
4006 }
4007 if (cpu_isar_feature(aa64_nmi, cpu)) {
4008 valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
4009 }
4010 if (cpu_isar_feature(aa64_cmow, cpu)) {
4011 valid_mask |= HCRX_CMOW;
4012 }
4013 if (cpu_isar_feature(aa64_xs, cpu)) {
4014 valid_mask |= HCRX_FGTNXS | HCRX_FNXS;
4015 }
4016 if (cpu_isar_feature(aa64_tcr2, cpu)) {
4017 valid_mask |= HCRX_TCR2EN;
4018 }
4019 if (cpu_isar_feature(aa64_sctlr2, cpu)) {
4020 valid_mask |= HCRX_SCTLR2EN;
4021 }
4022 if (cpu_isar_feature(aa64_gcs, cpu)) {
4023 valid_mask |= HCRX_GCSEN;
4024 }
4025
4026 /* Clear RES0 bits. */
4027 env->cp15.hcrx_el2 = value & valid_mask;
4028
4029 /*
4030 * Updates to VINMI and VFNMI require us to update the status of
4031 * virtual NMIs, which are the logical OR of these bits
4032 * and the state of the input lines from the GIC. (This requires
4033 * that we have the BQL, which is done by marking the
4034 * reginfo structs as ARM_CP_IO.)
4035 * Note that if a write to HCRX pends a VINMI or VFNMI it is never
4036 * possible for it to be taken immediately, because VINMI and
4037 * VFNMI are masked unless running at EL0 or EL1, and HCRX
4038 * can only be written at EL2.
4039 */
4040 if (cpu_isar_feature(aa64_nmi, cpu)) {
4041 g_assert(bql_locked());
4042 arm_cpu_update_vinmi(cpu);
4043 arm_cpu_update_vfnmi(cpu);
4044 }
4045 }
4046
4047 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
4048 bool isread)
4049 {
4050 if (arm_current_el(env) == 2
4051 && arm_feature(env, ARM_FEATURE_EL3)
4052 && !(env->cp15.scr_el3 & SCR_HXEN)) {
4053 return CP_ACCESS_TRAP_EL3;
4054 }
4055 return CP_ACCESS_OK;
4056 }
4057
4058 static const ARMCPRegInfo hcrx_el2_reginfo = {
4059 .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
4060 .type = ARM_CP_IO,
4061 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
4062 .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
4063 .nv2_redirect_offset = 0xa0,
4064 .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
4065 };
4066
4067 /* Return the effective value of HCRX_EL2. */
4068 uint64_t arm_hcrx_el2_eff(CPUARMState *env)
4069 {
4070 /*
4071 * The bits in this register behave as 0 for all purposes other than
4072 * direct reads of the register if SCR_EL3.HXEn is 0.
4073 * If EL2 is not enabled in the current security state, then the
4074 * bit may behave as if 0, or as if 1, depending on the bit.
4075 * For the moment, we treat the EL2-disabled case as taking
4076 * priority over the HXEn-disabled case. This is true for the only
4077 * bit of an implemented feature where the answer differs between
4078 * the two cases (MSCEn for FEAT_MOPS).
4079 * This may need to be revisited for future bits.
4080 */
4081 if (!arm_is_el2_enabled(env)) {
4082 ARMCPU *cpu = env_archcpu(env);
4083 uint64_t hcrx = 0;
4084
4085 /* Bits whose effective value is 1 if EL2 is not enabled. */
4086 if (cpu_isar_feature(aa64_mops, cpu)) {
4087 hcrx |= HCRX_MSCEN;
4088 }
4089 if (cpu_isar_feature(aa64_tcr2, cpu)) {
4090 hcrx |= HCRX_TCR2EN;
4091 }
4092 if (cpu_isar_feature(aa64_sctlr2, cpu)) {
4093 hcrx |= HCRX_SCTLR2EN;
4094 }
4095 if (cpu_isar_feature(aa64_gcs, cpu)) {
4096 hcrx |= HCRX_GCSEN;
4097 }
4098 return hcrx;
4099 }
4100 if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
4101 return 0;
4102 }
4103 return env->cp15.hcrx_el2;
4104 }
4105
4106 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4107 uint64_t value)
4108 {
4109 /*
4110 * For A-profile AArch32 EL3, if NSACR.CP10
4111 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
4112 */
4113 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
4114 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
4115 uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
4116 value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
4117 }
4118 env->cp15.cptr_el[2] = value;
4119 }
4120
4121 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
4122 {
4123 /*
4124 * For A-profile AArch32 EL3, if NSACR.CP10
4125 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
4126 */
4127 uint64_t value = env->cp15.cptr_el[2];
4128
4129 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
4130 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
4131 value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
4132 }
4133 return value;
4134 }
4135
4136 static const ARMCPRegInfo el2_cp_reginfo[] = {
4137 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
4138 .type = ARM_CP_IO,
4139 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4140 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4141 .nv2_redirect_offset = 0x78,
4142 .resetfn = hcr_reset,
4143 .writefn = hcr_write, .raw_writefn = raw_write },
4144 { .name = "HCR", .state = ARM_CP_STATE_AA32,
4145 .type = ARM_CP_ALIAS | ARM_CP_IO,
4146 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4147 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4148 .writefn = hcr_writelow },
4149 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4150 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4151 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4152 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
4153 .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
4154 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
4155 .access = PL2_RW, .accessfn = access_exlock_el2,
4156 .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
4157 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4158 .type = ARM_CP_NV2_REDIRECT,
4159 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4160 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
4161 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4162 .type = ARM_CP_NV2_REDIRECT,
4163 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4164 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
4165 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4166 .type = ARM_CP_ALIAS,
4167 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4168 .access = PL2_RW,
4169 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
4170 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
4171 .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
4172 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
4173 .access = PL2_RW, .accessfn = access_exlock_el2,
4174 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
4175 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4176 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4177 .access = PL2_RW, .writefn = vbar_write,
4178 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
4179 .resetvalue = 0 },
4180 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
4181 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
4182 .access = PL3_RW, .type = ARM_CP_ALIAS,
4183 .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
4184 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4185 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4186 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
4187 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
4188 .readfn = cptr_el2_read, .writefn = cptr_el2_write },
4189 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4190 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4191 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
4192 .resetvalue = 0 },
4193 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4194 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4195 .access = PL2_RW, .type = ARM_CP_ALIAS,
4196 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
4197 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4198 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4199 .access = PL2_RW, .type = ARM_CP_CONST,
4200 .resetvalue = 0 },
4201 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
4202 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4203 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4204 .access = PL2_RW, .type = ARM_CP_CONST,
4205 .resetvalue = 0 },
4206 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4207 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4208 .access = PL2_RW, .type = ARM_CP_CONST,
4209 .resetvalue = 0 },
4210 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4211 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4212 .access = PL2_RW, .type = ARM_CP_CONST,
4213 .resetvalue = 0 },
4214 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4215 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4216 .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
4217 .raw_writefn = raw_write,
4218 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
4219 { .name = "VTCR", .state = ARM_CP_STATE_AA32,
4220 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4221 .type = ARM_CP_ALIAS,
4222 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4223 .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
4224 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
4225 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4226 .access = PL2_RW,
4227 .nv2_redirect_offset = 0x40,
4228 /* no .writefn needed as this can't cause an ASID change */
4229 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4230 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4231 .cp = 15, .opc1 = 6, .crm = 2,
4232 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4233 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4234 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
4235 .writefn = vttbr_write, .raw_writefn = raw_write },
4236 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4237 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4238 .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
4239 .nv2_redirect_offset = 0x20,
4240 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
4241 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4242 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4243 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
4244 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
4245 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4246 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4247 .access = PL2_RW, .resetvalue = 0,
4248 .nv2_redirect_offset = 0x90,
4249 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
4250 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4251 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4252 .access = PL2_RW, .resetvalue = 0,
4253 .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
4254 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4255 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4256 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4257 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4258 #ifndef CONFIG_USER_ONLY
4259 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4260 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4261 /*
4262 * ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
4263 * reset values as IMPDEF. We choose to reset to 3 to comply with
4264 * both ARMv7 and ARMv8.
4265 */
4266 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
4267 .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
4268 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4269 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4270 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4271 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4272 .writefn = gt_cntvoff_write,
4273 .nv2_redirect_offset = 0x60,
4274 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4275 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4276 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4277 .writefn = gt_cntvoff_write,
4278 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4279 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4280 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4281 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4282 .type = ARM_CP_IO, .access = PL2_RW,
4283 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4284 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4285 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4286 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4287 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4288 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4289 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4290 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4291 .resetfn = gt_hyp_timer_reset,
4292 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4293 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4294 .type = ARM_CP_IO,
4295 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4296 .access = PL2_RW,
4297 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4298 .resetvalue = 0,
4299 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4300 #endif
4301 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4302 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4303 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4304 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4305 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4306 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4307 .access = PL2_RW,
4308 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4309 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4310 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4311 .access = PL2_RW,
4312 .nv2_redirect_offset = 0x80,
4313 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4314 };
4315
4316 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
4317 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4318 .type = ARM_CP_ALIAS | ARM_CP_IO,
4319 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4320 .access = PL2_RW,
4321 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
4322 .writefn = hcr_writehigh },
4323 };
4324
4325 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
4326 bool isread)
4327 {
4328 if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
4329 return CP_ACCESS_OK;
4330 }
4331 return CP_ACCESS_UNDEFINED;
4332 }
4333
4334 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
4335 { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
4336 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
4337 .access = PL2_RW, .accessfn = sel2_access,
4338 .nv2_redirect_offset = 0x30,
4339 .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
4340 { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
4341 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
4342 .access = PL2_RW, .accessfn = sel2_access,
4343 .nv2_redirect_offset = 0x48,
4344 .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
4345 #ifndef CONFIG_USER_ONLY
4346 /* Secure EL2 Physical Timer */
4347 { .name = "CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
4348 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 0,
4349 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4350 .accessfn = gt_sel2timer_access,
4351 .readfn = gt_sec_pel2_tval_read,
4352 .writefn = gt_sec_pel2_tval_write,
4353 .resetfn = gt_sec_pel2_timer_reset,
4354 },
4355 { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64,
4356 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 1,
4357 .type = ARM_CP_IO, .access = PL2_RW,
4358 .accessfn = gt_sel2timer_access,
4359 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].ctl),
4360 .resetvalue = 0,
4361 .writefn = gt_sec_pel2_ctl_write, .raw_writefn = raw_write,
4362 },
4363 { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4364 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 2,
4365 .type = ARM_CP_IO, .access = PL2_RW,
4366 .accessfn = gt_sel2timer_access,
4367 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].cval),
4368 .writefn = gt_sec_pel2_cval_write, .raw_writefn = raw_write,
4369 },
4370 /* Secure EL2 Virtual Timer */
4371 { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
4372 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 0,
4373 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4374 .accessfn = gt_sel2timer_access,
4375 .readfn = gt_sec_vel2_tval_read,
4376 .writefn = gt_sec_vel2_tval_write,
4377 .resetfn = gt_sec_vel2_timer_reset,
4378 },
4379 { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64,
4380 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 1,
4381 .type = ARM_CP_IO, .access = PL2_RW,
4382 .accessfn = gt_sel2timer_access,
4383 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].ctl),
4384 .resetvalue = 0,
4385 .writefn = gt_sec_vel2_ctl_write, .raw_writefn = raw_write,
4386 },
4387 { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4388 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 2,
4389 .type = ARM_CP_IO, .access = PL2_RW,
4390 .accessfn = gt_sel2timer_access,
4391 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].cval),
4392 .writefn = gt_sec_vel2_cval_write, .raw_writefn = raw_write,
4393 },
4394 #endif
4395 };
4396
4397 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4398 bool isread)
4399 {
4400 /*
4401 * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4402 * At Secure EL1 it traps to EL3 or EL2.
4403 */
4404 if (arm_current_el(env) == 3) {
4405 return CP_ACCESS_OK;
4406 }
4407 if (arm_is_secure_below_el3(env)) {
4408 if (env->cp15.scr_el3 & SCR_EEL2) {
4409 return CP_ACCESS_TRAP_EL2;
4410 }
4411 return CP_ACCESS_TRAP_EL3;
4412 }
4413 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
4414 if (isread) {
4415 return CP_ACCESS_OK;
4416 }
4417 return CP_ACCESS_UNDEFINED;
4418 }
4419
4420 static const ARMCPRegInfo el3_cp_reginfo[] = {
4421 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
4422 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
4423 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
4424 .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
4425 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
4426 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
4427 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4428 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
4429 .writefn = scr_write, .raw_writefn = raw_write },
4430 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
4431 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
4432 .access = PL3_RW, .resetvalue = 0,
4433 .fieldoffset = offsetof(CPUARMState, cp15.sder) },
4434 { .name = "SDER",
4435 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
4436 .access = PL3_RW, .resetvalue = 0,
4437 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
4438 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4439 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4440 .writefn = vbar_write, .resetvalue = 0,
4441 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
4442 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
4443 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
4444 .access = PL3_RW, .resetvalue = 0,
4445 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
4446 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
4447 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
4448 .access = PL3_RW,
4449 /* no .writefn needed as this can't cause an ASID change */
4450 .resetvalue = 0,
4451 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
4452 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
4453 .type = ARM_CP_ALIAS,
4454 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
4455 .access = PL3_RW, .accessfn = access_exlock_el3,
4456 .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
4457 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
4458 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
4459 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
4460 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
4461 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
4462 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
4463 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
4464 .type = ARM_CP_ALIAS,
4465 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
4466 .access = PL3_RW, .accessfn = access_exlock_el3,
4467 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
4468 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
4469 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
4470 .access = PL3_RW, .writefn = vbar_write,
4471 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
4472 .resetvalue = 0 },
4473 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
4474 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
4475 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
4476 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
4477 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
4478 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
4479 .access = PL3_RW, .resetvalue = 0,
4480 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
4481 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
4482 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
4483 .access = PL3_RW, .type = ARM_CP_CONST,
4484 .resetvalue = 0 },
4485 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
4486 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
4487 .access = PL3_RW, .type = ARM_CP_CONST,
4488 .resetvalue = 0 },
4489 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
4490 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
4491 .access = PL3_RW, .type = ARM_CP_CONST,
4492 .resetvalue = 0 },
4493 };
4494
4495 #ifndef CONFIG_USER_ONLY
4496
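/*
 * Registers using this accessfn UNDEF when HCR_EL2.E2H is clear; an
 * access from EL1 can only be a FEAT_NV access and is allowed through.
 */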
4497 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
4498 bool isread)
4499 {
4500 if (arm_current_el(env) == 1) {
4501 /* This must be a FEAT_NV access */
4502 return CP_ACCESS_OK;
4503 }
4504 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
4505 return CP_ACCESS_UNDEFINED;
4506 }
4507 return CP_ACCESS_OK;
4508 }
4509
4510 static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri,
4511 bool isread)
4512 {
4513 if (arm_current_el(env) == 1) {
4514 /* This must be a FEAT_NV access with NVx == 101 */
4515 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
4516 return CP_ACCESS_TRAP_EL2;
4517 }
4518 }
4519 return e2h_access(env, ri, isread);
4520 }
4521
4522 static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
4523 bool isread)
4524 {
4525 if (arm_current_el(env) == 1) {
4526 /* This must be a FEAT_NV access with NVx == 101 */
4527 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
4528 return CP_ACCESS_TRAP_EL2;
4529 }
4530 }
4531 return e2h_access(env, ri, isread);
4532 }
4533
4534 #endif
4535
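/*
 * CTR_EL0: EL0 accesses trap unless the relevant SCTLR_ELx.UCT bit is set;
 * outside the E2H+TGE host regime, EL0 and EL1 accesses also trap to EL2
 * when HCR_EL2.TID2 is set.
 */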
4536 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4537 bool isread)
4538 {
4539 int cur_el = arm_current_el(env);
4540
4541 if (cur_el < 2) {
4542 uint64_t hcr = arm_hcr_el2_eff(env);
4543
4544 if (cur_el == 0) {
4545 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4546 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
4547 return CP_ACCESS_TRAP_EL2;
4548 }
4549 } else {
4550 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4551 return CP_ACCESS_TRAP_EL1;
4552 }
4553 if (hcr & HCR_TID2) {
4554 return CP_ACCESS_TRAP_EL2;
4555 }
4556 }
4557 } else if (hcr & HCR_TID2) {
4558 return CP_ACCESS_TRAP_EL2;
4559 }
4560 }
4561
4566 return CP_ACCESS_OK;
4567 }
4568
4569 /*
4570 * Check for traps to RAS registers, which are controlled
4571 * by HCR_EL2.TERR and SCR_EL3.TERR.
4572 */
4573 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
4574 bool isread)
4575 {
4576 int el = arm_current_el(env);
4577
4578 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
4579 return CP_ACCESS_TRAP_EL2;
4580 }
4581 if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) {
4582 return CP_ACCESS_TRAP_EL3;
4583 }
4584 return CP_ACCESS_OK;
4585 }
4586
4587 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4588 {
4589 int el = arm_current_el(env);
4590
4591 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
4592 return env->cp15.vdisr_el2;
4593 }
4594 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
4595 return 0; /* RAZ/WI */
4596 }
4597 return env->cp15.disr_el1;
4598 }
4599
4600 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4601 {
4602 int el = arm_current_el(env);
4603
4604 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
4605 env->cp15.vdisr_el2 = val;
4606 return;
4607 }
4608 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
4609 return; /* RAZ/WI */
4610 }
4611 env->cp15.disr_el1 = val;
4612 }
4613
4614 /*
4615 * Minimal RAS implementation with no Error Records.
4616 * Which means that all of the Error Record registers:
4617 * ERXADDR_EL1
4618 * ERXCTLR_EL1
4619 * ERXFR_EL1
4620 * ERXMISC0_EL1
4621 * ERXMISC1_EL1
4622 * ERXMISC2_EL1
4623 * ERXMISC3_EL1
4624 * ERXPFGCDN_EL1 (RASv1p1)
4625 * ERXPFGCTL_EL1 (RASv1p1)
4626 * ERXPFGF_EL1 (RASv1p1)
4627 * ERXSTATUS_EL1
4628 * and
4629 * ERRSELR_EL1
4630 * may generate UNDEFINED, which is the effect we get by not
4631 * listing them at all.
4632 *
4633 * These registers have fine-grained trap bits, but UNDEF-to-EL1
4634 * is higher priority than FGT-to-EL2 so we do not need to list them
4635 * in order to check for an FGT.
4636 */
4637 static const ARMCPRegInfo minimal_ras_reginfo[] = {
4638 { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
4639 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
4640 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
4641 .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
4642 { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
4643 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
4644 .access = PL1_R, .accessfn = access_terr,
4645 .fgt = FGT_ERRIDR_EL1,
4646 .type = ARM_CP_CONST, .resetvalue = 0 },
4647 { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
4648 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
4649 .nv2_redirect_offset = 0x500,
4650 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
4651 { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
4652 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
4653 .nv2_redirect_offset = 0x508,
4654 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
4655 };
4656
4657 /*
4658 * Return the exception level to which exceptions should be taken
4659 * via SVEAccessTrap. This excludes the check for whether the exception
4660 * should be routed through AArch64.AdvSIMDFPAccessTrap. That can easily
4661 * be found by testing 0 < fp_exception_el < sve_exception_el.
4662 *
4663 * C.f. the ARM pseudocode function CheckSVEEnabled. Note that the
4664 * pseudocode does *not* separate out the FP trap checks, but has them
4665 * all in one function.
4666 */
4667 int sve_exception_el(CPUARMState *env, int el)
4668 {
4669 #ifndef CONFIG_USER_ONLY
4670 if (el <= 1 && !el_is_in_host(env, el)) {
4671 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
4672 case 1:
4673 if (el != 0) {
4674 break;
4675 }
4676 /* fall through */
4677 case 0:
4678 case 2:
4679 return 1;
4680 }
4681 }
4682
4683 if (el <= 2 && arm_is_el2_enabled(env)) {
4684 /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
4685 if (env->cp15.hcr_el2 & HCR_E2H) {
4686 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
4687 case 1:
4688 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
4689 break;
4690 }
4691 /* fall through */
4692 case 0:
4693 case 2:
4694 return 2;
4695 }
4696 } else {
4697 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
4698 return 2;
4699 }
4700 }
4701 }
4702
4703 /* CPTR_EL3. Since EZ is negative we must check for EL3. */
4704 if (arm_feature(env, ARM_FEATURE_EL3)
4705 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
4706 return 3;
4707 }
4708 #endif
4709 return 0;
4710 }
4711
4712 /*
4713 * Return the exception level to which exceptions should be taken for SME.
4714 * C.f. the ARM pseudocode function CheckSMEAccess.
4715 */
4716 int sme_exception_el(CPUARMState *env, int el)
4717 {
4718 #ifndef CONFIG_USER_ONLY
4719 if (el <= 1 && !el_is_in_host(env, el)) {
4720 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
4721 case 1:
4722 if (el != 0) {
4723 break;
4724 }
4725 /* fall through */
4726 case 0:
4727 case 2:
4728 return 1;
4729 }
4730 }
4731
4732 if (el <= 2 && arm_is_el2_enabled(env)) {
4733 /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
4734 if (env->cp15.hcr_el2 & HCR_E2H) {
4735 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
4736 case 1:
4737 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
4738 break;
4739 }
4740 /* fall through */
4741 case 0:
4742 case 2:
4743 return 2;
4744 }
4745 } else {
4746 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
4747 return 2;
4748 }
4749 }
4750 }
4751
4752 /* CPTR_EL3. Since ESM is negative we must check for EL3. */
4753 if (arm_feature(env, ARM_FEATURE_EL3)
4754 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
4755 return 3;
4756 }
4757 #endif
4758 return 0;
4759 }
4760
4761 /*
4762 * Given that SVE is enabled, return the vector length for EL.
4763 */
4764 uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
4765 {
4766 ARMCPU *cpu = env_archcpu(env);
4767 uint64_t *cr = env->vfp.zcr_el;
4768 uint32_t map = cpu->sve_vq.map;
4769 uint32_t len = ARM_MAX_VQ - 1;
4770
4771 if (sm) {
4772 cr = env->vfp.smcr_el;
4773 map = cpu->sme_vq.map;
4774 }
4775
4776 if (el <= 1 && !el_is_in_host(env, el)) {
4777 len = MIN(len, 0xf & (uint32_t)cr[1]);
4778 }
4779 if (el <= 2 && arm_is_el2_enabled(env)) {
4780 len = MIN(len, 0xf & (uint32_t)cr[2]);
4781 }
4782 if (arm_feature(env, ARM_FEATURE_EL3)) {
4783 len = MIN(len, 0xf & (uint32_t)cr[3]);
4784 }
4785
4786 map &= MAKE_64BIT_MASK(0, len + 1);
4787 if (map != 0) {
4788 return 31 - clz32(map);
4789 }
4790
4791 /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
4792 assert(sm);
4793 return ctz32(cpu->sme_vq.map);
4794 }
4795
4796 uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
4797 {
4798 return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
4799 }
4800
4801 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4802 uint64_t value)
4803 {
4804 int cur_el = arm_current_el(env);
4805 int old_len = sve_vqm1_for_el(env, cur_el);
4806 int new_len;
4807
4808 /* Bits other than [3:0] are RAZ/WI. */
4809 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
4810 raw_write(env, ri, value & 0xf);
4811
4812 /*
4813 * Because we arrived here, we know both FP and SVE are enabled;
4814 * otherwise we would have trapped access to the ZCR_ELn register.
4815 */
4816 new_len = sve_vqm1_for_el(env, cur_el);
4817 if (new_len < old_len) {
4818 aarch64_sve_narrow_vq(env, new_len + 1);
4819 }
4820 }
4821
4822 static const ARMCPRegInfo zcr_reginfo[] = {
4823 { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
4824 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
4825 .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
4826 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 0),
4827 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 0),
4828 .access = PL1_RW, .type = ARM_CP_SVE,
4829 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
4830 .writefn = zcr_write, .raw_writefn = raw_write },
4831 { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
4832 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
4833 .access = PL2_RW, .type = ARM_CP_SVE,
4834 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
4835 .writefn = zcr_write, .raw_writefn = raw_write },
4836 { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
4837 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
4838 .access = PL3_RW, .type = ARM_CP_SVE,
4839 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
4840 .writefn = zcr_write, .raw_writefn = raw_write },
4841 };
4842
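/*
 * TPIDR2_EL0: EL0 accesses trap unless the effective SCTLR.EnTP2 bit is
 * set; accesses below EL3 trap to EL3 when SCR_EL3.EnTP2 is clear.
 */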
4843 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
4844 bool isread)
4845 {
4846 int el = arm_current_el(env);
4847
4848 if (el == 0) {
4849 uint64_t sctlr = arm_sctlr(env, el);
4850 if (!(sctlr & SCTLR_EnTP2)) {
4851 return CP_ACCESS_TRAP_EL1;
4852 }
4853 }
4854 /* TODO: FEAT_FGT */
4855 if (el < 3
4856 && arm_feature(env, ARM_FEATURE_EL3)
4857 && !(env->cp15.scr_el3 & SCR_ENTP2)) {
4858 return CP_ACCESS_TRAP_EL3;
4859 }
4860 return CP_ACCESS_OK;
4861 }
4862
4863 static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
4864 bool isread)
4865 {
4866 /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */
4867 if (arm_current_el(env) == 2
4868 && arm_feature(env, ARM_FEATURE_EL3)
4869 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
4870 return CP_ACCESS_TRAP_EL3;
4871 }
4872 return CP_ACCESS_OK;
4873 }
4874
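/* SMPRI_EL1 accesses below EL3 trap to EL3 when CPTR_EL3.ESM is clear. */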
4875 static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
4876 bool isread)
4877 {
4878 if (arm_current_el(env) < 3
4879 && arm_feature(env, ARM_FEATURE_EL3)
4880 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
4881 return CP_ACCESS_TRAP_EL3;
4882 }
4883 return CP_ACCESS_OK;
4884 }
4885
4886 /* ResetSVEState */
4887 static void arm_reset_sve_state(CPUARMState *env)
4888 {
4889 memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
4890 /* Recall that FFR is stored as pregs[16]. */
4891 memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
4892 vfp_set_fpsr(env, 0x0800009f);
4893 }
4894
4895 void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
4896 {
4897 uint64_t change = (env->svcr ^ new) & mask;
4898
4899 if (change == 0) {
4900 return;
4901 }
4902 env->svcr ^= change;
4903
4904 if (change & R_SVCR_SM_MASK) {
4905 arm_reset_sve_state(env);
4906 }
4907
4908 /*
4909 * ResetSMEState.
4910 *
4911 * SetPSTATE_ZA zeros on enable and disable. We can zero this only
4912 * on enable: while disabled, the storage is inaccessible and the
4913 * value does not matter. We're not saving the storage in vmstate
4914 * when disabled either.
4915 */
4916 if (change & new & R_SVCR_ZA_MASK) {
4917 memset(&env->za_state, 0, sizeof(env->za_state));
4918 }
4919
4920 if (tcg_enabled()) {
4921 arm_rebuild_hflags(env);
4922 }
4923 }
4924
4925 static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4926 uint64_t value)
4927 {
4928 aarch64_set_svcr(env, value, -1);
4929 }
4930
4931 static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4932 uint64_t value)
4933 {
4934 int cur_el = arm_current_el(env);
4935 int old_len = sve_vqm1_for_el(env, cur_el);
4936 uint64_t valid_mask = R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
4937 int new_len;
4938
4939 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
4940 if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
4941 valid_mask |= R_SMCR_EZT0_MASK;
4942 }
4943 value &= valid_mask;
4944 raw_write(env, ri, value);
4945
4946 /*
4947 * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
4948 * when SVL is widened (old values kept, or zeros). Choose to keep the
4949 * current values for simplicity. But for QEMU internals, we must still
4950 * apply the narrower SVL to the Zregs and Pregs -- see the comment
4951 * above aarch64_sve_narrow_vq.
4952 */
4953 new_len = sve_vqm1_for_el(env, cur_el);
4954 if (new_len < old_len) {
4955 aarch64_sve_narrow_vq(env, new_len + 1);
4956 }
4957 }
4958
4959 static const ARMCPRegInfo sme_reginfo[] = {
4960 { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
4961 .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
4962 .access = PL0_RW, .accessfn = access_tpidr2,
4963 .fgt = FGT_NTPIDR2_EL0,
4964 .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
4965 { .name = "SVCR", .state = ARM_CP_STATE_AA64,
4966 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
4967 .access = PL0_RW, .type = ARM_CP_SME,
4968 .fieldoffset = offsetof(CPUARMState, svcr),
4969 .writefn = svcr_write, .raw_writefn = raw_write },
4970 { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
4971 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
4972 .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
4973 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 6),
4974 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 6),
4975 .access = PL1_RW, .type = ARM_CP_SME,
4976 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
4977 .writefn = smcr_write, .raw_writefn = raw_write },
4978 { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
4979 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
4980 .access = PL2_RW, .type = ARM_CP_SME,
4981 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
4982 .writefn = smcr_write, .raw_writefn = raw_write },
4983 { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
4984 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
4985 .access = PL3_RW, .type = ARM_CP_SME,
4986 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
4987 .writefn = smcr_write, .raw_writefn = raw_write },
4988 { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
4989 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
4990 .access = PL1_R, .accessfn = access_tid1,
4991 /*
4992 * IMPLEMENTOR = 0 (software)
4993 * REVISION = 0 (implementation defined)
4994 * SMPS = 0 (no streaming execution priority in QEMU)
4995 * AFFINITY = 0 (streaming sve mode not shared with other PEs)
4996 */
4997 .type = ARM_CP_CONST, .resetvalue = 0, },
4998 /*
4999 * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0.
5000 */
5001 { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
5002 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
5003 .access = PL1_RW, .accessfn = access_smpri,
5004 .fgt = FGT_NSMPRI_EL1,
5005 .type = ARM_CP_CONST, .resetvalue = 0 },
5006 { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
5007 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
5008 .nv2_redirect_offset = 0x1f8,
5009 .access = PL2_RW, .accessfn = access_smprimap,
5010 .type = ARM_CP_CONST, .resetvalue = 0 },
5011 };
5012
5013 static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5014 uint64_t value)
5015 {
5016 /* L0GPTSZ is RO; other bits not mentioned are RES0. */
5017 uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
5018 R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
5019 R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
5020
5021 if (cpu_isar_feature(aa64_rme_gpc2, env_archcpu(env))) {
5022 rw_mask |= R_GPCCR_APPSAA_MASK | R_GPCCR_NSO_MASK |
5023 R_GPCCR_SPAD_MASK | R_GPCCR_NSPAD_MASK | R_GPCCR_RLPAD_MASK;
5024 }
5025
5026 env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
5027 }
5028
5029 static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
5030 {
5031 env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
5032 env_archcpu(env)->reset_l0gptsz);
5033 }
5034
5035 static const ARMCPRegInfo rme_reginfo[] = {
5036 { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
5037 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
5038 .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
5039 .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
5040 { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
5041 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
5042 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
5043 { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
5044 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
5045 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
5046 { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
5047 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
5048 .access = PL3_W, .type = ARM_CP_NOP },
5049 };
5050
5051 static const ARMCPRegInfo rme_mte_reginfo[] = {
5052 { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
5053 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
5054 .access = PL3_W, .type = ARM_CP_NOP },
5055 };
5056
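/*
 * FEAT_NMI: PSTATE.ALLINT is exposed as the ALLINT register; writes
 * from EL1 trap to EL2 when HCRX_EL2.TALLINT is set.
 */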
5057 static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri,
5058 uint64_t value)
5059 {
5060 env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
5061 }
5062
5063 static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri)
5064 {
5065 return env->pstate & PSTATE_ALLINT;
5066 }
5067
5068 static CPAccessResult aa64_allint_access(CPUARMState *env,
5069 const ARMCPRegInfo *ri, bool isread)
5070 {
5071 if (!isread && arm_current_el(env) == 1 &&
5072 (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) {
5073 return CP_ACCESS_TRAP_EL2;
5074 }
5075 return CP_ACCESS_OK;
5076 }
5077
5078 static const ARMCPRegInfo nmi_reginfo[] = {
5079 { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
5080 .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3,
5081 .type = ARM_CP_NO_RAW,
5082 .access = PL1_RW, .accessfn = aa64_allint_access,
5083 .fieldoffset = offsetof(CPUARMState, pstate),
5084 .writefn = aa64_allint_write, .readfn = aa64_allint_read,
5085 .resetfn = arm_cp_reset_ignore },
5086 };
5087
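/*
 * FEAT_MEC: EL2 accesses to the MECID registers UNDEF outside the Realm
 * security state and trap to EL3 when SCR_EL3.MECEn is clear.
 */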
5088 static CPAccessResult mecid_access(CPUARMState *env,
5089 const ARMCPRegInfo *ri, bool isread)
5090 {
5091 int el = arm_current_el(env);
5092
5093 if (el == 2) {
5094 if (arm_security_space(env) != ARMSS_Realm) {
5095 return CP_ACCESS_UNDEFINED;
5096 }
5097
5098 if (!(env->cp15.scr_el3 & SCR_MECEN)) {
5099 return CP_ACCESS_TRAP_EL3;
5100 }
5101 }
5102
5103 return CP_ACCESS_OK;
5104 }
5105
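/* Writes to the MECID registers keep only the low MECID_WIDTH bits. */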
5106 static void mecid_write(CPUARMState *env, const ARMCPRegInfo *ri,
5107 uint64_t value)
5108 {
5109 value = extract64(value, 0, MECID_WIDTH);
5110 raw_write(env, ri, value);
5111 }
5112
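/* DC CIPAE and DC CIGDPAE are permitted only from Root (EL3) or Realm EL2. */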
5113 static CPAccessResult cipae_access(CPUARMState *env, const ARMCPRegInfo *ri,
5114 bool isread)
5115 {
5116 switch (arm_security_space(env)) {
5117 case ARMSS_Root: /* EL3 */
5118 case ARMSS_Realm: /* Realm EL2 */
5119 return CP_ACCESS_OK;
5120 default:
5121 return CP_ACCESS_UNDEFINED;
5122 }
5123 }
5124
5125 static const ARMCPRegInfo mec_reginfo[] = {
5126 { .name = "MECIDR_EL2", .state = ARM_CP_STATE_AA64,
5127 .opc0 = 3, .opc1 = 4, .opc2 = 7, .crn = 10, .crm = 8,
5128 .access = PL2_R, .type = ARM_CP_CONST | ARM_CP_NV_NO_TRAP,
5129 .resetvalue = MECID_WIDTH - 1 },
5130 { .name = "MECID_P0_EL2", .state = ARM_CP_STATE_AA64,
5131 .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 8,
5132 .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5133 .accessfn = mecid_access, .writefn = mecid_write,
5134 .fieldoffset = offsetof(CPUARMState, cp15.mecid_p0_el2) },
5135 { .name = "MECID_A0_EL2", .state = ARM_CP_STATE_AA64,
5136 .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 8,
5137 .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5138 .accessfn = mecid_access, .writefn = mecid_write,
5139 .fieldoffset = offsetof(CPUARMState, cp15.mecid_a0_el2) },
5140 { .name = "MECID_P1_EL2", .state = ARM_CP_STATE_AA64,
5141 .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 8,
5142 .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5143 .accessfn = mecid_access, .writefn = mecid_write,
5144 .fieldoffset = offsetof(CPUARMState, cp15.mecid_p1_el2) },
5145 { .name = "MECID_A1_EL2", .state = ARM_CP_STATE_AA64,
5146 .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 8,
5147 .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5148 .accessfn = mecid_access, .writefn = mecid_write,
5149 .fieldoffset = offsetof(CPUARMState, cp15.mecid_a1_el2) },
5150 { .name = "MECID_RL_A_EL3", .state = ARM_CP_STATE_AA64,
5151 .opc0 = 3, .opc1 = 6, .opc2 = 1, .crn = 10, .crm = 10,
5152 .access = PL3_RW, .accessfn = mecid_access,
5153 .writefn = mecid_write,
5154 .fieldoffset = offsetof(CPUARMState, cp15.mecid_rl_a_el3) },
5155 { .name = "VMECID_P_EL2", .state = ARM_CP_STATE_AA64,
5156 .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 9,
5157 .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5158 .accessfn = mecid_access, .writefn = mecid_write,
5159 .fieldoffset = offsetof(CPUARMState, cp15.vmecid_p_el2) },
5160 { .name = "VMECID_A_EL2", .state = ARM_CP_STATE_AA64,
5161 .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 9,
5162 .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5163 .accessfn = mecid_access, .writefn = mecid_write,
5164 .fieldoffset = offsetof(CPUARMState, cp15.vmecid_a_el2) },
5165 { .name = "DC_CIPAE", .state = ARM_CP_STATE_AA64,
5166 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 0,
5167 .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
5168 .accessfn = cipae_access },
5169 };
5170
5171 static const ARMCPRegInfo mec_mte_reginfo[] = {
5172 { .name = "DC_CIGDPAE", .state = ARM_CP_STATE_AA64,
5173 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 7,
5174 .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
5175 .accessfn = cipae_access },
5176 };
5177
5178 #ifndef CONFIG_USER_ONLY
5179 /*
5180 * We don't know until after realize whether there's a GICv3
5181 * attached, and that is what registers the gicv3 sysregs.
5182 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
5183 * at runtime.
5184 */
5185 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
5186 {
5187 ARMCPU *cpu = env_archcpu(env);
5188 uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);
5189
5190 if (env->gicv3state) {
5191 pfr1 = FIELD_DP64(pfr1, ID_PFR1, GIC, 1);
5192 }
5193 return pfr1;
5194 }
5195
5196 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
5197 {
5198 ARMCPU *cpu = env_archcpu(env);
5199 uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);
5200
5201 if (env->gicv3state) {
5202 pfr0 = FIELD_DP64(pfr0, ID_AA64PFR0, GIC, 1);
5203 }
5204 return pfr0;
5205 }
5206 #endif
5207
5208 /*
5209 * Shared logic between LORID and the rest of the LOR* registers.
5210 * Secure state exclusion has already been dealt with.
5211 */
5212 static CPAccessResult access_lor_ns(CPUARMState *env,
5213 const ARMCPRegInfo *ri, bool isread)
5214 {
5215 int el = arm_current_el(env);
5216
5217 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
5218 return CP_ACCESS_TRAP_EL2;
5219 }
5220 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
5221 return CP_ACCESS_TRAP_EL3;
5222 }
5223 return CP_ACCESS_OK;
5224 }
5225
5226 static CPAccessResult access_lor_other(CPUARMState *env,
5227 const ARMCPRegInfo *ri, bool isread)
5228 {
5229 if (arm_is_secure_below_el3(env)) {
5230 /* UNDEF if SCR_EL3.NS == 0 */
5231 return CP_ACCESS_UNDEFINED;
5232 }
5233 return access_lor_ns(env, ri, isread);
5234 }
5235
5236 /*
5237 * A trivial implementation of ARMv8.1-LOR leaves all of these
5238 * registers fixed at 0, which indicates that there are zero
5239 * supported Limited Ordering regions.
5240 */
5241 static const ARMCPRegInfo lor_reginfo[] = {
5242 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
5243 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
5244 .access = PL1_RW, .accessfn = access_lor_other,
5245 .fgt = FGT_LORSA_EL1,
5246 .type = ARM_CP_CONST, .resetvalue = 0 },
5247 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
5248 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
5249 .access = PL1_RW, .accessfn = access_lor_other,
5250 .fgt = FGT_LOREA_EL1,
5251 .type = ARM_CP_CONST, .resetvalue = 0 },
5252 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
5253 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
5254 .access = PL1_RW, .accessfn = access_lor_other,
5255 .fgt = FGT_LORN_EL1,
5256 .type = ARM_CP_CONST, .resetvalue = 0 },
5257 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
5258 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
5259 .access = PL1_RW, .accessfn = access_lor_other,
5260 .fgt = FGT_LORC_EL1,
5261 .type = ARM_CP_CONST, .resetvalue = 0 },
5262 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
5263 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
5264 .access = PL1_R, .accessfn = access_lor_ns,
5265 .fgt = FGT_LORID_EL1,
5266 .type = ARM_CP_CONST, .resetvalue = 0 },
5267 };
5268
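/*
 * The pointer-authentication key registers trap to EL2 when HCR_EL2.APK
 * is clear and to EL3 when SCR_EL3.APK is clear.
 */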
5269 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
5270 bool isread)
5271 {
5272 int el = arm_current_el(env);
5273
5274 if (el < 2 &&
5275 arm_is_el2_enabled(env) &&
5276 !(arm_hcr_el2_eff(env) & HCR_APK)) {
5277 return CP_ACCESS_TRAP_EL2;
5278 }
5279 if (el < 3 &&
5280 arm_feature(env, ARM_FEATURE_EL3) &&
5281 !(env->cp15.scr_el3 & SCR_APK)) {
5282 return CP_ACCESS_TRAP_EL3;
5283 }
5284 return CP_ACCESS_OK;
5285 }
5286
5287 static const ARMCPRegInfo pauth_reginfo[] = {
5288 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5289 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
5290 .access = PL1_RW, .accessfn = access_pauth,
5291 .fgt = FGT_APDAKEY,
5292 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
5293 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5294 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
5295 .access = PL1_RW, .accessfn = access_pauth,
5296 .fgt = FGT_APDAKEY,
5297 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
5298 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5299 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
5300 .access = PL1_RW, .accessfn = access_pauth,
5301 .fgt = FGT_APDBKEY,
5302 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
5303 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5304 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
5305 .access = PL1_RW, .accessfn = access_pauth,
5306 .fgt = FGT_APDBKEY,
5307 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
5308 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5309 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
5310 .access = PL1_RW, .accessfn = access_pauth,
5311 .fgt = FGT_APGAKEY,
5312 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
5313 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5314 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
5315 .access = PL1_RW, .accessfn = access_pauth,
5316 .fgt = FGT_APGAKEY,
5317 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
5318 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5319 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
5320 .access = PL1_RW, .accessfn = access_pauth,
5321 .fgt = FGT_APIAKEY,
5322 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
5323 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5324 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
5325 .access = PL1_RW, .accessfn = access_pauth,
5326 .fgt = FGT_APIAKEY,
5327 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
5328 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5329 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
5330 .access = PL1_RW, .accessfn = access_pauth,
5331 .fgt = FGT_APIBKEY,
5332 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
5333 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5334 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
5335 .access = PL1_RW, .accessfn = access_pauth,
5336 .fgt = FGT_APIBKEY,
5337 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
5338 };
5339
5340 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
5341 {
5342 Error *err = NULL;
5343 uint64_t ret;
5344
5345 /* Success sets NZCV = 0000. */
5346 env->NF = env->CF = env->VF = 0, env->ZF = 1;
5347
5348 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
5349 /*
5350 * ??? Failed, for unknown reasons in the crypto subsystem.
5351 * The best we can do is log the reason and return the
5352 * timed-out indication to the guest. There is no reason
5353 * we know to expect this failure to be transitory, so the
5354 * guest may well hang retrying the operation.
5355 */
5356 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
5357 ri->name, error_get_pretty(err));
5358 error_free(err);
5359
5360 env->ZF = 0; /* NZCV = 0100 */
5361 return 0;
5362 }
5363 return ret;
5364 }
5365
5366 /* We do not support re-seeding, so the two registers operate the same. */
5367 static const ARMCPRegInfo rndr_reginfo[] = {
5368 { .name = "RNDR", .state = ARM_CP_STATE_AA64,
5369 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5370 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
5371 .access = PL0_R, .readfn = rndr_readfn },
5372 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
5373 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5374 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
5375 .access = PL0_R, .readfn = rndr_readfn },
5376 };
5377
5378 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
5379 uint64_t value)
5380 {
5381 #ifdef CONFIG_TCG
5382 ARMCPU *cpu = env_archcpu(env);
5383 /* CTR_EL0 System register -> DminLine, bits [19:16] */
5384 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
5385 uint64_t vaddr_in = (uint64_t) value;
5386 uint64_t vaddr = vaddr_in & ~(dline_size - 1);
5387 void *haddr;
5388 int mem_idx = arm_env_mmu_index(env);
5389
5390 /* This won't be crossing page boundaries */
5391 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
5392 if (haddr) {
5393 #ifndef CONFIG_USER_ONLY
5394
5395 ram_addr_t offset;
5396 MemoryRegion *mr;
5397
5398 /* RCU lock is already being held */
5399 mr = memory_region_from_host(haddr, &offset);
5400
5401 if (mr) {
5402 memory_region_writeback(mr, offset, dline_size);
5403 }
5404 #endif /*CONFIG_USER_ONLY*/
5405 }
5406 #else
5407 /* Handled by hardware accelerator. */
5408 g_assert_not_reached();
5409 #endif /* CONFIG_TCG */
5410 }
5411
5412 static const ARMCPRegInfo dcpop_reg[] = {
5413 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
5414 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
5415 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
5416 .fgt = FGT_DCCVAP,
5417 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
5418 };
5419
5420 static const ARMCPRegInfo dcpodp_reg[] = {
5421 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
5422 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
5423 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
5424 .fgt = FGT_DCCVADP,
5425 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
5426 };
5427
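/* Trap accesses from below EL2 to EL2 when HCR_EL2.TID5 is set. */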
5428 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
5429 bool isread)
5430 {
5431 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
5432 return CP_ACCESS_TRAP_EL2;
5433 }
5434
5435 return CP_ACCESS_OK;
5436 }
5437
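/*
 * MTE control registers trap to EL2 when HCR_EL2.ATA is clear (unless
 * the E2H+TGE host regime applies) and to EL3 when SCR_EL3.ATA is clear.
 */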
5438 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
5439 bool isread)
5440 {
5441 int el = arm_current_el(env);
5442 if (el < 2 && arm_is_el2_enabled(env)) {
5443 uint64_t hcr = arm_hcr_el2_eff(env);
5444 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
5445 return CP_ACCESS_TRAP_EL2;
5446 }
5447 }
5448 if (el < 3 &&
5449 arm_feature(env, ARM_FEATURE_EL3) &&
5450 !(env->cp15.scr_el3 & SCR_ATA)) {
5451 return CP_ACCESS_TRAP_EL3;
5452 }
5453 return CP_ACCESS_OK;
5454 }
5455
5456 static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
5457 bool isread)
5458 {
5459 CPAccessResult nv1 = access_nv1(env, ri, isread);
5460
5461 if (nv1 != CP_ACCESS_OK) {
5462 return nv1;
5463 }
5464 return access_mte(env, ri, isread);
5465 }
5466
5467 static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
5468 bool isread)
5469 {
5470 /*
5471 * TFSR_EL2: similar to generic access_mte(), but we need to
5472 * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
5473 * if NV2 is enabled then we will redirect this to TFSR_EL1
5474 * after doing the HCR and SCR ATA traps; otherwise this will
5475 * be a trap to EL2 and the HCR/SCR traps do not apply.
5476 */
5477 int el = arm_current_el(env);
5478
5479 if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
5480 return CP_ACCESS_OK;
5481 }
5482 if (el < 2 && arm_is_el2_enabled(env)) {
5483 uint64_t hcr = arm_hcr_el2_eff(env);
5484 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
5485 return CP_ACCESS_TRAP_EL2;
5486 }
5487 }
5488 if (el < 3 &&
5489 arm_feature(env, ARM_FEATURE_EL3) &&
5490 !(env->cp15.scr_el3 & SCR_ATA)) {
5491 return CP_ACCESS_TRAP_EL3;
5492 }
5493 return CP_ACCESS_OK;
5494 }
5495
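/* PSTATE.TCO is accessed via the TCO special-purpose register. */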
5496 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
5497 {
5498 return env->pstate & PSTATE_TCO;
5499 }
5500
5501 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
5502 {
5503 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
5504 }
5505
5506 static const ARMCPRegInfo mte_reginfo[] = {
5507 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
5508 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
5509 .access = PL1_RW, .accessfn = access_mte,
5510 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
5511 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
5512 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
5513 .access = PL1_RW, .accessfn = access_tfsr_el1,
5514 .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
5515 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 6, 0),
5516 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 6, 0),
5517 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
5518 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
5519 .type = ARM_CP_NV2_REDIRECT,
5520 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
5521 .access = PL2_RW, .accessfn = access_tfsr_el2,
5522 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
5523 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
5524 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
5525 .access = PL3_RW,
5526 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
5527 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
5528 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
5529 .access = PL1_RW, .accessfn = access_mte,
5530 .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
5531 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
5532 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
5533 .access = PL1_RW, .accessfn = access_mte,
5534 .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
5535 { .name = "TCO", .state = ARM_CP_STATE_AA64,
5536 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
5537 .type = ARM_CP_NO_RAW,
5538 .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
5539 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
5540 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
5541 .type = ARM_CP_NOP, .access = PL1_W,
5542 .fgt = FGT_DCIVAC,
5543 .accessfn = aa64_cacheop_poc_access },
5544 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
5545 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
5546 .fgt = FGT_DCISW,
5547 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5548 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
5549 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
5550 .type = ARM_CP_NOP, .access = PL1_W,
5551 .fgt = FGT_DCIVAC,
5552 .accessfn = aa64_cacheop_poc_access },
5553 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
5554 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
5555 .fgt = FGT_DCISW,
5556 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5557 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
5558 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
5559 .fgt = FGT_DCCSW,
5560 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5561 { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
5562 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
5563 .fgt = FGT_DCCSW,
5564 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5565 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
5566 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
5567 .fgt = FGT_DCCISW,
5568 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5569 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
5570 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
5571 .fgt = FGT_DCCISW,
5572 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5573 };
5574
5575 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
5576 { .name = "TCO", .state = ARM_CP_STATE_AA64,
5577 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
5578 .type = ARM_CP_CONST, .access = PL0_RW, },
5579 };
5580
5581 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
5582 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
5583 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
5584 .type = ARM_CP_NOP, .access = PL0_W,
5585 .fgt = FGT_DCCVAC,
5586 .accessfn = aa64_cacheop_poc_access },
5587 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
5588 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
5589 .type = ARM_CP_NOP, .access = PL0_W,
5590 .fgt = FGT_DCCVAC,
5591 .accessfn = aa64_cacheop_poc_access },
5592 { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
5593 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
5594 .type = ARM_CP_NOP, .access = PL0_W,
5595 .fgt = FGT_DCCVAP,
5596 .accessfn = aa64_cacheop_poc_access },
5597 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
5598 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
5599 .type = ARM_CP_NOP, .access = PL0_W,
5600 .fgt = FGT_DCCVAP,
5601 .accessfn = aa64_cacheop_poc_access },
5602 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
5603 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
5604 .type = ARM_CP_NOP, .access = PL0_W,
5605 .fgt = FGT_DCCVADP,
5606 .accessfn = aa64_cacheop_poc_access },
5607 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
5608 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
5609 .type = ARM_CP_NOP, .access = PL0_W,
5610 .fgt = FGT_DCCVADP,
5611 .accessfn = aa64_cacheop_poc_access },
5612 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
5613 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
5614 .type = ARM_CP_NOP, .access = PL0_W,
5615 .fgt = FGT_DCCIVAC,
5616 .accessfn = aa64_cacheop_poc_access },
5617 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
5618 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
5619 .type = ARM_CP_NOP, .access = PL0_W,
5620 .fgt = FGT_DCCIVAC,
5621 .accessfn = aa64_cacheop_poc_access },
5622 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
5623 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
5624 .access = PL0_W, .type = ARM_CP_DC_GVA,
5625 #ifndef CONFIG_USER_ONLY
5626 /* Avoid overhead of an access check that always passes in user-mode */
5627 .accessfn = aa64_zva_access,
5628 .fgt = FGT_DCZVA,
5629 #endif
5630 },
5631 { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
5632 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
5633 .access = PL0_W, .type = ARM_CP_DC_GZVA,
5634 #ifndef CONFIG_USER_ONLY
5635 /* Avoid overhead of an access check that always passes in user-mode */
5636 .accessfn = aa64_zva_access,
5637 .fgt = FGT_DCZVA,
5638 #endif
5639 },
5640 };
5641
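/*
 * SCXTNUM_ELx accesses are trapped as controlled by the SCTLR_ELx.TSCXT,
 * HCR_EL2.EnSCXT and SCR_EL3.EnSCXT bits.
 */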
5642 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
5643 bool isread)
5644 {
5645 uint64_t hcr = arm_hcr_el2_eff(env);
5646 int el = arm_current_el(env);
5647
5648 if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
5649 if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
5650 if (hcr & HCR_TGE) {
5651 return CP_ACCESS_TRAP_EL2;
5652 }
5653 return CP_ACCESS_TRAP_EL1;
5654 }
5655 } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
5656 return CP_ACCESS_TRAP_EL2;
5657 }
5658 if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
5659 return CP_ACCESS_TRAP_EL2;
5660 }
5661 if (el < 3
5662 && arm_feature(env, ARM_FEATURE_EL3)
5663 && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
5664 return CP_ACCESS_TRAP_EL3;
5665 }
5666 return CP_ACCESS_OK;
5667 }
5668
5669 static CPAccessResult access_scxtnum_el1(CPUARMState *env,
5670 const ARMCPRegInfo *ri,
5671 bool isread)
5672 {
5673 CPAccessResult nv1 = access_nv1(env, ri, isread);
5674
5675 if (nv1 != CP_ACCESS_OK) {
5676 return nv1;
5677 }
5678 return access_scxtnum(env, ri, isread);
5679 }
5680
5681 static const ARMCPRegInfo scxtnum_reginfo[] = {
5682 { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
5683 .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
5684 .access = PL0_RW, .accessfn = access_scxtnum,
5685 .fgt = FGT_SCXTNUM_EL0,
5686 .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
5687 { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
5688 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
5689 .access = PL1_RW, .accessfn = access_scxtnum_el1,
5690 .fgt = FGT_SCXTNUM_EL1,
5691 .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
5692 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 7),
5693 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 7),
5694 .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
5695 { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
5696 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
5697 .access = PL2_RW, .accessfn = access_scxtnum,
5698 .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
5699 { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
5700 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
5701 .access = PL3_RW,
5702 .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
5703 };
5704
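/* EL2 accesses to the FGT trap registers trap to EL3 when SCR_EL3.FGTEn is clear. */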
5705 static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
5706 bool isread)
5707 {
5708 if (arm_current_el(env) == 2 &&
5709 arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
5710 return CP_ACCESS_TRAP_EL3;
5711 }
5712 return CP_ACCESS_OK;
5713 }
5714
5715 static const ARMCPRegInfo fgt_reginfo[] = {
5716 { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
5717 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5718 .nv2_redirect_offset = 0x1b8,
5719 .access = PL2_RW, .accessfn = access_fgt,
5720 .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
5721 { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
5722 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
5723 .nv2_redirect_offset = 0x1c0,
5724 .access = PL2_RW, .accessfn = access_fgt,
5725 .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
5726 { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
5727 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
5728 .nv2_redirect_offset = 0x1d0,
5729 .access = PL2_RW, .accessfn = access_fgt,
5730 .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
5731 { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
5732 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
5733 .nv2_redirect_offset = 0x1d8,
5734 .access = PL2_RW, .accessfn = access_fgt,
5735 .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
5736 { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
5737 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
5738 .nv2_redirect_offset = 0x1c8,
5739 .access = PL2_RW, .accessfn = access_fgt,
5740 .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
5741 };
5742
5743 static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5744 uint64_t value)
5745 {
5746 /*
5747 * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
5748 * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
5749 * about the RESS bits at the top -- we choose the "generate an EL2
5750 * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
5751 * the ptw.c code detect the resulting invalid address).
5752 */
5753 env->cp15.vncr_el2 = value & ~0xfffULL;
5754 }
5755
5756 static const ARMCPRegInfo nv2_reginfo[] = {
5757 { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
5758 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
5759 .access = PL2_RW,
5760 .writefn = vncr_write,
5761 .nv2_redirect_offset = 0xb0,
5762 .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
5763 };
5764
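/*
 * CFP/DVP/CPP RCTX: EL0 accesses trap unless the effective SCTLR.EnRCTX
 * bit is set; EL1 accesses trap to EL2 when HCR_EL2.NV is set.
 */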
5765 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
5766 bool isread)
5767 {
5768 int el = arm_current_el(env);
5769
5770 if (el == 0) {
5771 uint64_t sctlr = arm_sctlr(env, el);
5772 if (!(sctlr & SCTLR_EnRCTX)) {
5773 return CP_ACCESS_TRAP_EL1;
5774 }
5775 } else if (el == 1) {
5776 uint64_t hcr = arm_hcr_el2_eff(env);
5777 if (hcr & HCR_NV) {
5778 return CP_ACCESS_TRAP_EL2;
5779 }
5780 }
5781 return CP_ACCESS_OK;
5782 }
5783
5784 static const ARMCPRegInfo predinv_reginfo[] = {
5785 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
5786 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
5787 .fgt = FGT_CFPRCTX,
5788 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5789 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
5790 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
5791 .fgt = FGT_DVPRCTX,
5792 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5793 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
5794 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
5795 .fgt = FGT_CPPRCTX,
5796 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5797 /*
5798 * Note the AArch32 opcodes have a different OPC1.
5799 */
5800 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
5801 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
5802 .fgt = FGT_CFPRCTX,
5803 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5804 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
5805 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
5806 .fgt = FGT_DVPRCTX,
5807 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5808 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
5809 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
5810 .fgt = FGT_CPPRCTX,
5811 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5812 };
5813
5814 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5815 {
5816 /* Read the high 32 bits of the current CCSIDR */
5817 return extract64(ccsidr_read(env, ri), 32, 32);
5818 }
5819
5820 static const ARMCPRegInfo ccsidr2_reginfo[] = {
5821 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
5822 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
5823 .access = PL1_R,
5824 .accessfn = access_tid4,
5825 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
5826 };
5827
5828 static CPAccessResult access_v7a_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
5829 bool isread)
5830 {
5831 /*
5832 * Trap on TID3 always. This should be used only for the fixed set of
5833 * registers which are defined to trap on HCR.TID3 in v7A, which is:
5834 * ID_PFR{0,1}, ID_DFR0, ID_AFR0, ID_MMFR{0,1,2,3}, ID_ISAR{0,1,2,3,4,5}
5835 * (MVFR0 and MVFR1 also trap in v7A, but this is not handled via
5836 * this accessfn but in check_hcr_el2_trap.)
5837 * Any other registers in the TID3 trap space should use access_tid3(),
5838 * so that they trap on v8 and above, but not on v7.
5839 */
5840 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
5841 return CP_ACCESS_TRAP_EL2;
5842 }
5843
5844 return CP_ACCESS_OK;
5845 }
5846
5847 static CPAccessResult access_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
5848 bool isread)
5849 {
5850 /*
5851 * Trap on TID3, if we implement at least v8. For v8 and above
5852 * the ID register space is at least IMPDEF permitted to trap,
5853 * and must trap if FEAT_FGT is implemented. We choose to trap
5854 * always. Use this function for any new registers that should
5855 * trap on TID3.
5856 */
5857 if (arm_feature(env, ARM_FEATURE_V8)) {
5858 return access_v7a_tid3(env, ri, isread);
5859 }
5860
5861 return CP_ACCESS_OK;
5862 }
5863
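/* Jazelle register accesses from EL1 trap to EL2 when HCR_EL2.TID0 is set. */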
5864 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
5865 bool isread)
5866 {
5867 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
5868 return CP_ACCESS_TRAP_EL2;
5869 }
5870
5871 return CP_ACCESS_OK;
5872 }
5873
5874 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
5875 const ARMCPRegInfo *ri, bool isread)
5876 {
5877 /*
5878 * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
5879 * in v7A, not in v8A.
5880 */
5881 if (!arm_feature(env, ARM_FEATURE_V8) &&
5882 arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
5883 (env->cp15.hstr_el2 & HSTR_TJDBX)) {
5884 return CP_ACCESS_TRAP_EL2;
5885 }
5886 return CP_ACCESS_OK;
5887 }
5888
5889 static const ARMCPRegInfo jazelle_regs[] = {
5890 { .name = "JIDR",
5891 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
5892 .access = PL1_R, .accessfn = access_jazelle,
5893 .type = ARM_CP_CONST, .resetvalue = 0 },
5894 { .name = "JOSCR",
5895 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
5896 .accessfn = access_joscr_jmcr,
5897 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5898 { .name = "JMCR",
5899 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
5900 .accessfn = access_joscr_jmcr,
5901 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5902 };
5903
5904 static const ARMCPRegInfo contextidr_el2 = {
5905 .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
5906 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
5907 .access = PL2_RW,
5908 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
5909 };
5910
5911 static const ARMCPRegInfo vhe_reginfo[] = {
5912 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
5913 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
5914 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
5915 .raw_writefn = raw_write,
5916 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
5917 #ifndef CONFIG_USER_ONLY
5918 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5919 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
5920 .fieldoffset =
5921 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
5922 .type = ARM_CP_IO, .access = PL2_RW,
5923 .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
5924 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5925 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
5926 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5927 .resetfn = gt_hv_timer_reset,
5928 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
5929 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5930 .type = ARM_CP_IO,
5931 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
5932 .access = PL2_RW,
5933 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
5934 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
5935 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
5936 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
5937 .type = ARM_CP_IO | ARM_CP_ALIAS,
5938 .access = PL2_RW, .accessfn = access_el1nvpct,
5939 .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
5940 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
5941 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
5942 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
5943 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
5944 .type = ARM_CP_IO | ARM_CP_ALIAS,
5945 .access = PL2_RW, .accessfn = access_el1nvvct,
5946 .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
5947 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
5948 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
5949 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
5950 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
5951 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
5952 .access = PL2_RW, .accessfn = e2h_access,
5953 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
5954 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
5955 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
5956 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
5957 .access = PL2_RW, .accessfn = e2h_access,
5958 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
5959 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
5960 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
5961 .type = ARM_CP_IO | ARM_CP_ALIAS,
5962 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
5963 .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
5964 .access = PL2_RW, .accessfn = access_el1nvpct,
5965 .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
5966 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
5967 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
5968 .type = ARM_CP_IO | ARM_CP_ALIAS,
5969 .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
5970 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
5971 .access = PL2_RW, .accessfn = access_el1nvvct,
5972 .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
5973 #endif
5974 };
5975
5976 /*
5977 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
5978 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
5979 * is non-zero, which is never the case for ARMv7, is optional in
5980 * ARMv8, and is mandatory for ARMv8.2 and up.
5981 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
5982 * implementation is RAZ/WI we can ignore this detail, as we
5983 * do for ACTLR.
5984 */
5985 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
5986 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
5987 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
5988 .access = PL1_RW, .accessfn = access_tacr,
5989 .type = ARM_CP_CONST, .resetvalue = 0 },
5990 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
5991 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
5992 .access = PL2_RW, .type = ARM_CP_CONST,
5993 .resetvalue = 0 },
5994 };
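/*
 * These are registered below (see register_cp_regs_for_features()) only
 * when cpu_isar_feature(aa32_ac2, cpu) is true. That predicate lives in
 * cpu-features.h; the expectation is that it amounts to checking that
 * the ID_MMFR4.AC2 field is non-zero, roughly
 *
 *   FIELD_EX32(<ID_MMFR4 value>, ID_MMFR4, AC2) != 0
 *
 * so a CPU model opts in simply by setting that ID field.
 */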
5995
5996 static CPAccessResult sctlr2_el2_access(CPUARMState *env,
5997 const ARMCPRegInfo *ri,
5998 bool isread)
5999 {
6000 if (arm_current_el(env) < 3
6001 && arm_feature(env, ARM_FEATURE_EL3)
6002 && !(env->cp15.scr_el3 & SCR_SCTLR2EN)) {
6003 return CP_ACCESS_TRAP_EL3;
6004 }
6005 return CP_ACCESS_OK;
6006 }
6007
6008 static CPAccessResult sctlr2_el1_access(CPUARMState *env,
6009 const ARMCPRegInfo *ri,
6010 bool isread)
6011 {
6012 CPAccessResult ret = access_tvm_trvm(env, ri, isread);
6013 if (ret != CP_ACCESS_OK) {
6014 return ret;
6015 }
6016 if (arm_current_el(env) < 2 && !(arm_hcrx_el2_eff(env) & HCRX_SCTLR2EN)) {
6017 return CP_ACCESS_TRAP_EL2;
6018 }
6019 return sctlr2_el2_access(env, ri, isread);
6020 }
6021
6022 static void sctlr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
6023 uint64_t value)
6024 {
6025 uint64_t valid_mask = 0;
6026
6027 value &= valid_mask;
6028 raw_write(env, ri, value);
6029 }
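/*
 * The SCTLR2_ELx and TCR2_ELx write functions below all follow the same
 * pattern: bits whose controlling feature is not implemented are RES0,
 * so the incoming value is masked with a valid_mask accumulated from
 * feature checks before being stored with raw_write(). With no optional
 * features present the mask is zero and the register is effectively
 * RAZ/WI.
 */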
6030
6031 static void sctlr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
6032 uint64_t value)
6033 {
6034 uint64_t valid_mask = 0;
6035
6036 if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
6037 valid_mask |= SCTLR2_EMEC;
6038 }
6039 value &= valid_mask;
6040 raw_write(env, ri, value);
6041 }
6042
6043 static void sctlr2_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
6044 uint64_t value)
6045 {
6046 uint64_t valid_mask = 0;
6047
6048 if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
6049 valid_mask |= SCTLR2_EMEC;
6050 }
6051 value &= valid_mask;
6052 raw_write(env, ri, value);
6053 }
6054
6055 static const ARMCPRegInfo sctlr2_reginfo[] = {
6056 { .name = "SCTLR2_EL1", .state = ARM_CP_STATE_AA64,
6057 .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 1, .crm = 0,
6058 .access = PL1_RW, .accessfn = sctlr2_el1_access,
6059 .writefn = sctlr2_el1_write, .fgt = FGT_SCTLR_EL1,
6060 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 3),
6061 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 3),
6062 .nv2_redirect_offset = 0x278 | NV2_REDIR_NV1,
6063 .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[1]) },
6064 { .name = "SCTLR2_EL2", .state = ARM_CP_STATE_AA64,
6065 .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 1, .crm = 0,
6066 .access = PL2_RW, .accessfn = sctlr2_el2_access,
6067 .writefn = sctlr2_el2_write,
6068 .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[2]) },
6069 { .name = "SCTLR2_EL3", .state = ARM_CP_STATE_AA64,
6070 .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 1, .crm = 0,
6071 .access = PL3_RW, .writefn = sctlr2_el3_write,
6072 .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[3]) },
6073 };
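/*
 * The .vhe_redir_to_el2/.vhe_redir_to_el01 values above appear to use
 * ENCODE_AA64_CP_REG in (op0, op1, crn, crm, op2) argument order: for
 * SCTLR2_EL1 they name the SCTLR2_EL2 (3, 4, 1, 0, 3) and SCTLR2_EL12
 * (3, 5, 1, 0, 3) encodings that accesses are redirected to when
 * FEAT_VHE redirection (HCR_EL2.E2H) is in effect.
 */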
6074
6075 static CPAccessResult tcr2_el2_access(CPUARMState *env, const ARMCPRegInfo *ri,
6076 bool isread)
6077 {
6078 if (arm_current_el(env) < 3
6079 && arm_feature(env, ARM_FEATURE_EL3)
6080 && !(env->cp15.scr_el3 & SCR_TCR2EN)) {
6081 return CP_ACCESS_TRAP_EL3;
6082 }
6083 return CP_ACCESS_OK;
6084 }
6085
6086 static CPAccessResult tcr2_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
6087 bool isread)
6088 {
6089 CPAccessResult ret = access_tvm_trvm(env, ri, isread);
6090 if (ret != CP_ACCESS_OK) {
6091 return ret;
6092 }
6093 if (arm_current_el(env) < 2 && !(arm_hcrx_el2_eff(env) & HCRX_TCR2EN)) {
6094 return CP_ACCESS_TRAP_EL2;
6095 }
6096 return tcr2_el2_access(env, ri, isread);
6097 }
6098
6099 static void tcr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
6100 uint64_t value)
6101 {
6102 ARMCPU *cpu = env_archcpu(env);
6103 uint64_t valid_mask = 0;
6104
6105 if (cpu_isar_feature(aa64_s1pie, cpu)) {
6106 valid_mask |= TCR2_PIE;
6107 }
6108 if (cpu_isar_feature(aa64_aie, cpu)) {
6109 valid_mask |= TCR2_AIE;
6110 }
6111 value &= valid_mask;
6112 raw_write(env, ri, value);
6113 }
6114
6115 static void tcr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
6116 uint64_t value)
6117 {
6118 ARMCPU *cpu = env_archcpu(env);
6119 uint64_t valid_mask = 0;
6120
6121 if (cpu_isar_feature(aa64_s1pie, cpu)) {
6122 valid_mask |= TCR2_PIE;
6123 }
6124 if (cpu_isar_feature(aa64_aie, cpu)) {
6125 valid_mask |= TCR2_AIE;
6126 }
6127 if (cpu_isar_feature(aa64_mec, cpu)) {
6128 valid_mask |= TCR2_AMEC0 | TCR2_AMEC1;
6129 }
6130 value &= valid_mask;
6131 raw_write(env, ri, value);
6132 }
6133
6134 static const ARMCPRegInfo tcr2_reginfo[] = {
6135 { .name = "TCR2_EL1", .state = ARM_CP_STATE_AA64,
6136 .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 2, .crm = 0,
6137 .access = PL1_RW, .accessfn = tcr2_el1_access,
6138 .writefn = tcr2_el1_write, .fgt = FGT_TCR_EL1,
6139 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 3),
6140 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 3),
6141 .nv2_redirect_offset = 0x270 | NV2_REDIR_NV1,
6142 .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[1]) },
6143 { .name = "TCR2_EL2", .state = ARM_CP_STATE_AA64,
6144 .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 2, .crm = 0,
6145 .access = PL2_RW, .accessfn = tcr2_el2_access,
6146 .writefn = tcr2_el2_write,
6147 .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[2]) },
6148 };
6149
6150 static CPAccessResult pien_access(CPUARMState *env, const ARMCPRegInfo *ri,
6151 bool isread)
6152 {
6153 if (arm_feature(env, ARM_FEATURE_EL3)
6154 && !(env->cp15.scr_el3 & SCR_PIEN)
6155 && arm_current_el(env) < 3) {
6156 return CP_ACCESS_TRAP_EL3;
6157 }
6158 return CP_ACCESS_OK;
6159 }
6160
6161 static CPAccessResult pien_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
6162 bool isread)
6163 {
6164 CPAccessResult ret = access_tvm_trvm(env, ri, isread);
6165 if (ret == CP_ACCESS_OK) {
6166 ret = pien_access(env, ri, isread);
6167 }
6168 return ret;
6169 }
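/*
 * Note the ordering in pien_el1_access() (and aien_el1_access() below):
 * the generic TVM/TRVM trap-to-EL2 check runs first, and only if it
 * returns CP_ACCESS_OK is the SCR_EL3.PIEN (or AIEN) gate applied, so
 * the first failing check decides which EL takes the trap.
 */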
6170
6171 static const ARMCPRegInfo s1pie_reginfo[] = {
6172 { .name = "PIR_EL1", .state = ARM_CP_STATE_AA64,
6173 .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 10, .crm = 2,
6174 .access = PL1_RW, .accessfn = pien_el1_access,
6175 .fgt = FGT_NPIR_EL1, .nv2_redirect_offset = 0x2a0 | NV2_REDIR_NV1,
6176 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 3),
6177 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 3),
6178 .fieldoffset = offsetof(CPUARMState, cp15.pir_el[1]) },
6179 { .name = "PIR_EL2", .state = ARM_CP_STATE_AA64,
6180 .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 2,
6181 .access = PL2_RW, .accessfn = pien_access,
6182 .fieldoffset = offsetof(CPUARMState, cp15.pir_el[2]) },
6183 { .name = "PIR_EL3", .state = ARM_CP_STATE_AA64,
6184 .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 10, .crm = 2,
6185 .access = PL3_RW,
6186 .fieldoffset = offsetof(CPUARMState, cp15.pir_el[3]) },
6187 { .name = "PIRE0_EL1", .state = ARM_CP_STATE_AA64,
6188 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 10, .crm = 2,
6189 .access = PL1_RW, .accessfn = pien_el1_access,
6190 .fgt = FGT_NPIRE0_EL1, .nv2_redirect_offset = 0x290 | NV2_REDIR_NV1,
6191 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 2),
6192 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 2),
6193 .fieldoffset = offsetof(CPUARMState, cp15.pir_el[0]) },
6194 { .name = "PIRE0_EL2", .state = ARM_CP_STATE_AA64,
6195 .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 2,
6196 .access = PL2_RW, .accessfn = pien_access,
6197 .fieldoffset = offsetof(CPUARMState, cp15.pire0_el2) },
6198 };
6199
6200 static const ARMCPRegInfo s2pie_reginfo[] = {
6201 { .name = "S2PIR_EL2", .state = ARM_CP_STATE_AA64,
6202 .opc0 = 3, .opc1 = 4, .opc2 = 5, .crn = 10, .crm = 2,
6203 .access = PL2_RW, .accessfn = pien_access,
6204 .nv2_redirect_offset = 0x2b0,
6205 .fieldoffset = offsetof(CPUARMState, cp15.s2pir_el2) },
6206 };
6207
6208 static CPAccessResult aien_access(CPUARMState *env, const ARMCPRegInfo *ri,
6209 bool isread)
6210 {
6211 if (arm_feature(env, ARM_FEATURE_EL3)
6212 && !(env->cp15.scr_el3 & SCR_AIEN)
6213 && arm_current_el(env) < 3) {
6214 return CP_ACCESS_TRAP_EL3;
6215 }
6216 return CP_ACCESS_OK;
6217 }
6218
6219 static CPAccessResult aien_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
6220 bool isread)
6221 {
6222 CPAccessResult ret = access_tvm_trvm(env, ri, isread);
6223 if (ret == CP_ACCESS_OK) {
6224 ret = aien_access(env, ri, isread);
6225 }
6226 return ret;
6227 }
6228
6229 static const ARMCPRegInfo aie_reginfo[] = {
6230 { .name = "MAIR2_EL1", .state = ARM_CP_STATE_AA64,
6231 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
6232 .access = PL1_RW, .accessfn = aien_el1_access,
6233 .fgt = FGT_NMAIR2_EL1, .nv2_redirect_offset = 0x280 | NV2_REDIR_NV1,
6234 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 1, 1),
6235 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 1),
6236 .fieldoffset = offsetof(CPUARMState, cp15.mair2_el[1]) },
6237 { .name = "MAIR2_EL2", .state = ARM_CP_STATE_AA64,
6238 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 1, .opc2 = 1,
6239 .access = PL2_RW, .accessfn = aien_access,
6240 .fieldoffset = offsetof(CPUARMState, cp15.mair2_el[2]) },
6241 { .name = "MAIR2_EL3", .state = ARM_CP_STATE_AA64,
6242 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 1, .opc2 = 1,
6243 .access = PL3_RW,
6244 .fieldoffset = offsetof(CPUARMState, cp15.mair2_el[3]) },
6245
6246 { .name = "AMAIR2_EL1", .state = ARM_CP_STATE_AA64,
6247 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 3, .opc2 = 1,
6248 .access = PL1_RW, .accessfn = aien_el1_access,
6249 .fgt = FGT_NAMAIR2_EL1, .nv2_redirect_offset = 0x288 | NV2_REDIR_NV1,
6250 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 3, 1),
6251 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 3, 1),
6252 .type = ARM_CP_CONST, .resetvalue = 0 },
6253 { .name = "AMAIR2_EL2", .state = ARM_CP_STATE_AA64,
6254 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
6255 .access = PL2_RW, .accessfn = aien_access,
6256 .type = ARM_CP_CONST, .resetvalue = 0 },
6257 { .name = "AMAIR2_EL3", .state = ARM_CP_STATE_AA64,
6258 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 1,
6259 .access = PL3_RW,
6260 .type = ARM_CP_CONST, .resetvalue = 0 },
6261 };
6262
6263 void register_cp_regs_for_features(ARMCPU *cpu)
6264 {
6265 /* Register all the coprocessor registers based on feature bits */
6266 CPUARMState *env = &cpu->env;
6267 ARMISARegisters *isar = &cpu->isar;
6268
6269 if (arm_feature(env, ARM_FEATURE_M)) {
6270 /* M profile has no coprocessor registers */
6271 return;
6272 }
6273
6274 define_arm_cp_regs(cpu, cp_reginfo);
6275 if (!arm_feature(env, ARM_FEATURE_V8)) {
6276 /*
6277 * Must go early as it is full of wildcards that may be
6278 * overridden by later definitions.
6279 */
6280 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
6281 }
6282
6283 #ifndef CONFIG_USER_ONLY
6284 if (tcg_enabled()) {
6285 define_tlb_insn_regs(cpu);
6286 define_at_insn_regs(cpu);
6287 }
6288 #endif
6289
6290 if (arm_feature(env, ARM_FEATURE_V6)) {
6291 /* The ID registers all have impdef reset values */
6292 ARMCPRegInfo v6_idregs[] = {
6293 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
6294 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6295 .access = PL1_R, .type = ARM_CP_CONST,
6296 .accessfn = access_v7a_tid3,
6297 .resetvalue = GET_IDREG(isar, ID_PFR0)},
6298 /*
6299 * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
6300 * the value of the GIC field until after we define these regs.
6301 */
6302 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
6303 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
6304 .access = PL1_R,
6305 #ifdef CONFIG_USER_ONLY
6306 .type = ARM_CP_CONST,
6307 .resetvalue = GET_IDREG(isar, ID_PFR1),
6308 #else
6309 .type = ARM_CP_NO_RAW,
6310 .accessfn = access_v7a_tid3,
6311 .readfn = id_pfr1_read,
6312 .writefn = arm_cp_write_ignore
6313 #endif
6314 },
6315 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
6316 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
6317 .access = PL1_R, .type = ARM_CP_CONST,
6318 .accessfn = access_v7a_tid3,
6319 .resetvalue = GET_IDREG(isar, ID_DFR0)},
6320 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
6321 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
6322 .access = PL1_R, .type = ARM_CP_CONST,
6323 .accessfn = access_v7a_tid3,
6324 .resetvalue = GET_IDREG(isar, ID_AFR0)},
6325 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
6326 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
6327 .access = PL1_R, .type = ARM_CP_CONST,
6328 .accessfn = access_v7a_tid3,
6329 .resetvalue = GET_IDREG(isar, ID_MMFR0)},
6330 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
6331 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
6332 .access = PL1_R, .type = ARM_CP_CONST,
6333 .accessfn = access_v7a_tid3,
6334 .resetvalue = GET_IDREG(isar, ID_MMFR1)},
6335 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
6336 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
6337 .access = PL1_R, .type = ARM_CP_CONST,
6338 .accessfn = access_v7a_tid3,
6339 .resetvalue = GET_IDREG(isar, ID_MMFR2)},
6340 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
6341 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
6342 .access = PL1_R, .type = ARM_CP_CONST,
6343 .accessfn = access_v7a_tid3,
6344 .resetvalue = GET_IDREG(isar, ID_MMFR3)},
6345 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
6346 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6347 .access = PL1_R, .type = ARM_CP_CONST,
6348 .accessfn = access_v7a_tid3,
6349 .resetvalue = GET_IDREG(isar, ID_ISAR0)},
6350 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
6351 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
6352 .access = PL1_R, .type = ARM_CP_CONST,
6353 .accessfn = access_v7a_tid3,
6354 .resetvalue = GET_IDREG(isar, ID_ISAR1)},
6355 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
6356 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6357 .access = PL1_R, .type = ARM_CP_CONST,
6358 .accessfn = access_v7a_tid3,
6359 .resetvalue = GET_IDREG(isar, ID_ISAR2)},
6360 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
6361 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
6362 .access = PL1_R, .type = ARM_CP_CONST,
6363 .accessfn = access_v7a_tid3,
6364 .resetvalue = GET_IDREG(isar, ID_ISAR3) },
6365 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
6366 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
6367 .access = PL1_R, .type = ARM_CP_CONST,
6368 .accessfn = access_v7a_tid3,
6369 .resetvalue = GET_IDREG(isar, ID_ISAR4) },
6370 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
6371 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
6372 .access = PL1_R, .type = ARM_CP_CONST,
6373 .accessfn = access_v7a_tid3,
6374 .resetvalue = GET_IDREG(isar, ID_ISAR5) },
6375 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
6376 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
6377 .access = PL1_R, .type = ARM_CP_CONST,
6378 .accessfn = access_tid3,
6379 .resetvalue = GET_IDREG(isar, ID_MMFR4)},
6380 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
6381 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
6382 .access = PL1_R, .type = ARM_CP_CONST,
6383 .accessfn = access_tid3,
6384 .resetvalue = GET_IDREG(isar, ID_ISAR6) },
6385 };
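/*
 * id_pfr1_read() (defined earlier in this file) is why ID_PFR1 cannot be
 * a plain ARM_CP_CONST: the GIC field depends on whether a GICv3 has
 * been connected, which is only known after these regs are defined. A
 * minimal sketch of such a read function (not necessarily the exact
 * implementation used here) would be:
 *
 *   static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
 *   {
 *       uint64_t pfr1 = GET_IDREG(&env_archcpu(env)->isar, ID_PFR1);
 *
 *       if (env->gicv3state) {
 *           pfr1 = FIELD_DP64(pfr1, ID_PFR1, GIC, 1);
 *       }
 *       return pfr1;
 *   }
 */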
6386 define_arm_cp_regs(cpu, v6_idregs);
6387 define_arm_cp_regs(cpu, v6_cp_reginfo);
6388 } else {
6389 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
6390 }
6391 if (arm_feature(env, ARM_FEATURE_V6K)) {
6392 define_arm_cp_regs(cpu, v6k_cp_reginfo);
6393 }
6394 if (arm_feature(env, ARM_FEATURE_V7)) {
6395 ARMCPRegInfo clidr = {
6396 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
6397 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
6398 .access = PL1_R, .type = ARM_CP_CONST,
6399 .accessfn = access_tid4,
6400 .fgt = FGT_CLIDR_EL1,
6401 .resetvalue = GET_IDREG(isar, CLIDR)
6402 };
6403 define_one_arm_cp_reg(cpu, &clidr);
6404 define_arm_cp_regs(cpu, v7_cp_reginfo);
6405 define_debug_regs(cpu);
6406 } else {
6407 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
6408 }
6409 if (arm_feature(env, ARM_FEATURE_V8)) {
6410 /*
6411 * v8 ID registers, which all have impdef reset values.
6412 * Note that within the ID register ranges the unused slots
6413 * must all be RAZ, not UNDEF; future architecture versions may
6414 * define new registers here.
6415 * ID registers which are AArch64 views of the AArch32 ID registers
6416 * which already existed in v6 and v7 are handled elsewhere,
6417 * in v6_idregs[].
6418 */
6419 int i;
6420 ARMCPRegInfo v8_idregs[] = {
6421 /*
6422 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
6423 * emulation because we don't know the right value for the
6424 * GIC field until after we define these regs.
6425 */
6426 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
6427 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
6428 .access = PL1_R,
6429 #ifdef CONFIG_USER_ONLY
6430 .type = ARM_CP_CONST,
6431 .resetvalue = GET_IDREG(isar, ID_AA64PFR0)
6432 #else
6433 .type = ARM_CP_NO_RAW,
6434 .accessfn = access_tid3,
6435 .readfn = id_aa64pfr0_read,
6436 .writefn = arm_cp_write_ignore
6437 #endif
6438 },
6439 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
6440 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
6441 .access = PL1_R, .type = ARM_CP_CONST,
6442 .accessfn = access_tid3,
6443 .resetvalue = GET_IDREG(isar, ID_AA64PFR1)},
6444 { .name = "ID_AA64PFR2_EL1", .state = ARM_CP_STATE_AA64,
6445 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
6446 .access = PL1_R, .type = ARM_CP_CONST,
6447 .accessfn = access_tid3,
6448 .resetvalue = GET_IDREG(isar, ID_AA64PFR2)},
6449 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6450 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
6451 .access = PL1_R, .type = ARM_CP_CONST,
6452 .accessfn = access_tid3,
6453 .resetvalue = 0 },
6454 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
6455 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
6456 .access = PL1_R, .type = ARM_CP_CONST,
6457 .accessfn = access_tid3,
6458 .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)},
6459 { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
6460 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
6461 .access = PL1_R, .type = ARM_CP_CONST,
6462 .accessfn = access_tid3,
6463 .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)},
6464 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6465 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
6466 .access = PL1_R, .type = ARM_CP_CONST,
6467 .accessfn = access_tid3,
6468 .resetvalue = 0 },
6469 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6470 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
6471 .access = PL1_R, .type = ARM_CP_CONST,
6472 .accessfn = access_tid3,
6473 .resetvalue = 0 },
6474 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
6475 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
6476 .access = PL1_R, .type = ARM_CP_CONST,
6477 .accessfn = access_tid3,
6478 .resetvalue = GET_IDREG(isar, ID_AA64DFR0) },
6479 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
6480 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
6481 .access = PL1_R, .type = ARM_CP_CONST,
6482 .accessfn = access_tid3,
6483 .resetvalue = GET_IDREG(isar, ID_AA64DFR1) },
6484 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6485 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
6486 .access = PL1_R, .type = ARM_CP_CONST,
6487 .accessfn = access_tid3,
6488 .resetvalue = 0 },
6489 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6490 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
6491 .access = PL1_R, .type = ARM_CP_CONST,
6492 .accessfn = access_tid3,
6493 .resetvalue = 0 },
6494 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
6495 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
6496 .access = PL1_R, .type = ARM_CP_CONST,
6497 .accessfn = access_tid3,
6498 .resetvalue = GET_IDREG(isar, ID_AA64AFR0) },
6499 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
6500 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
6501 .access = PL1_R, .type = ARM_CP_CONST,
6502 .accessfn = access_tid3,
6503 .resetvalue = GET_IDREG(isar, ID_AA64AFR1) },
6504 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6505 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
6506 .access = PL1_R, .type = ARM_CP_CONST,
6507 .accessfn = access_tid3,
6508 .resetvalue = 0 },
6509 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6510 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
6511 .access = PL1_R, .type = ARM_CP_CONST,
6512 .accessfn = access_tid3,
6513 .resetvalue = 0 },
6514 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
6515 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
6516 .access = PL1_R, .type = ARM_CP_CONST,
6517 .accessfn = access_tid3,
6518 .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)},
6519 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
6520 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
6521 .access = PL1_R, .type = ARM_CP_CONST,
6522 .accessfn = access_tid3,
6523 .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)},
6524 { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
6525 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
6526 .access = PL1_R, .type = ARM_CP_CONST,
6527 .accessfn = access_tid3,
6528 .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)},
6529 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6530 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
6531 .access = PL1_R, .type = ARM_CP_CONST,
6532 .accessfn = access_tid3,
6533 .resetvalue = 0 },
6534 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6535 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
6536 .access = PL1_R, .type = ARM_CP_CONST,
6537 .accessfn = access_tid3,
6538 .resetvalue = 0 },
6539 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6540 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
6541 .access = PL1_R, .type = ARM_CP_CONST,
6542 .accessfn = access_tid3,
6543 .resetvalue = 0 },
6544 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6545 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
6546 .access = PL1_R, .type = ARM_CP_CONST,
6547 .accessfn = access_tid3,
6548 .resetvalue = 0 },
6549 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6550 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
6551 .access = PL1_R, .type = ARM_CP_CONST,
6552 .accessfn = access_tid3,
6553 .resetvalue = 0 },
6554 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
6555 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6556 .access = PL1_R, .type = ARM_CP_CONST,
6557 .accessfn = access_tid3,
6558 .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)},
6559 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
6560 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
6561 .access = PL1_R, .type = ARM_CP_CONST,
6562 .accessfn = access_tid3,
6563 .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) },
6564 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
6565 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
6566 .access = PL1_R, .type = ARM_CP_CONST,
6567 .accessfn = access_tid3,
6568 .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) },
6569 { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
6570 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
6571 .access = PL1_R, .type = ARM_CP_CONST,
6572 .accessfn = access_tid3,
6573 .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) },
6574 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6575 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
6576 .access = PL1_R, .type = ARM_CP_CONST,
6577 .accessfn = access_tid3,
6578 .resetvalue = 0 },
6579 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6580 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
6581 .access = PL1_R, .type = ARM_CP_CONST,
6582 .accessfn = access_tid3,
6583 .resetvalue = 0 },
6584 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6585 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
6586 .access = PL1_R, .type = ARM_CP_CONST,
6587 .accessfn = access_tid3,
6588 .resetvalue = 0 },
6589 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6590 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
6591 .access = PL1_R, .type = ARM_CP_CONST,
6592 .accessfn = access_tid3,
6593 .resetvalue = 0 },
6594 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
6595 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6596 .access = PL1_R, .type = ARM_CP_CONST,
6597 .accessfn = access_tid3,
6598 .resetvalue = cpu->isar.mvfr0 },
6599 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
6600 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6601 .access = PL1_R, .type = ARM_CP_CONST,
6602 .accessfn = access_tid3,
6603 .resetvalue = cpu->isar.mvfr1 },
6604 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
6605 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6606 .access = PL1_R, .type = ARM_CP_CONST,
6607 .accessfn = access_tid3,
6608 .resetvalue = cpu->isar.mvfr2 },
6609 /*
6610 * "0, c0, c3, {0,1,2}" are the encodings corresponding to
6611 * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
6612 * as RAZ, since it is in the "reserved for future ID
6613 * registers, RAZ" part of the AArch32 encoding space.
6614 */
6615 { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
6616 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6617 .access = PL1_R, .type = ARM_CP_CONST,
6618 .accessfn = access_tid3,
6619 .resetvalue = 0 },
6620 { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
6621 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6622 .access = PL1_R, .type = ARM_CP_CONST,
6623 .accessfn = access_tid3,
6624 .resetvalue = 0 },
6625 { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
6626 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6627 .access = PL1_R, .type = ARM_CP_CONST,
6628 .accessfn = access_tid3,
6629 .resetvalue = 0 },
6630 /*
6631 * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
6632 * they're also RAZ for AArch64, and in v8 they are gradually
6633 * being filled in with the AArch64 views of new AArch32
6634 * ID registers.
6635 */
6636 { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
6637 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
6638 .access = PL1_R, .type = ARM_CP_CONST,
6639 .accessfn = access_tid3,
6640 .resetvalue = 0 },
6641 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
6642 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
6643 .access = PL1_R, .type = ARM_CP_CONST,
6644 .accessfn = access_tid3,
6645 .resetvalue = GET_IDREG(isar, ID_PFR2)},
6646 { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
6647 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
6648 .access = PL1_R, .type = ARM_CP_CONST,
6649 .accessfn = access_tid3,
6650 .resetvalue = GET_IDREG(isar, ID_DFR1)},
6651 { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
6652 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
6653 .access = PL1_R, .type = ARM_CP_CONST,
6654 .accessfn = access_tid3,
6655 .resetvalue = GET_IDREG(isar, ID_MMFR5)},
6656 { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
6657 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
6658 .access = PL1_R, .type = ARM_CP_CONST,
6659 .accessfn = access_tid3,
6660 .resetvalue = 0 },
6661 };
6662 #ifdef CONFIG_USER_ONLY
6663 static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
6664 { .name = "ID_AA64PFR0_EL1",
6665 .exported_bits = R_ID_AA64PFR0_FP_MASK |
6666 R_ID_AA64PFR0_ADVSIMD_MASK |
6667 R_ID_AA64PFR0_SVE_MASK |
6668 R_ID_AA64PFR0_DIT_MASK,
6669 .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
6670 (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
6671 { .name = "ID_AA64PFR1_EL1",
6672 .exported_bits = R_ID_AA64PFR1_BT_MASK |
6673 R_ID_AA64PFR1_SSBS_MASK |
6674 R_ID_AA64PFR1_MTE_MASK |
6675 R_ID_AA64PFR1_SME_MASK },
6676 { .name = "ID_AA64PFR2_EL1",
6677 .exported_bits = 0 },
6678 { .name = "ID_AA64PFR*_EL1_RESERVED",
6679 .is_glob = true },
6680 { .name = "ID_AA64ZFR0_EL1",
6681 .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
6682 R_ID_AA64ZFR0_AES_MASK |
6683 R_ID_AA64ZFR0_BITPERM_MASK |
6684 R_ID_AA64ZFR0_BFLOAT16_MASK |
6685 R_ID_AA64ZFR0_B16B16_MASK |
6686 R_ID_AA64ZFR0_SHA3_MASK |
6687 R_ID_AA64ZFR0_SM4_MASK |
6688 R_ID_AA64ZFR0_I8MM_MASK |
6689 R_ID_AA64ZFR0_F32MM_MASK |
6690 R_ID_AA64ZFR0_F64MM_MASK },
6691 { .name = "ID_AA64SMFR0_EL1",
6692 .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
6693 R_ID_AA64SMFR0_BI32I32_MASK |
6694 R_ID_AA64SMFR0_B16F32_MASK |
6695 R_ID_AA64SMFR0_F16F32_MASK |
6696 R_ID_AA64SMFR0_I8I32_MASK |
6697 R_ID_AA64SMFR0_F16F16_MASK |
6698 R_ID_AA64SMFR0_B16B16_MASK |
6699 R_ID_AA64SMFR0_I16I32_MASK |
6700 R_ID_AA64SMFR0_F64F64_MASK |
6701 R_ID_AA64SMFR0_I16I64_MASK |
6702 R_ID_AA64SMFR0_SMEVER_MASK |
6703 R_ID_AA64SMFR0_FA64_MASK },
6704 { .name = "ID_AA64MMFR0_EL1",
6705 .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
6706 .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
6707 (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
6708 { .name = "ID_AA64MMFR1_EL1",
6709 .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
6710 { .name = "ID_AA64MMFR2_EL1",
6711 .exported_bits = R_ID_AA64MMFR2_AT_MASK },
6712 { .name = "ID_AA64MMFR3_EL1",
6713 .exported_bits = 0 },
6714 { .name = "ID_AA64MMFR*_EL1_RESERVED",
6715 .is_glob = true },
6716 { .name = "ID_AA64DFR0_EL1",
6717 .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
6718 { .name = "ID_AA64DFR1_EL1" },
6719 { .name = "ID_AA64DFR*_EL1_RESERVED",
6720 .is_glob = true },
6721 { .name = "ID_AA64AFR*",
6722 .is_glob = true },
6723 { .name = "ID_AA64ISAR0_EL1",
6724 .exported_bits = R_ID_AA64ISAR0_AES_MASK |
6725 R_ID_AA64ISAR0_SHA1_MASK |
6726 R_ID_AA64ISAR0_SHA2_MASK |
6727 R_ID_AA64ISAR0_CRC32_MASK |
6728 R_ID_AA64ISAR0_ATOMIC_MASK |
6729 R_ID_AA64ISAR0_RDM_MASK |
6730 R_ID_AA64ISAR0_SHA3_MASK |
6731 R_ID_AA64ISAR0_SM3_MASK |
6732 R_ID_AA64ISAR0_SM4_MASK |
6733 R_ID_AA64ISAR0_DP_MASK |
6734 R_ID_AA64ISAR0_FHM_MASK |
6735 R_ID_AA64ISAR0_TS_MASK |
6736 R_ID_AA64ISAR0_RNDR_MASK },
6737 { .name = "ID_AA64ISAR1_EL1",
6738 .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
6739 R_ID_AA64ISAR1_APA_MASK |
6740 R_ID_AA64ISAR1_API_MASK |
6741 R_ID_AA64ISAR1_JSCVT_MASK |
6742 R_ID_AA64ISAR1_FCMA_MASK |
6743 R_ID_AA64ISAR1_LRCPC_MASK |
6744 R_ID_AA64ISAR1_GPA_MASK |
6745 R_ID_AA64ISAR1_GPI_MASK |
6746 R_ID_AA64ISAR1_FRINTTS_MASK |
6747 R_ID_AA64ISAR1_SB_MASK |
6748 R_ID_AA64ISAR1_BF16_MASK |
6749 R_ID_AA64ISAR1_DGH_MASK |
6750 R_ID_AA64ISAR1_I8MM_MASK },
6751 { .name = "ID_AA64ISAR2_EL1",
6752 .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
6753 R_ID_AA64ISAR2_RPRES_MASK |
6754 R_ID_AA64ISAR2_GPA3_MASK |
6755 R_ID_AA64ISAR2_APA3_MASK |
6756 R_ID_AA64ISAR2_MOPS_MASK |
6757 R_ID_AA64ISAR2_BC_MASK |
6758 R_ID_AA64ISAR2_RPRFM_MASK |
6759 R_ID_AA64ISAR2_CSSC_MASK },
6760 { .name = "ID_AA64ISAR*_EL1_RESERVED",
6761 .is_glob = true },
6762 };
6763 modify_arm_cp_regs(v8_idregs, v8_user_idregs);
6764 #endif
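/*
 * Rough description (not the exact implementation): for user-only
 * emulation modify_arm_cp_regs() rewrites the matched registers so that
 * only the fields named in .exported_bits survive in the reset value,
 * .fixed_bits are forced on, and glob entries blank out whole groups of
 * reserved registers, leaving EL0 reads with a sanitised constant view.
 */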
6765 /*
6766 * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
6767 * TODO: For RMR, a write with bit 1 set should do something with
6768 * cpu_reset(). In the meantime, "the bit is strictly a request",
6769 * so we stay within spec by simply ignoring writes.
6770 */
6771 if (!arm_feature(env, ARM_FEATURE_EL3) &&
6772 !arm_feature(env, ARM_FEATURE_EL2)) {
6773 ARMCPRegInfo el1_reset_regs[] = {
6774 { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
6775 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6776 .access = PL1_R,
6777 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6778 { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
6779 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
6780 .access = PL1_RW, .type = ARM_CP_CONST,
6781 .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
6782 };
6783 define_arm_cp_regs(cpu, el1_reset_regs);
6784 }
6785 define_arm_cp_regs(cpu, v8_idregs);
6786 define_arm_cp_regs(cpu, v8_cp_reginfo);
6787 if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
6788 define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
6789 }
6790
6791 for (i = 4; i < 16; i++) {
6792 /*
6793 * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
6794 * For pre-v8 cores there are RAZ patterns for these in
6795 * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
6796 * v8 extends the "must RAZ" part of the ID register space
6797 * to also cover c0, 0, c{8-15}, {0-7}.
6798 * These are STATE_AA32 because in the AArch64 sysreg space
6799 * c4-c7 is where the AArch64 ID registers live (and we've
6800 * already defined those in v8_idregs[]), and c8-c15 are not
6801 * "must RAZ" for AArch64.
6802 */
6803 g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
6804 ARMCPRegInfo v8_aa32_raz_idregs = {
6805 .name = name,
6806 .state = ARM_CP_STATE_AA32,
6807 .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
6808 .access = PL1_R, .type = ARM_CP_CONST,
6809 .accessfn = access_tid3,
6810 .resetvalue = 0 };
6811 define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
6812 }
6813 }
6814
6815 /*
6816 * Register the base EL2 cpregs.
6817 * Pre v8, these registers are implemented only as part of the
6818 * Virtualization Extensions (EL2 present). Beginning with v8,
6819 * if EL2 is missing but EL3 is enabled, these registers mostly
6820 * become RES0 from EL3, with some specific exceptions.
6821 */
6822 if (arm_feature(env, ARM_FEATURE_EL2)
6823 || (arm_feature(env, ARM_FEATURE_EL3)
6824 && arm_feature(env, ARM_FEATURE_V8))) {
6825 uint64_t vmpidr_def = mpidr_read_val(env);
6826 ARMCPRegInfo vpidr_regs[] = {
6827 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
6828 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6829 .access = PL2_RW, .accessfn = access_el3_aa32ns,
6830 .resetvalue = cpu->midr,
6831 .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
6832 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
6833 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
6834 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6835 .access = PL2_RW, .resetvalue = cpu->midr,
6836 .type = ARM_CP_EL3_NO_EL2_C_NZ,
6837 .nv2_redirect_offset = 0x88,
6838 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6839 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
6840 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6841 .access = PL2_RW, .accessfn = access_el3_aa32ns,
6842 .resetvalue = vmpidr_def,
6843 .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
6844 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
6845 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
6846 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6847 .access = PL2_RW, .resetvalue = vmpidr_def,
6848 .type = ARM_CP_EL3_NO_EL2_C_NZ,
6849 .nv2_redirect_offset = 0x50,
6850 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
6851 };
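/*
 * Both virtual ID registers reset to the corresponding physical values
 * (cpu->midr and the MPIDR value computed above), so a guest EL1 reading
 * MIDR_EL1/MPIDR_EL1 under virtualization sees the real identification
 * values until a hypervisor installs its own.
 */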
6852 /*
6853 * The only field of MDCR_EL2 that has a defined architectural reset
6854 * value is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N.
6855 */
6856 ARMCPRegInfo mdcr_el2 = {
6857 .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
6858 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
6859 .writefn = mdcr_el2_write,
6860 .access = PL2_RW, .resetvalue = pmu_num_counters(env),
6861 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
6862 };
6863 define_one_arm_cp_reg(cpu, &mdcr_el2);
6864 define_arm_cp_regs(cpu, vpidr_regs);
6865 define_arm_cp_regs(cpu, el2_cp_reginfo);
6866 if (arm_feature(env, ARM_FEATURE_V8)) {
6867 define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
6868 }
6869 if (cpu_isar_feature(aa64_sel2, cpu)) {
6870 define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
6871 }
6872 /*
6873 * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
6874 * See commentary near RMR_EL1.
6875 */
6876 if (!arm_feature(env, ARM_FEATURE_EL3)) {
6877 static const ARMCPRegInfo el2_reset_regs[] = {
6878 { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
6879 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
6880 .access = PL2_R,
6881 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6882 { .name = "RVBAR", .type = ARM_CP_ALIAS,
6883 .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6884 .access = PL2_R,
6885 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6886 { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
6887 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
6888 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
6889 };
6890 define_arm_cp_regs(cpu, el2_reset_regs);
6891 }
6892 }
6893
6894 /* Register the base EL3 cpregs. */
6895 if (arm_feature(env, ARM_FEATURE_EL3)) {
6896 define_arm_cp_regs(cpu, el3_cp_reginfo);
6897 ARMCPRegInfo el3_regs[] = {
6898 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
6899 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
6900 .access = PL3_R,
6901 .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
6902 { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
6903 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
6904 .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
6905 { .name = "RMR", .state = ARM_CP_STATE_AA32,
6906 .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
6907 .access = PL3_RW, .type = ARM_CP_CONST,
6908 .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
6909 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
6910 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
6911 .access = PL3_RW,
6912 .raw_writefn = raw_write, .writefn = sctlr_write,
6913 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
6914 .resetvalue = cpu->reset_sctlr },
6915 };
6916
6917 define_arm_cp_regs(cpu, el3_regs);
6918 }
6919 /*
6920 * The behaviour of NSACR is sufficiently various that we don't
6921 * try to describe it in a single reginfo:
6922 * if EL3 is 64 bit, then trap to EL3 from S EL1,
6923 * reads as constant 0xc00 from NS EL1 and NS EL2
6924 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
6925 * if v7 without EL3, register doesn't exist
6926 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
6927 */
6928 if (arm_feature(env, ARM_FEATURE_EL3)) {
6929 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6930 static const ARMCPRegInfo nsacr = {
6931 .name = "NSACR", .type = ARM_CP_CONST,
6932 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6933 .access = PL1_RW, .accessfn = nsacr_access,
6934 .resetvalue = 0xc00
6935 };
6936 define_one_arm_cp_reg(cpu, &nsacr);
6937 } else {
6938 static const ARMCPRegInfo nsacr = {
6939 .name = "NSACR",
6940 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6941 .access = PL3_RW | PL1_R,
6942 .resetvalue = 0,
6943 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
6944 };
6945 define_one_arm_cp_reg(cpu, &nsacr);
6946 }
6947 } else {
6948 if (arm_feature(env, ARM_FEATURE_V8)) {
6949 static const ARMCPRegInfo nsacr = {
6950 .name = "NSACR", .type = ARM_CP_CONST,
6951 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6952 .access = PL1_R,
6953 .resetvalue = 0xc00
6954 };
6955 define_one_arm_cp_reg(cpu, &nsacr);
6956 }
6957 }
6958
6959 if (arm_feature(env, ARM_FEATURE_PMSA)) {
6960 if (arm_feature(env, ARM_FEATURE_V6)) {
6961 /* PMSAv6 not implemented */
6962 assert(arm_feature(env, ARM_FEATURE_V7));
6963 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6964 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
6965 } else {
6966 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
6967 }
6968 } else {
6969 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6970 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
6971 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
6972 if (cpu_isar_feature(aa32_hpd, cpu)) {
6973 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
6974 }
6975 }
6976 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6977 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
6978 }
6979 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
6980 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
6981 }
6982 if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
6983 define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
6984 }
6985 #ifndef CONFIG_USER_ONLY
6986 if (cpu_isar_feature(aa64_ecv, cpu)) {
6987 define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
6988 }
6989 #endif
6990 if (arm_feature(env, ARM_FEATURE_VAPA)) {
6991 ARMCPRegInfo vapa_cp_reginfo[] = {
6992 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
6993 .access = PL1_RW, .resetvalue = 0,
6994 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
6995 offsetoflow32(CPUARMState, cp15.par_ns) },
6996 .writefn = par_write},
6997 };
6998
6999 /*
7000 * When LPAE exists this 32-bit PAR register is an alias of the
7001 * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
7002 */
7003 if (arm_feature(env, ARM_FEATURE_LPAE)) {
7004 vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
7005 }
7006 define_arm_cp_regs(cpu, vapa_cp_reginfo);
7007 }
7008 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
7009 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
7010 }
7011 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
7012 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
7013 }
7014 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
7015 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
7016 }
7017 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
7018 define_arm_cp_regs(cpu, omap_cp_reginfo);
7019 }
7020 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
7021 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
7022 }
7023 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
7024 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
7025 }
7026 if (arm_feature(env, ARM_FEATURE_LPAE)) {
7027 define_arm_cp_regs(cpu, lpae_cp_reginfo);
7028 }
7029 if (cpu_isar_feature(aa32_jazelle, cpu)) {
7030 define_arm_cp_regs(cpu, jazelle_regs);
7031 }
7032 /*
7033 * Slightly awkwardly, the OMAP and StrongARM cores need all of
7034 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
7035 * be read-only (i.e. a write causes an UNDEF exception).
7036 */
7037 {
7038 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
7039 /*
7040 * Pre-v8 MIDR space.
7041 * Note that the MIDR isn't a simple constant register because
7042 * of the TI925 behaviour where writes to another register can
7043 * cause the MIDR value to change.
7044 *
7045 * Unimplemented registers in the c15 0 0 0 space default to
7046 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
7047 * and friends override accordingly.
7048 */
7049 { .name = "MIDR",
7050 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
7051 .access = PL1_R, .resetvalue = cpu->midr,
7052 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
7053 .readfn = midr_read,
7054 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7055 .type = ARM_CP_OVERRIDE },
7056 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
7057 { .name = "DUMMY",
7058 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
7059 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7060 { .name = "DUMMY",
7061 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
7062 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7063 { .name = "DUMMY",
7064 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
7065 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7066 { .name = "DUMMY",
7067 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
7068 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7069 { .name = "DUMMY",
7070 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
7071 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7072 };
7073 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
7074 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
7075 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
7076 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
7077 .fgt = FGT_MIDR_EL1,
7078 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7079 .readfn = midr_read },
7080 /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
7081 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
7082 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
7083 .access = PL1_R, .resetvalue = cpu->midr },
7084 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
7085 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
7086 .access = PL1_R,
7087 .accessfn = access_tid1,
7088 .fgt = FGT_REVIDR_EL1,
7089 .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
7090 };
7091 ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
7092 .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
7093 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7094 .access = PL1_R, .resetvalue = cpu->midr
7095 };
7096 ARMCPRegInfo id_cp_reginfo[] = {
7097 /* These are common to v8 and pre-v8 */
7098 { .name = "CTR",
7099 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
7100 .access = PL1_R, .accessfn = ctr_el0_access,
7101 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7102 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
7103 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
7104 .access = PL0_R, .accessfn = ctr_el0_access,
7105 .fgt = FGT_CTR_EL0,
7106 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7107 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
7108 { .name = "TCMTR",
7109 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
7110 .access = PL1_R,
7111 .accessfn = access_tid1,
7112 .type = ARM_CP_CONST, .resetvalue = 0 },
7113 };
7114 /* TLBTR is specific to VMSA */
7115 ARMCPRegInfo id_tlbtr_reginfo = {
7116 .name = "TLBTR",
7117 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
7118 .access = PL1_R,
7119 .accessfn = access_tid1,
7120 .type = ARM_CP_CONST, .resetvalue = 0,
7121 };
7122 /* MPUIR is specific to PMSA V6+ */
7123 ARMCPRegInfo id_mpuir_reginfo = {
7124 .name = "MPUIR",
7125 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7126 .access = PL1_R, .type = ARM_CP_CONST,
7127 .resetvalue = cpu->pmsav7_dregion << 8
7128 };
7129 /* HMPUIR is specific to PMSA V8 */
7130 ARMCPRegInfo id_hmpuir_reginfo = {
7131 .name = "HMPUIR",
7132 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
7133 .access = PL2_R, .type = ARM_CP_CONST,
7134 .resetvalue = cpu->pmsav8r_hdregion
7135 };
7136 static const ARMCPRegInfo crn0_wi_reginfo = {
7137 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
7138 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
7139 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
7140 };
7141 #ifdef CONFIG_USER_ONLY
7142 static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
7143 { .name = "MIDR_EL1",
7144 .exported_bits = R_MIDR_EL1_REVISION_MASK |
7145 R_MIDR_EL1_PARTNUM_MASK |
7146 R_MIDR_EL1_ARCHITECTURE_MASK |
7147 R_MIDR_EL1_VARIANT_MASK |
7148 R_MIDR_EL1_IMPLEMENTER_MASK },
7149 { .name = "REVIDR_EL1" },
7150 };
7151 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
7152 #endif
7153 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
7154 arm_feature(env, ARM_FEATURE_STRONGARM)) {
7155 size_t i;
7156 /*
7157 * Register the blanket "writes ignored" value first to cover the
7158 * whole space. Then update the specific ID registers to allow write
7159 * access, so that they ignore writes rather than causing them to
7160 * UNDEF.
7161 */
7162 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
7163 for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
7164 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
7165 }
7166 for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
7167 id_cp_reginfo[i].access = PL1_RW;
7168 }
7169 id_mpuir_reginfo.access = PL1_RW;
7170 id_tlbtr_reginfo.access = PL1_RW;
7171 }
7172 if (arm_feature(env, ARM_FEATURE_V8)) {
7173 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
7174 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
7175 define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
7176 }
7177 } else {
7178 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
7179 }
7180 define_arm_cp_regs(cpu, id_cp_reginfo);
7181 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
7182 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
7183 } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
7184 arm_feature(env, ARM_FEATURE_V8)) {
7185 uint32_t i = 0;
7186 char *tmp_string;
7187
7188 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
7189 define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
7190 define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);
7191
7192 /* Register alias is only valid for first 32 indexes */
7193 for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
7194 uint8_t crm = 0b1000 | extract32(i, 1, 3);
7195 uint8_t opc1 = extract32(i, 4, 1);
7196 uint8_t opc2 = extract32(i, 0, 1) << 2;
7197
7198 tmp_string = g_strdup_printf("PRBAR%u", i);
7199 ARMCPRegInfo tmp_prbarn_reginfo = {
7200 .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
7201 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
7202 .access = PL1_RW, .resetvalue = 0,
7203 .accessfn = access_tvm_trvm,
7204 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
7205 };
7206 define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
7207 g_free(tmp_string);
7208
7209 opc2 = extract32(i, 0, 1) << 2 | 0x1;
7210 tmp_string = g_strdup_printf("PRLAR%u", i);
7211 ARMCPRegInfo tmp_prlarn_reginfo = {
7212 .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
7213 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
7214 .access = PL1_RW, .resetvalue = 0,
7215 .accessfn = access_tvm_trvm,
7216 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
7217 };
7218 define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
7219 g_free(tmp_string);
7220 }
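/*
 * Worked example of the encoding arithmetic above: region i = 5 is
 * 0b00101, so crm = 0b1000 | 0b010 = 10, opc1 = 0 and opc2 = 1 << 2 = 4;
 * i.e. PRBAR5 is c6, c10, opc2 4, and PRLAR5 is the same encoding with
 * opc2 5.
 */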
7221
7222 /* Register alias is only valid for first 32 indexes */
7223 for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
7224 uint8_t crm = 0b1000 | extract32(i, 1, 3);
7225 uint8_t opc1 = 0b100 | extract32(i, 4, 1);
7226 uint8_t opc2 = extract32(i, 0, 1) << 2;
7227
7228 tmp_string = g_strdup_printf("HPRBAR%u", i);
7229 ARMCPRegInfo tmp_hprbarn_reginfo = {
7230 .name = tmp_string,
7231 .type = ARM_CP_NO_RAW,
7232 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
7233 .access = PL2_RW, .resetvalue = 0,
7234 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
7235 };
7236 define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
7237 g_free(tmp_string);
7238
7239 opc2 = extract32(i, 0, 1) << 2 | 0x1;
7240 tmp_string = g_strdup_printf("HPRLAR%u", i);
7241 ARMCPRegInfo tmp_hprlarn_reginfo = {
7242 .name = tmp_string,
7243 .type = ARM_CP_NO_RAW,
7244 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
7245 .access = PL2_RW, .resetvalue = 0,
7246 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
7247 };
7248 define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
7249 g_free(tmp_string);
7250 }
7251 } else if (arm_feature(env, ARM_FEATURE_V7)) {
7252 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
7253 }
7254 }
7255
7256 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
7257 ARMCPRegInfo mpidr_cp_reginfo[] = {
7258 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
7259 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
7260 .fgt = FGT_MPIDR_EL1,
7261 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
7262 };
7263 #ifdef CONFIG_USER_ONLY
7264 static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
7265 { .name = "MPIDR_EL1",
7266 .fixed_bits = 0x0000000080000000 },
7267 };
7268 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
7269 #endif
7270 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
7271 }
7272
7273 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
7274 ARMCPRegInfo auxcr_reginfo[] = {
7275 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
7276 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
7277 .access = PL1_RW, .accessfn = access_tacr,
7278 .nv2_redirect_offset = 0x118,
7279 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
7280 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
7281 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
7282 .access = PL2_RW, .type = ARM_CP_CONST,
7283 .resetvalue = 0 },
7284 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
7285 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
7286 .access = PL3_RW, .type = ARM_CP_CONST,
7287 .resetvalue = 0 },
7288 };
7289 define_arm_cp_regs(cpu, auxcr_reginfo);
7290 if (cpu_isar_feature(aa32_ac2, cpu)) {
7291 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
7292 }
7293 }
7294
7295 if (arm_feature(env, ARM_FEATURE_CBAR)) {
7296 /*
7297 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
7298 * There are two flavours:
7299 * (1) older 32-bit only cores have a simple 32-bit CBAR
7300 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
7301 * 32-bit register visible to AArch32 at a different encoding
7302 * to the "flavour 1" register and with the bits rearranged to
7303 * be able to squash a 64-bit address into the 32-bit view.
7304 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
7305 * in future if we support AArch32-only configs of some of the
7306 * AArch64 cores we might need to add a specific feature flag
7307 * to indicate cores with "flavour 2" CBAR.
7308 */
7309 if (arm_feature(env, ARM_FEATURE_V8)) {
7310 /* 32 bit view is [31:18] 0...0 [43:32]. */
7311 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
7312 | extract64(cpu->reset_cbar, 32, 12);
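/*
 * For example, with a hypothetical reset_cbar of 0x0000004080000000,
 * bits [31:18] are 0x2000 and bits [43:32] are 0x040, giving a 32-bit
 * view of 0x80000040.
 */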
7313 ARMCPRegInfo cbar_reginfo[] = {
7314 { .name = "CBAR",
7315 .type = ARM_CP_CONST,
7316 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
7317 .access = PL1_R, .resetvalue = cbar32 },
7318 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
7319 .type = ARM_CP_CONST,
7320 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
7321 .access = PL1_R, .resetvalue = cpu->reset_cbar },
7322 };
7323 /* We don't implement a r/w 64 bit CBAR currently */
7324 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
7325 define_arm_cp_regs(cpu, cbar_reginfo);
7326 } else {
7327 ARMCPRegInfo cbar = {
7328 .name = "CBAR",
7329 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
7330 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
7331 .fieldoffset = offsetof(CPUARMState,
7332 cp15.c15_config_base_address)
7333 };
7334 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
7335 cbar.access = PL1_R;
7336 cbar.fieldoffset = 0;
7337 cbar.type = ARM_CP_CONST;
7338 }
7339 define_one_arm_cp_reg(cpu, &cbar);
7340 }
7341 }
7342
7343 if (arm_feature(env, ARM_FEATURE_VBAR)) {
7344 static const ARMCPRegInfo vbar_cp_reginfo[] = {
7345 { .name = "VBAR_EL1", .state = ARM_CP_STATE_BOTH,
7346 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
7347 .access = PL1_RW, .writefn = vbar_write,
7348 .accessfn = access_nv1,
7349 .fgt = FGT_VBAR_EL1,
7350 .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
7351 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 12, 0, 0),
7352 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 12, 0, 0),
7353 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
7354 offsetof(CPUARMState, cp15.vbar_ns) },
7355 .resetvalue = 0 },
7356 };
7357 define_arm_cp_regs(cpu, vbar_cp_reginfo);
7358 }
7359
7360 /* Generic registers whose values depend on the implementation */
7361 {
7362 ARMCPRegInfo sctlr = {
7363 .name = "SCTLR_EL1", .state = ARM_CP_STATE_BOTH,
7364 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
7365 .access = PL1_RW, .accessfn = access_tvm_trvm,
7366 .fgt = FGT_SCTLR_EL1,
7367 .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 0),
7368 .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 0),
7369 .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
7370 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
7371 offsetof(CPUARMState, cp15.sctlr_ns) },
7372 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
7373 .raw_writefn = raw_write,
7374 };
7375 define_one_arm_cp_reg(cpu, &sctlr);
7376
7377 if (arm_feature(env, ARM_FEATURE_PMSA) &&
7378 arm_feature(env, ARM_FEATURE_V8)) {
7379 ARMCPRegInfo vsctlr = {
7380 .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
7381 .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
7382 .access = PL2_RW, .resetvalue = 0x0,
7383 .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
7384 };
7385 define_one_arm_cp_reg(cpu, &vsctlr);
7386 }
7387 }
7388
7389 if (cpu_isar_feature(aa64_lor, cpu)) {
7390 define_arm_cp_regs(cpu, lor_reginfo);
7391 }
7392 if (cpu_isar_feature(aa64_pan, cpu)) {
7393 define_one_arm_cp_reg(cpu, &pan_reginfo);
7394 }
7395 if (cpu_isar_feature(aa64_uao, cpu)) {
7396 define_one_arm_cp_reg(cpu, &uao_reginfo);
7397 }
7398
7399 if (cpu_isar_feature(aa64_dit, cpu)) {
7400 define_one_arm_cp_reg(cpu, &dit_reginfo);
7401 }
7402 if (cpu_isar_feature(aa64_ssbs, cpu)) {
7403 define_one_arm_cp_reg(cpu, &ssbs_reginfo);
7404 }
7405 if (cpu_isar_feature(any_ras, cpu)) {
7406 define_arm_cp_regs(cpu, minimal_ras_reginfo);
7407 }
7408
7409 if (cpu_isar_feature(aa64_vh, cpu) ||
7410 cpu_isar_feature(aa64_debugv8p2, cpu)) {
7411 define_one_arm_cp_reg(cpu, &contextidr_el2);
7412 }
7413 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
7414 define_arm_cp_regs(cpu, vhe_reginfo);
7415 }
7416
7417 if (cpu_isar_feature(aa64_sve, cpu)) {
7418 define_arm_cp_regs(cpu, zcr_reginfo);
7419 }
7420
7421 if (cpu_isar_feature(aa64_hcx, cpu)) {
7422 define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
7423 }
7424
7425 if (cpu_isar_feature(aa64_sme, cpu)) {
7426 define_arm_cp_regs(cpu, sme_reginfo);
7427 }
7428 if (cpu_isar_feature(aa64_pauth, cpu)) {
7429 define_arm_cp_regs(cpu, pauth_reginfo);
7430 }
7431 if (cpu_isar_feature(aa64_rndr, cpu)) {
7432 define_arm_cp_regs(cpu, rndr_reginfo);
7433 }
7434 /* Data Cache clean instructions up to PoP */
7435 if (cpu_isar_feature(aa64_dcpop, cpu)) {
7436 define_one_arm_cp_reg(cpu, dcpop_reg);
7437
7438 if (cpu_isar_feature(aa64_dcpodp, cpu)) {
7439 define_one_arm_cp_reg(cpu, dcpodp_reg);
7440 }
7441 }
7442
7443 /*
7444 * If full MTE is enabled, add all of the system registers.
7445 * If only "instructions available at EL0" are enabled,
7446 * then define only a RAZ/WI version of PSTATE.TCO.
7447 */
7448 if (cpu_isar_feature(aa64_mte, cpu)) {
7449 ARMCPRegInfo gmid_reginfo = {
7450 .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
7451 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
7452 .access = PL1_R, .accessfn = access_aa64_tid5,
7453 .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
7454 };
7455 define_one_arm_cp_reg(cpu, &gmid_reginfo);
7456 define_arm_cp_regs(cpu, mte_reginfo);
7457 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
7458 } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
7459 define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
7460 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
7461 }
7462
7463 if (cpu_isar_feature(aa64_scxtnum, cpu)) {
7464 define_arm_cp_regs(cpu, scxtnum_reginfo);
7465 }
7466
7467 if (cpu_isar_feature(aa64_fgt, cpu)) {
7468 define_arm_cp_regs(cpu, fgt_reginfo);
7469 }
7470
7471 if (cpu_isar_feature(aa64_rme, cpu)) {
7472 define_arm_cp_regs(cpu, rme_reginfo);
7473 if (cpu_isar_feature(aa64_mte, cpu)) {
7474 define_arm_cp_regs(cpu, rme_mte_reginfo);
7475 }
7476 }
7477
7478 if (cpu_isar_feature(aa64_nv2, cpu)) {
7479 define_arm_cp_regs(cpu, nv2_reginfo);
7480 }
7481
7482 if (cpu_isar_feature(aa64_nmi, cpu)) {
7483 define_arm_cp_regs(cpu, nmi_reginfo);
7484 }
7485
7486 if (cpu_isar_feature(aa64_sctlr2, cpu)) {
7487 define_arm_cp_regs(cpu, sctlr2_reginfo);
7488 }
7489
7490 if (cpu_isar_feature(aa64_tcr2, cpu)) {
7491 define_arm_cp_regs(cpu, tcr2_reginfo);
7492 }
7493
7494 if (cpu_isar_feature(aa64_s1pie, cpu)) {
7495 define_arm_cp_regs(cpu, s1pie_reginfo);
7496 }
7497 if (cpu_isar_feature(aa64_s2pie, cpu)) {
7498 define_arm_cp_regs(cpu, s2pie_reginfo);
7499 }
7500 if (cpu_isar_feature(aa64_mec, cpu)) {
7501 define_arm_cp_regs(cpu, mec_reginfo);
7502 if (cpu_isar_feature(aa64_mte, cpu)) {
7503 define_arm_cp_regs(cpu, mec_mte_reginfo);
7504 }
7505 }
7506
7507 if (cpu_isar_feature(aa64_aie, cpu)) {
7508 define_arm_cp_regs(cpu, aie_reginfo);
7509 }
7510
7511 if (cpu_isar_feature(any_predinv, cpu)) {
7512 define_arm_cp_regs(cpu, predinv_reginfo);
7513 }
7514
7515 if (cpu_isar_feature(any_ccidx, cpu)) {
7516 define_arm_cp_regs(cpu, ccsidr2_reginfo);
7517 }
7518
7519 define_pm_cpregs(cpu);
7520 define_gcs_cpregs(cpu);
7521 }
7522
7523 /*
7524 * Copy an ARMCPRegInfo structure, allocating it along with the name
7525 * and an optional suffix to the name.
7526 */
7527 static ARMCPRegInfo *alloc_cpreg(const ARMCPRegInfo *in, const char *suffix)
7528 {
7529 const char *name = in->name;
7530 size_t name_len = strlen(name);
7531 size_t suff_len = suffix ? strlen(suffix) : 0;
7532 ARMCPRegInfo *out = g_malloc(sizeof(*in) + name_len + suff_len + 1);
7533 char *p = (char *)(out + 1);
7534
7535 *out = *in;
7536 out->name = p;
7537
7538 memcpy(p, name, name_len + 1);
7539 if (suffix) {
7540 memcpy(p + name_len, suffix, suff_len + 1);
7541 }
7542 return out;
7543 }
7544
7545 /*
7546 * Private utility function for define_one_arm_cp_reg():
7547 * add a single reginfo struct to the hash table.
7548 */
7549 static void add_cpreg_to_hashtable(ARMCPU *cpu, ARMCPRegInfo *r,
7550 CPState state, CPSecureState secstate,
7551 uint32_t key)
7552 {
7553 CPUARMState *env = &cpu->env;
7554 bool ns = secstate & ARM_CP_SECSTATE_NS;
7555
7556 /* Overriding of an existing definition must be explicitly requested. */
7557 if (!(r->type & ARM_CP_OVERRIDE)) {
7558 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
7559 if (oldreg) {
7560 assert(oldreg->type & ARM_CP_OVERRIDE);
7561 }
7562 }
7563
7564 {
7565 bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
7566
7567 if (isbanked) {
7568 /*
7569 * Register is banked (using both entries in array).
7570 * Overwrite fieldoffset: the bank_fieldoffsets array is only used
7571 * to define banked registers; from here on only fieldoffset is used.
7572 */
7573 r->fieldoffset = r->bank_fieldoffsets[ns];
7574 }
7575 if (state == ARM_CP_STATE_AA32) {
7576 if (isbanked) {
7577 /*
7578 * If the register is banked then we don't need to migrate or
7579 * reset the 32-bit instance in certain cases:
7580 *
7581 * 1) If the register has both 32-bit and 64-bit instances
7582 * then we can count on the 64-bit instance taking care
7583 * of the non-secure bank.
7584 * 2) If ARMv8 is enabled then we can count on a 64-bit
7585 * version taking care of the secure bank. This requires
7586 * that separate 32 and 64-bit definitions are provided.
7587 */
7588 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
7589 (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
7590 r->type |= ARM_CP_ALIAS;
7591 }
7592 } else if ((secstate != r->secure) && !ns) {
7593 /*
7594 * The register is not banked so we only want to allow
7595 * migration of the non-secure instance.
7596 */
7597 r->type |= ARM_CP_ALIAS;
7598 }
7599 }
7600 }
7601
7602 /*
7603 * For 32-bit AArch32 regs shared with 64-bit AArch64 regs,
7604 * adjust the field offset for endianness. This had to be
7605 * delayed until banked registers were resolved.
7606 */
7607 if (HOST_BIG_ENDIAN &&
7608 state == ARM_CP_STATE_AA32 &&
7609 r->state == ARM_CP_STATE_BOTH &&
7610 r->fieldoffset) {
7611 r->fieldoffset += sizeof(uint32_t);
7612 }
7613
7614 /*
7615 * Special registers (ie NOP/WFI) are never migratable and
7616 * are not even raw-accessible.
7617 */
7618 if (r->type & ARM_CP_SPECIAL_MASK) {
7619 r->type |= ARM_CP_NO_RAW;
7620 }
7621
7622 /*
7623 * Update fields to match the instantiation, overwriting wildcards
7624 * such as ARM_CP_STATE_BOTH or ARM_CP_SECSTATE_BOTH.
7625 */
7626 r->state = state;
7627 r->secure = secstate;
7628
7629 /*
7630 * Check that raw accesses are either forbidden or handled. Note that
7631 * we can't assert this earlier because the setup of fieldoffset for
7632 * banked registers has to be done first.
7633 */
7634 if (!(r->type & ARM_CP_NO_RAW)) {
7635 assert(!raw_accessors_invalid(r));
7636 }
7637
7638 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r);
7639 }
7640
7641 static void add_cpreg_to_hashtable_aa32(ARMCPU *cpu, ARMCPRegInfo *r)
7642 {
7643 /*
7644 * Under AArch32 CP registers can be common
7645 * (same for secure and non-secure world) or banked.
7646 */
7647 ARMCPRegInfo *r_s;
7648 bool is64 = r->type & ARM_CP_64BIT;
7649 uint32_t key = ENCODE_CP_REG(r->cp, is64, 0, r->crn,
7650 r->crm, r->opc1, r->opc2);
7651
7652 assert(!(r->type & ARM_CP_ADD_TLBI_NXS)); /* aa64 only */
7653 r->vhe_redir_to_el2 = 0;
7654 r->vhe_redir_to_el01 = 0;
7655
7656 switch (r->secure) {
7657 case ARM_CP_SECSTATE_NS:
7658 key |= CP_REG_AA32_NS_MASK;
7659 /* fall through */
7660 case ARM_CP_SECSTATE_S:
7661 add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32, r->secure, key);
7662 break;
7663 case ARM_CP_SECSTATE_BOTH:
7664 r_s = alloc_cpreg(r, "_S");
7665 add_cpreg_to_hashtable(cpu, r_s, ARM_CP_STATE_AA32,
7666 ARM_CP_SECSTATE_S, key);
7667
7668 key |= CP_REG_AA32_NS_MASK;
7669 add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32,
7670 ARM_CP_SECSTATE_NS, key);
7671 break;
7672 default:
7673 g_assert_not_reached();
7674 }
7675 }
7676
7677 static void add_cpreg_to_hashtable_aa64(ARMCPU *cpu, ARMCPRegInfo *r)
7678 {
7679 uint32_t key = ENCODE_AA64_CP_REG(r->opc0, r->opc1,
7680 r->crn, r->crm, r->opc2);
7681
7682 if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
7683 cpu_isar_feature(aa64_xs, cpu)) {
7684 /*
7685 * This is a TLBI insn which has an NXS variant. The
7686 * NXS variant is at the same encoding except that
7687 * crn is +1, and has the same behaviour except for
7688 * fine-grained trapping. Add the NXS insn here and
7689 * then fall through to add the normal register.
7690 * add_cpreg_to_hashtable() copies the cpreg struct
7691 * and name that it is passed, so it's OK to use
7692 * a local struct here.
7693 */
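        /*
         * As an illustration (encodings per the Arm ARM, for reference
         * only): TLBI VAE1IS is op0=1 op1=0 crn=8 crm=3 op2=1, and its
         * FEAT_XS variant TLBI VAE1ISNXS sits at crn=9 with the same
         * crm/op2.
         */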
7694 ARMCPRegInfo *nxs_ri = alloc_cpreg(r, "NXS");
7695 uint32_t nxs_key;
7696
7697 assert(nxs_ri->crn < 0xf);
7698 nxs_ri->crn++;
7699 /* Also increment the CRN field inside the key value */
7700 nxs_key = key + (1 << CP_REG_ARM64_SYSREG_CRN_SHIFT);
7701 if (nxs_ri->fgt) {
7702 nxs_ri->fgt |= R_FGT_NXS_MASK;
7703 }
7704
7705 add_cpreg_to_hashtable(cpu, nxs_ri, ARM_CP_STATE_AA64,
7706 ARM_CP_SECSTATE_NS, nxs_key);
7707 }
7708
7709 if (!r->vhe_redir_to_el01) {
7710 assert(!r->vhe_redir_to_el2);
7711 } else if (!arm_feature(&cpu->env, ARM_FEATURE_EL2) ||
7712 !cpu_isar_feature(aa64_vh, cpu)) {
7713 r->vhe_redir_to_el2 = 0;
7714 r->vhe_redir_to_el01 = 0;
7715 } else {
7716 /* Create the FOO_EL12 alias. */
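        /*
         * For example, SCTLR_EL1 (defined earlier with
         * vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 0)) gains an
         * alias named "SCTLR_EL12" at op0=3 op1=5 crn=1 crm=0 op2=0,
         * which redirects back to the EL1 register.
         */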
7717 ARMCPRegInfo *r2 = alloc_cpreg(r, "2");
7718 uint32_t key2 = r->vhe_redir_to_el01;
7719
7720 /*
7721 * Clear EL1 redirection on the FOO_EL1 reg;
7722 * Clear EL2 redirection on the FOO_EL12 reg;
7723 * Install redirection from FOO_EL12 back to FOO_EL1.
7724 */
7725 r->vhe_redir_to_el01 = 0;
7726 r2->vhe_redir_to_el2 = 0;
7727 r2->vhe_redir_to_el01 = key;
7728
7729 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_RAW;
7730 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
7731 r2->access &= PL2_RW | PL3_RW;
7732 /* The r2 opcN/crN fields are as per key2, not the target reg */
7733 r2->crn = (key2 & CP_REG_ARM64_SYSREG_CRN_MASK)
7734 >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
7735 r2->crm = (key2 & CP_REG_ARM64_SYSREG_CRM_MASK)
7736 >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
7737 r2->opc0 = (key2 & CP_REG_ARM64_SYSREG_OP0_MASK)
7738 >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
7739 r2->opc1 = (key2 & CP_REG_ARM64_SYSREG_OP1_MASK)
7740 >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
7741 r2->opc2 = (key2 & CP_REG_ARM64_SYSREG_OP2_MASK)
7742 >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
7743
7744 /* Non-redirected access to this register will abort. */
7745 r2->readfn = NULL;
7746 r2->writefn = NULL;
7747 r2->raw_readfn = NULL;
7748 r2->raw_writefn = NULL;
7749 r2->accessfn = NULL;
7750 r2->fieldoffset = 0;
7751
7752 /*
7753 * If the _EL1 register is redirected to memory by FEAT_NV2,
7754 * then it shares the offset with the _EL12 register,
7755 * and which one is redirected depends on HCR_EL2.NV1.
7756 */
7757 if (r2->nv2_redirect_offset) {
7758 assert(r2->nv2_redirect_offset & NV2_REDIR_NV1);
7759 r2->nv2_redirect_offset &= ~NV2_REDIR_NV1;
7760 r2->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
7761 }
7762 add_cpreg_to_hashtable(cpu, r2, ARM_CP_STATE_AA64,
7763 ARM_CP_SECSTATE_NS, key2);
7764 }
7765
7766 add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA64,
7767 ARM_CP_SECSTATE_NS, key);
7768 }
7769
7770 void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *r)
7771 {
7772 /*
7773 * Define implementations of coprocessor registers.
7774 * We store these in a hashtable because typically
7775 * there are fewer than 150 registers in a space which
7776 * is 16*16*16*8*8 = 262144 in size.
7777 * Wildcarding is supported for the crm, opc1 and opc2 fields.
7778 * If a register is defined twice then the second definition is
7779 * used, so this can be used to define some generic registers and
7780 * then override them with implementation specific variations.
7781 * At least one of the original and the second definition should
7782 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
7783 * against accidental use.
7784 *
7785 * The state field defines whether the register is to be
7786 * visible in the AArch32 or AArch64 execution state. If the
7787 * state is set to ARM_CP_STATE_BOTH then we synthesise a
7788 * reginfo structure for the AArch32 view, which sees the lower
7789 * 32 bits of the 64 bit register.
7790 *
7791 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
7792 * be wildcarded. AArch64 registers are always considered to be 64
7793 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
7794 * the register, if any.
7795 */
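    /*
     * Minimal illustrative call (hypothetical register, not defined in
     * this file):
     *
     *   static const ARMCPRegInfo foo = {
     *       .name = "FOO_EL1", .state = ARM_CP_STATE_AA64,
     *       .opc0 = 3, .opc1 = 0, .crn = 11, .crm = CP_ANY, .opc2 = CP_ANY,
     *       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
     *   };
     *   define_one_arm_cp_reg(cpu, &foo);
     *
     * This expands into one hashtable entry per (crm, opc2) pair; only the
     * first entry is migrated, the others are marked ARM_CP_ALIAS below.
     */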
7796 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
7797 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
7798 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
7799 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
7800 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
7801 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
7802 int cp = r->cp;
7803 ARMCPRegInfo r_const;
7804 CPUARMState *env = &cpu->env;
7805
7806 /*
7807 * AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless.
7808 * Moreover, the encoding test just below generally prevents a
7809 * shared encoding, so ARM_CP_STATE_BOTH won't work either.
7810 */
7811 assert(r->state == ARM_CP_STATE_AA32 || !(r->type & ARM_CP_64BIT));
7812 /* AArch32 64-bit registers have only CRm and Opc1 fields. */
7813 assert(!(r->type & ARM_CP_64BIT) || !(r->opc2 || r->crn));
7814 /* op0 only exists in the AArch64 encodings */
7815 assert(r->state != ARM_CP_STATE_AA32 || r->opc0 == 0);
7816
7817 /*
7818 * This API is only for Arm's system coprocessors (14 and 15) or
7819 * (M-profile or v7A-and-earlier only) for implementation defined
7820 * coprocessors in the range 0..7. Our decode assumes this, since
7821 * 8..13 can be used for other insns including VFP and Neon. See
7822 * valid_cp() in translate.c. Assert here that we haven't tried
7823 * to use an invalid coprocessor number.
7824 */
7825 switch (r->state) {
7826 case ARM_CP_STATE_BOTH:
7827 /*
7828 * If the cp field is left unset, assume cp15.
7829 * Otherwise apply the same rules as AA32.
7830 */
7831 if (cp == 0) {
7832 cp = 15;
7833 break;
7834 }
7835 /* fall through */
7836 case ARM_CP_STATE_AA32:
7837 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
7838 !arm_feature(&cpu->env, ARM_FEATURE_M)) {
7839 assert(cp >= 14 && cp <= 15);
7840 } else {
7841 assert(cp < 8 || (cp >= 14 && cp <= 15));
7842 }
7843 break;
7844 case ARM_CP_STATE_AA64:
7845 assert(cp == 0);
7846 break;
7847 default:
7848 g_assert_not_reached();
7849 }
7850 /*
7851 * The AArch64 pseudocode CheckSystemAccess() specifies that op1
7852 * encodes a minimum access level for the register. We roll this
7853 * runtime check into our general permission check code, so check
7854 * here that the reginfo's specified permissions are strict enough
7855 * to encompass the generic architectural permission check.
7856 */
7857 if (r->state != ARM_CP_STATE_AA32) {
7858 CPAccessRights mask;
7859 switch (r->opc1) {
7860 case 0:
7861 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
7862 mask = PL0U_R | PL1_RW;
7863 break;
7864 case 1: case 2:
7865 /* min_EL EL1 */
7866 mask = PL1_RW;
7867 break;
7868 case 3:
7869 /* min_EL EL0 */
7870 mask = PL0_RW;
7871 break;
7872 case 4:
7873 case 5:
7874 /* min_EL EL2 */
7875 mask = PL2_RW;
7876 break;
7877 case 6:
7878 /* min_EL EL3 */
7879 mask = PL3_RW;
7880 break;
7881 case 7:
7882 /* min_EL EL1, secure mode only (we don't check the latter) */
7883 mask = PL1_RW;
7884 break;
7885 default:
7886 /* broken reginfo with out-of-range opc1 */
7887 g_assert_not_reached();
7888 }
7889 /* assert our permissions are not too lax (stricter is fine) */
7890 assert((r->access & ~mask) == 0);
7891 }
7892
7893 /*
7894 * Check that the register definition has enough info to handle
7895 * reads and writes if they are permitted.
7896 */
7897 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
7898 if (r->access & PL3_R) {
7899 assert((r->fieldoffset ||
7900 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7901 r->readfn);
7902 }
7903 if (r->access & PL3_W) {
7904 assert((r->fieldoffset ||
7905 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7906 r->writefn);
7907 }
7908 }
7909
7910 /*
7911 * Eliminate registers that are not present because the EL is missing.
7912 * Doing this here makes it easier to put all registers for a given
7913 * feature into the same ARMCPRegInfo array and define them all at once.
7914 */
7915 if (arm_feature(env, ARM_FEATURE_EL3)) {
7916 /*
7917 * An EL2 register without EL2 but with EL3 is (usually) RES0.
7918 * See rule RJFFP in section D1.1.3 of DDI0487H.a.
7919 */
7920 int min_el = ctz32(r->access) / 2;
7921 if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
7922 if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
7923 return;
7924 }
7925 if (!(r->type & ARM_CP_EL3_NO_EL2_KEEP)) {
7926 /* This should not have been a very special register. */
7927 int old_special = r->type & ARM_CP_SPECIAL_MASK;
7928 assert(old_special == 0 || old_special == ARM_CP_NOP);
7929
7930 r_const = *r;
7931
7932 /*
7933 * Set the special function to CONST, retaining the other flags.
7934 * This is important for e.g. ARM_CP_SVE so that we still
7935 * take the SVE trap if CPTR_EL3.EZ == 0.
7936 */
7937 r_const.type = (r->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
7938 /*
7939 * Usually, these registers become RES0, but there are a few
7940 * special cases like VPIDR_EL2 which have a constant non-zero
7941 * value with writes ignored.
7942 */
7943 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
7944 r_const.resetvalue = 0;
7945 }
7946 /*
7947 * ARM_CP_CONST has precedence, so removing the callbacks and
7948 * offsets is not strictly necessary, but it is potentially
7949 * less confusing to debug later.
7950 */
7951 r_const.readfn = NULL;
7952 r_const.writefn = NULL;
7953 r_const.raw_readfn = NULL;
7954 r_const.raw_writefn = NULL;
7955 r_const.resetfn = NULL;
7956 r_const.fieldoffset = 0;
7957 r_const.bank_fieldoffsets[0] = 0;
7958 r_const.bank_fieldoffsets[1] = 0;
7959
7960 r = &r_const;
7961 }
7962 }
7963 } else {
7964 CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
7965 ? PL2_RW : PL1_RW);
7966 if ((r->access & max_el) == 0) {
7967 return;
7968 }
7969 }
7970
7971 for (int crm = crmmin; crm <= crmmax; crm++) {
7972 for (int opc1 = opc1min; opc1 <= opc1max; opc1++) {
7973 for (int opc2 = opc2min; opc2 <= opc2max; opc2++) {
7974 ARMCPRegInfo *r2 = alloc_cpreg(r, NULL);
7975 ARMCPRegInfo *r3;
7976
7977 /*
7978 * By convention, for wildcarded registers only the first
7979 * entry is used for migration; the others are marked as
7980 * ALIAS so we don't try to transfer the register
7981 * multiple times.
7982 */
7983 if (crm != crmmin || opc1 != opc1min || opc2 != opc2min) {
7984 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
7985 }
7986
7987 /* Overwrite CP_ANY with the instantiation. */
7988 r2->crm = crm;
7989 r2->opc1 = opc1;
7990 r2->opc2 = opc2;
7991
7992 switch (r->state) {
7993 case ARM_CP_STATE_AA32:
7994 add_cpreg_to_hashtable_aa32(cpu, r2);
7995 break;
7996 case ARM_CP_STATE_AA64:
7997 add_cpreg_to_hashtable_aa64(cpu, r2);
7998 break;
7999 case ARM_CP_STATE_BOTH:
8000 r3 = alloc_cpreg(r2, NULL);
8001 r2->cp = cp;
8002 add_cpreg_to_hashtable_aa32(cpu, r2);
8003 r3->cp = 0;
8004 add_cpreg_to_hashtable_aa64(cpu, r3);
8005 break;
8006 default:
8007 g_assert_not_reached();
8008 }
8009 }
8010 }
8011 }
8012 }
8013
8014 /* Define a whole list of registers */
8015 void define_arm_cp_regs_len(ARMCPU *cpu, const ARMCPRegInfo *regs, size_t len)
8016 {
8017 for (size_t i = 0; i < len; ++i) {
8018 define_one_arm_cp_reg(cpu, regs + i);
8019 }
8020 }
8021
8022 /*
8023 * Modify ARMCPRegInfo for access from userspace.
8024 *
8025 * This is a data driven modification directed by
8026 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
8027 * user-space cannot alter any values, and dynamic values pertaining to
8028 * execution state are hidden from the user-space view anyway.
8029 */
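/*
 * For example, the MPIDR_EL1 entry registered earlier in this file is
 * paired under CONFIG_USER_ONLY with an ARMCPRegUserSpaceInfo giving
 * .fixed_bits = 0x0000000080000000, so in user-mode emulation MPIDR_EL1
 * becomes a PL0-readable constant with only that bit set.
 */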
8030 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
8031 const ARMCPRegUserSpaceInfo *mods,
8032 size_t mods_len)
8033 {
8034 for (size_t mi = 0; mi < mods_len; ++mi) {
8035 const ARMCPRegUserSpaceInfo *m = mods + mi;
8036 GPatternSpec *pat = NULL;
8037
8038 if (m->is_glob) {
8039 pat = g_pattern_spec_new(m->name);
8040 }
8041 for (size_t ri = 0; ri < regs_len; ++ri) {
8042 ARMCPRegInfo *r = regs + ri;
8043
8044 if (pat && g_pattern_match_string(pat, r->name)) {
8045 r->type = ARM_CP_CONST;
8046 r->access = PL0U_R;
8047 r->resetvalue = 0;
8048 /* continue */
8049 } else if (strcmp(r->name, m->name) == 0) {
8050 r->type = ARM_CP_CONST;
8051 r->access = PL0U_R;
8052 r->resetvalue &= m->exported_bits;
8053 r->resetvalue |= m->fixed_bits;
8054 break;
8055 }
8056 }
8057 if (pat) {
8058 g_pattern_spec_free(pat);
8059 }
8060 }
8061 }
8062
8063 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
8064 {
8065 return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
8066 }
8067
8068 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
8069 uint64_t value)
8070 {
8071 /* Helper coprocessor write function for write-ignore registers */
8072 }
8073
8074 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
8075 {
8076 /* Helper coprocessor read function for read-as-zero registers */
8077 return 0;
8078 }
8079
8080 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *ri)
8081 {
8082 /* Helper coprocessor reset function for do-nothing-on-reset registers */
8083 }
8084
8085 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
8086 {
8087 /*
8088 * Return true if it is not valid for us to switch to
8089 * this CPU mode (ie all the UNPREDICTABLE cases in
8090 * the ARM ARM CPSRWriteByInstr pseudocode).
8091 */
8092
8093 /* Changes to or from Hyp via MSR and CPS are illegal. */
8094 if (write_type == CPSRWriteByInstr &&
8095 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
8096 mode == ARM_CPU_MODE_HYP)) {
8097 return 1;
8098 }
8099
8100 switch (mode) {
8101 case ARM_CPU_MODE_USR:
8102 return 0;
8103 case ARM_CPU_MODE_SYS:
8104 case ARM_CPU_MODE_SVC:
8105 case ARM_CPU_MODE_ABT:
8106 case ARM_CPU_MODE_UND:
8107 case ARM_CPU_MODE_IRQ:
8108 case ARM_CPU_MODE_FIQ:
8109 /*
8110 * Note that we don't implement the IMPDEF NSACR.RFR which in v7
8111 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
8112 */
8113 /*
8114 * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
8115 * and CPS are treated as illegal mode changes.
8116 */
8117 if (write_type == CPSRWriteByInstr &&
8118 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
8119 (arm_hcr_el2_eff(env) & HCR_TGE)) {
8120 return 1;
8121 }
8122 return 0;
8123 case ARM_CPU_MODE_HYP:
8124 return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
8125 case ARM_CPU_MODE_MON:
8126 return arm_current_el(env) < 3;
8127 default:
8128 return 1;
8129 }
8130 }
8131
8132 uint32_t cpsr_read(CPUARMState *env)
8133 {
8134 int ZF;
8135 ZF = (env->ZF == 0);
8136 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
8137 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
8138 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
8139 | ((env->condexec_bits & 0xfc) << 8)
8140 | (env->GE << 16) | (env->daif & CPSR_AIF);
8141 }
8142
8143 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
8144 CPSRWriteType write_type)
8145 {
8146 uint32_t changed_daif;
8147 bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
8148 (mask & (CPSR_M | CPSR_E | CPSR_IL));
8149
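    /*
     * Note the flag representation used by the cached fields (matching
     * cpsr_read() above): NF and VF hold the flag in bit 31, CF holds it
     * in bit 0, and ZF is zero if and only if the Z flag is set.
     */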
8150 if (mask & CPSR_NZCV) {
8151 env->ZF = (~val) & CPSR_Z;
8152 env->NF = val;
8153 env->CF = (val >> 29) & 1;
8154 env->VF = (val << 3) & 0x80000000;
8155 }
8156 if (mask & CPSR_Q) {
8157 env->QF = ((val & CPSR_Q) != 0);
8158 }
8159 if (mask & CPSR_T) {
8160 env->thumb = ((val & CPSR_T) != 0);
8161 }
8162 if (mask & CPSR_IT_0_1) {
8163 env->condexec_bits &= ~3;
8164 env->condexec_bits |= (val >> 25) & 3;
8165 }
8166 if (mask & CPSR_IT_2_7) {
8167 env->condexec_bits &= 3;
8168 env->condexec_bits |= (val >> 8) & 0xfc;
8169 }
8170 if (mask & CPSR_GE) {
8171 env->GE = (val >> 16) & 0xf;
8172 }
8173
8174 /*
8175 * In a V7 implementation that includes the security extensions but does
8176 * not include Virtualization Extensions, the SCR.FW and SCR.AW bits control
8177 * whether non-secure software is allowed to change the CPSR_F and CPSR_A
8178 * bits respectively.
8179 *
8180 * In a V8 implementation, it is permitted for privileged software to
8181 * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
8182 */
8183 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
8184 arm_feature(env, ARM_FEATURE_EL3) &&
8185 !arm_feature(env, ARM_FEATURE_EL2) &&
8186 !arm_is_secure(env)) {
8187
8188 changed_daif = (env->daif ^ val) & mask;
8189
8190 if (changed_daif & CPSR_A) {
8191 /*
8192 * Check to see if we are allowed to change the masking of async
8193 * abort exceptions from a non-secure state.
8194 */
8195 if (!(env->cp15.scr_el3 & SCR_AW)) {
8196 qemu_log_mask(LOG_GUEST_ERROR,
8197 "Ignoring attempt to switch CPSR_A flag from "
8198 "non-secure world with SCR.AW bit clear\n");
8199 mask &= ~CPSR_A;
8200 }
8201 }
8202
8203 if (changed_daif & CPSR_F) {
8204 /*
8205 * Check to see if we are allowed to change the masking of FIQ
8206 * exceptions from a non-secure state.
8207 */
8208 if (!(env->cp15.scr_el3 & SCR_FW)) {
8209 qemu_log_mask(LOG_GUEST_ERROR,
8210 "Ignoring attempt to switch CPSR_F flag from "
8211 "non-secure world with SCR.FW bit clear\n");
8212 mask &= ~CPSR_F;
8213 }
8214
8215 /*
8216 * Check whether non-maskable FIQ (NMFI) support is enabled.
8217 * If this bit is set, software is not allowed to mask
8218 * FIQs, but is allowed to set CPSR_F to 0.
8219 */
8220 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
8221 (val & CPSR_F)) {
8222 qemu_log_mask(LOG_GUEST_ERROR,
8223 "Ignoring attempt to enable CPSR_F flag "
8224 "(non-maskable FIQ [NMFI] support enabled)\n");
8225 mask &= ~CPSR_F;
8226 }
8227 }
8228 }
8229
8230 env->daif &= ~(CPSR_AIF & mask);
8231 env->daif |= val & CPSR_AIF & mask;
8232
8233 if (write_type != CPSRWriteRaw &&
8234 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
8235 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
8236 /*
8237 * Note that we can only get here in USR mode if this is a
8238 * gdb stub write; for this case we follow the architectural
8239 * behaviour for guest writes in USR mode of ignoring an attempt
8240 * to switch mode. (Those are caught by translate.c for writes
8241 * triggered by guest instructions.)
8242 */
8243 mask &= ~CPSR_M;
8244 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
8245 /*
8246 * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
8247 * v7, and has defined behaviour in v8:
8248 * + leave CPSR.M untouched
8249 * + allow changes to the other CPSR fields
8250 * + set PSTATE.IL
8251 * For user changes via the GDB stub, we don't set PSTATE.IL,
8252 * as this would be unnecessarily harsh for a user error.
8253 */
8254 mask &= ~CPSR_M;
8255 if (write_type != CPSRWriteByGDBStub &&
8256 arm_feature(env, ARM_FEATURE_V8)) {
8257 mask |= CPSR_IL;
8258 val |= CPSR_IL;
8259 }
8260 qemu_log_mask(LOG_GUEST_ERROR,
8261 "Illegal AArch32 mode switch attempt from %s to %s\n",
8262 aarch32_mode_name(env->uncached_cpsr),
8263 aarch32_mode_name(val));
8264 } else {
8265 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
8266 write_type == CPSRWriteExceptionReturn ?
8267 "Exception return from AArch32" :
8268 "AArch32 mode switch from",
8269 aarch32_mode_name(env->uncached_cpsr),
8270 aarch32_mode_name(val), env->regs[15]);
8271 switch_mode(env, val & CPSR_M);
8272 }
8273 }
8274 mask &= ~CACHED_CPSR_BITS;
8275 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
8276 if (tcg_enabled() && rebuild_hflags) {
8277 arm_rebuild_hflags(env);
8278 }
8279 }
8280
8281 #ifdef CONFIG_USER_ONLY
8282
8283 static void switch_mode(CPUARMState *env, int mode)
8284 {
8285 ARMCPU *cpu = env_archcpu(env);
8286
8287 if (mode != ARM_CPU_MODE_USR) {
8288 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
8289 }
8290 }
8291
8292 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
8293 uint32_t cur_el, bool secure)
8294 {
8295 return 1;
8296 }
8297
8298 void aarch64_sync_64_to_32(CPUARMState *env)
8299 {
8300 g_assert_not_reached();
8301 }
8302
8303 #else
8304
8305 static void switch_mode(CPUARMState *env, int mode)
8306 {
8307 int old_mode;
8308 int i;
8309
8310 old_mode = env->uncached_cpsr & CPSR_M;
8311 if (mode == old_mode) {
8312 return;
8313 }
8314
8315 if (old_mode == ARM_CPU_MODE_FIQ) {
8316 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
8317 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
8318 } else if (mode == ARM_CPU_MODE_FIQ) {
8319 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
8320 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
8321 }
8322
8323 i = bank_number(old_mode);
8324 env->banked_r13[i] = env->regs[13];
8325 env->banked_spsr[i] = env->spsr;
8326
8327 i = bank_number(mode);
8328 env->regs[13] = env->banked_r13[i];
8329 env->spsr = env->banked_spsr[i];
8330
8331 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
8332 env->regs[14] = env->banked_r14[r14_bank_number(mode)];
8333 }
8334
8335 /*
8336 * Physical Interrupt Target EL Lookup Table
8337 *
8338 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
8339 *
8340 * The below multi-dimensional table is used for looking up the target
8341 * exception level given numerous condition criteria. Specifically, the
8342 * target EL is based on SCR and HCR routing controls as well as the
8343 * currently executing EL and secure state.
8344 *
8345 * Dimensions:
8346 * target_el_table[2][2][2][2][2][4]
8347 * | | | | | +--- Current EL
8348 * | | | | +------ Non-secure(0)/Secure(1)
8349 * | | | +--------- HCR mask override
8350 * | | +------------ SCR exec state control
8351 * | +--------------- SCR mask override
8352 * +------------------ 32-bit(0)/64-bit(1) EL3
8353 *
8354 * The table values are as such:
8355 * 0-3 = EL0-EL3
8356 * -1 = Cannot occur
8357 *
8358 * The ARM ARM target EL table includes entries indicating that an "exception
8359 * is not taken". The two cases where this is applicable are:
8360 * 1) An exception is taken from EL3 but the SCR does not have the exception
8361 * routed to EL3.
8362 * 2) An exception is taken from EL2 but the HCR does not have the exception
8363 * routed to EL2.
8364 * In these two cases, the below table contains a target of EL1. This value is
8365 * returned as it is expected that the consumer of the table data will check
8366 * for "target EL >= current EL" to ensure the exception is not taken.
8367 *
8368 * SCR HCR
8369 * 64 EA AMO From
8370 * BIT IRQ IMO Non-secure Secure
8371 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
8372 */
8373 static const int8_t target_el_table[2][2][2][2][2][4] = {
8374 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
8375 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
8376 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
8377 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
8378 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
8379 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
8380 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
8381 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
8382 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
8383 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
8384 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
8385 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
8386 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
8387 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
8388 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
8389 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
8390 };
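/*
 * Worked example: a physical IRQ taken from non-secure EL0, with EL3
 * using AArch64 (64=1), SCR_EL3.IRQ=0, SCR_EL3.RW=1 and HCR_EL2.{IMO,TGE}=0,
 * indexes target_el_table[1][0][1][0][0][0] and yields target EL 1.
 */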
8391
8392 /*
8393 * Determine the target EL for physical exceptions
8394 */
8395 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
8396 uint32_t cur_el, bool secure)
8397 {
8398 CPUARMState *env = cpu_env(cs);
8399 bool rw;
8400 bool scr;
8401 bool hcr;
8402 int target_el;
8403 /* Is the highest EL AArch64? */
8404 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
8405 uint64_t hcr_el2;
8406
8407 if (arm_feature(env, ARM_FEATURE_EL3)) {
8408 rw = arm_scr_rw_eff(env);
8409 } else {
8410 /*
8411 * Either EL2 is the highest EL (and so the EL2 register width
8412 * is given by is64); or there is no EL2 or EL3, in which case
8413 * the value of 'rw' does not affect the table lookup anyway.
8414 */
8415 rw = is64;
8416 }
8417
8418 hcr_el2 = arm_hcr_el2_eff(env);
8419 switch (excp_idx) {
8420 case EXCP_IRQ:
8421 case EXCP_NMI:
8422 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
8423 hcr = hcr_el2 & HCR_IMO;
8424 break;
8425 case EXCP_FIQ:
8426 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
8427 hcr = hcr_el2 & HCR_FMO;
8428 break;
8429 default:
8430 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
8431 hcr = hcr_el2 & HCR_AMO;
8432 break;
8433 };
8434
8435 /*
8436 * For these purposes, TGE and AMO/IMO/FMO both force the
8437 * interrupt to EL2. Fold TGE into the bit extracted above.
8438 */
8439 hcr |= (hcr_el2 & HCR_TGE) != 0;
8440
8441 /* Perform a table-lookup for the target EL given the current state */
8442 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
8443
8444 assert(target_el > 0);
8445
8446 return target_el;
8447 }
8448
8449 void arm_log_exception(CPUState *cs)
8450 {
8451 int idx = cs->exception_index;
8452
8453 if (qemu_loglevel_mask(CPU_LOG_INT)) {
8454 const char *exc = NULL;
8455 static const char * const excnames[] = {
8456 [EXCP_UDEF] = "Undefined Instruction",
8457 [EXCP_SWI] = "SVC",
8458 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
8459 [EXCP_DATA_ABORT] = "Data Abort",
8460 [EXCP_IRQ] = "IRQ",
8461 [EXCP_FIQ] = "FIQ",
8462 [EXCP_BKPT] = "Breakpoint",
8463 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
8464 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
8465 [EXCP_HVC] = "Hypervisor Call",
8466 [EXCP_HYP_TRAP] = "Hypervisor Trap",
8467 [EXCP_SMC] = "Secure Monitor Call",
8468 [EXCP_VIRQ] = "Virtual IRQ",
8469 [EXCP_VFIQ] = "Virtual FIQ",
8470 [EXCP_SEMIHOST] = "Semihosting call",
8471 [EXCP_NOCP] = "v7M NOCP UsageFault",
8472 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
8473 [EXCP_STKOF] = "v8M STKOF UsageFault",
8474 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
8475 [EXCP_LSERR] = "v8M LSERR UsageFault",
8476 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
8477 [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
8478 [EXCP_VSERR] = "Virtual SERR",
8479 [EXCP_GPC] = "Granule Protection Check",
8480 [EXCP_NMI] = "NMI",
8481 [EXCP_VINMI] = "Virtual IRQ NMI",
8482 [EXCP_VFNMI] = "Virtual FIQ NMI",
8483 [EXCP_MON_TRAP] = "Monitor Trap",
8484 };
8485
8486 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
8487 exc = excnames[idx];
8488 }
8489 if (!exc) {
8490 exc = "unknown";
8491 }
8492 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
8493 idx, exc, cs->cpu_index);
8494 }
8495 }
8496
8497 /*
8498 * Function used to synchronize QEMU's AArch64 register set with AArch32
8499 * register set. This is necessary when switching between AArch32 and AArch64
8500 * execution state.
8501 */
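/*
 * Summary of the mapping implemented below (AArch64 <- AArch32):
 *   x0-x7   <- r0-r7            x8-x12  <- r8-r12 (user copies)
 *   x13/x14 <- SP_usr/LR_usr    x15     <- SP_hyp
 *   x16/x17 <- LR_irq/SP_irq    x18/x19 <- LR_svc/SP_svc
 *   x20/x21 <- LR_abt/SP_abt    x22/x23 <- LR_und/SP_und
 *   x24-x30 <- r8_fiq-r14_fiq
 */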
8502 void aarch64_sync_32_to_64(CPUARMState *env)
8503 {
8504 int i;
8505 uint32_t mode = env->uncached_cpsr & CPSR_M;
8506
8507 /* We can blanket copy R[0:7] to X[0:7] */
8508 for (i = 0; i < 8; i++) {
8509 env->xregs[i] = env->regs[i];
8510 }
8511
8512 /*
8513 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
8514 * Otherwise, they come from the banked user regs.
8515 */
8516 if (mode == ARM_CPU_MODE_FIQ) {
8517 for (i = 8; i < 13; i++) {
8518 env->xregs[i] = env->usr_regs[i - 8];
8519 }
8520 } else {
8521 for (i = 8; i < 13; i++) {
8522 env->xregs[i] = env->regs[i];
8523 }
8524 }
8525
8526 /*
8527 * Registers x13-x23 are the various mode SP and FP registers. Registers
8528 * r13 and r14 are only copied if we are in that mode, otherwise we copy
8529 * from the mode banked register.
8530 */
8531 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8532 env->xregs[13] = env->regs[13];
8533 env->xregs[14] = env->regs[14];
8534 } else {
8535 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
8536 /* HYP is an exception in that it is copied from r14 */
8537 if (mode == ARM_CPU_MODE_HYP) {
8538 env->xregs[14] = env->regs[14];
8539 } else {
8540 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
8541 }
8542 }
8543
8544 if (mode == ARM_CPU_MODE_HYP) {
8545 env->xregs[15] = env->regs[13];
8546 } else {
8547 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
8548 }
8549
8550 if (mode == ARM_CPU_MODE_IRQ) {
8551 env->xregs[16] = env->regs[14];
8552 env->xregs[17] = env->regs[13];
8553 } else {
8554 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
8555 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
8556 }
8557
8558 if (mode == ARM_CPU_MODE_SVC) {
8559 env->xregs[18] = env->regs[14];
8560 env->xregs[19] = env->regs[13];
8561 } else {
8562 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
8563 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
8564 }
8565
8566 if (mode == ARM_CPU_MODE_ABT) {
8567 env->xregs[20] = env->regs[14];
8568 env->xregs[21] = env->regs[13];
8569 } else {
8570 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
8571 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
8572 }
8573
8574 if (mode == ARM_CPU_MODE_UND) {
8575 env->xregs[22] = env->regs[14];
8576 env->xregs[23] = env->regs[13];
8577 } else {
8578 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
8579 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
8580 }
8581
8582 /*
8583 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
8584 * mode, then we can copy from r8-r14. Otherwise, we copy from the
8585 * FIQ bank for r8-r14.
8586 */
8587 if (mode == ARM_CPU_MODE_FIQ) {
8588 for (i = 24; i < 31; i++) {
8589 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
8590 }
8591 } else {
8592 for (i = 24; i < 29; i++) {
8593 env->xregs[i] = env->fiq_regs[i - 24];
8594 }
8595 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
8596 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
8597 }
8598
8599 env->pc = env->regs[15];
8600 }
8601
8602 /*
8603 * Function used to synchronize QEMU's AArch32 register set with AArch64
8604 * register set. This is necessary when switching between AArch32 and AArch64
8605 * execution state.
8606 */
8607 void aarch64_sync_64_to_32(CPUARMState *env)
8608 {
8609 int i;
8610 uint32_t mode = env->uncached_cpsr & CPSR_M;
8611
8612 /* We can blanket copy X[0:7] to R[0:7] */
8613 for (i = 0; i < 8; i++) {
8614 env->regs[i] = env->xregs[i];
8615 }
8616
8617 /*
8618 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
8619 * Otherwise, we copy x8-x12 into the banked user regs.
8620 */
8621 if (mode == ARM_CPU_MODE_FIQ) {
8622 for (i = 8; i < 13; i++) {
8623 env->usr_regs[i - 8] = env->xregs[i];
8624 }
8625 } else {
8626 for (i = 8; i < 13; i++) {
8627 env->regs[i] = env->xregs[i];
8628 }
8629 }
8630
8631 /*
8632 * Registers r13 & r14 depend on the current mode.
8633 * If we are in a given mode, we copy the corresponding x registers to r13
8634 * and r14. Otherwise, we copy the x register to the banked r13 and r14
8635 * for the mode.
8636 */
8637 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8638 env->regs[13] = env->xregs[13];
8639 env->regs[14] = env->xregs[14];
8640 } else {
8641 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
8642
8643 /*
8644 * HYP is an exception in that it does not have its own banked r14 but
8645 * shares the USR r14
8646 */
8647 if (mode == ARM_CPU_MODE_HYP) {
8648 env->regs[14] = env->xregs[14];
8649 } else {
8650 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
8651 }
8652 }
8653
8654 if (mode == ARM_CPU_MODE_HYP) {
8655 env->regs[13] = env->xregs[15];
8656 } else {
8657 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
8658 }
8659
8660 if (mode == ARM_CPU_MODE_IRQ) {
8661 env->regs[14] = env->xregs[16];
8662 env->regs[13] = env->xregs[17];
8663 } else {
8664 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
8665 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
8666 }
8667
8668 if (mode == ARM_CPU_MODE_SVC) {
8669 env->regs[14] = env->xregs[18];
8670 env->regs[13] = env->xregs[19];
8671 } else {
8672 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
8673 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
8674 }
8675
8676 if (mode == ARM_CPU_MODE_ABT) {
8677 env->regs[14] = env->xregs[20];
8678 env->regs[13] = env->xregs[21];
8679 } else {
8680 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
8681 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
8682 }
8683
8684 if (mode == ARM_CPU_MODE_UND) {
8685 env->regs[14] = env->xregs[22];
8686 env->regs[13] = env->xregs[23];
8687 } else {
8688 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
8689 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
8690 }
8691
8692 /*
8693 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
8694 * mode, then we can copy to r8-r14. Otherwise, we copy to the
8695 * FIQ bank for r8-r14.
8696 */
8697 if (mode == ARM_CPU_MODE_FIQ) {
8698 for (i = 24; i < 31; i++) {
8699 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
8700 }
8701 } else {
8702 for (i = 24; i < 29; i++) {
8703 env->fiq_regs[i - 24] = env->xregs[i];
8704 }
8705 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
8706 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
8707 }
8708
8709 env->regs[15] = env->pc;
8710 }
8711
8712 static void take_aarch32_exception(CPUARMState *env, int new_mode,
8713 uint32_t mask, uint32_t offset,
8714 uint32_t newpc)
8715 {
8716 int new_el;
8717
8718 /* Change the CPU state so as to actually take the exception. */
8719 switch_mode(env, new_mode);
8720
8721 /*
8722 * For exceptions taken to AArch32 we must clear the SS bit in both
8723 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
8724 */
8725 env->pstate &= ~PSTATE_SS;
8726 env->spsr = cpsr_read(env);
8727 /* Clear IT bits. */
8728 env->condexec_bits = 0;
8729 /* Switch to the new mode, and to the correct instruction set. */
8730 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8731
8732 /* This must be after mode switching. */
8733 new_el = arm_current_el(env);
8734
8735 /* Set new mode endianness */
8736 env->uncached_cpsr &= ~CPSR_E;
8737 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
8738 env->uncached_cpsr |= CPSR_E;
8739 }
8740 /* J and IL must always be cleared for exception entry */
8741 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
8742 env->daif |= mask;
8743
8744 if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
8745 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
8746 env->uncached_cpsr |= CPSR_SSBS;
8747 } else {
8748 env->uncached_cpsr &= ~CPSR_SSBS;
8749 }
8750 }
8751
8752 if (new_mode == ARM_CPU_MODE_HYP) {
8753 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
8754 env->elr_el[2] = env->regs[15];
8755 } else {
8756 /* CPSR.PAN is normally preserved unless... */
8757 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
8758 switch (new_el) {
8759 case 3:
8760 if (!arm_is_secure_below_el3(env)) {
8761 /* ... the target is EL3, from non-secure state. */
8762 env->uncached_cpsr &= ~CPSR_PAN;
8763 break;
8764 }
8765 /* ... the target is EL3, from secure state ... */
8766 /* fall through */
8767 case 1:
8768 /* ... the target is EL1 and SCTLR.SPAN is 0. */
8769 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
8770 env->uncached_cpsr |= CPSR_PAN;
8771 }
8772 break;
8773 }
8774 }
8775 /*
8776 * This is strictly a lie, as there was no c1_sys on V4T/V5, but it
8777 * is harmless; we simply gate the Thumb-entry check on V4T.
8778 */
8779 if (arm_feature(env, ARM_FEATURE_V4T)) {
8780 env->thumb =
8781 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8782 }
8783 env->regs[14] = env->regs[15] + offset;
8784 }
8785 env->regs[15] = newpc;
8786
8787 if (tcg_enabled()) {
8788 arm_rebuild_hflags(env);
8789 }
8790 }
8791
8792 void arm_do_plugin_vcpu_discon_cb(CPUState *cs, uint64_t from)
8793 {
8794 switch (cs->exception_index) {
8795 case EXCP_IRQ:
8796 case EXCP_VIRQ:
8797 case EXCP_NMI:
8798 case EXCP_VINMI:
8799 case EXCP_FIQ:
8800 case EXCP_VFIQ:
8801 case EXCP_VFNMI:
8802 case EXCP_VSERR:
8803 qemu_plugin_vcpu_interrupt_cb(cs, from);
8804 break;
8805 default:
8806 qemu_plugin_vcpu_exception_cb(cs, from);
8807 }
8808 }
8809
8810 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
8811 {
8812 /*
8813 * Handle exception entry to Hyp mode; this is sufficiently
8814 * different to entry to other AArch32 modes that we handle it
8815 * separately here.
8816 *
8817 * The vector table entry used is always the 0x14 Hyp mode entry point,
8818 * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
8819 * The offset applied to the preferred return address is always zero
8820 * (see DDI0487C.a section G1.12.3).
8821 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
8822 */
8823 uint32_t addr, mask;
8824 ARMCPU *cpu = ARM_CPU(cs);
8825 CPUARMState *env = &cpu->env;
8826
8827 switch (cs->exception_index) {
8828 case EXCP_UDEF:
8829 addr = 0x04;
8830 break;
8831 case EXCP_SWI:
8832 addr = 0x08;
8833 break;
8834 case EXCP_BKPT:
8835 /* Fall through to prefetch abort. */
8836 case EXCP_PREFETCH_ABORT:
8837 env->cp15.ifar_s = env->exception.vaddress;
8838 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
8839 (uint32_t)env->exception.vaddress);
8840 addr = 0x0c;
8841 break;
8842 case EXCP_DATA_ABORT:
8843 env->cp15.dfar_s = env->exception.vaddress;
8844 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
8845 (uint32_t)env->exception.vaddress);
8846 addr = 0x10;
8847 break;
8848 case EXCP_IRQ:
8849 addr = 0x18;
8850 break;
8851 case EXCP_FIQ:
8852 addr = 0x1c;
8853 break;
8854 case EXCP_HVC:
8855 addr = 0x08;
8856 break;
8857 case EXCP_HYP_TRAP:
8858 addr = 0x14;
8859 break;
8860 default:
8861 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8862 }
8863
8864 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
8865 if (!arm_feature(env, ARM_FEATURE_V8)) {
8866 /*
8867 * QEMU syndrome values are v8-style. v7 has the IL bit
8868 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
8869 * If this is a v7 CPU, squash the IL bit in those cases.
8870 */
8871 if (cs->exception_index == EXCP_PREFETCH_ABORT ||
8872 (cs->exception_index == EXCP_DATA_ABORT &&
8873 !(env->exception.syndrome & ARM_EL_ISV)) ||
8874 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
8875 env->exception.syndrome &= ~ARM_EL_IL;
8876 }
8877 }
8878 env->cp15.esr_el[2] = env->exception.syndrome;
8879 }
8880
8881 if (arm_current_el(env) != 2 && addr < 0x14) {
8882 addr = 0x14;
8883 }
8884
8885 mask = 0;
8886 if (!(env->cp15.scr_el3 & SCR_EA)) {
8887 mask |= CPSR_A;
8888 }
8889 if (!(env->cp15.scr_el3 & SCR_IRQ)) {
8890 mask |= CPSR_I;
8891 }
8892 if (!(env->cp15.scr_el3 & SCR_FIQ)) {
8893 mask |= CPSR_F;
8894 }
8895
8896 addr += env->cp15.hvbar;
8897
8898 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
8899 }
8900
8901 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
8902 {
8903 ARMCPU *cpu = ARM_CPU(cs);
8904 CPUARMState *env = &cpu->env;
8905 uint32_t addr;
8906 uint32_t mask;
8907 int new_mode;
8908 uint32_t offset;
8909 uint32_t moe;
8910
8911 /* If this is a debug exception we must update the DBGDSCR.MOE bits */
8912 switch (syn_get_ec(env->exception.syndrome)) {
8913 case EC_BREAKPOINT:
8914 case EC_BREAKPOINT_SAME_EL:
8915 moe = 1;
8916 break;
8917 case EC_WATCHPOINT:
8918 case EC_WATCHPOINT_SAME_EL:
8919 moe = 10;
8920 break;
8921 case EC_AA32_BKPT:
8922 moe = 3;
8923 break;
8924 case EC_VECTORCATCH:
8925 moe = 5;
8926 break;
8927 default:
8928 moe = 0;
8929 break;
8930 }
8931
8932 if (moe) {
8933 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
8934 }
8935
8936 if (env->exception.target_el == 2) {
8937 /* Debug exceptions are reported differently on AArch32 */
8938 switch (syn_get_ec(env->exception.syndrome)) {
8939 case EC_BREAKPOINT:
8940 case EC_BREAKPOINT_SAME_EL:
8941 case EC_AA32_BKPT:
8942 case EC_VECTORCATCH:
8943 env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
8944 0, 0, 0x22);
8945 break;
8946 case EC_WATCHPOINT:
8947 env->exception.syndrome = syn_set_ec(env->exception.syndrome,
8948 EC_DATAABORT);
8949 break;
8950 case EC_WATCHPOINT_SAME_EL:
8951 env->exception.syndrome = syn_set_ec(env->exception.syndrome,
8952 EC_DATAABORT_SAME_EL);
8953 break;
8954 }
8955 arm_cpu_do_interrupt_aarch32_hyp(cs);
8956 return;
8957 }
8958
8959 switch (cs->exception_index) {
8960 case EXCP_UDEF:
8961 new_mode = ARM_CPU_MODE_UND;
8962 addr = 0x04;
8963 mask = CPSR_I;
8964 if (env->thumb) {
8965 offset = 2;
8966 } else {
8967 offset = 4;
8968 }
8969 break;
8970 case EXCP_SWI:
8971 new_mode = ARM_CPU_MODE_SVC;
8972 addr = 0x08;
8973 mask = CPSR_I;
8974 /* The PC already points to the next instruction. */
8975 offset = 0;
8976 break;
8977 case EXCP_BKPT:
8978 /* Fall through to prefetch abort. */
8979 case EXCP_PREFETCH_ABORT:
8980 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
8981 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
8982 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
8983 env->exception.fsr, (uint32_t)env->exception.vaddress);
8984 new_mode = ARM_CPU_MODE_ABT;
8985 addr = 0x0c;
8986 mask = CPSR_A | CPSR_I;
8987 offset = 4;
8988 break;
8989 case EXCP_DATA_ABORT:
8990 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
8991 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
8992 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
8993 env->exception.fsr,
8994 (uint32_t)env->exception.vaddress);
8995 new_mode = ARM_CPU_MODE_ABT;
8996 addr = 0x10;
8997 mask = CPSR_A | CPSR_I;
8998 offset = 8;
8999 break;
9000 case EXCP_IRQ:
9001 new_mode = ARM_CPU_MODE_IRQ;
9002 addr = 0x18;
9003 /* Disable IRQ and imprecise data aborts. */
9004 mask = CPSR_A | CPSR_I;
9005 offset = 4;
9006 if (env->cp15.scr_el3 & SCR_IRQ) {
9007 /* IRQ routed to monitor mode */
9008 new_mode = ARM_CPU_MODE_MON;
9009 mask |= CPSR_F;
9010 }
9011 break;
9012 case EXCP_FIQ:
9013 new_mode = ARM_CPU_MODE_FIQ;
9014 addr = 0x1c;
9015 /* Disable FIQ, IRQ and imprecise data aborts. */
9016 mask = CPSR_A | CPSR_I | CPSR_F;
9017 if (env->cp15.scr_el3 & SCR_FIQ) {
9018 /* FIQ routed to monitor mode */
9019 new_mode = ARM_CPU_MODE_MON;
9020 }
9021 offset = 4;
9022 break;
9023 case EXCP_VIRQ:
9024 new_mode = ARM_CPU_MODE_IRQ;
9025 addr = 0x18;
9026 /* Disable IRQ and imprecise data aborts. */
9027 mask = CPSR_A | CPSR_I;
9028 offset = 4;
9029 break;
9030 case EXCP_VFIQ:
9031 new_mode = ARM_CPU_MODE_FIQ;
9032 addr = 0x1c;
9033 /* Disable FIQ, IRQ and imprecise data aborts. */
9034 mask = CPSR_A | CPSR_I | CPSR_F;
9035 offset = 4;
9036 break;
9037 case EXCP_VSERR:
9038 {
9039 /*
9040 * Note that this is reported as a data abort, but the DFAR
9041 * has an UNKNOWN value. Construct the SError syndrome from
9042 * AET and ExT fields.
9043 */
9044 ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
9045
9046 if (extended_addresses_enabled(env)) {
9047 env->exception.fsr = arm_fi_to_lfsc(&fi);
9048 } else {
9049 env->exception.fsr = arm_fi_to_sfsc(&fi);
9050 }
9051 env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
9052 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9053 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
9054 env->exception.fsr);
9055
9056 new_mode = ARM_CPU_MODE_ABT;
9057 addr = 0x10;
9058 mask = CPSR_A | CPSR_I;
9059 offset = 8;
9060 }
9061 break;
9062 case EXCP_SMC:
9063 new_mode = ARM_CPU_MODE_MON;
9064 addr = 0x08;
9065 mask = CPSR_A | CPSR_I | CPSR_F;
9066 offset = 0;
9067 break;
9068 case EXCP_MON_TRAP:
9069 new_mode = ARM_CPU_MODE_MON;
9070 addr = 0x04;
9071 mask = CPSR_A | CPSR_I | CPSR_F;
9072 if (env->thumb) {
9073 offset = 2;
9074 } else {
9075 offset = 4;
9076 }
9077 break;
9078 default:
9079 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9080 return; /* Never happens. Keep compiler happy. */
9081 }
9082
9083 if (new_mode == ARM_CPU_MODE_MON) {
9084 addr += env->cp15.mvbar;
9085 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
9086 /* High vectors. When enabled, base address cannot be remapped. */
9087 addr += 0xffff0000;
9088 } else {
9089 /*
9090 * ARMv7 and later provide a vector base address register (VBAR) to
9091 * remap the exception vector table.
9092 * This register is honoured only in non-monitor mode, and is banked.
9093 * Note: only bits 31:5 are valid.
9094 */
9095 addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
9096 }
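    /*
     * For example, an IRQ (vector offset 0x18) taken with SCTLR.V set is
     * fetched from 0xffff0018; with SCTLR.V clear it is fetched from
     * VBAR + 0x18. An exception routed to Monitor mode always uses
     * MVBAR + offset instead.
     */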
9097
9098 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
9099 env->cp15.scr_el3 &= ~SCR_NS;
9100 }
9101
9102 take_aarch32_exception(env, new_mode, mask, offset, addr);
9103 }
9104
9105 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
9106 {
9107 /*
9108 * Return the register number of the AArch64 view of the AArch32
9109 * register @aarch32_reg. The CPUARMState CPSR is assumed to still
9110 * be that of the AArch32 mode the exception came from.
9111 */
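    /*
     * For example, a trapped MCR from SVC mode with Rt == 13 (SP_svc)
     * maps to AArch64 register number 19, matching the xregs[] layout
     * used by aarch64_sync_32_to_64().
     */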
9112 int mode = env->uncached_cpsr & CPSR_M;
9113
9114 switch (aarch32_reg) {
9115 case 0 ... 7:
9116 return aarch32_reg;
9117 case 8 ... 12:
9118 return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
9119 case 13:
9120 switch (mode) {
9121 case ARM_CPU_MODE_USR:
9122 case ARM_CPU_MODE_SYS:
9123 return 13;
9124 case ARM_CPU_MODE_HYP:
9125 return 15;
9126 case ARM_CPU_MODE_IRQ:
9127 return 17;
9128 case ARM_CPU_MODE_SVC:
9129 return 19;
9130 case ARM_CPU_MODE_ABT:
9131 return 21;
9132 case ARM_CPU_MODE_UND:
9133 return 23;
9134 case ARM_CPU_MODE_FIQ:
9135 return 29;
9136 default:
9137 g_assert_not_reached();
9138 }
9139 case 14:
9140 switch (mode) {
9141 case ARM_CPU_MODE_USR:
9142 case ARM_CPU_MODE_SYS:
9143 case ARM_CPU_MODE_HYP:
9144 return 14;
9145 case ARM_CPU_MODE_IRQ:
9146 return 16;
9147 case ARM_CPU_MODE_SVC:
9148 return 18;
9149 case ARM_CPU_MODE_ABT:
9150 return 20;
9151 case ARM_CPU_MODE_UND:
9152 return 22;
9153 case ARM_CPU_MODE_FIQ:
9154 return 30;
9155 default:
9156 g_assert_not_reached();
9157 }
9158 case 15:
9159 return 31;
9160 default:
9161 g_assert_not_reached();
9162 }
9163 }
9164
9165 uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
9166 {
9167 uint32_t ret = cpsr_read(env);
9168
9169 /* Move DIT from CPSR bit 21 to its SPSR_ELx location (bit 24) */
9170 if (ret & CPSR_DIT) {
9171 ret &= ~CPSR_DIT;
9172 ret |= PSTATE_DIT;
9173 }
9174 /* Merge PSTATE.SS into SPSR_ELx */
9175 ret |= env->pstate & PSTATE_SS;
9176
9177 return ret;
9178 }
9179
9180 void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val)
9181 {
9182 uint32_t mask;
9183
9184 /* Save SPSR_ELx.SS into PSTATE. */
9185 env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
9186 val &= ~PSTATE_SS;
9187
9188 /* Move DIT from its SPSR_ELx location (bit 24) back to CPSR bit 21 */
9189 if (val & PSTATE_DIT) {
9190 val &= ~PSTATE_DIT;
9191 val |= CPSR_DIT;
9192 }
9193
9194 mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
9195 cpsr_write(env, val, mask, CPSRWriteRaw);
9196 }
9197
9198 static bool syndrome_is_sync_extabt(uint32_t syndrome)
9199 {
9200 /* Return true if this syndrome value is a synchronous external abort */
9201 switch (syn_get_ec(syndrome)) {
9202 case EC_INSNABORT:
9203 case EC_INSNABORT_SAME_EL:
9204 case EC_DATAABORT:
9205 case EC_DATAABORT_SAME_EL:
9206 /* Look at fault status code for all the synchronous ext abort cases */
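        /*
         * 0x10 is a synchronous external abort not on a translation table
         * walk; 0x13 is one on a table walk at level -1; 0x14..0x17 are
         * on a table walk at levels 0..3.
         */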
9207 switch (syndrome & 0x3f) {
9208 case 0x10:
9209 case 0x13:
9210 case 0x14:
9211 case 0x15:
9212 case 0x16:
9213 case 0x17:
9214 return true;
9215 default:
9216 return false;
9217 }
9218 default:
9219 return false;
9220 }
9221 }
9222
9223 /* Handle exception entry to a target EL which is using AArch64 */
9224 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
9225 {
9226 ARMCPU *cpu = ARM_CPU(cs);
9227 CPUARMState *env = &cpu->env;
9228 unsigned int new_el = env->exception.target_el;
9229 vaddr addr = env->cp15.vbar_el[new_el];
9230 uint64_t new_mode = aarch64_pstate_mode(new_el, true);
9231 uint64_t old_mode;
9232 unsigned int cur_el = arm_current_el(env);
9233 int rt;
9234
9235 if (tcg_enabled()) {
9236 /*
9237 * Note that new_el can never be 0. If cur_el is 0, then
9238 * el0_a64 is the value of is_a64(); otherwise el0_a64 is ignored.
9239 */
9240 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
9241 }
9242
9243 if (cur_el < new_el) {
9244 /*
9245 * Entry vector offset depends on whether the implemented EL
9246 * immediately lower than the target level is using AArch32 or AArch64
9247 */
9248 bool is_aa64;
9249 uint64_t hcr;
9250
9251 switch (new_el) {
9252 case 3:
9253 is_aa64 = arm_scr_rw_eff(env);
9254 break;
9255 case 2:
9256 hcr = arm_hcr_el2_eff(env);
9257 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
9258 is_aa64 = (hcr & HCR_RW) != 0;
9259 break;
9260 }
9261 /* fall through */
9262 case 1:
9263 is_aa64 = is_a64(env);
9264 break;
9265 default:
9266 g_assert_not_reached();
9267 }
9268
9269 if (is_aa64) {
9270 addr += 0x400;
9271 } else {
9272 addr += 0x600;
9273 }
9274 } else {
9275 if (pstate_read(env) & PSTATE_SP) {
9276 addr += 0x200;
9277 }
9278 if (is_a64(env) && (env->cp15.gcscr_el[new_el] & GCSCR_EXLOCKEN)) {
9279 new_mode |= PSTATE_EXLOCK;
9280 }
9281 }
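    /*
     * At this point addr selects one of the four vector groups relative
     * to VBAR_ELx: +0x000 (current EL, SP_EL0), +0x200 (current EL,
     * SP_ELx), +0x400 (lower EL using AArch64) or +0x600 (lower EL using
     * AArch32). The per-exception offset within the group (+0x00
     * synchronous, +0x80 IRQ/vIRQ, +0x100 FIQ/vFIQ, +0x180
     * SError/vSError) is added by the switch below.
     */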
9282
9283 switch (cs->exception_index) {
9284 case EXCP_GPC:
9285 qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
9286 env->cp15.mfar_el3);
9287 /* fall through */
9288 case EXCP_PREFETCH_ABORT:
9289 case EXCP_DATA_ABORT:
9290 /*
9291 * FEAT_DoubleFault allows synchronous external aborts taken to EL3
9292 * to be taken to the SError vector entrypoint.
9293 */
9294 if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
9295 syndrome_is_sync_extabt(env->exception.syndrome)) {
9296 addr += 0x180;
9297 }
9298 env->cp15.far_el[new_el] = env->exception.vaddress;
9299 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
9300 env->cp15.far_el[new_el]);
9301 /* fall through */
9302 case EXCP_BKPT:
9303 case EXCP_UDEF:
9304 case EXCP_SWI:
9305 case EXCP_HVC:
9306 case EXCP_HYP_TRAP:
9307 case EXCP_SMC:
9308 switch (syn_get_ec(env->exception.syndrome)) {
9309 case EC_ADVSIMDFPACCESSTRAP:
9310 /*
9311 * QEMU internal FP/SIMD syndromes from AArch32 include the
9312 * TA and coproc fields which are only exposed if the exception
9313 * is taken to AArch32 Hyp mode. Mask them out to get a valid
9314 * AArch64 format syndrome.
9315 */
9316 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
9317 break;
9318 case EC_CP14RTTRAP:
9319 case EC_CP15RTTRAP:
9320 case EC_CP14DTTRAP:
9321 /*
9322 * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
9323 * the raw register field from the insn; when taking this to
9324 * AArch64 we must convert it to the AArch64 view of the register
9325 * number. Notice that we read a 4-bit AArch32 register number and
9326 * write back a 5-bit AArch64 one.
9327 */
9328 rt = extract32(env->exception.syndrome, 5, 4);
9329 rt = aarch64_regnum(env, rt);
9330 env->exception.syndrome = deposit32(env->exception.syndrome,
9331 5, 5, rt);
9332 break;
9333 case EC_CP15RRTTRAP:
9334 case EC_CP14RRTTRAP:
9335 /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
9336 rt = extract32(env->exception.syndrome, 5, 4);
9337 rt = aarch64_regnum(env, rt);
9338 env->exception.syndrome = deposit32(env->exception.syndrome,
9339 5, 5, rt);
9340 rt = extract32(env->exception.syndrome, 10, 4);
9341 rt = aarch64_regnum(env, rt);
9342 env->exception.syndrome = deposit32(env->exception.syndrome,
9343 10, 5, rt);
9344 break;
9345 }
9346 env->cp15.esr_el[new_el] = env->exception.syndrome;
9347 break;
9348 case EXCP_IRQ:
9349 case EXCP_VIRQ:
9350 case EXCP_NMI:
9351 case EXCP_VINMI:
9352 addr += 0x80;
9353 break;
9354 case EXCP_FIQ:
9355 case EXCP_VFIQ:
9356 case EXCP_VFNMI:
9357 addr += 0x100;
9358 break;
9359 case EXCP_VSERR:
9360 addr += 0x180;
9361 /* Construct the SError syndrome from IDS and ISS fields. */
9362 env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
9363 env->cp15.esr_el[new_el] = env->exception.syndrome;
9364 break;
9365 default:
9366 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9367 }
9368
9369 if (is_a64(env)) {
9370 old_mode = pstate_read(env);
9371 aarch64_save_sp(env, arm_current_el(env));
9372 env->elr_el[new_el] = env->pc;
9373
9374 if (cur_el == 1 && new_el == 1) {
9375 uint64_t hcr = arm_hcr_el2_eff(env);
9376 if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
9377 (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
9378 /*
9379 * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
9380 * by setting M[3:2] to 0b10.
9381 * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
9382 * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
9383 */
9384 old_mode = deposit64(old_mode, 2, 2, 2);
9385 }
9386 }
9387 } else {
9388 old_mode = cpsr_read_for_spsr_elx(env);
9389 env->elr_el[new_el] = env->regs[15];
9390
9391 aarch64_sync_32_to_64(env);
9392
9393 env->condexec_bits = 0;
9394 }
9395 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
9396
9397 qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%" PRIx64 "\n", old_mode);
9398 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
9399 env->elr_el[new_el]);
9400
9401 if (cpu_isar_feature(aa64_pan, cpu)) {
9402 /* The value of PSTATE.PAN is normally preserved, except when ... */
9403 new_mode |= old_mode & PSTATE_PAN;
9404 switch (new_el) {
9405 case 2:
9406 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
9407 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
9408 != (HCR_E2H | HCR_TGE)) {
9409 break;
9410 }
9411 /* fall through */
9412 case 1:
9413 /* ... the target is EL1 ... */
9414 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
9415 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
9416 new_mode |= PSTATE_PAN;
9417 }
9418 break;
9419 }
9420 }
9421 if (cpu_isar_feature(aa64_mte, cpu)) {
9422 new_mode |= PSTATE_TCO;
9423 }
9424
9425 if (cpu_isar_feature(aa64_ssbs, cpu)) {
9426 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
9427 new_mode |= PSTATE_SSBS;
9428 } else {
9429 new_mode &= ~PSTATE_SSBS;
9430 }
9431 }
9432
9433 if (cpu_isar_feature(aa64_nmi, cpu)) {
9434 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
9435 new_mode |= PSTATE_ALLINT;
9436 } else {
9437 new_mode &= ~PSTATE_ALLINT;
9438 }
9439 }
9440
9441 pstate_write(env, PSTATE_DAIF | new_mode);
9442 env->aarch64 = true;
9443 aarch64_restore_sp(env, new_el);
9444
9445 if (tcg_enabled()) {
9446 helper_rebuild_hflags_a64(env, new_el);
9447 }
9448
9449 env->pc = addr;
9450
9451 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64
9452 " PSTATE 0x%" PRIx64 "\n",
9453 new_el, env->pc, pstate_read(env));
9454 }
9455
9456 /*
9457 * Do semihosting call and set the appropriate return value. All the
9458 * permission and validity checks have been done at translate time.
9459 *
9460 * We only see semihosting exceptions under TCG, as they are not
9461 * trapped to the hypervisor under KVM.
9462 */
9463 #ifdef CONFIG_TCG
9464 static void tcg_handle_semihosting(CPUState *cs)
9465 {
9466 ARMCPU *cpu = ARM_CPU(cs);
9467 CPUARMState *env = &cpu->env;
9468
9469 if (is_a64(env)) {
9470 qemu_log_mask(CPU_LOG_INT,
9471 "...handling as semihosting call 0x%" PRIx64 "\n",
9472 env->xregs[0]);
9473 do_common_semihosting(cs);
9474 env->pc += 4;
9475 } else {
9476 qemu_log_mask(CPU_LOG_INT,
9477 "...handling as semihosting call 0x%x\n",
9478 env->regs[0]);
9479 do_common_semihosting(cs);
9480 env->regs[15] += env->thumb ? 2 : 4;
9481 }
9482 }
9483 #endif
9484
9485 /*
9486 * Handle a CPU exception for A and R profile CPUs.
9487 * Do any appropriate logging, handle PSCI calls, and then hand off
9488 * to the AArch64-entry or AArch32-entry function depending on the
9489 * target exception level's register width.
9490 *
9491 * Note: this is used both by TCG (as the do_interrupt tcg op) and
9492 * by KVM, to re-inject guest debug exceptions and to inject a
9493 * Synchronous External Abort.
9494 */
9495 void arm_cpu_do_interrupt(CPUState *cs)
9496 {
9497 ARMCPU *cpu = ARM_CPU(cs);
9498 CPUARMState *env = &cpu->env;
9499 unsigned int new_el = env->exception.target_el;
9500 uint64_t last_pc = cs->cc->get_pc(cs);
9501
9502 assert(!arm_feature(env, ARM_FEATURE_M));
9503
9504 arm_log_exception(cs);
9505 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
9506 new_el);
9507 if (qemu_loglevel_mask(CPU_LOG_INT)
9508 && !excp_is_internal(cs->exception_index)) {
9509 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx64 "\n",
9510 syn_get_ec(env->exception.syndrome),
9511 env->exception.syndrome);
9512 }
9513
9514 if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
9515 arm_handle_psci_call(cpu);
9516 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
9517 qemu_plugin_vcpu_hostcall_cb(cs, last_pc);
9518 return;
9519 }
9520
9521 /*
9522 * Semihosting semantics depend on the register width of the code
9523 * that caused the exception, not the target exception level, so
9524 * must be handled here.
9525 */
9526 #ifdef CONFIG_TCG
9527 if (cs->exception_index == EXCP_SEMIHOST) {
9528 tcg_handle_semihosting(cs);
9529 qemu_plugin_vcpu_hostcall_cb(cs, last_pc);
9530 return;
9531 }
9532 #endif
9533
9534 /*
9535 * Hooks may change global state, so the BQL must be held; it is
9536 * also required for any modification of
9537 * cs->interrupt_request.
9538 */
9539 g_assert(bql_locked());
9540
9541 arm_call_pre_el_change_hook(cpu);
9542
9543 assert(!excp_is_internal(cs->exception_index));
9544 if (arm_el_is_aa64(env, new_el)) {
9545 arm_cpu_do_interrupt_aarch64(cs);
9546 } else {
9547 arm_cpu_do_interrupt_aarch32(cs);
9548 }
9549
9550 arm_call_el_change_hook(cpu);
9551
9552 if (!kvm_enabled()) {
9553 cpu_set_interrupt(cs, CPU_INTERRUPT_EXITTB);
9554 }
9555
9556 arm_do_plugin_vcpu_discon_cb(cs, last_pc);
9557 }
9558 #endif /* !CONFIG_USER_ONLY */
9559
9560 uint64_t arm_sctlr(CPUARMState *env, int el)
9561 {
9562 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */
9563 if (el == 0) {
9564 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
9565 switch (mmu_idx) {
9566 case ARMMMUIdx_E20_0:
9567 el = 2;
9568 break;
9569 case ARMMMUIdx_E30_0:
9570 el = 3;
9571 break;
9572 default:
9573 el = 1;
9574 break;
9575 }
9576 }
9577 return env->cp15.sctlr_el[el];
9578 }
9579
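/*
 * Return the TBI field for this translation regime as a 2-bit value:
 * bit 0 applies to the lower (TTBR0) address range and bit 1 to the
 * upper (TTBR1) range; callers select the relevant bit using VA[55].
 */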
9580 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
9581 {
9582 if (regime_has_2_ranges(mmu_idx)) {
9583 return extract64(tcr, 37, 2);
9584 } else if (regime_is_stage2(mmu_idx)) {
9585 return 0; /* VTCR_EL2 */
9586 } else {
9587 /* Replicate the single TBI bit so we always have 2 bits. */
9588 return extract32(tcr, 20, 1) * 3;
9589 }
9590 }
9591
9592 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
9593 {
9594 if (regime_has_2_ranges(mmu_idx)) {
9595 return extract64(tcr, 51, 2);
9596 } else if (regime_is_stage2(mmu_idx)) {
9597 return 0; /* VTCR_EL2 */
9598 } else {
9599 /* Replicate the single TBID bit so we always have 2 bits. */
9600 return extract32(tcr, 29, 1) * 3;
9601 }
9602 }
9603
9604 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
9605 {
9606 if (regime_has_2_ranges(mmu_idx)) {
9607 return extract64(tcr, 57, 2);
9608 } else {
9609 /* Replicate the single TCMA bit so we always have 2 bits. */
9610 return extract32(tcr, 30, 1) * 3;
9611 }
9612 }
9613
9614 static ARMGranuleSize tg0_to_gran_size(int tg)
9615 {
9616 switch (tg) {
9617 case 0:
9618 return Gran4K;
9619 case 1:
9620 return Gran64K;
9621 case 2:
9622 return Gran16K;
9623 default:
9624 return GranInvalid;
9625 }
9626 }
9627
9628 static ARMGranuleSize tg1_to_gran_size(int tg)
9629 {
9630 switch (tg) {
9631 case 1:
9632 return Gran16K;
9633 case 2:
9634 return Gran4K;
9635 case 3:
9636 return Gran64K;
9637 default:
9638 return GranInvalid;
9639 }
9640 }
9641
9642 static inline bool have4k(ARMCPU *cpu, bool stage2)
9643 {
9644 return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
9645 : cpu_isar_feature(aa64_tgran4, cpu);
9646 }
9647
9648 static inline bool have16k(ARMCPU *cpu, bool stage2)
9649 {
9650 return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
9651 : cpu_isar_feature(aa64_tgran16, cpu);
9652 }
9653
9654 static inline bool have64k(ARMCPU *cpu, bool stage2)
9655 {
9656 return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
9657 : cpu_isar_feature(aa64_tgran64, cpu);
9658 }
9659
9660 static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
9661 bool stage2)
9662 {
9663 switch (gran) {
9664 case Gran4K:
9665 if (have4k(cpu, stage2)) {
9666 return gran;
9667 }
9668 break;
9669 case Gran16K:
9670 if (have16k(cpu, stage2)) {
9671 return gran;
9672 }
9673 break;
9674 case Gran64K:
9675 if (have64k(cpu, stage2)) {
9676 return gran;
9677 }
9678 break;
9679 case GranInvalid:
9680 break;
9681 }
9682 /*
9683 * If the guest selects a granule size that isn't implemented,
9684 * the architecture requires that we behave as if it selected one
9685 * that is (with an IMPDEF choice of which one to pick). We choose
9686 * to implement the smallest supported granule size.
9687 */
9688 if (have4k(cpu, stage2)) {
9689 return Gran4K;
9690 }
9691 if (have16k(cpu, stage2)) {
9692 return Gran16K;
9693 }
9694 assert(have64k(cpu, stage2));
9695 return Gran64K;
9696 }
9697
9698 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
9699 ARMMMUIdx mmu_idx, bool data,
9700 bool el1_is_aa32)
9701 {
9702 uint64_t tcr = regime_tcr(env, mmu_idx);
9703 bool epd, hpd, tsz_oob, ds, ha, hd, pie = false;
9704 bool aie = false;
9705 int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
9706 ARMGranuleSize gran;
9707 ARMCPU *cpu = env_archcpu(env);
9708 bool stage2 = regime_is_stage2(mmu_idx);
9709 int r_el = regime_el(mmu_idx);
9710
9711 if (!regime_has_2_ranges(mmu_idx)) {
9712 select = 0;
9713 tsz = extract32(tcr, 0, 6);
9714 gran = tg0_to_gran_size(extract32(tcr, 14, 2));
9715 if (stage2) {
9716 /*
9717 * Stage2 does not have hierarchical permissions.
9718 * Treating them as disabled here simplifies the page table walk.
9719 */
9720 hpd = true;
9721 pie = extract64(tcr, 36, 1) && cpu_isar_feature(aa64_s2pie, cpu);
9722 } else {
9723 hpd = extract32(tcr, 24, 1);
9724 if (r_el == 3) {
9725 pie = (extract64(tcr, 35, 1)
9726 && cpu_isar_feature(aa64_s1pie, cpu));
9727 aie = (extract64(tcr, 37, 1)
9728 && cpu_isar_feature(aa64_aie, cpu));
9729 } else if (!arm_feature(env, ARM_FEATURE_EL3)
9730 || (env->cp15.scr_el3 & SCR_TCR2EN)) {
9731 pie = env->cp15.tcr2_el[2] & TCR2_PIE;
9732 aie = env->cp15.tcr2_el[2] & TCR2_AIE;
9733 }
9734 }
9735 epd = false;
9736 sh = extract32(tcr, 12, 2);
9737 ps = extract32(tcr, 16, 3);
9738 ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
9739 hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
9740 ds = extract64(tcr, 32, 1);
9741 } else {
9742 bool e0pd;
9743
9744 /*
9745 * Bit 55 is always between the two regions, and is canonical for
9746 * determining if address tagging is enabled.
9747 */
9748 select = extract64(va, 55, 1);
9749 if (!select) {
9750 tsz = extract32(tcr, 0, 6);
9751 gran = tg0_to_gran_size(extract32(tcr, 14, 2));
9752 epd = extract32(tcr, 7, 1);
9753 sh = extract32(tcr, 12, 2);
9754 hpd = extract64(tcr, 41, 1);
9755 e0pd = extract64(tcr, 55, 1);
9756 } else {
9757 tsz = extract32(tcr, 16, 6);
9758 gran = tg1_to_gran_size(extract32(tcr, 30, 2));
9759 epd = extract32(tcr, 23, 1);
9760 sh = extract32(tcr, 28, 2);
9761 hpd = extract64(tcr, 42, 1);
9762 e0pd = extract64(tcr, 56, 1);
9763 }
9764 ps = extract64(tcr, 32, 3);
9765 ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
9766 hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
9767 ds = extract64(tcr, 59, 1);
9768
9769 if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
9770 regime_is_user(mmu_idx)) {
9771 epd = true;
9772 }
9773
9774 if ((!arm_feature(env, ARM_FEATURE_EL3)
9775 || (env->cp15.scr_el3 & SCR_TCR2EN))
9776 && (r_el == 2 || (arm_hcrx_el2_eff(env) & HCRX_TCR2EN))) {
9777 pie = env->cp15.tcr2_el[r_el] & TCR2_PIE;
9778 aie = env->cp15.tcr2_el[r_el] & TCR2_AIE;
9779 }
9780 }
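    /* Hierarchical permission controls are not used when indirect permissions (PIE) are enabled. */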
9781 hpd |= pie;
9782
9783 gran = sanitize_gran_size(cpu, gran, stage2);
9784
9785 if (cpu_isar_feature(aa64_st, cpu)) {
9786 max_tsz = 48 - (gran == Gran64K);
9787 } else {
9788 max_tsz = 39;
9789 }
9790
9791 /*
9792 * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
9793 * adjust the effective value of DS, as documented.
9794 */
9795 min_tsz = 16;
9796 if (gran == Gran64K) {
9797 if (cpu_isar_feature(aa64_lva, cpu)) {
9798 min_tsz = 12;
9799 }
9800 ds = false;
9801 } else if (ds) {
9802 if (regime_is_stage2(mmu_idx)) {
9803 if (gran == Gran16K) {
9804 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
9805 } else {
9806 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
9807 }
9808 } else {
9809 if (gran == Gran16K) {
9810 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
9811 } else {
9812 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
9813 }
9814 }
9815 if (ds) {
9816 min_tsz = 12;
9817 }
9818 }
9819
9820 if (stage2 && el1_is_aa32) {
9821 /*
9822 * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
9823 * are loosened: a configured IPA of 40 bits is permitted even if
9824 * the implemented PA is less than that (and so a 40 bit IPA would
9825 * fault for an AArch64 EL1). See R_DTLMN.
9826 */
9827 min_tsz = MIN(min_tsz, 24);
9828 }
9829
9830 if (tsz > max_tsz) {
9831 tsz = max_tsz;
9832 tsz_oob = true;
9833 } else if (tsz < min_tsz) {
9834 tsz = min_tsz;
9835 tsz_oob = true;
9836 } else {
9837 tsz_oob = false;
9838 }
9839
9840 /* Present TBI as a composite with TBID. */
9841 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
9842 if (!data) {
9843 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
9844 }
9845 tbi = (tbi >> select) & 1;
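    /*
     * For example, with TBIx and TBIDx both set for the selected range,
     * the top byte is ignored for data accesses but not for instruction
     * fetches, so tbi ends up 0 here when !data.
     */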
9846
9847 return (ARMVAParameters) {
9848 .tsz = tsz,
9849 .ps = ps,
9850 .sh = sh,
9851 .select = select,
9852 .tbi = tbi,
9853 .epd = epd,
9854 .hpd = hpd,
9855 .tsz_oob = tsz_oob,
9856 .ds = ds,
9857 .ha = ha,
9858 .hd = ha && hd,
9859 .gran = gran,
9860 .pie = pie,
9861 .aie = aie,
9862 };
9863 }
9864
9865
9866 /*
9867 * Return the exception level to which FP-disabled exceptions should
9868 * be taken, or 0 if FP is enabled.
9869 */
9870 int fp_exception_el(CPUARMState *env, int cur_el)
9871 {
9872 #ifndef CONFIG_USER_ONLY
9873 uint64_t hcr_el2;
9874
9875 /*
9876 * CPACR and the CPTR registers don't exist before v6, so FP is
9877 * always accessible
9878 */
9879 if (!arm_feature(env, ARM_FEATURE_V6)) {
9880 return 0;
9881 }
9882
9883 if (arm_feature(env, ARM_FEATURE_M)) {
9884 /* CPACR can cause a NOCP UsageFault taken to current security state */
9885 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
9886 return 1;
9887 }
9888
9889 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
9890 if (!extract32(env->v7m.nsacr, 10, 1)) {
9891 /* FP insns cause a NOCP UsageFault taken to Secure */
9892 return 3;
9893 }
9894 }
9895
9896 return 0;
9897 }
9898
9899 hcr_el2 = arm_hcr_el2_eff(env);
9900
9901 /*
9902 * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
9903 * 0, 2 : trap EL0 and EL1/PL1 accesses
9904 * 1 : trap only EL0 accesses
9905 * 3 : trap no accesses
9906 * This register is ignored if E2H+TGE are both set.
9907 */
9908 if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
9909 int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
9910
9911 switch (fpen) {
9912 case 1:
9913 if (cur_el != 0) {
9914 break;
9915 }
9916 /* fall through */
9917 case 0:
9918 case 2:
9919 /* Trap from Secure PL0 or PL1 to Secure PL1. */
9920 if (!arm_el_is_aa64(env, 3)
9921 && (cur_el == 3 || arm_is_secure_below_el3(env))) {
9922 return 3;
9923 }
9924 if (cur_el <= 1) {
9925 return 1;
9926 }
9927 break;
9928 }
9929 }
9930
9931 /*
9932 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
9933 * to control non-secure access to the FPU. It doesn't have any
9934 * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
9935 */
9936 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
9937 cur_el <= 2 && !arm_is_secure_below_el3(env))) {
9938 if (!extract32(env->cp15.nsacr, 10, 1)) {
9939 /* FP insns act as UNDEF */
9940 return cur_el == 2 ? 2 : 1;
9941 }
9942 }
9943
9944 /*
9945 * CPTR_EL2 is present in v7VE or v8, and changes format
9946 * with HCR_EL2.E2H (regardless of TGE).
9947 */
9948 if (cur_el <= 2) {
9949 if (hcr_el2 & HCR_E2H) {
9950 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
9951 case 1:
9952 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
9953 break;
9954 }
9955 /* fall through */
9956 case 0:
9957 case 2:
9958 return 2;
9959 }
9960 } else if (arm_is_el2_enabled(env)) {
9961 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
9962 return 2;
9963 }
9964 }
9965 }
9966
9967 /* CPTR_EL3 : present in v8 */
9968 if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
9969 /* Trap all FP ops to EL3 */
9970 return 3;
9971 }
9972 #endif
9973 return 0;
9974 }
9975
9976 #ifndef CONFIG_TCG
9977 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
9978 {
9979 g_assert_not_reached();
9980 }
9981 #endif
9982
9983 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
9984 {
9985 ARMMMUIdx idx;
9986 uint64_t hcr;
9987
9988 if (arm_feature(env, ARM_FEATURE_M)) {
9989 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
9990 }
9991
9992 /* See ARM pseudo-function ELIsInHost. */
9993 switch (el) {
9994 case 0:
9995 hcr = arm_hcr_el2_eff(env);
9996 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
9997 idx = ARMMMUIdx_E20_0;
9998 } else if (arm_is_secure_below_el3(env) &&
9999 !arm_el_is_aa64(env, 3)) {
10000 idx = ARMMMUIdx_E30_0;
10001 } else {
10002 idx = ARMMMUIdx_E10_0;
10003 }
10004 break;
10005 case 1:
10006 if (arm_pan_enabled(env)) {
10007 idx = ARMMMUIdx_E10_1_PAN;
10008 } else {
10009 idx = ARMMMUIdx_E10_1;
10010 }
10011 break;
10012 case 2:
10013 /* Note that TGE does not apply at EL2. */
10014 if (arm_hcr_el2_eff(env) & HCR_E2H) {
10015 if (arm_pan_enabled(env)) {
10016 idx = ARMMMUIdx_E20_2_PAN;
10017 } else {
10018 idx = ARMMMUIdx_E20_2;
10019 }
10020 } else {
10021 idx = ARMMMUIdx_E2;
10022 }
10023 break;
10024 case 3:
10025 if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) {
10026 return ARMMMUIdx_E30_3_PAN;
10027 }
10028 return ARMMMUIdx_E3;
10029 default:
10030 g_assert_not_reached();
10031 }
10032
10033 return idx;
10034 }
10035
10036 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
10037 {
10038 return arm_mmu_idx_el(env, arm_current_el(env));
10039 }
10040
10041 /*
10042 * The manual says that when SVE is enabled and VQ is widened the
10043 * implementation is allowed to zero the previously inaccessible
10044 * portion of the registers. The corollary to that is that when
10045 * SVE is enabled and VQ is narrowed we are also allowed to zero
10046 * the now inaccessible portion of the registers.
10047 *
10048 * The intent of this is that no predicate bit beyond VQ is ever set.
10049 * Which means that some operations on predicate registers themselves
10050 * may operate on full uint64_t or even unrolled across the maximum
10051 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
10052 * may well be cheaper than conditionals to restrict the operation
10053 * to the relevant portion of a uint16_t[16].
10054 */
10055 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
10056 {
10057 int i, j;
10058 uint64_t pmask;
10059
10060 assert(vq >= 1 && vq <= ARM_MAX_VQ);
10061 assert(vq <= env_archcpu(env)->sve_max_vq);
10062
10063 /* Zap the high bits of the zregs. */
10064 for (i = 0; i < 32; i++) {
10065 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
10066 }
10067
10068 /* Zap the high bits of the pregs and ffr. */
10069 pmask = 0;
10070 if (vq & 3) {
10071 pmask = ~(-1ULL << (16 * (vq & 3)));
10072 }
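    /*
     * For example, narrowing to vq == 5: vq & 3 == 1, so pmask keeps
     * only the low 16 predicate bits of p[1]; p[2] and p[3] are then
     * cleared entirely by the loop below once pmask is reset to 0.
     */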
10073 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
10074 for (i = 0; i < 17; ++i) {
10075 env->vfp.pregs[i].p[j] &= pmask;
10076 }
10077 pmask = 0;
10078 }
10079 }
10080
10081 static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
10082 {
10083 int exc_el;
10084
10085 if (sm) {
10086 exc_el = sme_exception_el(env, el);
10087 } else {
10088 exc_el = sve_exception_el(env, el);
10089 }
10090 if (exc_el) {
10091 return 0; /* disabled */
10092 }
10093 return sve_vqm1_for_el_sm(env, el, sm);
10094 }
10095
10096 /*
10097 * Notice a change in SVE vector size when changing EL.
10098 */
10099 void aarch64_sve_change_el(CPUARMState *env, int old_el,
10100 int new_el, bool el0_a64)
10101 {
10102 ARMCPU *cpu = env_archcpu(env);
10103 int old_len, new_len;
10104 bool old_a64, new_a64, sm;
10105
10106 /* Nothing to do if no SVE. */
10107 if (!cpu_isar_feature(aa64_sve, cpu)) {
10108 return;
10109 }
10110
10111 /* Nothing to do if FP is disabled in either EL. */
10112 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
10113 return;
10114 }
10115
10116 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
10117 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
10118
10119 /*
10120 * Both AArch64.TakeException and AArch64.ExceptionReturn
10121 * invoke ResetSVEState when taking an exception from, or
10122 * returning to, AArch32 state when PSTATE.SM is enabled.
10123 */
10124 sm = FIELD_EX64(env->svcr, SVCR, SM);
10125 if (old_a64 != new_a64 && sm) {
10126 arm_reset_sve_state(env);
10127 return;
10128 }
10129
10130 /*
10131 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
10132 * at ELx, or not available because the EL is in AArch32 state, then
10133 * for all purposes other than a direct read, the ZCR_ELx.LEN field
10134 * has an effective value of 0".
10135 *
10136 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
10137 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
10138 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
10139 * we already have the correct register contents when encountering the
10140 * vq0->vq0 transition between EL0->EL1.
10141 */
10142 old_len = new_len = 0;
10143 if (old_a64) {
10144 old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
10145 }
10146 if (new_a64) {
10147 new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
10148 }
10149
10150 /* When changing vector length, clear inaccessible state. */
10151 if (new_len < old_len) {
10152 aarch64_sve_narrow_vq(env, new_len + 1);
10153 }
10154 }
10155
10156 #ifndef CONFIG_USER_ONLY
10157 ARMSecuritySpace arm_security_space(CPUARMState *env)
10158 {
10159 if (arm_feature(env, ARM_FEATURE_M)) {
10160 return arm_secure_to_space(env->v7m.secure);
10161 }
10162
10163 /*
10164 * If EL3 is not supported then the secure state is implementation
10165 * defined, in which case QEMU defaults to non-secure.
10166 */
10167 if (!arm_feature(env, ARM_FEATURE_EL3)) {
10168 return ARMSS_NonSecure;
10169 }
10170
10171 /* Check for AArch64 EL3 or AArch32 Mon. */
10172 if (is_a64(env)) {
10173 if (extract32(env->pstate, 2, 2) == 3) {
10174 if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
10175 return ARMSS_Root;
10176 } else {
10177 return ARMSS_Secure;
10178 }
10179 }
10180 } else {
10181 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
10182 return ARMSS_Secure;
10183 }
10184 }
10185
10186 return arm_security_space_below_el3(env);
10187 }
10188
10189 ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
10190 {
10191 assert(!arm_feature(env, ARM_FEATURE_M));
10192
10193 /*
10194 * If EL3 is not supported then the secure state is implementation
10195 * defined, in which case QEMU defaults to non-secure.
10196 */
10197 if (!arm_feature(env, ARM_FEATURE_EL3)) {
10198 return ARMSS_NonSecure;
10199 }
10200
10201 /*
10202 * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
10203 * Ignoring NSE when !NS retains consistency without having to
10204 * modify other predicates.
10205 */
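    /*
     * SCR_EL3.{NSE,NS}: {0,0} is Secure, {0,1} is Non-secure,
     * {1,1} is Realm, and {1,0} is Reserved (handled as Secure here,
     * since NSE is ignored when NS is 0).
     */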
10206 if (!(env->cp15.scr_el3 & SCR_NS)) {
10207 return ARMSS_Secure;
10208 } else if (env->cp15.scr_el3 & SCR_NSE) {
10209 return ARMSS_Realm;
10210 } else {
10211 return ARMSS_NonSecure;
10212 }
10213 }
10214 #endif /* !CONFIG_USER_ONLY */
10215