/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cputlb.h"
#include "exec/translation-block.h"
#include "hw/irq.h"
#include "system/cpu-timers.h"
#include "exec/icount.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "accel/tcg/probe.h"
#include "accel/tcg/getpc.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"

#define HELPER_H "tcg/helper.h"
#include "exec/helper-proto.h.inc"

static void switch_mode(CPUARMState *env, int mode);

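/*
 * Raw accessors for the register backing store: read or write the
 * CPUARMState field named by ri->fieldoffset directly, with no side
 * effects. Only valid for registers that have a fieldoffset.
 */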
uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

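/* Return a pointer to the register's backing storage within env. */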
static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /*
     * Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

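/*
 * Helpers for init_cpreg_list(): count the registers which need raw
 * (index,value) entries, and append their KVM-encoded indexes to
 * cpreg_indexes[], skipping NO_RAW and ALIAS registers in both passes.
 */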
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

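/*
 * Return true if PAN is in effect: PSTATE.PAN (CPSR.PAN in AArch32),
 * except that PSTATE.PAN behaves as 0 when HCR_EL2.{NV,NV1} == {1,1}.
 */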
bool arm_pan_enabled(CPUARMState *env)
{
    if (is_a64(env)) {
        if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
            return false;
        }
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_UNDEFINED;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     *
     * For AArch32 this is only used for TLBIALLNSNH and VTTBR
     * writes, so only needs to apply to NS PL1&0, not S PL1&0.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register: should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /*
     * Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint64_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);
    uint64_t changed;

    /*
     * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
     * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
     * Instead, choose the format based on the mode of EL3.
     */
    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* RES1 */
        valid_mask &= ~SCR_NET;     /* RES0 */

        if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu)) {
            value |= SCR_RW;        /* RAO/WI */
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        } else if (cpu_isar_feature(aa64_rme, cpu)) {
            /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
            value |= SCR_NS;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= SCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_doublefault, cpu)) {
            valid_mask |= SCR_EASE | SCR_NMEA;
        }
        if (cpu_isar_feature(aa64_sme, cpu)) {
            valid_mask |= SCR_ENTP2;
        }
        if (cpu_isar_feature(aa64_hcx, cpu)) {
            valid_mask |= SCR_HXEN;
        }
        if (cpu_isar_feature(aa64_fgt, cpu)) {
            valid_mask |= SCR_FGTEN;
        }
        if (cpu_isar_feature(aa64_rme, cpu)) {
            valid_mask |= SCR_NSE | SCR_GPF;
        }
        if (cpu_isar_feature(aa64_ecv, cpu)) {
            valid_mask |= SCR_ECVEN;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
        if (cpu_isar_feature(aa32_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /*
         * On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    changed = env->cp15.scr_el3 ^ value;
    env->cp15.scr_el3 = value;

    /*
     * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
     * we must invalidate all TLBs below EL3.
     */
    if (changed & (SCR_NS | SCR_NSE)) {
        tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
                                           ARMMMUIdxBit_E20_0 |
                                           ARMMMUIdxBit_E10_1 |
                                           ARMMMUIdxBit_E20_2 |
                                           ARMMMUIdxBit_E10_1_PAN |
                                           ARMMMUIdxBit_E20_2_PAN |
                                           ARMMMUIdxBit_E2));
    }
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}

static CPAccessResult access_tid4(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

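/* Only the bottom four bits of CSSELR (Level, InD) are implemented. */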
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

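/*
 * Synthesize the ISR value from the pending interrupt lines, honouring
 * the HCR_EL2.{IMO,FMO,AMO} virtual interrupt routing controls and
 * setting the IS/FS superpriority (NMI) status bits where relevant.
 */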
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
        if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
            ret |= ISR_IS;
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }

        if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
            ret |= ISR_IS;
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
        if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
            ret |= ISR_FS;
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    if (hcr_el2 & HCR_AMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
            ret |= CPSR_A;
        }
    }

    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_tid4,
      .fgt = FGT_CCSIDR_EL1,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_tid4,
      .fgt = FGT_CSSELR_EL1,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /*
     * Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .fgt = FGT_AIDR_EL1,
      .resetvalue = 0 },
    /*
     * Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AFSR0_EL1,
      .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AFSR1_EL1,
      .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_MAIR_EL1,
      .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /*
     * For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /*
     * MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .fgt = FGT_ISR_EL1,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
};

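/* TEECR: only the low bit (XED) is writable; the other bits are RES0. */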
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
     * at all, so we don't need to check whether we're v8A.
     */
    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TTEE)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP_EL1;
    }
    return teecr_access(env, ri, isread);
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write, .accessfn = teecr_access },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fgt = FGT_TPIDR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fgt = FGT_TPIDR_EL0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fgt = FGT_TPIDRRO_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .fgt = FGT_TPIDRRO_EL0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fgt = FGT_TPIDR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
};

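/* On reset, load CNTFRQ with the counter frequency configured for this CPU. */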
static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /*
     * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP_EL1;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_UNDEFINED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_UNDEFINED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */
    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        if (has_el2 && timeridx == GTIMER_VIRT) {
            if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */

    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        if (has_el2 && timeridx == GTIMER_VIRT) {
            if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /*
     * The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_UNDEFINED;
        }
        if (arm_is_el2_enabled(env)) {
            return CP_ACCESS_UNDEFINED;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_UNDEFINED;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static CPAccessResult gt_sel2timer_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /*
     * The AArch64 register view of the secure EL2 timers is mostly
     * accessible from EL3 and EL2, although accesses can also be trapped
     * to EL2 from EL1 depending on the nested-virt configuration.
     */
    switch (arm_current_el(env)) {
    case 0: /* UNDEFINED */
        return CP_ACCESS_UNDEFINED;
    case 1:
        if (!arm_is_secure(env)) {
            /* UNDEFINED */
            return CP_ACCESS_UNDEFINED;
        } else if (arm_hcr_el2_eff(env) & HCR_NV) {
            /* AArch64.SystemAccessTrap(EL2, 0x18) */
            return CP_ACCESS_TRAP_EL2;
        }
        /* UNDEFINED */
        return CP_ACCESS_UNDEFINED;
    case 2:
        if (!arm_is_secure(env)) {
            /* UNDEFINED */
            return CP_ACCESS_UNDEFINED;
        }
        return CP_ACCESS_OK;
    case 3:
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_OK;
        } else {
            return CP_ACCESS_UNDEFINED;
        }
    default:
        g_assert_not_reached();
    }
}

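/*
 * Return the current generic timer count: the virtual clock in ns,
 * scaled down by the counter period for this CPU's CNTFRQ.
 */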
uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}

static void gt_update_irq(ARMCPU *cpu, int timeridx)
{
    CPUARMState *env = &cpu->env;
    uint64_t cnthctl = env->cp15.cnthctl_el2;
    ARMSecuritySpace ss = arm_security_space(env);
    /* ISTATUS && !IMASK */
    int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;

    /*
     * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
     * It is RES0 in Secure and NonSecure state.
     */
    if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
        ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) ||
         (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) {
        irqstate = 0;
    }

    qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    trace_arm_gt_update_irq(timeridx, irqstate);
}

void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
{
    /*
     * Changing security state between Root and Secure/NonSecure, which may
     * happen when switching EL, can change the effective value of CNTHCTL_EL2
     * mask bits. Update the IRQ state accordingly.
     */
    gt_update_irq(cpu, GTIMER_VIRT);
    gt_update_irq(cpu, GTIMER_PHYS);
}

static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
{
    if ((env->cp15.scr_el3 & SCR_ECVEN) &&
        FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        return env->cp15.cntpoff_el2;
    }
    return 0;
}

static uint64_t gt_indirect_access_timer_offset(CPUARMState *env, int timeridx)
{
    /*
     * Return the timer offset to use for indirect accesses to the timer.
     * This is the Offset value as defined in D12.2.4.1 "Operation of the
     * CompareValue views of the timers".
     *
     * The condition here is not always the same as the condition for
     * whether to apply an offset register when doing a direct read of
     * the counter sysreg; those conditions are described in the
     * access pseudocode for each counter register.
     */
    switch (timeridx) {
    case GTIMER_PHYS:
        return gt_phys_raw_cnt_offset(env);
    case GTIMER_VIRT:
        return env->cp15.cntvoff_el2;
    case GTIMER_HYP:
    case GTIMER_SEC:
    case GTIMER_HYPVIRT:
    case GTIMER_S_EL2_PHYS:
    case GTIMER_S_EL2_VIRT:
        return 0;
    default:
        g_assert_not_reached();
    }
}

uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx)
{
    /*
     * Return the timer offset to use for direct accesses to the
     * counter registers CNTPCT and CNTVCT, and for direct accesses
     * to the CNT*_TVAL registers.
     *
     * This isn't exactly the same as the indirect-access offset,
     * because here we also care about what EL the register access
     * is being made from.
     *
     * This corresponds to the access pseudocode for the registers.
     */
    uint64_t hcr;

    switch (timeridx) {
    case GTIMER_PHYS:
        if (arm_current_el(env) >= 2) {
            return 0;
        }
        return gt_phys_raw_cnt_offset(env);
    case GTIMER_VIRT:
        switch (arm_current_el(env)) {
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if (hcr & HCR_E2H) {
                return 0;
            }
            break;
        case 0:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                return 0;
            }
            break;
        }
        return env->cp15.cntvoff_el2;
    case GTIMER_HYP:
    case GTIMER_SEC:
    case GTIMER_HYPVIRT:
    case GTIMER_S_EL2_PHYS:
    case GTIMER_S_EL2_VIRT:
        return 0;
    default:
        g_assert_not_reached();
    }
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /*
         * Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx);
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        if (istatus) {
            /*
             * Next transition is when (count - offset) rolls back over to 0.
             * If offset > count then this is when count == offset;
             * if offset <= count then this is when count == offset + 2^64
             * For the latter case we set nexttick to an "as far in future
             * as possible" value and let the code below handle it.
             */
            if (offset > count) {
                nexttick = offset;
            } else {
                nexttick = UINT64_MAX;
            }
        } else {
            /*
             * Next transition is when (count - offset) == cval, i.e.
             * when count == (cval + offset).
             * If that would overflow, then again we set up the next interrupt
             * for "as far in the future as possible" for the code below.
             */
            if (uadd64_overflow(gt->cval, offset, &nexttick)) {
                nexttick = UINT64_MAX;
            }
        }
        /*
         * Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
    gt_update_irq(cpu, timeridx);
}

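/* On cpreg reset, cancel the underlying QEMUTimer for this timer. */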
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_PHYS);
    return gt_get_countervalue(env) - offset;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
    return gt_get_countervalue(env) - offset;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

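/*
 * TVAL is a signed 32-bit down-counter view of the timer: reads return
 * CVAL - (count - offset) truncated to 32 bits, and writes set
 * CVAL = (count - offset) + sign-extended value.
 */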
static uint64_t do_tval_read(CPUARMState *env, int timeridx, uint64_t offset)
{
    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = gt_direct_access_timer_offset(env, timeridx);

    return do_tval_read(env, timeridx, offset);
}

static void do_tval_write(CPUARMState *env, int timeridx, uint64_t value,
                          uint64_t offset)
{
    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = gt_direct_access_timer_offset(env, timeridx);

    do_tval_write(env, timeridx, value, offset);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /*
         * IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        trace_arm_gt_imask_toggle(timeridx);
        gt_update_irq(cpu, timeridx);
    }
}

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

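/*
 * Select which timer an access to the CNT[PV]_* registers targets:
 * with FEAT_VHE, accesses made from the EL2&0 regime (HCR_EL2.E2H set)
 * are redirected to the EL2 physical/virtual timers; otherwise they
 * hit the EL1 physical/virtual timers as usual.
 */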
static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}

static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * This is CNTV_TVAL_EL02; unlike the underlying CNTV_TVAL_EL0
     * we always apply CNTVOFF_EL2. Special case that here rather
     * than going into the generic gt_tval_read() and then having
     * to re-detect that it's this register.
     * Note that the accessfn/perms mean we know we're at EL2 or EL3 here.
     */
    return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Similarly for writes to CNTV_TVAL_EL02 */
    do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

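/*
 * Note: the R_CNTHCTL_*_E2H1 fields used for valid_mask below name
 * the bit positions of the HCR_EL2.E2H == 1 layout of CNTHCTL_EL2.
 */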
static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.cnthctl_el2;
    uint32_t valid_mask =
        R_CNTHCTL_EL0PCTEN_E2H1_MASK |
        R_CNTHCTL_EL0VCTEN_E2H1_MASK |
        R_CNTHCTL_EVNTEN_MASK |
        R_CNTHCTL_EVNTDIR_MASK |
        R_CNTHCTL_EVNTI_MASK |
        R_CNTHCTL_EL0VTEN_MASK |
        R_CNTHCTL_EL0PTEN_MASK |
        R_CNTHCTL_EL1PCTEN_E2H1_MASK |
        R_CNTHCTL_EL1PTEN_MASK;

    if (cpu_isar_feature(aa64_rme, cpu)) {
        valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK;
    }
    if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
        valid_mask |=
            R_CNTHCTL_EL1TVT_MASK |
            R_CNTHCTL_EL1TVCT_MASK |
            R_CNTHCTL_EL1NVPCT_MASK |
            R_CNTHCTL_EL1NVVCT_MASK |
            R_CNTHCTL_EVNTIS_MASK;
    }
    if (cpu_isar_feature(aa64_ecv, cpu)) {
        valid_mask |= R_CNTHCTL_ECV_MASK;
    }

    /* Clear RES0 bits */
    value &= valid_mask;

    raw_write(env, ri, value);

    if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) {
        gt_update_irq(cpu, GTIMER_VIRT);
    } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) {
        gt_update_irq(cpu, GTIMER_PHYS);
    }
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_pel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_S_EL2_PHYS);
}

static void gt_sec_pel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static uint64_t gt_sec_pel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_S_EL2_PHYS);
}

static void gt_sec_pel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static void gt_sec_pel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static void gt_sec_vel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_S_EL2_VIRT);
}

static void gt_sec_vel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static uint64_t gt_sec_vel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_S_EL2_VIRT);
}

static void gt_sec_vel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static void gt_sec_vel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}

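/*
 * QEMUTimer expiry callbacks, one per generic timer: when the deadline
 * passes, recompute the timer's ISTATUS and interrupt line and
 * reprogram the next expiry.
 */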
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_sel2timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_S_EL2_PHYS);
}

void arm_gt_sel2vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_S_EL2_VIRT);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /*
     * Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32-bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /*
     * Secure timer -- this is actually restricted to EL3 only, and
     * configurably to Secure EL1, via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
};

/*
 * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which
 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
 * so our implementations here are identical to the normal registers.
 */
static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
    { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
};

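/*
 * CNTPOFF_EL2 is only reachable from EL2 or EL3; when EL3 is present,
 * EL2 accesses additionally trap to EL3 unless SCR_EL3.ECVEn is set.
 */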
static CPAccessResult gt_cntpoff_access(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ECVEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntpoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_PHYS);
}

static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
    .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
    .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
    .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
    .nv2_redirect_offset = 0x1a8,
    .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
};
#else

/*
 * In user-mode most of the generic timer registers are inaccessible;
 * however, modern kernels (4.12+) allow access to cntvct_el0.
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env); instead we directly
     * call the lower-level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
};

/*
 * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it also
 * is exposed to userspace by Linux.
 */
static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
    { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
};

#endif

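/*
 * Writes to the 32-bit (short-descriptor) PAR: the set of writable
 * bits differs between architecture versions, so mask off the bits
 * that are UNK/SBZP for the CPU being emulated.
 */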
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

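/*
 * For the PMSAv7 DRBAR/DRSR/DRACR registers the cpreg fieldoffset
 * points at a pointer to a dynamically sized array (one element per
 * implemented region), which we index by the current RGNR value.
 */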
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
}

static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
}

static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
}

static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
}

static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Ignore writes that would select an unimplemented region.
     * This is architecturally UNPREDICTABLE.
     */
    if (value >= cpu->pmsav7_dregion) {
        return;
    }

    env->pmsav7.rnr[M_REG_NS] = value;
}

static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
}

static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pmsav8.hprbar[env->pmsav8.hprselr];
}

static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
}

static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pmsav8.hprlar[env->pmsav8.hprselr];
}

static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    uint32_t n;
    uint32_t bit;
    ARMCPU *cpu = env_archcpu(env);

    /* Ignore writes to unimplemented regions */
    int rmax = MIN(cpu->pmsav8r_hdregion, 32);
    value &= MAKE_64BIT_MASK(0, rmax);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */

    /* Register alias is only valid for first 32 indexes */
    for (n = 0; n < rmax; ++n) {
        bit = extract32(value, n, 1);
        env->pmsav8.hprlar[n] = deposit32(
            env->pmsav8.hprlar[n], 0, 1, bit);
    }
}

static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t n;
    uint32_t result = 0x0;
    ARMCPU *cpu = env_archcpu(env);

    /* Register alias is only valid for first 32 indexes */
    for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
        if (env->pmsav8.hprlar[n] & 0x1) {
            result |= (0x1 << n);
        }
    }
    return result;
}

static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Ignore writes that would select an unimplemented region.
     * This is architecturally UNPREDICTABLE.
     */
    if (value >= cpu->pmsav8r_hdregion) {
        return;
    }

    env->pmsav8.hprselr = value;
}

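/*
 * The PMSAv8-R region number is scattered across the register
 * encoding: reassemble it here from opc0 bit 0, crm bits [2:0] and
 * opc2 bit 2. opc1 bit 2 distinguishes the EL2 (HPR*) view from the
 * EL1 (PR*) view, and opc2 bit 0 selects RLAR versus RBAR.
 */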
static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
                    (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);

    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */

    if (ri->opc1 & 4) {
        if (index >= cpu->pmsav8r_hdregion) {
            return;
        }
        if (ri->opc2 & 0x1) {
            env->pmsav8.hprlar[index] = value;
        } else {
            env->pmsav8.hprbar[index] = value;
        }
    } else {
        if (index >= cpu->pmsav7_dregion) {
            return;
        }
        if (ri->opc2 & 0x1) {
            env->pmsav8.rlar[M_REG_NS][index] = value;
        } else {
            env->pmsav8.rbar[M_REG_NS][index] = value;
        }
    }
}

static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
                    (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);

    if (ri->opc1 & 4) {
        if (index >= cpu->pmsav8r_hdregion) {
            return 0x0;
        }
        if (ri->opc2 & 0x1) {
            return env->pmsav8.hprlar[index];
        } else {
            return env->pmsav8.hprbar[index];
        }
    } else {
        if (index >= cpu->pmsav7_dregion) {
            return 0x0;
        }
        if (ri->opc2 & 0x1) {
            return env->pmsav8.rlar[M_REG_NS][index];
        } else {
            return env->pmsav8.rbar[M_REG_NS][index];
        }
    }
}

static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
    { .name = "PRBAR",
      .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .accessfn = access_tvm_trvm,
      .readfn = prbar_read, .writefn = prbar_write },
    { .name = "PRLAR",
      .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .accessfn = access_tvm_trvm,
      .readfn = prlar_read, .writefn = prlar_write },
    { .name = "PRSELR", .resetvalue = 0,
      .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = prselr_write,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
    { .name = "HPRBAR", .resetvalue = 0,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_NO_RAW,
      .readfn = hprbar_read, .writefn = hprbar_write },
    { .name = "HPRLAR",
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_NO_RAW,
      .readfn = hprlar_read, .writefn = hprlar_write },
    { .name = "HPRSELR", .resetvalue = 0,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .writefn = hprselr_write,
      .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
    { .name = "HPRENR",
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_NO_RAW,
      .readfn = hprenr_read, .writefn = hprenr_write },
};

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /*
     * Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
};

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /*
             * Pre-ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using the Long-descriptor translation table format.
             */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /*
             * In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    raw_write(env, ri, value);
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with the E2&0 regime, then an ASID is active.
     * Flush if that might be changing. Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        uint16_t mask = ARMMMUIdxBit_E20_2 |
                        ARMMMUIdxBit_E20_2_PAN |
                        ARMMMUIdxBit_E20_0;
        tlb_flush_by_mmuidx(env_cpu(env), mask);
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in the VMID used for stage 2 translation invalidates
     * the stage 2 and combined stage 1&2 TLBs (EL10_1 and EL10_0).
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
    }
    raw_write(env, ri, value);
}

static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_FAR_EL1,
      .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_ESR_EL1,
      .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_TTBR0_EL1,
      .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
      .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_TTBR1_EL1,
      .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
      .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_TCR_EL1,
      .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
      .writefn = vmsa_tcr_el12_write,
      .raw_writefn = raw_write,
      .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
};

/*
 * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * QEMU TLBs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .accessfn = access_tvm_trvm,
    .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = {
        offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
        offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
    },
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /*
     * TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
};

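/*
 * Only bits [13:0] of the XScale Coprocessor Access Register are
 * defined; they gate access to coprocessors 0..13.
 */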
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /*
     * XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /*
     * RAZ/WI the whole crn=15 space when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /*
     * The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
};

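/*
 * When EL2 is enabled, EL1 reads of MIDR and MPIDR are virtualized:
 * they return VPIDR_EL2 and VMPIDR_EL2 respectively.
 */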
midr_read(CPUARMState * env,const ARMCPRegInfo * ri)3015 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3016 {
3017 unsigned int cur_el = arm_current_el(env);
3018
3019 if (arm_is_el2_enabled(env) && cur_el == 1) {
3020 return env->cp15.vpidr_el2;
3021 }
3022 return raw_read(env, ri);
3023 }
3024
mpidr_read_val(CPUARMState * env)3025 static uint64_t mpidr_read_val(CPUARMState *env)
3026 {
3027 ARMCPU *cpu = env_archcpu(env);
3028 uint64_t mpidr = cpu->mp_affinity;
3029
3030 if (arm_feature(env, ARM_FEATURE_V7MP)) {
3031 mpidr |= (1U << 31);
3032 /*
3033 * Cores which are uniprocessor (non-coherent)
3034 * but still implement the MP extensions set
3035 * bit 30. (For instance, Cortex-R5).
3036 */
3037 if (cpu->mp_is_up) {
3038 mpidr |= (1u << 30);
3039 }
3040 }
3041 return mpidr;
3042 }
3043
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AMAIR_EL1,
      .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns) } },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

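/* EL0 accesses to DAIF trap to EL1 unless SCTLR_EL1.UMA is set. */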
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP_EL1;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};

static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_DIT;
}

static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
}

static const ARMCPRegInfo dit_reginfo = {
    .name = "DIT", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_dit_read, .writefn = aa64_dit_write
};

static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SSBS;
}

static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
}

static const ARMCPRegInfo ssbs_reginfo = {
    .name = "SSBS", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
};

static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Coherency or Persistence... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
{
    /* Cache invalidate/clean to Point of Unification... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. */
        if (arm_hcr_el2_eff(env) & hcrflags) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

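/*
 * HCR_EL2.TPU traps all of the PoU cache maintenance ops to EL2;
 * TICAB additionally traps IC IALLUIS, and TOCU the remaining
 * "to Point of Unification" ops.
 */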
static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
}

static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
}

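/*
 * DC ZVA is trapped from EL0 when SCTLR.DZE is clear (SCTLR_EL2 when
 * HCR_EL2.{E2H,TGE} == {1,1}, otherwise SCTLR_EL1), and from EL0/EL1
 * to EL2 when HCR_EL2.TDZ is set.
 */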
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL1;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

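/*
 * DCZID_EL0 reports the DC ZVA block size; the DZP bit (bit 4) is set
 * when DC ZVA is currently prohibited by the checks above.
 */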
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}

static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /*
         * Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    /* ??? Lots of these bits are not implemented. */

    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
        if (ri->opc1 == 6) { /* SCTLR_EL3 */
            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
        } else {
            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
                       SCTLR_ATA0 | SCTLR_ATA);
        }
    }

    if (raw_read(env, ri) == value) {
        /*
         * Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);

    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (tcg_enabled() && (ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * Some MDCR_EL3 bits affect whether PMU counters are running:
     * if we are trying to change any of those then we must
     * bracket this update with PMU start/finish calls.
     */
    bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;

    if (pmu_op) {
        pmu_op_start(env);
    }
    env->cp15.mdcr_el3 = value;
    if (pmu_op) {
        pmu_op_finish(env);
    }
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
    mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
}

static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * Some MDCR_EL2 bits affect whether PMU counters are running:
     * if we are trying to change any of those then we must
     * bracket this update with PMU start/finish calls.
     */
    bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;

    if (pmu_op) {
        pmu_op_start(env);
    }
    env->cp15.mdcr_el2 = value;
    if (pmu_op) {
        pmu_op_finish(env);
    }
}

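/*
 * FEAT_NV: accesses to ELR_EL1 and SPSR_EL1 from EL1 trap to EL2 when
 * HCR_EL2.{NV,NV1} == {1,1} and NV2 is clear.
 */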
static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);

        if (hcr_nv == (HCR_NV | HCR_NV1)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

#ifdef CONFIG_USER_ONLY
/*
 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
 * code to get around W^X restrictions, where one region is writable and the
 * other is executable.
 *
 * Since the executable region is never written to we cannot detect code
 * changes when running in user mode, and rely on the emulated JIT telling us
 * that the code has changed by executing this instruction.
 */
static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t icache_line_mask, start_address, end_address;
    const ARMCPU *cpu;

    cpu = env_archcpu(env);

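    /*
     * CTR_EL0.IminLine is the log2 of the number of words in the
     * smallest icache line; convert it to a byte mask.
     */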
    icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
    start_address = value & ~icache_line_mask;
    end_address = value | icache_line_mask;

    mmap_lock();

    tb_invalidate_phys_range(env_cpu(env), start_address, end_address);

    mmap_unlock();
}
#endif

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /*
     * Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .fgt = FGT_DCZID_EL0,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
      .fgt = FGT_DCZVA,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /*
     * Instruction cache ops. All of these except `IC IVAU` NOP because we
     * don't emulate caches.
     */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .fgt = FGT_ICIALLUIS,
      .accessfn = access_ticab },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .fgt = FGT_ICIALLU,
      .accessfn = access_tocu },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W,
      .fgt = FGT_ICIVAU,
      .accessfn = access_tocu,
#ifdef CONFIG_USER_ONLY
      .type = ARM_CP_NO_RAW,
      .writefn = ic_ivau_write
#else
      .type = ARM_CP_NOP
#endif
    },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .fgt = FGT_DCIVAC,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .fgt = FGT_DCISW,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .fgt = FGT_DCCVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .fgt = FGT_DCCSW,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .fgt = FGT_DCCVAU,
      .accessfn = access_tocu },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .fgt = FGT_DCCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .fgt = FGT_DCCISW,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fgt = FGT_PAR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_nv1,
      .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_nv1,
      .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /*
     * We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .nv2_redirect_offset = 0x240,
      .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW,
      .writefn = mdcr_el3_write,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
};

/* These are present only when EL1 supports AArch32 */
static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
};

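/*
 * Write HCR_EL2, masking out bits that are RES0 for the features this
 * CPU implements. The caller's valid_mask marks bits already known to
 * be valid, which lets the 32-bit HCR/HCR2 writers preserve the half
 * of the register they do not touch.
 */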
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= HCR_TERR | HCR_TEA;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= HCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_fwb, cpu)) {
            valid_mask |= HCR_FWB;
        }
        if (cpu_isar_feature(aa64_rme, cpu)) {
            valid_mask |= HCR_GPF;
        }
        if (cpu_isar_feature(aa64_nv, cpu)) {
            valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
        }
        if (cpu_isar_feature(aa64_nv2, cpu)) {
            valid_mask |= HCR_NV2;
        }
    }

    if (cpu_isar_feature(any_evt, cpu)) {
        valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
    } else if (cpu_isar_feature(any_half_evt, cpu)) {
        valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* RW is RAO/WI if EL1 is AArch64 only */
    if (arm_feature(env, ARM_FEATURE_AARCH64) &&
        !cpu_isar_feature(aa64_aa32_el1, cpu)) {
        value |= HCR_RW;
    }

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     * HCR_FWB changes the interpretation of stage2 descriptor bits
     * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
     */
    if ((env->cp15.hcr_el2 ^ value) &
        (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the BQL, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or
     * VFNMI, it is never possible for it to be taken immediately
     * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
     * at EL0 or EL1, and HCR can only be written at EL2.
     */
    g_assert(bql_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
    arm_cpu_update_vserr(cpu);
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        arm_cpu_update_vinmi(cpu);
        arm_cpu_update_vfnmi(cpu);
    }
}

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}

static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* hcr_write will set the RES1 bits on an AArch64-only CPU */
    hcr_write(env, ri, 0);
}

/*
 * Return the effective value of HCR_EL2, at the given security state.
 * Bits that are not included here:
 * RW (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
{
    uint64_t ret = env->cp15.hcr_el2;

    assert(space != ARMSS_Root);

    if (!arm_is_el2_enabled_secstate(env, space)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state". This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2". With lots of enumeration
         * on a per-field basis. In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return 0;
    }
    return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
}

/*
 * Corresponds to ARM pseudocode function ELIsInHost().
 */
bool el_is_in_host(CPUARMState *env, int el)
{
    uint64_t mask;

    /*
     * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
     * Perform the simplest bit tests first, and validate EL2 afterward.
     */
    if (el & 1) {
        return false; /* EL1 or EL3 */
    }

    /*
     * Note that hcr_write() checks isar_feature_aa64_vh(),
     * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
     */
    mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
    if ((env->cp15.hcr_el2 & mask) != mask) {
        return false;
    }

    /* TGE and/or E2H set: double check those bits are currently legal. */
    return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
}

static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t valid_mask = 0;

    /* FEAT_MOPS adds MSCEn and MCE2 */
    if (cpu_isar_feature(aa64_mops, cpu)) {
        valid_mask |= HCRX_MSCEN | HCRX_MCE2;
    }

    /* FEAT_NMI adds TALLINT, VINMI and VFNMI */
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
    }
    /* FEAT_CMOW adds CMOW */
    if (cpu_isar_feature(aa64_cmow, cpu)) {
        valid_mask |= HCRX_CMOW;
    }
    /* FEAT_XS adds FGTnXS, FnXS */
    if (cpu_isar_feature(aa64_xs, cpu)) {
        valid_mask |= HCRX_FGTNXS | HCRX_FNXS;
    }

    /* Clear RES0 bits. */
    env->cp15.hcrx_el2 = value & valid_mask;

    /*
     * Updates to VINMI and VFNMI require us to update the status of
     * virtual NMIs, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the BQL, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCRX pends a VINMI or VFNMI it is never
     * possible for it to be taken immediately, because VINMI and
     * VFNMI are masked unless running at EL0 or EL1, and HCRX
     * can only be written at EL2.
     */
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        g_assert(bql_locked());
        arm_cpu_update_vinmi(cpu);
        arm_cpu_update_vfnmi(cpu);
    }
}

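/* Accesses to HCRX_EL2 from EL2 trap to EL3 while SCR_EL3.HXEn is 0. */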
static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 2
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo hcrx_el2_reginfo = {
    .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
    .type = ARM_CP_IO,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
    .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
    .nv2_redirect_offset = 0xa0,
    .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
};

/* Return the effective value of HCRX_EL2. */
uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
    /*
     * The bits in this register behave as 0 for all purposes other than
     * direct reads of the register if SCR_EL3.HXEn is 0.
     * If EL2 is not enabled in the current security state, then the
     * bit may behave as if 0, or as if 1, depending on the bit.
     * For the moment, we treat the EL2-disabled case as taking
     * priority over the HXEn-disabled case. This is true for the only
     * bit for a feature which we implement where the answer is different
     * for the two cases (MSCEn for FEAT_MOPS).
     * This may need to be revisited for future bits.
     */
    if (!arm_is_el2_enabled(env)) {
        uint64_t hcrx = 0;
        if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
            /* MSCEn behaves as 1 if EL2 is not enabled */
            hcrx |= HCRX_MSCEN;
        }
        return hcrx;
    }
    if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return 0;
    }
    return env->cp15.hcrx_el2;
}

static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
        value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
    }
    return value;
}

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .nv2_redirect_offset = 0x78,
      .resetfn = hcr_reset,
      .writefn = hcr_write, .raw_writefn = raw_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      .nv2_redirect_offset = 0x40,
      /* no .writefn needed as this can't cause an ASID change */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write, .raw_writefn = raw_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
      .nv2_redirect_offset = 0x20,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .nv2_redirect_offset = 0x90,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /*
       * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
      .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .nv2_redirect_offset = 0x60,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .nv2_redirect_offset = 0x80,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
};

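/* Secure EL2 registers are accessible only from EL3 or from Secure state. */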
static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_UNDEFINED;
}

static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = sel2_access,
      .nv2_redirect_offset = 0x30,
      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
      .access = PL2_RW, .accessfn = sel2_access,
      .nv2_redirect_offset = 0x48,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
#ifndef CONFIG_USER_ONLY
    /* Secure EL2 Physical Timer */
    { .name = "CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .readfn = gt_sec_pel2_tval_read,
      .writefn = gt_sec_pel2_tval_write,
      .resetfn = gt_sec_pel2_timer_reset,
    },
    { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_pel2_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].cval),
      .writefn = gt_sec_pel2_cval_write, .raw_writefn = raw_write,
    },
    /* Secure EL2 Virtual Timer */
    { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .readfn = gt_sec_vel2_tval_read,
      .writefn = gt_sec_vel2_tval_write,
      .resetfn = gt_sec_vel2_timer_reset,
    },
    { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_vel2_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].cval),
      .writefn = gt_sec_vel2_cval_write, .raw_writefn = raw_write,
    },
#endif
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3 or EL2.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_UNDEFINED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write, .raw_writefn = raw_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change */
      .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
};

#ifndef CONFIG_USER_ONLY

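/*
 * Registers redirected by FEAT_VHE UNDEF when HCR_EL2.E2H is clear;
 * the exception is an access from EL1, which can only be a FEAT_NV
 * access and must succeed so that it can be trapped or redirected.
 */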
static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access */
        return CP_ACCESS_OK;
    }
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access with NVx == 101 */
        if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return e2h_access(env, ri, isread);
}

static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access with NVx == 101 */
        if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return e2h_access(env, ri, isread);
}

/* Test if system register redirection is to occur in the current state. */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}

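/*
 * While E2H redirection is active, an access to an EL1 register from
 * EL2 really targets the EL2 register; ri->opaque holds the EL2
 * register's reginfo, so switch to it (and its accessors) before
 * dispatching the read or write.
 */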
el2_e2h_read(CPUARMState * env,const ARMCPRegInfo * ri)4432 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
4433 {
4434 CPReadFn *readfn;
4435
4436 if (redirect_for_e2h(env)) {
4437 /* Switch to the saved EL2 version of the register. */
4438 ri = ri->opaque;
4439 readfn = ri->readfn;
4440 } else {
4441 readfn = ri->orig_readfn;
4442 }
4443 if (readfn == NULL) {
4444 readfn = raw_read;
4445 }
4446 return readfn(env, ri);
4447 }
4448
4449 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
4450 uint64_t value)
4451 {
4452 CPWriteFn *writefn;
4453
4454 if (redirect_for_e2h(env)) {
4455 /* Switch to the saved EL2 version of the register. */
4456 ri = ri->opaque;
4457 writefn = ri->writefn;
4458 } else {
4459 writefn = ri->orig_writefn;
4460 }
4461 if (writefn == NULL) {
4462 writefn = raw_write;
4463 }
4464 writefn(env, ri, value);
4465 }
4466
4467 static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri)
4468 {
4469 /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
4470 return ri->orig_readfn(env, ri->opaque);
4471 }
4472
4473 static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri,
4474 uint64_t value)
4475 {
4476 /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
4477 return ri->orig_writefn(env, ri->opaque, value);
4478 }
4479
4480 static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
4481 const ARMCPRegInfo *ri,
4482 bool isread)
4483 {
4484 if (arm_current_el(env) == 1) {
4485 /*
4486 * This must be a FEAT_NV access (will either trap or redirect
4487 * to memory). None of the registers with _EL12 aliases want to
4488 * apply their trap controls for this kind of access, so don't
4489 * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
4490 */
4491 return CP_ACCESS_OK;
4492 }
4493 /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
4494 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
4495 return CP_ACCESS_UNDEFINED;
4496 }
4497 if (ri->orig_accessfn) {
4498 return ri->orig_accessfn(env, ri->opaque, isread);
4499 }
4500 return CP_ACCESS_OK;
4501 }
4502
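/*
 * To make the redirection concrete: with HCR_EL2.E2H == 1 at EL2,
 * an MRS of SCTLR_EL1 goes through el2_e2h_read(), which follows
 * ri->opaque to the SCTLR_EL2 regdef and returns cp15.sctlr_el[2],
 * while an MRS of SCTLR_EL12 goes through el2_e2h_e12_read(), which
 * hands the original SCTLR_EL1 regdef to its accessor and so returns
 * cp15.sctlr_el[1]. With E2H == 0 the _EL12 encodings UNDEF and the
 * _EL1 encodings use orig_readfn/orig_writefn as normal.
 */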
4503 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
4504 {
4505 struct E2HAlias {
4506 uint32_t src_key, dst_key, new_key;
4507 const char *src_name, *dst_name, *new_name;
4508 bool (*feature)(const ARMISARegisters *id);
4509 };
4510
4511 #define K(op0, op1, crn, crm, op2) \
4512 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
4513
4514 static const struct E2HAlias aliases[] = {
4515 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
4516 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
4517 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
4518 "CPACR", "CPTR_EL2", "CPACR_EL12" },
4519 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
4520 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
4521 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
4522 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
4523 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
4524 "TCR_EL1", "TCR_EL2", "TCR_EL12" },
4525 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
4526 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
4527 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
4528 "ELR_EL1", "ELR_EL2", "ELR_EL12" },
4529 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
4530 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
4531 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
4532 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
4533 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
4534 "ESR_EL1", "ESR_EL2", "ESR_EL12" },
4535 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
4536 "FAR_EL1", "FAR_EL2", "FAR_EL12" },
4537 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
4538 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
4539 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
4540 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
4541 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
4542 "VBAR", "VBAR_EL2", "VBAR_EL12" },
4543 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
4544 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
4545 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
4546 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
4547
4548 /*
4549 * Note that redirection of ZCR is mentioned in the description
4550 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
4551 * not in the summary table.
4552 */
4553 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
4554 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
4555 { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6),
4556 "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
4557
4558 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
4559 "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
4560
4561 { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
4562 "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
4563 isar_feature_aa64_scxtnum },
4564
4565 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
4566 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
4567 };
4568 #undef K
4569
4570 size_t i;
4571
4572 for (i = 0; i < ARRAY_SIZE(aliases); i++) {
4573 const struct E2HAlias *a = &aliases[i];
4574 ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
4575 bool ok;
4576
4577 if (a->feature && !a->feature(&cpu->isar)) {
4578 continue;
4579 }
4580
4581 src_reg = g_hash_table_lookup(cpu->cp_regs,
4582 (gpointer)(uintptr_t)a->src_key);
4583 dst_reg = g_hash_table_lookup(cpu->cp_regs,
4584 (gpointer)(uintptr_t)a->dst_key);
4585 g_assert(src_reg != NULL);
4586 g_assert(dst_reg != NULL);
4587
4588 /* Cross-compare names to detect typos in the keys. */
4589 g_assert(strcmp(src_reg->name, a->src_name) == 0);
4590 g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
4591
4592 /* None of the core system registers use opaque; we will. */
4593 g_assert(src_reg->opaque == NULL);
4594
4595 /* Create alias before redirection so we dup the right data. */
4596 new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
4597
4598 new_reg->name = a->new_name;
4599 new_reg->type |= ARM_CP_ALIAS;
4600 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
4601 new_reg->access &= PL2_RW | PL3_RW;
4602 /* The new_reg op fields are as per new_key, not the target reg */
4603 new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
4604 >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
4605 new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
4606 >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
4607 new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
4608 >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
4609 new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
4610 >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
4611 new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
4612 >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
4613 new_reg->opaque = src_reg;
4614 new_reg->orig_readfn = src_reg->readfn ?: raw_read;
4615 new_reg->orig_writefn = src_reg->writefn ?: raw_write;
4616 new_reg->orig_accessfn = src_reg->accessfn;
4617 if (!new_reg->raw_readfn) {
4618 new_reg->raw_readfn = raw_read;
4619 }
4620 if (!new_reg->raw_writefn) {
4621 new_reg->raw_writefn = raw_write;
4622 }
4623 new_reg->readfn = el2_e2h_e12_read;
4624 new_reg->writefn = el2_e2h_e12_write;
4625 new_reg->accessfn = el2_e2h_e12_access;
4626
4627 /*
4628 * If the _EL1 register is redirected to memory by FEAT_NV2,
4629 * then it shares the offset with the _EL12 register,
4630 * and which one is redirected depends on HCR_EL2.NV1.
4631 */
4632 if (new_reg->nv2_redirect_offset) {
4633 assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
4634 new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
4635 new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
4636 }
4637
4638 ok = g_hash_table_insert(cpu->cp_regs,
4639 (gpointer)(uintptr_t)a->new_key, new_reg);
4640 g_assert(ok);
4641
4642 src_reg->opaque = dst_reg;
4643 src_reg->orig_readfn = src_reg->readfn ?: raw_read;
4644 src_reg->orig_writefn = src_reg->writefn ?: raw_write;
4645 if (!src_reg->raw_readfn) {
4646 src_reg->raw_readfn = raw_read;
4647 }
4648 if (!src_reg->raw_writefn) {
4649 src_reg->raw_writefn = raw_write;
4650 }
4651 src_reg->readfn = el2_e2h_read;
4652 src_reg->writefn = el2_e2h_write;
4653 }
4654 }
4655 #endif
4656
4657 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4658 bool isread)
4659 {
4660 int cur_el = arm_current_el(env);
4661
4662 if (cur_el < 2) {
4663 uint64_t hcr = arm_hcr_el2_eff(env);
4664
4665 if (cur_el == 0) {
4666 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4667 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
4668 return CP_ACCESS_TRAP_EL2;
4669 }
4670 } else {
4671 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4672 return CP_ACCESS_TRAP_EL1;
4673 }
4674 if (hcr & HCR_TID2) {
4675 return CP_ACCESS_TRAP_EL2;
4676 }
4677 }
4678 } else if (hcr & HCR_TID2) {
4679 return CP_ACCESS_TRAP_EL2;
4680 }
4681 }
4682 
4687 return CP_ACCESS_OK;
4688 }
4689
4690 /*
4691 * Check for traps to RAS registers, which are controlled
4692 * by HCR_EL2.TERR and SCR_EL3.TERR.
4693 */
4694 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
4695 bool isread)
4696 {
4697 int el = arm_current_el(env);
4698
4699 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
4700 return CP_ACCESS_TRAP_EL2;
4701 }
4702 if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) {
4703 return CP_ACCESS_TRAP_EL3;
4704 }
4705 return CP_ACCESS_OK;
4706 }
4707
4708 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4709 {
4710 int el = arm_current_el(env);
4711
4712 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
4713 return env->cp15.vdisr_el2;
4714 }
4715 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
4716 return 0; /* RAZ/WI */
4717 }
4718 return env->cp15.disr_el1;
4719 }
4720
4721 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4722 {
4723 int el = arm_current_el(env);
4724
4725 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
4726 env->cp15.vdisr_el2 = val;
4727 return;
4728 }
4729 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
4730 return; /* RAZ/WI */
4731 }
4732 env->cp15.disr_el1 = val;
4733 }
4734
4735 /*
4736 * Minimal RAS implementation with no Error Records.
4737 * Which means that all of the Error Record registers:
4738 * ERXADDR_EL1
4739 * ERXCTLR_EL1
4740 * ERXFR_EL1
4741 * ERXMISC0_EL1
4742 * ERXMISC1_EL1
4743 * ERXMISC2_EL1
4744 * ERXMISC3_EL1
4745 * ERXPFGCDN_EL1 (RASv1p1)
4746 * ERXPFGCTL_EL1 (RASv1p1)
4747 * ERXPFGF_EL1 (RASv1p1)
4748 * ERXSTATUS_EL1
4749 * and
4750 * ERRSELR_EL1
4751 * may generate UNDEFINED, which is the effect we get by not
4752 * listing them at all.
4753 *
4754 * These registers have fine-grained trap bits, but UNDEF-to-EL1
4755 * is higher priority than FGT-to-EL2 so we do not need to list them
4756 * in order to check for an FGT.
4757 */
4758 static const ARMCPRegInfo minimal_ras_reginfo[] = {
4759 { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
4760 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
4761 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
4762 .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
4763 { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
4764 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
4765 .access = PL1_R, .accessfn = access_terr,
4766 .fgt = FGT_ERRIDR_EL1,
4767 .type = ARM_CP_CONST, .resetvalue = 0 },
4768 { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
4769 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
4770 .nv2_redirect_offset = 0x500,
4771 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
4772 { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
4773 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
4774 .nv2_redirect_offset = 0x508,
4775 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
4776 };
4777
4778 /*
4779 * Return the exception level to which exceptions should be taken
4780 * via SVEAccessTrap. This excludes the check for whether the exception
4781 * should be routed through AArch64.AdvSIMDFPAccessTrap. That can easily
4782 * be found by testing 0 < fp_exception_el < sve_exception_el.
4783 *
4784 * C.f. the ARM pseudocode function CheckSVEEnabled. Note that the
4785 * pseudocode does *not* separate out the FP trap checks, but has them
4786 * all in one function.
4787 */
4788 int sve_exception_el(CPUARMState *env, int el)
4789 {
4790 #ifndef CONFIG_USER_ONLY
4791 if (el <= 1 && !el_is_in_host(env, el)) {
4792 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
4793 case 1:
4794 if (el != 0) {
4795 break;
4796 }
4797 /* fall through */
4798 case 0:
4799 case 2:
4800 return 1;
4801 }
4802 }
4803
4804 if (el <= 2 && arm_is_el2_enabled(env)) {
4805 /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
4806 if (env->cp15.hcr_el2 & HCR_E2H) {
4807 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
4808 case 1:
4809 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
4810 break;
4811 }
4812 /* fall through */
4813 case 0:
4814 case 2:
4815 return 2;
4816 }
4817 } else {
4818 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
4819 return 2;
4820 }
4821 }
4822 }
4823
4824 /* CPTR_EL3. EZ is a trap-when-zero (negative) bit, so we must check that EL3 exists. */
4825 if (arm_feature(env, ARM_FEATURE_EL3)
4826 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
4827 return 3;
4828 }
4829 #endif
4830 return 0;
4831 }
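/*
 * The switch above decodes CPACR_EL1.ZEN: 0b11 falls out of the
 * switch (no trap), 0b01 breaks for el != 0 (trap EL0 only), and
 * 0b00/0b10 return 1 (trap both EL0 and EL1); sme_exception_el()
 * below applies the same pattern to the SMEN/TSM controls.
 */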
4832
4833 /*
4834 * Return the exception level to which exceptions should be taken for SME.
4835 * C.f. the ARM pseudocode function CheckSMEAccess.
4836 */
4837 int sme_exception_el(CPUARMState *env, int el)
4838 {
4839 #ifndef CONFIG_USER_ONLY
4840 if (el <= 1 && !el_is_in_host(env, el)) {
4841 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
4842 case 1:
4843 if (el != 0) {
4844 break;
4845 }
4846 /* fall through */
4847 case 0:
4848 case 2:
4849 return 1;
4850 }
4851 }
4852
4853 if (el <= 2 && arm_is_el2_enabled(env)) {
4854 /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
4855 if (env->cp15.hcr_el2 & HCR_E2H) {
4856 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
4857 case 1:
4858 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
4859 break;
4860 }
4861 /* fall through */
4862 case 0:
4863 case 2:
4864 return 2;
4865 }
4866 } else {
4867 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
4868 return 2;
4869 }
4870 }
4871 }
4872
4873 /* CPTR_EL3. ESM is a trap-when-zero (negative) bit, so we must check that EL3 exists. */
4874 if (arm_feature(env, ARM_FEATURE_EL3)
4875 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
4876 return 3;
4877 }
4878 #endif
4879 return 0;
4880 }
4881
4882 /*
4883 * Given that SVE is enabled, return the vector length for EL.
4884 */
4885 uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
4886 {
4887 ARMCPU *cpu = env_archcpu(env);
4888 uint64_t *cr = env->vfp.zcr_el;
4889 uint32_t map = cpu->sve_vq.map;
4890 uint32_t len = ARM_MAX_VQ - 1;
4891
4892 if (sm) {
4893 cr = env->vfp.smcr_el;
4894 map = cpu->sme_vq.map;
4895 }
4896
4897 if (el <= 1 && !el_is_in_host(env, el)) {
4898 len = MIN(len, 0xf & (uint32_t)cr[1]);
4899 }
4900 if (el <= 2 && arm_is_el2_enabled(env)) {
4901 len = MIN(len, 0xf & (uint32_t)cr[2]);
4902 }
4903 if (arm_feature(env, ARM_FEATURE_EL3)) {
4904 len = MIN(len, 0xf & (uint32_t)cr[3]);
4905 }
4906
4907 map &= MAKE_64BIT_MASK(0, len + 1);
4908 if (map != 0) {
4909 return 31 - clz32(map);
4910 }
4911
4912 /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
4913 assert(sm);
4914 return ctz32(cpu->sme_vq.map);
4915 }
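/*
 * Worked example: a CPU with sve_vq.map == 0b1011 (VQ 1, 2 and 4
 * supported), ZCR_EL1.LEN == 5 and no EL2/EL3 constraint gives
 * len = MIN(15, 5) = 5; masking leaves map == 0b1011, whose highest
 * set bit is 3, so we return a VQ-1 of 3, i.e. a 512-bit vector
 * length.
 */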
4916
4917 uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
4918 {
4919 return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
4920 }
4921
4922 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4923 uint64_t value)
4924 {
4925 int cur_el = arm_current_el(env);
4926 int old_len = sve_vqm1_for_el(env, cur_el);
4927 int new_len;
4928
4929 /* Bits other than [3:0] are RAZ/WI. */
4930 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
4931 raw_write(env, ri, value & 0xf);
4932
4933 /*
4934 * Because we arrived here, we know both FP and SVE are enabled;
4935 * otherwise we would have trapped access to the ZCR_ELn register.
4936 */
4937 new_len = sve_vqm1_for_el(env, cur_el);
4938 if (new_len < old_len) {
4939 aarch64_sve_narrow_vq(env, new_len + 1);
4940 }
4941 }
4942
4943 static const ARMCPRegInfo zcr_reginfo[] = {
4944 { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
4945 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
4946 .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
4947 .access = PL1_RW, .type = ARM_CP_SVE,
4948 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
4949 .writefn = zcr_write, .raw_writefn = raw_write },
4950 { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
4951 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
4952 .access = PL2_RW, .type = ARM_CP_SVE,
4953 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
4954 .writefn = zcr_write, .raw_writefn = raw_write },
4955 { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
4956 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
4957 .access = PL3_RW, .type = ARM_CP_SVE,
4958 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
4959 .writefn = zcr_write, .raw_writefn = raw_write },
4960 };
4961
4962 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
4963 bool isread)
4964 {
4965 int el = arm_current_el(env);
4966
4967 if (el == 0) {
4968 uint64_t sctlr = arm_sctlr(env, el);
4969 if (!(sctlr & SCTLR_EnTP2)) {
4970 return CP_ACCESS_TRAP_EL1;
4971 }
4972 }
4973 /* TODO: FEAT_FGT */
4974 if (el < 3
4975 && arm_feature(env, ARM_FEATURE_EL3)
4976 && !(env->cp15.scr_el3 & SCR_ENTP2)) {
4977 return CP_ACCESS_TRAP_EL3;
4978 }
4979 return CP_ACCESS_OK;
4980 }
4981
4982 static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
4983 bool isread)
4984 {
4985 /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */
4986 if (arm_current_el(env) == 2
4987 && arm_feature(env, ARM_FEATURE_EL3)
4988 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
4989 return CP_ACCESS_TRAP_EL3;
4990 }
4991 return CP_ACCESS_OK;
4992 }
4993
4994 static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
4995 bool isread)
4996 {
4997 if (arm_current_el(env) < 3
4998 && arm_feature(env, ARM_FEATURE_EL3)
4999 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
5000 return CP_ACCESS_TRAP_EL3;
5001 }
5002 return CP_ACCESS_OK;
5003 }
5004
5005 /* ResetSVEState */
5006 static void arm_reset_sve_state(CPUARMState *env)
5007 {
5008 memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
5009 /* Recall that FFR is stored as pregs[16]. */
5010 memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
5011 vfp_set_fpsr(env, 0x0800009f);
5012 }
5013
5014 void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
5015 {
5016 uint64_t change = (env->svcr ^ new) & mask;
5017
5018 if (change == 0) {
5019 return;
5020 }
5021 env->svcr ^= change;
5022
5023 if (change & R_SVCR_SM_MASK) {
5024 arm_reset_sve_state(env);
5025 }
5026
5027 /*
5028 * ResetSMEState.
5029 *
5030 * SetPSTATE_ZA zeros on enable and disable. We can zero this only
5031 * on enable: while disabled, the storage is inaccessible and the
5032 * value does not matter. We're not saving the storage in vmstate
5033 * when disabled either.
5034 */
5035 if (change & new & R_SVCR_ZA_MASK) {
5036 memset(&env->za_state, 0, sizeof(env->za_state));
5037 }
5038
5039 if (tcg_enabled()) {
5040 arm_rebuild_hflags(env);
5041 }
5042 }
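/*
 * For example, an SMSTART SM at EL0 amounts to
 * aarch64_set_svcr(env, R_SVCR_SM_MASK, R_SVCR_SM_MASK): SM flips
 * from 0 to 1, arm_reset_sve_state() zeroes the Z/P registers and
 * sets FPSR to 0x0800009f, ZA is untouched, and the hflags are
 * rebuilt so translation picks up the new streaming mode.
 */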
5043
5044 static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5045 uint64_t value)
5046 {
5047 aarch64_set_svcr(env, value, -1);
5048 }
5049
5050 static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5051 uint64_t value)
5052 {
5053 int cur_el = arm_current_el(env);
5054 int old_len = sve_vqm1_for_el(env, cur_el);
5055 uint64_t valid_mask = R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
5056 int new_len;
5057
5058 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
5059 if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
5060 valid_mask |= R_SMCR_EZT0_MASK;
5061 }
5062 value &= valid_mask;
5063 raw_write(env, ri, value);
5064
5065 /*
5066 * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
5067 * when SVL is widened (old values kept, or zeros). Choose to keep the
5068 * current values for simplicity. But for QEMU internals, we must still
5069 * apply the narrower SVL to the Zregs and Pregs -- see the comment
5070 * above aarch64_sve_narrow_vq.
5071 */
5072 new_len = sve_vqm1_for_el(env, cur_el);
5073 if (new_len < old_len) {
5074 aarch64_sve_narrow_vq(env, new_len + 1);
5075 }
5076 }
5077
5078 static const ARMCPRegInfo sme_reginfo[] = {
5079 { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
5080 .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
5081 .access = PL0_RW, .accessfn = access_tpidr2,
5082 .fgt = FGT_NTPIDR2_EL0,
5083 .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
5084 { .name = "SVCR", .state = ARM_CP_STATE_AA64,
5085 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
5086 .access = PL0_RW, .type = ARM_CP_SME,
5087 .fieldoffset = offsetof(CPUARMState, svcr),
5088 .writefn = svcr_write, .raw_writefn = raw_write },
5089 { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
5090 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
5091 .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
5092 .access = PL1_RW, .type = ARM_CP_SME,
5093 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
5094 .writefn = smcr_write, .raw_writefn = raw_write },
5095 { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
5096 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
5097 .access = PL2_RW, .type = ARM_CP_SME,
5098 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
5099 .writefn = smcr_write, .raw_writefn = raw_write },
5100 { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
5101 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
5102 .access = PL3_RW, .type = ARM_CP_SME,
5103 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
5104 .writefn = smcr_write, .raw_writefn = raw_write },
5105 { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
5106 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
5107 .access = PL1_R, .accessfn = access_aa64_tid1,
5108 /*
5109 * IMPLEMENTOR = 0 (software)
5110 * REVISION = 0 (implementation defined)
5111 * SMPS = 0 (no streaming execution priority in QEMU)
5112 * AFFINITY = 0 (Streaming SVE mode not shared with other PEs)
5113 */
5114 .type = ARM_CP_CONST, .resetvalue = 0, },
5115 /*
5116 * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES0.
5117 */
5118 { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
5119 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
5120 .access = PL1_RW, .accessfn = access_smpri,
5121 .fgt = FGT_NSMPRI_EL1,
5122 .type = ARM_CP_CONST, .resetvalue = 0 },
5123 { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
5124 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
5125 .nv2_redirect_offset = 0x1f8,
5126 .access = PL2_RW, .accessfn = access_smprimap,
5127 .type = ARM_CP_CONST, .resetvalue = 0 },
5128 };
5129
5130 static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5131 uint64_t value)
5132 {
5133 /* L0GPTSZ is RO; other bits not mentioned are RES0. */
5134 uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
5135 R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
5136 R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
5137
5138 env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
5139 }
5140
5141 static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
5142 {
5143 env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
5144 env_archcpu(env)->reset_l0gptsz);
5145 }
5146
5147 static const ARMCPRegInfo rme_reginfo[] = {
5148 { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
5149 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
5150 .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
5151 .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
5152 { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
5153 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
5154 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
5155 { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
5156 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
5157 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
5158 { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
5159 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
5160 .access = PL3_W, .type = ARM_CP_NOP },
5161 };
5162
5163 static const ARMCPRegInfo rme_mte_reginfo[] = {
5164 { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
5165 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
5166 .access = PL3_W, .type = ARM_CP_NOP },
5167 };
5168
5169 static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri,
5170 uint64_t value)
5171 {
5172 env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
5173 }
5174
5175 static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri)
5176 {
5177 return env->pstate & PSTATE_ALLINT;
5178 }
5179
5180 static CPAccessResult aa64_allint_access(CPUARMState *env,
5181 const ARMCPRegInfo *ri, bool isread)
5182 {
5183 if (!isread && arm_current_el(env) == 1 &&
5184 (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) {
5185 return CP_ACCESS_TRAP_EL2;
5186 }
5187 return CP_ACCESS_OK;
5188 }
5189
5190 static const ARMCPRegInfo nmi_reginfo[] = {
5191 { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
5192 .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3,
5193 .type = ARM_CP_NO_RAW,
5194 .access = PL1_RW, .accessfn = aa64_allint_access,
5195 .fieldoffset = offsetof(CPUARMState, pstate),
5196 .writefn = aa64_allint_write, .readfn = aa64_allint_read,
5197 .resetfn = arm_cp_reset_ignore },
5198 };
5199
5200 #ifndef CONFIG_USER_ONLY
5201 /*
5202 * We don't know until after realize whether there's a GICv3
5203 * attached, and that is what registers the gicv3 sysregs.
5204 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and
5205 * ID_AA64PFR0_EL1 at runtime.
5206 */
5207 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
5208 {
5209 ARMCPU *cpu = env_archcpu(env);
5210 uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);
5211
5212 if (env->gicv3state) {
5213 pfr1 |= 1 << 28;
5214 }
5215 return pfr1;
5216 }
5217
5218 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
5219 {
5220 ARMCPU *cpu = env_archcpu(env);
5221 uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);
5222
5223 if (env->gicv3state) {
5224 pfr0 |= 1 << 24;
5225 }
5226 return pfr0;
5227 }
5228 #endif
5229
5230 /*
5231 * Shared logic between LORID and the rest of the LOR* registers.
5232 * Secure state exclusion has already been dealt with.
5233 */
5234 static CPAccessResult access_lor_ns(CPUARMState *env,
5235 const ARMCPRegInfo *ri, bool isread)
5236 {
5237 int el = arm_current_el(env);
5238
5239 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
5240 return CP_ACCESS_TRAP_EL2;
5241 }
5242 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
5243 return CP_ACCESS_TRAP_EL3;
5244 }
5245 return CP_ACCESS_OK;
5246 }
5247
5248 static CPAccessResult access_lor_other(CPUARMState *env,
5249 const ARMCPRegInfo *ri, bool isread)
5250 {
5251 if (arm_is_secure_below_el3(env)) {
5252 /* UNDEF if SCR_EL3.NS == 0 */
5253 return CP_ACCESS_UNDEFINED;
5254 }
5255 return access_lor_ns(env, ri, isread);
5256 }
5257
5258 /*
5259 * A trivial implementation of ARMv8.1-LOR leaves all of these
5260 * registers fixed at 0, which indicates that there are zero
5261 * supported Limited Ordering regions.
5262 */
5263 static const ARMCPRegInfo lor_reginfo[] = {
5264 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
5265 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
5266 .access = PL1_RW, .accessfn = access_lor_other,
5267 .fgt = FGT_LORSA_EL1,
5268 .type = ARM_CP_CONST, .resetvalue = 0 },
5269 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
5270 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
5271 .access = PL1_RW, .accessfn = access_lor_other,
5272 .fgt = FGT_LOREA_EL1,
5273 .type = ARM_CP_CONST, .resetvalue = 0 },
5274 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
5275 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
5276 .access = PL1_RW, .accessfn = access_lor_other,
5277 .fgt = FGT_LORN_EL1,
5278 .type = ARM_CP_CONST, .resetvalue = 0 },
5279 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
5280 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
5281 .access = PL1_RW, .accessfn = access_lor_other,
5282 .fgt = FGT_LORC_EL1,
5283 .type = ARM_CP_CONST, .resetvalue = 0 },
5284 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
5285 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
5286 .access = PL1_R, .accessfn = access_lor_ns,
5287 .fgt = FGT_LORID_EL1,
5288 .type = ARM_CP_CONST, .resetvalue = 0 },
5289 };
5290
5291 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
5292 bool isread)
5293 {
5294 int el = arm_current_el(env);
5295
5296 if (el < 2 &&
5297 arm_is_el2_enabled(env) &&
5298 !(arm_hcr_el2_eff(env) & HCR_APK)) {
5299 return CP_ACCESS_TRAP_EL2;
5300 }
5301 if (el < 3 &&
5302 arm_feature(env, ARM_FEATURE_EL3) &&
5303 !(env->cp15.scr_el3 & SCR_APK)) {
5304 return CP_ACCESS_TRAP_EL3;
5305 }
5306 return CP_ACCESS_OK;
5307 }
5308
5309 static const ARMCPRegInfo pauth_reginfo[] = {
5310 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5311 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
5312 .access = PL1_RW, .accessfn = access_pauth,
5313 .fgt = FGT_APDAKEY,
5314 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
5315 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5316 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
5317 .access = PL1_RW, .accessfn = access_pauth,
5318 .fgt = FGT_APDAKEY,
5319 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
5320 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5321 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
5322 .access = PL1_RW, .accessfn = access_pauth,
5323 .fgt = FGT_APDBKEY,
5324 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
5325 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5326 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
5327 .access = PL1_RW, .accessfn = access_pauth,
5328 .fgt = FGT_APDBKEY,
5329 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
5330 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5331 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
5332 .access = PL1_RW, .accessfn = access_pauth,
5333 .fgt = FGT_APGAKEY,
5334 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
5335 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5336 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
5337 .access = PL1_RW, .accessfn = access_pauth,
5338 .fgt = FGT_APGAKEY,
5339 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
5340 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5341 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
5342 .access = PL1_RW, .accessfn = access_pauth,
5343 .fgt = FGT_APIAKEY,
5344 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
5345 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5346 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
5347 .access = PL1_RW, .accessfn = access_pauth,
5348 .fgt = FGT_APIAKEY,
5349 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
5350 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5351 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
5352 .access = PL1_RW, .accessfn = access_pauth,
5353 .fgt = FGT_APIBKEY,
5354 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
5355 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5356 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
5357 .access = PL1_RW, .accessfn = access_pauth,
5358 .fgt = FGT_APIBKEY,
5359 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
5360 };
5361
5362 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
5363 {
5364 Error *err = NULL;
5365 uint64_t ret;
5366
5367 /* Success sets NZCV = 0000. */
5368 env->NF = env->CF = env->VF = 0, env->ZF = 1;
5369
5370 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
5371 /*
5372 * ??? Failed, for unknown reasons in the crypto subsystem.
5373 * The best we can do is log the reason and return the
5374 * timed-out indication to the guest. There is no reason
5375 * we know to expect this failure to be transitory, so the
5376 * guest may well hang retrying the operation.
5377 */
5378 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
5379 ri->name, error_get_pretty(err));
5380 error_free(err);
5381
5382 env->ZF = 0; /* NZCV = 0100 */
5383 return 0;
5384 }
5385 return ret;
5386 }
5387
5388 /* We do not support re-seeding, so the two registers operate the same. */
5389 static const ARMCPRegInfo rndr_reginfo[] = {
5390 { .name = "RNDR", .state = ARM_CP_STATE_AA64,
5391 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5392 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
5393 .access = PL0_R, .readfn = rndr_readfn },
5394 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
5395 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5396 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
5397 .access = PL0_R, .readfn = rndr_readfn },
5398 };
5399
5400 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
5401 uint64_t value)
5402 {
5403 #ifdef CONFIG_TCG
5404 ARMCPU *cpu = env_archcpu(env);
5405 /* CTR_EL0 System register -> DminLine, bits [19:16] */
5406 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
5407 uint64_t vaddr_in = (uint64_t) value;
5408 uint64_t vaddr = vaddr_in & ~(dline_size - 1);
5409 void *haddr;
5410 int mem_idx = arm_env_mmu_index(env);
5411
5412 /* This won't cross a page boundary, since vaddr is line-aligned */
5413 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
5414 if (haddr) {
5415 #ifndef CONFIG_USER_ONLY
5416
5417 ram_addr_t offset;
5418 MemoryRegion *mr;
5419
5420 /* The RCU read lock is already held */
5421 mr = memory_region_from_host(haddr, &offset);
5422
5423 if (mr) {
5424 memory_region_writeback(mr, offset, dline_size);
5425 }
5426 #endif /* !CONFIG_USER_ONLY */
5427 }
5428 #else
5429 /* Handled by hardware accelerator. */
5430 g_assert_not_reached();
5431 #endif /* CONFIG_TCG */
5432 }
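/*
 * Worked example of the arithmetic above: a typical CTR_EL0.DminLine
 * of 4 (log2 of the number of 4-byte words in the smallest D-cache
 * line) gives dline_size = 4 << 4 = 64 bytes, so a guest value of
 * 0x1234567f is aligned down to vaddr = 0x12345640 before the
 * 64-byte region is probed and written back.
 */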
5433
5434 static const ARMCPRegInfo dcpop_reg[] = {
5435 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
5436 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
5437 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
5438 .fgt = FGT_DCCVAP,
5439 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
5440 };
5441
5442 static const ARMCPRegInfo dcpodp_reg[] = {
5443 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
5444 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
5445 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
5446 .fgt = FGT_DCCVADP,
5447 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
5448 };
5449
5450 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
5451 bool isread)
5452 {
5453 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
5454 return CP_ACCESS_TRAP_EL2;
5455 }
5456
5457 return CP_ACCESS_OK;
5458 }
5459
5460 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
5461 bool isread)
5462 {
5463 int el = arm_current_el(env);
5464 if (el < 2 && arm_is_el2_enabled(env)) {
5465 uint64_t hcr = arm_hcr_el2_eff(env);
5466 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
5467 return CP_ACCESS_TRAP_EL2;
5468 }
5469 }
5470 if (el < 3 &&
5471 arm_feature(env, ARM_FEATURE_EL3) &&
5472 !(env->cp15.scr_el3 & SCR_ATA)) {
5473 return CP_ACCESS_TRAP_EL3;
5474 }
5475 return CP_ACCESS_OK;
5476 }
5477
5478 static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
5479 bool isread)
5480 {
5481 CPAccessResult nv1 = access_nv1(env, ri, isread);
5482
5483 if (nv1 != CP_ACCESS_OK) {
5484 return nv1;
5485 }
5486 return access_mte(env, ri, isread);
5487 }
5488
5489 static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
5490 bool isread)
5491 {
5492 /*
5493 * TFSR_EL2: similar to generic access_mte(), but we need to
5494 * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
5495 * if NV2 is enabled then we will redirect this to TFSR_EL1
5496 * after doing the HCR and SCR ATA traps; otherwise this will
5497 * be a trap to EL2 and the HCR/SCR traps do not apply.
5498 */
5499 int el = arm_current_el(env);
5500
5501 if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
5502 return CP_ACCESS_OK;
5503 }
5504 if (el < 2 && arm_is_el2_enabled(env)) {
5505 uint64_t hcr = arm_hcr_el2_eff(env);
5506 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
5507 return CP_ACCESS_TRAP_EL2;
5508 }
5509 }
5510 if (el < 3 &&
5511 arm_feature(env, ARM_FEATURE_EL3) &&
5512 !(env->cp15.scr_el3 & SCR_ATA)) {
5513 return CP_ACCESS_TRAP_EL3;
5514 }
5515 return CP_ACCESS_OK;
5516 }
5517
5518 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
5519 {
5520 return env->pstate & PSTATE_TCO;
5521 }
5522
5523 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
5524 {
5525 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
5526 }
5527
5528 static const ARMCPRegInfo mte_reginfo[] = {
5529 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
5530 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
5531 .access = PL1_RW, .accessfn = access_mte,
5532 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
5533 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
5534 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
5535 .access = PL1_RW, .accessfn = access_tfsr_el1,
5536 .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
5537 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
5538 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
5539 .type = ARM_CP_NV2_REDIRECT,
5540 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
5541 .access = PL2_RW, .accessfn = access_tfsr_el2,
5542 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
5543 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
5544 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
5545 .access = PL3_RW,
5546 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
5547 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
5548 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
5549 .access = PL1_RW, .accessfn = access_mte,
5550 .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
5551 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
5552 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
5553 .access = PL1_RW, .accessfn = access_mte,
5554 .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
5555 { .name = "TCO", .state = ARM_CP_STATE_AA64,
5556 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
5557 .type = ARM_CP_NO_RAW,
5558 .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
5559 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
5560 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
5561 .type = ARM_CP_NOP, .access = PL1_W,
5562 .fgt = FGT_DCIVAC,
5563 .accessfn = aa64_cacheop_poc_access },
5564 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
5565 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
5566 .fgt = FGT_DCISW,
5567 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5568 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
5569 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
5570 .type = ARM_CP_NOP, .access = PL1_W,
5571 .fgt = FGT_DCIVAC,
5572 .accessfn = aa64_cacheop_poc_access },
5573 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
5574 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
5575 .fgt = FGT_DCISW,
5576 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5577 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
5578 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
5579 .fgt = FGT_DCCSW,
5580 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5581 { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
5582 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
5583 .fgt = FGT_DCCSW,
5584 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5585 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
5586 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
5587 .fgt = FGT_DCCISW,
5588 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5589 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
5590 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
5591 .fgt = FGT_DCCISW,
5592 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5593 };
5594
5595 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
5596 { .name = "TCO", .state = ARM_CP_STATE_AA64,
5597 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
5598 .type = ARM_CP_CONST, .access = PL0_RW, },
5599 };
5600
5601 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
5602 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
5603 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
5604 .type = ARM_CP_NOP, .access = PL0_W,
5605 .fgt = FGT_DCCVAC,
5606 .accessfn = aa64_cacheop_poc_access },
5607 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
5608 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
5609 .type = ARM_CP_NOP, .access = PL0_W,
5610 .fgt = FGT_DCCVAC,
5611 .accessfn = aa64_cacheop_poc_access },
5612 { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
5613 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
5614 .type = ARM_CP_NOP, .access = PL0_W,
5615 .fgt = FGT_DCCVAP,
5616 .accessfn = aa64_cacheop_poc_access },
5617 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
5618 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
5619 .type = ARM_CP_NOP, .access = PL0_W,
5620 .fgt = FGT_DCCVAP,
5621 .accessfn = aa64_cacheop_poc_access },
5622 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
5623 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
5624 .type = ARM_CP_NOP, .access = PL0_W,
5625 .fgt = FGT_DCCVADP,
5626 .accessfn = aa64_cacheop_poc_access },
5627 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
5628 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
5629 .type = ARM_CP_NOP, .access = PL0_W,
5630 .fgt = FGT_DCCVADP,
5631 .accessfn = aa64_cacheop_poc_access },
5632 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
5633 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
5634 .type = ARM_CP_NOP, .access = PL0_W,
5635 .fgt = FGT_DCCIVAC,
5636 .accessfn = aa64_cacheop_poc_access },
5637 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
5638 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
5639 .type = ARM_CP_NOP, .access = PL0_W,
5640 .fgt = FGT_DCCIVAC,
5641 .accessfn = aa64_cacheop_poc_access },
5642 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
5643 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
5644 .access = PL0_W, .type = ARM_CP_DC_GVA,
5645 #ifndef CONFIG_USER_ONLY
5646 /* Avoid overhead of an access check that always passes in user-mode */
5647 .accessfn = aa64_zva_access,
5648 .fgt = FGT_DCZVA,
5649 #endif
5650 },
5651 { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
5652 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
5653 .access = PL0_W, .type = ARM_CP_DC_GZVA,
5654 #ifndef CONFIG_USER_ONLY
5655 /* Avoid overhead of an access check that always passes in user-mode */
5656 .accessfn = aa64_zva_access,
5657 .fgt = FGT_DCZVA,
5658 #endif
5659 },
5660 };
5661
5662 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
5663 bool isread)
5664 {
5665 uint64_t hcr = arm_hcr_el2_eff(env);
5666 int el = arm_current_el(env);
5667
5668 if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
5669 if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
5670 if (hcr & HCR_TGE) {
5671 return CP_ACCESS_TRAP_EL2;
5672 }
5673 return CP_ACCESS_TRAP_EL1;
5674 }
5675 } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
5676 return CP_ACCESS_TRAP_EL2;
5677 }
5678 if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
5679 return CP_ACCESS_TRAP_EL2;
5680 }
5681 if (el < 3
5682 && arm_feature(env, ARM_FEATURE_EL3)
5683 && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
5684 return CP_ACCESS_TRAP_EL3;
5685 }
5686 return CP_ACCESS_OK;
5687 }
5688
5689 static CPAccessResult access_scxtnum_el1(CPUARMState *env,
5690 const ARMCPRegInfo *ri,
5691 bool isread)
5692 {
5693 CPAccessResult nv1 = access_nv1(env, ri, isread);
5694
5695 if (nv1 != CP_ACCESS_OK) {
5696 return nv1;
5697 }
5698 return access_scxtnum(env, ri, isread);
5699 }
5700
5701 static const ARMCPRegInfo scxtnum_reginfo[] = {
5702 { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
5703 .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
5704 .access = PL0_RW, .accessfn = access_scxtnum,
5705 .fgt = FGT_SCXTNUM_EL0,
5706 .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
5707 { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
5708 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
5709 .access = PL1_RW, .accessfn = access_scxtnum_el1,
5710 .fgt = FGT_SCXTNUM_EL1,
5711 .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
5712 .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
5713 { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
5714 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
5715 .access = PL2_RW, .accessfn = access_scxtnum,
5716 .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
5717 { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
5718 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
5719 .access = PL3_RW,
5720 .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
5721 };
5722
5723 static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
5724 bool isread)
5725 {
5726 if (arm_current_el(env) == 2 &&
5727 arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
5728 return CP_ACCESS_TRAP_EL3;
5729 }
5730 return CP_ACCESS_OK;
5731 }
5732
5733 static const ARMCPRegInfo fgt_reginfo[] = {
5734 { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
5735 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5736 .nv2_redirect_offset = 0x1b8,
5737 .access = PL2_RW, .accessfn = access_fgt,
5738 .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
5739 { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
5740 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
5741 .nv2_redirect_offset = 0x1c0,
5742 .access = PL2_RW, .accessfn = access_fgt,
5743 .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
5744 { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
5745 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
5746 .nv2_redirect_offset = 0x1d0,
5747 .access = PL2_RW, .accessfn = access_fgt,
5748 .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
5749 { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
5750 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
5751 .nv2_redirect_offset = 0x1d8,
5752 .access = PL2_RW, .accessfn = access_fgt,
5753 .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
5754 { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
5755 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
5756 .nv2_redirect_offset = 0x1c8,
5757 .access = PL2_RW, .accessfn = access_fgt,
5758 .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
5759 };
5760
5761 static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5762 uint64_t value)
5763 {
5764 /*
5765 * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
5766 * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
5767 * about the RESS bits at the top -- we choose the "generate an EL2
5768 * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
5769 * the ptw.c code detect the resulting invalid address).
5770 */
5771 env->cp15.vncr_el2 = value & ~0xfffULL;
5772 }
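/*
 * For instance, a guest write of 0xffff800012345fff is stored as
 * 0xffff800012345000; since the nv2_redirect_offset values are all
 * multiples of 8, VNCR_EL2 + offset is then always 64-bit aligned,
 * as the comment above requires.
 */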
5773
5774 static const ARMCPRegInfo nv2_reginfo[] = {
5775 { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
5776 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
5777 .access = PL2_RW,
5778 .writefn = vncr_write,
5779 .nv2_redirect_offset = 0xb0,
5780 .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
5781 };
5782
5783 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
5784 bool isread)
5785 {
5786 int el = arm_current_el(env);
5787
5788 if (el == 0) {
5789 uint64_t sctlr = arm_sctlr(env, el);
5790 if (!(sctlr & SCTLR_EnRCTX)) {
5791 return CP_ACCESS_TRAP_EL1;
5792 }
5793 } else if (el == 1) {
5794 uint64_t hcr = arm_hcr_el2_eff(env);
5795 if (hcr & HCR_NV) {
5796 return CP_ACCESS_TRAP_EL2;
5797 }
5798 }
5799 return CP_ACCESS_OK;
5800 }
5801
5802 static const ARMCPRegInfo predinv_reginfo[] = {
5803 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
5804 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
5805 .fgt = FGT_CFPRCTX,
5806 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5807 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
5808 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
5809 .fgt = FGT_DVPRCTX,
5810 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5811 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
5812 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
5813 .fgt = FGT_CPPRCTX,
5814 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5815 /*
5816 * Note the AArch32 opcodes have a different OPC1.
5817 */
5818 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
5819 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
5820 .fgt = FGT_CFPRCTX,
5821 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5822 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
5823 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
5824 .fgt = FGT_DVPRCTX,
5825 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5826 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
5827 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
5828 .fgt = FGT_CPPRCTX,
5829 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5830 };
5831
5832 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5833 {
5834 /* Read the high 32 bits of the current CCSIDR */
5835 return extract64(ccsidr_read(env, ri), 32, 32);
5836 }
5837
5838 static const ARMCPRegInfo ccsidr2_reginfo[] = {
5839 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
5840 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
5841 .access = PL1_R,
5842 .accessfn = access_tid4,
5843 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
5844 };
5845
5846 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
5847 bool isread)
5848 {
5849 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
5850 return CP_ACCESS_TRAP_EL2;
5851 }
5852
5853 return CP_ACCESS_OK;
5854 }
5855
5856 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
5857 bool isread)
5858 {
5859 if (arm_feature(env, ARM_FEATURE_V8)) {
5860 return access_aa64_tid3(env, ri, isread);
5861 }
5862
5863 return CP_ACCESS_OK;
5864 }
5865
5866 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
5867 bool isread)
5868 {
5869 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
5870 return CP_ACCESS_TRAP_EL2;
5871 }
5872
5873 return CP_ACCESS_OK;
5874 }
5875
5876 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
5877 const ARMCPRegInfo *ri, bool isread)
5878 {
5879 /*
5880 * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
5881 * in v7A, not in v8A.
5882 */
5883 if (!arm_feature(env, ARM_FEATURE_V8) &&
5884 arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
5885 (env->cp15.hstr_el2 & HSTR_TJDBX)) {
5886 return CP_ACCESS_TRAP_EL2;
5887 }
5888 return CP_ACCESS_OK;
5889 }
5890
5891 static const ARMCPRegInfo jazelle_regs[] = {
5892 { .name = "JIDR",
5893 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
5894 .access = PL1_R, .accessfn = access_jazelle,
5895 .type = ARM_CP_CONST, .resetvalue = 0 },
5896 { .name = "JOSCR",
5897 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
5898 .accessfn = access_joscr_jmcr,
5899 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5900 { .name = "JMCR",
5901 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
5902 .accessfn = access_joscr_jmcr,
5903 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5904 };
5905
5906 static const ARMCPRegInfo contextidr_el2 = {
5907 .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
5908 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
5909 .access = PL2_RW,
5910 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
5911 };
5912
5913 static const ARMCPRegInfo vhe_reginfo[] = {
5914 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
5915 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
5916 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
5917 .raw_writefn = raw_write,
5918 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
5919 #ifndef CONFIG_USER_ONLY
5920 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5921 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
5922 .fieldoffset =
5923 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
5924 .type = ARM_CP_IO, .access = PL2_RW,
5925 .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
5926 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5927 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
5928 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5929 .resetfn = gt_hv_timer_reset,
5930 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
5931 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5932 .type = ARM_CP_IO,
5933 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
5934 .access = PL2_RW,
5935 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
5936 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
5937 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
5938 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
5939 .type = ARM_CP_IO | ARM_CP_ALIAS,
5940 .access = PL2_RW, .accessfn = access_el1nvpct,
5941 .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
5942 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
5943 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
5944 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
5945 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
5946 .type = ARM_CP_IO | ARM_CP_ALIAS,
5947 .access = PL2_RW, .accessfn = access_el1nvvct,
5948 .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
5949 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
5950 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
5951 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
5952 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
5953 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
5954 .access = PL2_RW, .accessfn = e2h_access,
5955 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
5956 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
5957 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
5958 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
5959 .access = PL2_RW, .accessfn = e2h_access,
5960 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
5961 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
5962 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
5963 .type = ARM_CP_IO | ARM_CP_ALIAS,
5964 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
5965 .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
5966 .access = PL2_RW, .accessfn = access_el1nvpct,
5967 .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
5968 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
5969 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
5970 .type = ARM_CP_IO | ARM_CP_ALIAS,
5971 .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
5972 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
5973 .access = PL2_RW, .accessfn = access_el1nvvct,
5974 .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
5975 #endif
5976 };
5977
5978 /*
5979 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
5980 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
5981 * is non-zero, which is never the case for ARMv7, optional in ARMv8,
5982 * and mandatory from ARMv8.2 onward.
5983 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
5984 * implementation is RAZ/WI we can ignore this detail, as we
5985 * do for ACTLR.
5986 */
5987 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
5988 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
5989 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
5990 .access = PL1_RW, .accessfn = access_tacr,
5991 .type = ARM_CP_CONST, .resetvalue = 0 },
5992 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
5993 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
5994 .access = PL2_RW, .type = ARM_CP_CONST,
5995 .resetvalue = 0 },
5996 };
5997
5998 void register_cp_regs_for_features(ARMCPU *cpu)
5999 {
6000 /* Register all the coprocessor registers based on feature bits */
6001 CPUARMState *env = &cpu->env;
6002 ARMISARegisters *isar = &cpu->isar;
6003
6004 if (arm_feature(env, ARM_FEATURE_M)) {
6005 /* M profile has no coprocessor registers */
6006 return;
6007 }
6008
6009 define_arm_cp_regs(cpu, cp_reginfo);
6010 if (!arm_feature(env, ARM_FEATURE_V8)) {
6011 /*
6012 * Must go early as it is full of wildcards that may be
6013 * overridden by later definitions.
6014 */
6015 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
6016 }
6017
6018 #ifndef CONFIG_USER_ONLY
6019 if (tcg_enabled()) {
6020 define_tlb_insn_regs(cpu);
6021 define_at_insn_regs(cpu);
6022 }
6023 #endif
6024
6025 if (arm_feature(env, ARM_FEATURE_V6)) {
6026 /* The ID registers all have impdef reset values */
6027 ARMCPRegInfo v6_idregs[] = {
6028 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
6029 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6030 .access = PL1_R, .type = ARM_CP_CONST,
6031 .accessfn = access_aa32_tid3,
6032 .resetvalue = GET_IDREG(isar, ID_PFR0)},
6033 /*
6034 * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
6035 * the value of the GIC field until after we define these regs.
6036 */
6037 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
6038 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
6039 .access = PL1_R,
6041 #ifdef CONFIG_USER_ONLY
6042 .type = ARM_CP_CONST,
6043 .resetvalue = GET_IDREG(isar, ID_PFR1),
6044 #else
6045 .type = ARM_CP_NO_RAW,
6046 .accessfn = access_aa32_tid3,
6047 .readfn = id_pfr1_read,
6048 .writefn = arm_cp_write_ignore
6049 #endif
6050 },
6051 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
6052 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
6053 .access = PL1_R, .type = ARM_CP_CONST,
6054 .accessfn = access_aa32_tid3,
6055 .resetvalue = GET_IDREG(isar, ID_DFR0)},
6056 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
6057 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
6058 .access = PL1_R, .type = ARM_CP_CONST,
6059 .accessfn = access_aa32_tid3,
6060 .resetvalue = GET_IDREG(isar, ID_AFR0)},
6061 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
6062 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
6063 .access = PL1_R, .type = ARM_CP_CONST,
6064 .accessfn = access_aa32_tid3,
6065 .resetvalue = GET_IDREG(isar, ID_MMFR0)},
6066 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
6067 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
6068 .access = PL1_R, .type = ARM_CP_CONST,
6069 .accessfn = access_aa32_tid3,
6070 .resetvalue = GET_IDREG(isar, ID_MMFR1)},
6071 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
6072 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
6073 .access = PL1_R, .type = ARM_CP_CONST,
6074 .accessfn = access_aa32_tid3,
6075 .resetvalue = GET_IDREG(isar, ID_MMFR2)},
6076 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
6077 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
6078 .access = PL1_R, .type = ARM_CP_CONST,
6079 .accessfn = access_aa32_tid3,
6080 .resetvalue = GET_IDREG(isar, ID_MMFR3)},
6081 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
6082 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6083 .access = PL1_R, .type = ARM_CP_CONST,
6084 .accessfn = access_aa32_tid3,
6085 .resetvalue = GET_IDREG(isar, ID_ISAR0)},
6086 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
6087 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
6088 .access = PL1_R, .type = ARM_CP_CONST,
6089 .accessfn = access_aa32_tid3,
6090 .resetvalue = GET_IDREG(isar, ID_ISAR1)},
6091 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
6092 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6093 .access = PL1_R, .type = ARM_CP_CONST,
6094 .accessfn = access_aa32_tid3,
6095 .resetvalue = GET_IDREG(isar, ID_ISAR2)},
6096 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
6097 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
6098 .access = PL1_R, .type = ARM_CP_CONST,
6099 .accessfn = access_aa32_tid3,
6100 .resetvalue = GET_IDREG(isar, ID_ISAR3) },
6101 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
6102 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
6103 .access = PL1_R, .type = ARM_CP_CONST,
6104 .accessfn = access_aa32_tid3,
6105 .resetvalue = GET_IDREG(isar, ID_ISAR4) },
6106 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
6107 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
6108 .access = PL1_R, .type = ARM_CP_CONST,
6109 .accessfn = access_aa32_tid3,
6110 .resetvalue = GET_IDREG(isar, ID_ISAR5) },
6111 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
6112 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
6113 .access = PL1_R, .type = ARM_CP_CONST,
6114 .accessfn = access_aa32_tid3,
6115 .resetvalue = GET_IDREG(isar, ID_MMFR4)},
6116 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
6117 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
6118 .access = PL1_R, .type = ARM_CP_CONST,
6119 .accessfn = access_aa32_tid3,
6120 .resetvalue = GET_IDREG(isar, ID_ISAR6) },
6121 };
6122 define_arm_cp_regs(cpu, v6_idregs);
6123 define_arm_cp_regs(cpu, v6_cp_reginfo);
6124 } else {
6125 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
6126 }
6127 if (arm_feature(env, ARM_FEATURE_V6K)) {
6128 define_arm_cp_regs(cpu, v6k_cp_reginfo);
6129 }
6130 if (arm_feature(env, ARM_FEATURE_V7)) {
6131 ARMCPRegInfo clidr = {
6132 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
6133 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
6134 .access = PL1_R, .type = ARM_CP_CONST,
6135 .accessfn = access_tid4,
6136 .fgt = FGT_CLIDR_EL1,
6137 .resetvalue = GET_IDREG(isar, CLIDR)
6138 };
6139 define_one_arm_cp_reg(cpu, &clidr);
6140 define_arm_cp_regs(cpu, v7_cp_reginfo);
6141 define_debug_regs(cpu);
6142 } else {
6143 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
6144 }
6145 if (arm_feature(env, ARM_FEATURE_V8)) {
6146 /*
6147 * v8 ID registers, which all have impdef reset values.
6148 * Note that within the ID register ranges the unused slots
6149 * must all RAZ, not UNDEF; future architecture versions may
6150 * define new registers here.
6151 * ID registers which are AArch64 views of the AArch32 ID registers
6152 * which already existed in v6 and v7 are handled elsewhere,
6153 * in v6_idregs[].
6154 */
6155 int i;
6156 ARMCPRegInfo v8_idregs[] = {
6157 /*
6158 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
6159 * emulation because we don't know the right value for the
6160 * GIC field until after we define these regs.
6161 */
6162 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
6163 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
6164 .access = PL1_R,
6165 #ifdef CONFIG_USER_ONLY
6166 .type = ARM_CP_CONST,
6167 .resetvalue = GET_IDREG(isar, ID_AA64PFR0)
6168 #else
6169 .type = ARM_CP_NO_RAW,
6170 .accessfn = access_aa64_tid3,
6171 .readfn = id_aa64pfr0_read,
6172 .writefn = arm_cp_write_ignore
6173 #endif
6174 },
6175 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
6176 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
6177 .access = PL1_R, .type = ARM_CP_CONST,
6178 .accessfn = access_aa64_tid3,
6179 .resetvalue = GET_IDREG(isar, ID_AA64PFR1)},
6180 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6181 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
6182 .access = PL1_R, .type = ARM_CP_CONST,
6183 .accessfn = access_aa64_tid3,
6184 .resetvalue = 0 },
6185 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6186 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
6187 .access = PL1_R, .type = ARM_CP_CONST,
6188 .accessfn = access_aa64_tid3,
6189 .resetvalue = 0 },
6190 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
6191 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
6192 .access = PL1_R, .type = ARM_CP_CONST,
6193 .accessfn = access_aa64_tid3,
6194 .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)},
6195 { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
6196 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
6197 .access = PL1_R, .type = ARM_CP_CONST,
6198 .accessfn = access_aa64_tid3,
6199 .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)},
6200 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6201 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
6202 .access = PL1_R, .type = ARM_CP_CONST,
6203 .accessfn = access_aa64_tid3,
6204 .resetvalue = 0 },
6205 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6206 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
6207 .access = PL1_R, .type = ARM_CP_CONST,
6208 .accessfn = access_aa64_tid3,
6209 .resetvalue = 0 },
6210 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
6211 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
6212 .access = PL1_R, .type = ARM_CP_CONST,
6213 .accessfn = access_aa64_tid3,
6214 .resetvalue = GET_IDREG(isar, ID_AA64DFR0) },
6215 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
6216 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
6217 .access = PL1_R, .type = ARM_CP_CONST,
6218 .accessfn = access_aa64_tid3,
6219 .resetvalue = GET_IDREG(isar, ID_AA64DFR1) },
6220 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6221 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
6222 .access = PL1_R, .type = ARM_CP_CONST,
6223 .accessfn = access_aa64_tid3,
6224 .resetvalue = 0 },
6225 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6226 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
6227 .access = PL1_R, .type = ARM_CP_CONST,
6228 .accessfn = access_aa64_tid3,
6229 .resetvalue = 0 },
6230 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
6231 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
6232 .access = PL1_R, .type = ARM_CP_CONST,
6233 .accessfn = access_aa64_tid3,
6234 .resetvalue = GET_IDREG(isar, ID_AA64AFR0) },
6235 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
6236 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
6237 .access = PL1_R, .type = ARM_CP_CONST,
6238 .accessfn = access_aa64_tid3,
6239 .resetvalue = GET_IDREG(isar, ID_AA64AFR1) },
6240 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6241 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
6242 .access = PL1_R, .type = ARM_CP_CONST,
6243 .accessfn = access_aa64_tid3,
6244 .resetvalue = 0 },
6245 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6246 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
6247 .access = PL1_R, .type = ARM_CP_CONST,
6248 .accessfn = access_aa64_tid3,
6249 .resetvalue = 0 },
6250 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
6251 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
6252 .access = PL1_R, .type = ARM_CP_CONST,
6253 .accessfn = access_aa64_tid3,
6254 .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)},
6255 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
6256 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
6257 .access = PL1_R, .type = ARM_CP_CONST,
6258 .accessfn = access_aa64_tid3,
6259 .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)},
6260 { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
6261 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
6262 .access = PL1_R, .type = ARM_CP_CONST,
6263 .accessfn = access_aa64_tid3,
6264 .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)},
6265 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6266 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
6267 .access = PL1_R, .type = ARM_CP_CONST,
6268 .accessfn = access_aa64_tid3,
6269 .resetvalue = 0 },
6270 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6271 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
6272 .access = PL1_R, .type = ARM_CP_CONST,
6273 .accessfn = access_aa64_tid3,
6274 .resetvalue = 0 },
6275 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6276 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
6277 .access = PL1_R, .type = ARM_CP_CONST,
6278 .accessfn = access_aa64_tid3,
6279 .resetvalue = 0 },
6280 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6281 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
6282 .access = PL1_R, .type = ARM_CP_CONST,
6283 .accessfn = access_aa64_tid3,
6284 .resetvalue = 0 },
6285 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6286 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
6287 .access = PL1_R, .type = ARM_CP_CONST,
6288 .accessfn = access_aa64_tid3,
6289 .resetvalue = 0 },
6290 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
6291 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6292 .access = PL1_R, .type = ARM_CP_CONST,
6293 .accessfn = access_aa64_tid3,
6294 .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)},
6295 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
6296 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
6297 .access = PL1_R, .type = ARM_CP_CONST,
6298 .accessfn = access_aa64_tid3,
6299 .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) },
6300 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
6301 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
6302 .access = PL1_R, .type = ARM_CP_CONST,
6303 .accessfn = access_aa64_tid3,
6304 .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) },
6305 { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
6306 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
6307 .access = PL1_R, .type = ARM_CP_CONST,
6308 .accessfn = access_aa64_tid3,
6309 .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) },
6310 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6311 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
6312 .access = PL1_R, .type = ARM_CP_CONST,
6313 .accessfn = access_aa64_tid3,
6314 .resetvalue = 0 },
6315 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6316 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
6317 .access = PL1_R, .type = ARM_CP_CONST,
6318 .accessfn = access_aa64_tid3,
6319 .resetvalue = 0 },
6320 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6321 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
6322 .access = PL1_R, .type = ARM_CP_CONST,
6323 .accessfn = access_aa64_tid3,
6324 .resetvalue = 0 },
6325 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6326 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
6327 .access = PL1_R, .type = ARM_CP_CONST,
6328 .accessfn = access_aa64_tid3,
6329 .resetvalue = 0 },
6330 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
6331 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6332 .access = PL1_R, .type = ARM_CP_CONST,
6333 .accessfn = access_aa64_tid3,
6334 .resetvalue = cpu->isar.mvfr0 },
6335 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
6336 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6337 .access = PL1_R, .type = ARM_CP_CONST,
6338 .accessfn = access_aa64_tid3,
6339 .resetvalue = cpu->isar.mvfr1 },
6340 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
6341 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6342 .access = PL1_R, .type = ARM_CP_CONST,
6343 .accessfn = access_aa64_tid3,
6344 .resetvalue = cpu->isar.mvfr2 },
6345 /*
6346 * "0, c0, c3, {0,1,2}" are the encodings corresponding to
6347 * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
6348 * as RAZ, since it is in the "reserved for future ID
6349 * registers, RAZ" part of the AArch32 encoding space.
6350 */
6351 { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
6352 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6353 .access = PL1_R, .type = ARM_CP_CONST,
6354 .accessfn = access_aa64_tid3,
6355 .resetvalue = 0 },
6356 { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
6357 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6358 .access = PL1_R, .type = ARM_CP_CONST,
6359 .accessfn = access_aa64_tid3,
6360 .resetvalue = 0 },
6361 { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
6362 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6363 .access = PL1_R, .type = ARM_CP_CONST,
6364 .accessfn = access_aa64_tid3,
6365 .resetvalue = 0 },
6366 /*
6367 * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
6368 * they're also RAZ for AArch64, and in v8 are gradually
6369 * being filled with AArch64-view-of-AArch32-ID-register
6370 * for new ID registers.
6371 */
6372 { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
6373 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
6374 .access = PL1_R, .type = ARM_CP_CONST,
6375 .accessfn = access_aa64_tid3,
6376 .resetvalue = 0 },
6377 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
6378 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
6379 .access = PL1_R, .type = ARM_CP_CONST,
6380 .accessfn = access_aa64_tid3,
6381 .resetvalue = GET_IDREG(isar, ID_PFR2)},
6382 { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
6383 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
6384 .access = PL1_R, .type = ARM_CP_CONST,
6385 .accessfn = access_aa64_tid3,
6386 .resetvalue = GET_IDREG(isar, ID_DFR1)},
6387 { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
6388 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
6389 .access = PL1_R, .type = ARM_CP_CONST,
6390 .accessfn = access_aa64_tid3,
6391 .resetvalue = GET_IDREG(isar, ID_MMFR5)},
6392 { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
6393 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
6394 .access = PL1_R, .type = ARM_CP_CONST,
6395 .accessfn = access_aa64_tid3,
6396 .resetvalue = 0 },
6397 };
6398 #ifdef CONFIG_USER_ONLY
6399 static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
6400 { .name = "ID_AA64PFR0_EL1",
6401 .exported_bits = R_ID_AA64PFR0_FP_MASK |
6402 R_ID_AA64PFR0_ADVSIMD_MASK |
6403 R_ID_AA64PFR0_SVE_MASK |
6404 R_ID_AA64PFR0_DIT_MASK,
6405 .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
6406 (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
6407 { .name = "ID_AA64PFR1_EL1",
6408 .exported_bits = R_ID_AA64PFR1_BT_MASK |
6409 R_ID_AA64PFR1_SSBS_MASK |
6410 R_ID_AA64PFR1_MTE_MASK |
6411 R_ID_AA64PFR1_SME_MASK },
6412 { .name = "ID_AA64PFR*_EL1_RESERVED",
6413 .is_glob = true },
6414 { .name = "ID_AA64ZFR0_EL1",
6415 .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
6416 R_ID_AA64ZFR0_AES_MASK |
6417 R_ID_AA64ZFR0_BITPERM_MASK |
6418 R_ID_AA64ZFR0_BFLOAT16_MASK |
6419 R_ID_AA64ZFR0_B16B16_MASK |
6420 R_ID_AA64ZFR0_SHA3_MASK |
6421 R_ID_AA64ZFR0_SM4_MASK |
6422 R_ID_AA64ZFR0_I8MM_MASK |
6423 R_ID_AA64ZFR0_F32MM_MASK |
6424 R_ID_AA64ZFR0_F64MM_MASK },
6425 { .name = "ID_AA64SMFR0_EL1",
6426 .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
6427 R_ID_AA64SMFR0_BI32I32_MASK |
6428 R_ID_AA64SMFR0_B16F32_MASK |
6429 R_ID_AA64SMFR0_F16F32_MASK |
6430 R_ID_AA64SMFR0_I8I32_MASK |
6431 R_ID_AA64SMFR0_F16F16_MASK |
6432 R_ID_AA64SMFR0_B16B16_MASK |
6433 R_ID_AA64SMFR0_I16I32_MASK |
6434 R_ID_AA64SMFR0_F64F64_MASK |
6435 R_ID_AA64SMFR0_I16I64_MASK |
6436 R_ID_AA64SMFR0_SMEVER_MASK |
6437 R_ID_AA64SMFR0_FA64_MASK },
6438 { .name = "ID_AA64MMFR0_EL1",
6439 .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
6440 .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
6441 (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
6442 { .name = "ID_AA64MMFR1_EL1",
6443 .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
6444 { .name = "ID_AA64MMFR2_EL1",
6445 .exported_bits = R_ID_AA64MMFR2_AT_MASK },
6446 { .name = "ID_AA64MMFR3_EL1",
6447 .exported_bits = 0 },
6448 { .name = "ID_AA64MMFR*_EL1_RESERVED",
6449 .is_glob = true },
6450 { .name = "ID_AA64DFR0_EL1",
6451 .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
6452 { .name = "ID_AA64DFR1_EL1" },
6453 { .name = "ID_AA64DFR*_EL1_RESERVED",
6454 .is_glob = true },
6455 { .name = "ID_AA64AFR*",
6456 .is_glob = true },
6457 { .name = "ID_AA64ISAR0_EL1",
6458 .exported_bits = R_ID_AA64ISAR0_AES_MASK |
6459 R_ID_AA64ISAR0_SHA1_MASK |
6460 R_ID_AA64ISAR0_SHA2_MASK |
6461 R_ID_AA64ISAR0_CRC32_MASK |
6462 R_ID_AA64ISAR0_ATOMIC_MASK |
6463 R_ID_AA64ISAR0_RDM_MASK |
6464 R_ID_AA64ISAR0_SHA3_MASK |
6465 R_ID_AA64ISAR0_SM3_MASK |
6466 R_ID_AA64ISAR0_SM4_MASK |
6467 R_ID_AA64ISAR0_DP_MASK |
6468 R_ID_AA64ISAR0_FHM_MASK |
6469 R_ID_AA64ISAR0_TS_MASK |
6470 R_ID_AA64ISAR0_RNDR_MASK },
6471 { .name = "ID_AA64ISAR1_EL1",
6472 .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
6473 R_ID_AA64ISAR1_APA_MASK |
6474 R_ID_AA64ISAR1_API_MASK |
6475 R_ID_AA64ISAR1_JSCVT_MASK |
6476 R_ID_AA64ISAR1_FCMA_MASK |
6477 R_ID_AA64ISAR1_LRCPC_MASK |
6478 R_ID_AA64ISAR1_GPA_MASK |
6479 R_ID_AA64ISAR1_GPI_MASK |
6480 R_ID_AA64ISAR1_FRINTTS_MASK |
6481 R_ID_AA64ISAR1_SB_MASK |
6482 R_ID_AA64ISAR1_BF16_MASK |
6483 R_ID_AA64ISAR1_DGH_MASK |
6484 R_ID_AA64ISAR1_I8MM_MASK },
6485 { .name = "ID_AA64ISAR2_EL1",
6486 .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
6487 R_ID_AA64ISAR2_RPRES_MASK |
6488 R_ID_AA64ISAR2_GPA3_MASK |
6489 R_ID_AA64ISAR2_APA3_MASK |
6490 R_ID_AA64ISAR2_MOPS_MASK |
6491 R_ID_AA64ISAR2_BC_MASK |
6492 R_ID_AA64ISAR2_RPRFM_MASK |
6493 R_ID_AA64ISAR2_CSSC_MASK },
6494 { .name = "ID_AA64ISAR*_EL1_RESERVED",
6495 .is_glob = true },
6496 };
6497 modify_arm_cp_regs(v8_idregs, v8_user_idregs);
6498 #endif
6499 /*
6500 * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
6501 * TODO: For RMR, a write with bit 1 set should do something with
6502 * cpu_reset(). In the meantime, "the bit is strictly a request",
6503 * so we are in spec just ignoring writes.
6504 */
6505 if (!arm_feature(env, ARM_FEATURE_EL3) &&
6506 !arm_feature(env, ARM_FEATURE_EL2)) {
6507 ARMCPRegInfo el1_reset_regs[] = {
6508 { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
6509 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6510 .access = PL1_R,
6511 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6512 { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
6513 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
6514 .access = PL1_RW, .type = ARM_CP_CONST,
6515 .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
6516 };
6517 define_arm_cp_regs(cpu, el1_reset_regs);
6518 }
6519 define_arm_cp_regs(cpu, v8_idregs);
6520 define_arm_cp_regs(cpu, v8_cp_reginfo);
6521 if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
6522 define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
6523 }
6524
6525 for (i = 4; i < 16; i++) {
6526 /*
6527 * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
6528 * For pre-v8 cores there are RAZ patterns for these in
6529 * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
6530 * v8 extends the "must RAZ" part of the ID register space
6531 * to also cover c0, 0, c{8-15}, {0-7}.
6532 * These are STATE_AA32 because in the AArch64 sysreg space
6533 * c4-c7 is where the AArch64 ID registers live (and we've
6534 * already defined those in v8_idregs[]), and c8-c15 are not
6535 * "must RAZ" for AArch64.
6536 */
6537 g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
6538 ARMCPRegInfo v8_aa32_raz_idregs = {
6539 .name = name,
6540 .state = ARM_CP_STATE_AA32,
6541 .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
6542 .access = PL1_R, .type = ARM_CP_CONST,
6543 .accessfn = access_aa64_tid3,
6544 .resetvalue = 0 };
6545 define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
6546 }
6547 }
6548
6549 /*
6550 * Register the base EL2 cpregs.
6551 * Pre v8, these registers are implemented only as part of the
6552 * Virtualization Extensions (EL2 present). Beginning with v8,
6553 * if EL2 is missing but EL3 is enabled, mostly these become
6554 * RES0 from EL3, with some specific exceptions.
6555 */
6556 if (arm_feature(env, ARM_FEATURE_EL2)
6557 || (arm_feature(env, ARM_FEATURE_EL3)
6558 && arm_feature(env, ARM_FEATURE_V8))) {
6559 uint64_t vmpidr_def = mpidr_read_val(env);
6560 ARMCPRegInfo vpidr_regs[] = {
6561 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
6562 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6563 .access = PL2_RW, .accessfn = access_el3_aa32ns,
6564 .resetvalue = cpu->midr,
6565 .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
6566 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
6567 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
6568 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6569 .access = PL2_RW, .resetvalue = cpu->midr,
6570 .type = ARM_CP_EL3_NO_EL2_C_NZ,
6571 .nv2_redirect_offset = 0x88,
6572 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6573 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
6574 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6575 .access = PL2_RW, .accessfn = access_el3_aa32ns,
6576 .resetvalue = vmpidr_def,
6577 .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
6578 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
6579 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
6580 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6581 .access = PL2_RW, .resetvalue = vmpidr_def,
6582 .type = ARM_CP_EL3_NO_EL2_C_NZ,
6583 .nv2_redirect_offset = 0x50,
6584 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
6585 };
6586 /*
6587 * The only field of MDCR_EL2 that has a defined architectural reset
6588 * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
6589 */
6590 ARMCPRegInfo mdcr_el2 = {
6591 .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
6592 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
6593 .writefn = mdcr_el2_write,
6594 .access = PL2_RW, .resetvalue = pmu_num_counters(env),
6595 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
6596 };
6597 define_one_arm_cp_reg(cpu, &mdcr_el2);
6598 define_arm_cp_regs(cpu, vpidr_regs);
6599 define_arm_cp_regs(cpu, el2_cp_reginfo);
6600 if (arm_feature(env, ARM_FEATURE_V8)) {
6601 define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
6602 }
6603 if (cpu_isar_feature(aa64_sel2, cpu)) {
6604 define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
6605 }
6606 /*
6607 * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
6608 * See commentary near RMR_EL1.
6609 */
6610 if (!arm_feature(env, ARM_FEATURE_EL3)) {
6611 static const ARMCPRegInfo el2_reset_regs[] = {
6612 { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
6613 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
6614 .access = PL2_R,
6615 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6616 { .name = "RVBAR", .type = ARM_CP_ALIAS,
6617 .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6618 .access = PL2_R,
6619 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6620 { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
6621 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
6622 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
6623 };
6624 define_arm_cp_regs(cpu, el2_reset_regs);
6625 }
6626 }
6627
6628 /* Register the base EL3 cpregs. */
6629 if (arm_feature(env, ARM_FEATURE_EL3)) {
6630 define_arm_cp_regs(cpu, el3_cp_reginfo);
6631 ARMCPRegInfo el3_regs[] = {
6632 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
6633 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
6634 .access = PL3_R,
6635 .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
6636 { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
6637 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
6638 .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
6639 { .name = "RMR", .state = ARM_CP_STATE_AA32,
6640 .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
6641 .access = PL3_RW, .type = ARM_CP_CONST,
6642 .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
6643 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
6644 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
6645 .access = PL3_RW,
6646 .raw_writefn = raw_write, .writefn = sctlr_write,
6647 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
6648 .resetvalue = cpu->reset_sctlr },
6649 };
6650
6651 define_arm_cp_regs(cpu, el3_regs);
6652 }
6653 /*
6654 * The behaviour of NSACR is sufficiently various that we don't
6655 * try to describe it in a single reginfo:
6656 * if EL3 is 64 bit, then trap to EL3 from S EL1,
6657 * reads as constant 0xc00 from NS EL1 and NS EL2
6658 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
6659 * if v7 without EL3, register doesn't exist
6660 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
6661 */
6662 if (arm_feature(env, ARM_FEATURE_EL3)) {
6663 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6664 static const ARMCPRegInfo nsacr = {
6665 .name = "NSACR", .type = ARM_CP_CONST,
6666 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6667 .access = PL1_RW, .accessfn = nsacr_access,
6668 .resetvalue = 0xc00
6669 };
6670 define_one_arm_cp_reg(cpu, &nsacr);
6671 } else {
6672 static const ARMCPRegInfo nsacr = {
6673 .name = "NSACR",
6674 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6675 .access = PL3_RW | PL1_R,
6676 .resetvalue = 0,
6677 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
6678 };
6679 define_one_arm_cp_reg(cpu, &nsacr);
6680 }
6681 } else {
6682 if (arm_feature(env, ARM_FEATURE_V8)) {
6683 static const ARMCPRegInfo nsacr = {
6684 .name = "NSACR", .type = ARM_CP_CONST,
6685 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6686 .access = PL1_R,
6687 .resetvalue = 0xc00
6688 };
6689 define_one_arm_cp_reg(cpu, &nsacr);
6690 }
6691 }
6692
6693 if (arm_feature(env, ARM_FEATURE_PMSA)) {
6694 if (arm_feature(env, ARM_FEATURE_V6)) {
6695 /* PMSAv6 not implemented */
6696 assert(arm_feature(env, ARM_FEATURE_V7));
6697 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6698 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
6699 } else {
6700 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
6701 }
6702 } else {
6703 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6704 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
6705 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
6706 if (cpu_isar_feature(aa32_hpd, cpu)) {
6707 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
6708 }
6709 }
6710 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6711 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
6712 }
6713 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
6714 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
6715 }
6716 if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
6717 define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
6718 }
6719 #ifndef CONFIG_USER_ONLY
6720 if (cpu_isar_feature(aa64_ecv, cpu)) {
6721 define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
6722 }
6723 #endif
6724 if (arm_feature(env, ARM_FEATURE_VAPA)) {
6725 ARMCPRegInfo vapa_cp_reginfo[] = {
6726 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
6727 .access = PL1_RW, .resetvalue = 0,
6728 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
6729 offsetoflow32(CPUARMState, cp15.par_ns) },
6730 .writefn = par_write},
6731 };
6732
6733 /*
6734 * When LPAE exists this 32-bit PAR register is an alias of the
6735 * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
6736 */
6737 if (arm_feature(env, ARM_FEATURE_LPAE)) {
6738 vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
6739 }
6740 define_arm_cp_regs(cpu, vapa_cp_reginfo);
6741 }
6742 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
6743 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
6744 }
6745 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
6746 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
6747 }
6748 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
6749 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
6750 }
6751 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
6752 define_arm_cp_regs(cpu, omap_cp_reginfo);
6753 }
6754 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
6755 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
6756 }
6757 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6758 define_arm_cp_regs(cpu, xscale_cp_reginfo);
6759 }
6760 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
6761 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
6762 }
6763 if (arm_feature(env, ARM_FEATURE_LPAE)) {
6764 define_arm_cp_regs(cpu, lpae_cp_reginfo);
6765 }
6766 if (cpu_isar_feature(aa32_jazelle, cpu)) {
6767 define_arm_cp_regs(cpu, jazelle_regs);
6768 }
6769 /*
6770 * Slightly awkwardly, the OMAP and StrongARM cores need all of
6771 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
6772 * be read-only (ie write causes UNDEF exception).
6773 */
6774 {
6775 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
6776 /*
6777 * Pre-v8 MIDR space.
6778 * Note that the MIDR isn't a simple constant register because
6779 * of the TI925 behaviour where writes to another register can
6780 * cause the MIDR value to change.
6781 *
6782 * Unimplemented registers in the c15 0 0 0 space default to
6783 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
6784 * and friends override accordingly.
6785 */
6786 { .name = "MIDR",
6787 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
6788 .access = PL1_R, .resetvalue = cpu->midr,
6789 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
6790 .readfn = midr_read,
6791 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6792 .type = ARM_CP_OVERRIDE },
6793 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
6794 { .name = "DUMMY",
6795 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
6796 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6797 { .name = "DUMMY",
6798 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
6799 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6800 { .name = "DUMMY",
6801 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
6802 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6803 { .name = "DUMMY",
6804 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
6805 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6806 { .name = "DUMMY",
6807 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
6808 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6809 };
6810 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
6811 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
6812 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
6813 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
6814 .fgt = FGT_MIDR_EL1,
6815 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6816 .readfn = midr_read },
6817 /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
6818 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6819 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
6820 .access = PL1_R, .resetvalue = cpu->midr },
6821 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
6822 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
6823 .access = PL1_R,
6824 .accessfn = access_aa64_tid1,
6825 .fgt = FGT_REVIDR_EL1,
6826 .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
6827 };
6828 ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
6829 .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
6830 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6831 .access = PL1_R, .resetvalue = cpu->midr
6832 };
6833 ARMCPRegInfo id_cp_reginfo[] = {
6834 /* These are common to v8 and pre-v8 */
6835 { .name = "CTR",
6836 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
6837 .access = PL1_R, .accessfn = ctr_el0_access,
6838 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6839 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
6840 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
6841 .access = PL0_R, .accessfn = ctr_el0_access,
6842 .fgt = FGT_CTR_EL0,
6843 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6844 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
6845 { .name = "TCMTR",
6846 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
6847 .access = PL1_R,
6848 .accessfn = access_aa32_tid1,
6849 .type = ARM_CP_CONST, .resetvalue = 0 },
6850 };
6851 /* TLBTR is specific to VMSA */
6852 ARMCPRegInfo id_tlbtr_reginfo = {
6853 .name = "TLBTR",
6854 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
6855 .access = PL1_R,
6856 .accessfn = access_aa32_tid1,
6857 .type = ARM_CP_CONST, .resetvalue = 0,
6858 };
6859 /* MPUIR is specific to PMSA V6+ */
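/* The region count is reported in MPUIR.DREGION, bits [15:8], hence the << 8. */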
6860 ARMCPRegInfo id_mpuir_reginfo = {
6861 .name = "MPUIR",
6862 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6863 .access = PL1_R, .type = ARM_CP_CONST,
6864 .resetvalue = cpu->pmsav7_dregion << 8
6865 };
6866 /* HMPUIR is specific to PMSA V8 */
6867 ARMCPRegInfo id_hmpuir_reginfo = {
6868 .name = "HMPUIR",
6869 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
6870 .access = PL2_R, .type = ARM_CP_CONST,
6871 .resetvalue = cpu->pmsav8r_hdregion
6872 };
6873 static const ARMCPRegInfo crn0_wi_reginfo = {
6874 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
6875 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
6876 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
6877 };
6878 #ifdef CONFIG_USER_ONLY
6879 static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
6880 { .name = "MIDR_EL1",
6881 .exported_bits = R_MIDR_EL1_REVISION_MASK |
6882 R_MIDR_EL1_PARTNUM_MASK |
6883 R_MIDR_EL1_ARCHITECTURE_MASK |
6884 R_MIDR_EL1_VARIANT_MASK |
6885 R_MIDR_EL1_IMPLEMENTER_MASK },
6886 { .name = "REVIDR_EL1" },
6887 };
6888 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
6889 #endif
6890 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
6891 arm_feature(env, ARM_FEATURE_STRONGARM)) {
6892 size_t i;
6893 /*
6894 * Register the blanket "writes ignored" value first to cover the
6895 * whole space. Then update the specific ID registers to allow write
6896 * access, so that they ignore writes rather than causing them to
6897 * UNDEF.
6898 */
6899 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
6900 for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
6901 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
6902 }
6903 for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
6904 id_cp_reginfo[i].access = PL1_RW;
6905 }
6906 id_mpuir_reginfo.access = PL1_RW;
6907 id_tlbtr_reginfo.access = PL1_RW;
6908 }
6909 if (arm_feature(env, ARM_FEATURE_V8)) {
6910 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
6911 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
6912 define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
6913 }
6914 } else {
6915 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
6916 }
6917 define_arm_cp_regs(cpu, id_cp_reginfo);
6918 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
6919 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
6920 } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
6921 arm_feature(env, ARM_FEATURE_V8)) {
6922 uint32_t i = 0;
6923 char *tmp_string;
6924
6925 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
6926 define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
6927 define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);
6928
6929 /* Register aliases are only valid for the first 32 indexes */
6930 for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
6931 uint8_t crm = 0b1000 | extract32(i, 1, 3);
6932 uint8_t opc1 = extract32(i, 4, 1);
6933 uint8_t opc2 = extract32(i, 0, 1) << 2;
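/*
 * For example, i = 5 gives crm = 0b1000 | 0b010 = 10, opc1 = 0
 * and opc2 = (5 & 1) << 2 = 4, ie PRBAR5 is encoded as
 * cp15, opc1=0, crn=6, crm=10, opc2=4.
 */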
6934
6935 tmp_string = g_strdup_printf("PRBAR%u", i);
6936 ARMCPRegInfo tmp_prbarn_reginfo = {
6937 .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
6938 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
6939 .access = PL1_RW, .resetvalue = 0,
6940 .accessfn = access_tvm_trvm,
6941 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
6942 };
6943 define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
6944 g_free(tmp_string);
6945
6946 opc2 = extract32(i, 0, 1) << 2 | 0x1;
6947 tmp_string = g_strdup_printf("PRLAR%u", i);
6948 ARMCPRegInfo tmp_prlarn_reginfo = {
6949 .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
6950 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
6951 .access = PL1_RW, .resetvalue = 0,
6952 .accessfn = access_tvm_trvm,
6953 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
6954 };
6955 define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
6956 g_free(tmp_string);
6957 }
6958
6959 /* Register aliases are only valid for the first 32 indexes */
6960 for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
6961 uint8_t crm = 0b1000 | extract32(i, 1, 3);
6962 uint8_t opc1 = 0b100 | extract32(i, 4, 1);
6963 uint8_t opc2 = extract32(i, 0, 1) << 2;
6964
6965 tmp_string = g_strdup_printf("HPRBAR%u", i);
6966 ARMCPRegInfo tmp_hprbarn_reginfo = {
6967 .name = tmp_string,
6968 .type = ARM_CP_NO_RAW,
6969 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
6970 .access = PL2_RW, .resetvalue = 0,
6971 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
6972 };
6973 define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
6974 g_free(tmp_string);
6975
6976 opc2 = extract32(i, 0, 1) << 2 | 0x1;
6977 tmp_string = g_strdup_printf("HPRLAR%u", i);
6978 ARMCPRegInfo tmp_hprlarn_reginfo = {
6979 .name = tmp_string,
6980 .type = ARM_CP_NO_RAW,
6981 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
6982 .access = PL2_RW, .resetvalue = 0,
6983 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
6984 };
6985 define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
6986 g_free(tmp_string);
6987 }
6988 } else if (arm_feature(env, ARM_FEATURE_V7)) {
6989 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
6990 }
6991 }
6992
6993 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
6994 ARMCPRegInfo mpidr_cp_reginfo[] = {
6995 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
6996 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
6997 .fgt = FGT_MPIDR_EL1,
6998 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
6999 };
7000 #ifdef CONFIG_USER_ONLY
7001 static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
7002 { .name = "MPIDR_EL1",
7003 .fixed_bits = 0x0000000080000000 },
7004 };
7005 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
7006 #endif
7007 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
7008 }
7009
7010 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
7011 ARMCPRegInfo auxcr_reginfo[] = {
7012 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
7013 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
7014 .access = PL1_RW, .accessfn = access_tacr,
7015 .nv2_redirect_offset = 0x118,
7016 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
7017 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
7018 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
7019 .access = PL2_RW, .type = ARM_CP_CONST,
7020 .resetvalue = 0 },
7021 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
7022 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
7023 .access = PL3_RW, .type = ARM_CP_CONST,
7024 .resetvalue = 0 },
7025 };
7026 define_arm_cp_regs(cpu, auxcr_reginfo);
7027 if (cpu_isar_feature(aa32_ac2, cpu)) {
7028 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
7029 }
7030 }
7031
7032 if (arm_feature(env, ARM_FEATURE_CBAR)) {
7033 /*
7034 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
7035 * There are two flavours:
7036 * (1) older 32-bit only cores have a simple 32-bit CBAR
7037 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
7038 * 32-bit register visible to AArch32 at a different encoding
7039 * to the "flavour 1" register and with the bits rearranged to
7040 * be able to squash a 64-bit address into the 32-bit view.
7041 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
7042 * in future if we support AArch32-only configs of some of the
7043 * AArch64 cores we might need to add a specific feature flag
7044 * to indicate cores with "flavour 2" CBAR.
7045 */
7046 if (arm_feature(env, ARM_FEATURE_V8)) {
7047 /* 32 bit view is [31:18] 0...0 [43:32]. */
7048 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
7049 | extract64(cpu->reset_cbar, 32, 12);
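/*
 * For example, a (hypothetical) reset_cbar of 0x4_30000000 keeps
 * bits [31:18] as 0x30000000 and folds bits [43:32] into [11:0]
 * as 0x004, giving cbar32 = 0x30000004.
 */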
7050 ARMCPRegInfo cbar_reginfo[] = {
7051 { .name = "CBAR",
7052 .type = ARM_CP_CONST,
7053 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
7054 .access = PL1_R, .resetvalue = cbar32 },
7055 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
7056 .type = ARM_CP_CONST,
7057 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
7058 .access = PL1_R, .resetvalue = cpu->reset_cbar },
7059 };
7060 /* We don't currently implement a r/w 64-bit CBAR */
7061 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
7062 define_arm_cp_regs(cpu, cbar_reginfo);
7063 } else {
7064 ARMCPRegInfo cbar = {
7065 .name = "CBAR",
7066 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
7067 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
7068 .fieldoffset = offsetof(CPUARMState,
7069 cp15.c15_config_base_address)
7070 };
7071 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
7072 cbar.access = PL1_R;
7073 cbar.fieldoffset = 0;
7074 cbar.type = ARM_CP_CONST;
7075 }
7076 define_one_arm_cp_reg(cpu, &cbar);
7077 }
7078 }
7079
7080 if (arm_feature(env, ARM_FEATURE_VBAR)) {
7081 static const ARMCPRegInfo vbar_cp_reginfo[] = {
7082 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
7083 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
7084 .access = PL1_RW, .writefn = vbar_write,
7085 .accessfn = access_nv1,
7086 .fgt = FGT_VBAR_EL1,
7087 .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
7088 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
7089 offsetof(CPUARMState, cp15.vbar_ns) },
7090 .resetvalue = 0 },
7091 };
7092 define_arm_cp_regs(cpu, vbar_cp_reginfo);
7093 }
7094
7095 /* Generic registers whose values depend on the implementation */
7096 {
7097 ARMCPRegInfo sctlr = {
7098 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
7099 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
7100 .access = PL1_RW, .accessfn = access_tvm_trvm,
7101 .fgt = FGT_SCTLR_EL1,
7102 .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
7103 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
7104 offsetof(CPUARMState, cp15.sctlr_ns) },
7105 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
7106 .raw_writefn = raw_write,
7107 };
7108 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7109 /*
7110 * Normally we would always end the TB on an SCTLR write, but Linux
7111 * arch/arm/mach-pxa/sleep.S expects two instructions following
7112 * an MMU enable to execute from cache. Imitate this behaviour.
7113 */
7114 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
7115 }
7116 define_one_arm_cp_reg(cpu, &sctlr);
7117
7118 if (arm_feature(env, ARM_FEATURE_PMSA) &&
7119 arm_feature(env, ARM_FEATURE_V8)) {
7120 ARMCPRegInfo vsctlr = {
7121 .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
7122 .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
7123 .access = PL2_RW, .resetvalue = 0x0,
7124 .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
7125 };
7126 define_one_arm_cp_reg(cpu, &vsctlr);
7127 }
7128 }
7129
7130 if (cpu_isar_feature(aa64_lor, cpu)) {
7131 define_arm_cp_regs(cpu, lor_reginfo);
7132 }
7133 if (cpu_isar_feature(aa64_pan, cpu)) {
7134 define_one_arm_cp_reg(cpu, &pan_reginfo);
7135 }
7136 if (cpu_isar_feature(aa64_uao, cpu)) {
7137 define_one_arm_cp_reg(cpu, &uao_reginfo);
7138 }
7139
7140 if (cpu_isar_feature(aa64_dit, cpu)) {
7141 define_one_arm_cp_reg(cpu, &dit_reginfo);
7142 }
7143 if (cpu_isar_feature(aa64_ssbs, cpu)) {
7144 define_one_arm_cp_reg(cpu, &ssbs_reginfo);
7145 }
7146 if (cpu_isar_feature(any_ras, cpu)) {
7147 define_arm_cp_regs(cpu, minimal_ras_reginfo);
7148 }
7149
7150 if (cpu_isar_feature(aa64_vh, cpu) ||
7151 cpu_isar_feature(aa64_debugv8p2, cpu)) {
7152 define_one_arm_cp_reg(cpu, &contextidr_el2);
7153 }
7154 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
7155 define_arm_cp_regs(cpu, vhe_reginfo);
7156 }
7157
7158 if (cpu_isar_feature(aa64_sve, cpu)) {
7159 define_arm_cp_regs(cpu, zcr_reginfo);
7160 }
7161
7162 if (cpu_isar_feature(aa64_hcx, cpu)) {
7163 define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
7164 }
7165
7166 if (cpu_isar_feature(aa64_sme, cpu)) {
7167 define_arm_cp_regs(cpu, sme_reginfo);
7168 }
7169 if (cpu_isar_feature(aa64_pauth, cpu)) {
7170 define_arm_cp_regs(cpu, pauth_reginfo);
7171 }
7172 if (cpu_isar_feature(aa64_rndr, cpu)) {
7173 define_arm_cp_regs(cpu, rndr_reginfo);
7174 }
7175 /* Data Cache clean instructions up to PoP */
7176 if (cpu_isar_feature(aa64_dcpop, cpu)) {
7177 define_one_arm_cp_reg(cpu, dcpop_reg);
7178
7179 if (cpu_isar_feature(aa64_dcpodp, cpu)) {
7180 define_one_arm_cp_reg(cpu, dcpodp_reg);
7181 }
7182 }
7183
7184 /*
7185 * If full MTE is enabled, add all of the system registers.
7186 * If only "instructions available at EL0" are enabled,
7187 * then define only a RAZ/WI version of PSTATE.TCO.
7188 */
7189 if (cpu_isar_feature(aa64_mte, cpu)) {
7190 ARMCPRegInfo gmid_reginfo = {
7191 .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
7192 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
7193 .access = PL1_R, .accessfn = access_aa64_tid5,
7194 .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
7195 };
7196 define_one_arm_cp_reg(cpu, &gmid_reginfo);
7197 define_arm_cp_regs(cpu, mte_reginfo);
7198 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
7199 } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
7200 define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
7201 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
7202 }
7203
7204 if (cpu_isar_feature(aa64_scxtnum, cpu)) {
7205 define_arm_cp_regs(cpu, scxtnum_reginfo);
7206 }
7207
7208 if (cpu_isar_feature(aa64_fgt, cpu)) {
7209 define_arm_cp_regs(cpu, fgt_reginfo);
7210 }
7211
7212 if (cpu_isar_feature(aa64_rme, cpu)) {
7213 define_arm_cp_regs(cpu, rme_reginfo);
7214 if (cpu_isar_feature(aa64_mte, cpu)) {
7215 define_arm_cp_regs(cpu, rme_mte_reginfo);
7216 }
7217 }
7218
7219 if (cpu_isar_feature(aa64_nv2, cpu)) {
7220 define_arm_cp_regs(cpu, nv2_reginfo);
7221 }
7222
7223 if (cpu_isar_feature(aa64_nmi, cpu)) {
7224 define_arm_cp_regs(cpu, nmi_reginfo);
7225 }
7226
7227 if (cpu_isar_feature(any_predinv, cpu)) {
7228 define_arm_cp_regs(cpu, predinv_reginfo);
7229 }
7230
7231 if (cpu_isar_feature(any_ccidx, cpu)) {
7232 define_arm_cp_regs(cpu, ccsidr2_reginfo);
7233 }
7234
7235 define_pm_cpregs(cpu);
7236
7237 #ifndef CONFIG_USER_ONLY
7238 /*
7239 * Register redirections and aliases must be done last,
7240 * after the registers from the other extensions have been defined.
7241 */
7242 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
7243 define_arm_vh_e2h_redirects_aliases(cpu);
7244 }
7245 #endif
7246 }
7247
7248 /*
7249 * Private utility function for define_one_arm_cp_reg_with_opaque():
7250 * add a single reginfo struct to the hash table.
7251 */
7252 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
7253 void *opaque, CPState state,
7254 CPSecureState secstate,
7255 int crm, int opc1, int opc2,
7256 const char *name)
7257 {
7258 CPUARMState *env = &cpu->env;
7259 uint32_t key;
7260 ARMCPRegInfo *r2;
7261 bool is64 = r->type & ARM_CP_64BIT;
7262 bool ns = secstate & ARM_CP_SECSTATE_NS;
7263 int cp = r->cp;
7264 size_t name_len;
7265 bool make_const;
7266
7267 switch (state) {
7268 case ARM_CP_STATE_AA32:
7269 /* We assume it is a cp15 register if the .cp field is left unset. */
7270 if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
7271 cp = 15;
7272 }
7273 key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
7274 break;
7275 case ARM_CP_STATE_AA64:
7276 /*
7277 * To allow abbreviation of ARMCPRegInfo definitions, we treat
7278 * cp == 0 as equivalent to the value for "standard guest-visible
7279 * sysreg". STATE_BOTH definitions are also always "standard sysreg"
7280 * in their AArch64 view (the .cp value may be non-zero for the
7281 * benefit of the AArch32 view).
7282 */
7283 if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
7284 cp = CP_REG_ARM64_SYSREG_CP;
7285 }
7286 key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
7287 break;
7288 default:
7289 g_assert_not_reached();
7290 }
7291
7292 /* Overriding of an existing definition must be explicitly requested. */
7293 if (!(r->type & ARM_CP_OVERRIDE)) {
7294 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
7295 if (oldreg) {
7296 assert(oldreg->type & ARM_CP_OVERRIDE);
7297 }
7298 }
7299
7300 /*
7301 * Eliminate registers that are not present because the EL is missing.
7302 * Doing this here makes it easier to put all registers for a given
7303 * feature into the same ARMCPRegInfo array and define them all at once.
7304 */
7305 make_const = false;
7306 if (arm_feature(env, ARM_FEATURE_EL3)) {
7307 /*
7308 * An EL2 register without EL2 but with EL3 is (usually) RES0.
7309 * See rule RJFFP in section D1.1.3 of DDI0487H.a.
7310 */
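/*
 * The PLx_R/PLx_W access constants allocate two bits per exception
 * level, lowest EL in the least significant bits, so
 * ctz32(r->access) / 2 yields the lowest EL with any access right.
 */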
7311 int min_el = ctz32(r->access) / 2;
7312 if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
7313 if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
7314 return;
7315 }
7316 make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
7317 }
7318 } else {
7319 CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
7320 ? PL2_RW : PL1_RW);
7321 if ((r->access & max_el) == 0) {
7322 return;
7323 }
7324 }
7325
7326 /* Combine cpreg and name into one allocation. */
7327 name_len = strlen(name) + 1;
7328 r2 = g_malloc(sizeof(*r2) + name_len);
7329 *r2 = *r;
7330 r2->name = memcpy(r2 + 1, name, name_len);
7331
7332 /*
7333 * Update fields to match the instantiation, overwriting wildcards
7334 * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
7335 */
7336 r2->cp = cp;
7337 r2->crm = crm;
7338 r2->opc1 = opc1;
7339 r2->opc2 = opc2;
7340 r2->state = state;
7341 r2->secure = secstate;
7342 if (opaque) {
7343 r2->opaque = opaque;
7344 }
7345
7346 if (make_const) {
7347 /* This should not have been a very special register to begin with. */
7348 int old_special = r2->type & ARM_CP_SPECIAL_MASK;
7349 assert(old_special == 0 || old_special == ARM_CP_NOP);
7350 /*
7351 * Set the special function to CONST, retaining the other flags.
7352 * This is important for e.g. ARM_CP_SVE so that we still
7353 * take the SVE trap if CPTR_EL3.EZ == 0.
7354 */
7355 r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
7356 /*
7357 * Usually, these registers become RES0, but there are a few
7358 * special cases like VPIDR_EL2 which have a constant non-zero
7359 * value with writes ignored.
7360 */
7361 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
7362 r2->resetvalue = 0;
7363 }
7364 /*
7365 * ARM_CP_CONST has precedence, so removing the callbacks and
7366 * offsets are not strictly necessary, but it is potentially
7367 * less confusing to debug later.
7368 */
        r2->readfn = NULL;
        r2->writefn = NULL;
        r2->raw_readfn = NULL;
        r2->raw_writefn = NULL;
        r2->resetfn = NULL;
        r2->fieldoffset = 0;
        r2->bank_fieldoffsets[0] = 0;
        r2->bank_fieldoffsets[1] = 0;
    } else {
        bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];

        if (isbanked) {
            /*
             * The register is banked (the array holds both instances).
             * Overwrite fieldoffset with the bank for this security state:
             * the array is only used when defining banked registers, and
             * from here on only fieldoffset is consulted.
             */
            r2->fieldoffset = r->bank_fieldoffsets[ns];
        }
        if (state == ARM_CP_STATE_AA32) {
            if (isbanked) {
                /*
                 * If the register is banked then we don't need to migrate or
                 * reset the 32-bit instance in certain cases:
                 *
                 * 1) If the register has both 32-bit and 64-bit instances
                 *    then we can count on the 64-bit instance taking care
                 *    of the non-secure bank.
                 * 2) If ARMv8 is enabled then we can count on a 64-bit
                 *    version taking care of the secure bank. This requires
                 *    that separate 32 and 64-bit definitions are provided.
                 */
                if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                    (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
                    r2->type |= ARM_CP_ALIAS;
                }
            } else if ((secstate != r->secure) && !ns) {
                /*
                 * The register is not banked so we only want to allow
                 * migration of the non-secure instance.
                 */
                r2->type |= ARM_CP_ALIAS;
            }

            if (HOST_BIG_ENDIAN &&
                r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
        }
    }

    /*
     * By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if (r2->type & ARM_CP_SPECIAL_MASK) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /*
     * Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
}
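
/*
 * Illustrative note: for a standard AArch64 sysreg such as SCTLR_EL1
 * (op0=3, op1=0, CRn=1, CRm=0, op2=0), the hash key computed above is
 *     ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, 1, 0, 3, 0, 0)
 * so lookups at translate time and raw accesses for migration both
 * resolve to the same ARMCPRegInfo entry.
 */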

void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /*
     * Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    CPState state;

    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /*
     * This API is only for Arm's system coprocessors (14 and 15) or
     * (M-profile or v7A-and-earlier only) for implementation defined
     * coprocessors in the range 0..7. Our decode assumes this, since
     * 8..13 can be used for other insns including VFP and Neon. See
     * valid_cp() in translate.c. Assert here that we haven't tried
     * to use an invalid coprocessor number.
     */
    switch (r->state) {
    case ARM_CP_STATE_BOTH:
        /* 0 has a special meaning, but otherwise the same rules as AA32. */
        if (r->cp == 0) {
            break;
        }
        /* fall through */
    case ARM_CP_STATE_AA32:
        if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
            !arm_feature(&cpu->env, ARM_FEATURE_M)) {
            assert(r->cp >= 14 && r->cp <= 15);
        } else {
            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
        }
        break;
    case ARM_CP_STATE_AA64:
        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
        break;
    default:
        g_assert_not_reached();
    }
    /*
     * The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        CPAccessRights mask;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
        case 5:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            g_assert_not_reached();
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /*
     * Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }

    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
                        cpu_isar_feature(aa64_xs, cpu)) {
                        /*
                         * This is a TLBI insn which has an NXS variant. The
                         * NXS variant is at the same encoding except that
                         * crn is +1, and has the same behaviour except for
                         * fine-grained trapping. Add the NXS insn here and
                         * then fall through to add the normal register.
                         * add_cpreg_to_hashtable() copies the cpreg struct
                         * and name that it is passed, so it's OK to use
                         * a local struct here.
                         */
                        ARMCPRegInfo nxs_ri = *r;
                        g_autofree char *name = g_strdup_printf("%sNXS", r->name);

                        assert(state == ARM_CP_STATE_AA64);
                        assert(nxs_ri.crn < 0xf);
                        nxs_ri.crn++;
                        if (nxs_ri.fgt) {
                            nxs_ri.fgt |= R_FGT_NXS_MASK;
                        }
                        add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, name);
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /*
                         * Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        case ARM_CP_SECSTATE_BOTH:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        default:
                            g_assert_not_reached();
                        }
                    } else {
                        /*
                         * AArch64 registers get mapped to non-secure instance
                         * of AArch32
                         */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}

/* Define a whole list of registers */
void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
                                        void *opaque, size_t len)
{
    size_t i;
    for (i = 0; i < len; ++i) {
        define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
    }
}
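
/*
 * Illustrative sketch of how a caller typically registers a block of
 * registers. The register "DEMO_CONST" and its encoding are hypothetical
 * and chosen only for this example; define_arm_cp_regs() is the usual
 * wrapper that passes a NULL opaque and infers the array length.
 */
static inline void define_demo_cp_regs(ARMCPU *cpu)
{
    static const ARMCPRegInfo demo_reginfo[] = {
        /* A read-only constant cp15 register, visible at PL1 and above. */
        { .name = "DEMO_CONST", .cp = 15, .crn = 0, .crm = 15,
          .opc1 = 0, .opc2 = 7, .access = PL1_R,
          .type = ARM_CP_CONST, .resetvalue = 0x12345678 },
    };
    define_arm_cp_regs(cpu, demo_reginfo);
}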

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
                                 const ARMCPRegUserSpaceInfo *mods,
                                 size_t mods_len)
{
    for (size_t mi = 0; mi < mods_len; ++mi) {
        const ARMCPRegUserSpaceInfo *m = mods + mi;
        GPatternSpec *pat = NULL;

        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (size_t ri = 0; ri < regs_len; ++ri) {
            ARMCPRegInfo *r = regs + ri;

            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
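
/*
 * Illustrative sketch of the kind of mods table a user-mode front end
 * might pass in; the masks here are placeholders, not values taken from
 * the real tables. A glob entry turns every matching register into a
 * RAZ constant, while an exact-match entry can expose selected reset
 * bits.
 */
static inline void demo_modify_user_regs(ARMCPRegInfo *regs, size_t n)
{
    static const ARMCPRegUserSpaceInfo demo_mods[] = {
        /* Expose only the low 32 bits of this register's reset value. */
        { .name = "MIDR_EL1", .exported_bits = 0xffffffffULL },
        /* Hide everything matching the glob as RAZ. */
        { .name = "ID_AA64*", .is_glob = true },
    };
    modify_arm_cp_regs_with_len(regs, n, demo_mods, ARRAY_SIZE(demo_mods));
}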

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /*
     * Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /*
         * Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /*
         * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
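
/*
 * For reference, the CPSR bit layout that cpsr_read() assembles:
 *
 *   N=31  Z=30  C=29  V=28  Q=27  IT[1:0]=26:25  J=24
 *   GE[3:0]=19:16  IT[7:2]=15:10  E=9  A=8  I=7  F=6  T=5  M[4:0]=4:0
 *
 * The cached NZCVQ, IT, GE, T and AIF fields are materialised into this
 * layout on top of env->uncached_cpsr.
 */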

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;
    bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
        (mask & (CPSR_M | CPSR_E | CPSR_IL));

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /*
     * In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /*
             * Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /*
             * Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /*
             * Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /*
             * Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /*
             * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
    if (tcg_enabled() && rebuild_hflags) {
        arm_rebuild_hflags(env);
    }
}

#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}

/*
 * Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria. Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 * Dimensions:
 * target_el_table[2][2][2][2][2][4]
 *                |  |  |  |  |  +--- Current EL
 *                |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                |  |  |  +--------- HCR mask override
 *                |  |  +------------ SCR exec state control
 *                |  +--------------- SCR mask override
 *                +------------------ 32-bit(0)/64-bit(1) EL3
 *
 * The table values are as such:
 * 0-3 = EL0-EL3
 * -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken". The two cases where this is applicable are:
 * 1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 * 2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
};
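
/*
 * Worked example: a physical IRQ taken from non-secure EL0 on a CPU
 * with an AArch64 EL3 (is64=1), SCR_EL3.IRQ=0 (scr=0), SCR_EL3.RW=1
 * (rw=1) and HCR_EL2.IMO=1 (hcr=1) indexes
 * target_el_table[1][0][1][1][0][0], i.e. the "1 0 1 1" row above,
 * whose non-secure EL0 entry is 2: the interrupt is routed to EL2.
 */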

/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cpu_env(cs);
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = arm_scr_rw_eff(env);
    } else {
        /*
         * Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
    case EXCP_NMI:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2. Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

void arm_log_exception(CPUState *cs)
{
    int idx = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
            [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
            [EXCP_VSERR] = "Virtual SERR",
            [EXCP_GPC] = "Granule Protection Check",
            [EXCP_NMI] = "NMI",
            [EXCP_VINMI] = "Virtual IRQ NMI",
            [EXCP_VFNMI] = "Virtual FIQ NMI",
            [EXCP_MON_TRAP] = "Monitor Trap",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
                      idx, exc, cs->cpu_index);
    }
}

/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set. This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and LR registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy from r8-r14. Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
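
/*
 * For reference, the full AArch64 <-> AArch32 mapping used by this
 * function and by aarch64_sync_64_to_32() below:
 *
 *   x0-x7   <-> r0-r7             x16/x17 <-> lr_irq/sp_irq
 *   x8-x12  <-> r8-r12 (usr)      x18/x19 <-> lr_svc/sp_svc
 *   x13/x14 <-> sp/lr (usr/sys)   x20/x21 <-> lr_abt/sp_abt
 *   x15     <-> sp_hyp            x22/x23 <-> lr_und/sp_und
 *                                 x24-x30 <-> r8_fiq-r14_fiq
 */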

/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set. This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14. Otherwise, we copy the x register to the banked r13 and r14
     * for the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy to r8-r14. Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}

static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->pstate &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits. */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set. */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
            env->uncached_cpsr |= CPSR_SSBS;
        } else {
            env->uncached_cpsr &= ~CPSR_SSBS;
        }
    }

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless... */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state. */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0. */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * This is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4.
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }
}

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x08;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        /* Debug exceptions are reported differently on AArch32 */
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_BREAKPOINT:
        case EC_BREAKPOINT_SAME_EL:
        case EC_AA32_BKPT:
        case EC_VECTORCATCH:
            env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
                                                     0, 0, 0x22);
            break;
        case EC_WATCHPOINT:
            env->exception.syndrome = syn_set_ec(env->exception.syndrome,
                                                 EC_DATAABORT);
            break;
        case EC_WATCHPOINT_SAME_EL:
            env->exception.syndrome = syn_set_ec(env->exception.syndrome,
                                                 EC_DATAABORT_SAME_EL);
            break;
        }
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction. */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_VSERR:
        {
            /*
             * Note that this is reported as a data abort, but the DFAR
             * has an UNKNOWN value. Construct the SError syndrome from
             * AET and ExT fields.
             */
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = arm_fi_to_lfsc(&fi);
            } else {
                env->exception.fsr = arm_fi_to_sfsc(&fi);
            }
            env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
            A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
            qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
                          env->exception.fsr);

            new_mode = ARM_CPU_MODE_ABT;
            addr = 0x10;
            mask = CPSR_A | CPSR_I;
            offset = 8;
        }
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    case EXCP_MON_TRAP:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x04;
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /*
         * ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}

static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
{
    /*
     * Return the register number of the AArch64 view of the AArch32
     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
     * be that of the AArch32 mode the exception came from.
     */
    int mode = env->uncached_cpsr & CPSR_M;

    switch (aarch32_reg) {
    case 0 ... 7:
        return aarch32_reg;
    case 8 ... 12:
        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
    case 13:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
            return 13;
        case ARM_CPU_MODE_HYP:
            return 15;
        case ARM_CPU_MODE_IRQ:
            return 17;
        case ARM_CPU_MODE_SVC:
            return 19;
        case ARM_CPU_MODE_ABT:
            return 21;
        case ARM_CPU_MODE_UND:
            return 23;
        case ARM_CPU_MODE_FIQ:
            return 29;
        default:
            g_assert_not_reached();
        }
    case 14:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
        case ARM_CPU_MODE_HYP:
            return 14;
        case ARM_CPU_MODE_IRQ:
            return 16;
        case ARM_CPU_MODE_SVC:
            return 18;
        case ARM_CPU_MODE_ABT:
            return 20;
        case ARM_CPU_MODE_UND:
            return 22;
        case ARM_CPU_MODE_FIQ:
            return 30;
        default:
            g_assert_not_reached();
        }
    case 15:
        return 31;
    default:
        g_assert_not_reached();
    }
}
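
/*
 * Example: for an MCR trap taken from SVC mode with Rt=13,
 * aarch64_regnum() returns 19, i.e. the AArch64 view x19 of SP_svc,
 * matching the mapping used by aarch64_sync_32_to_64() above.
 */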

static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
    uint32_t ret = cpsr_read(env);

    /* Move DIT to the correct location for SPSR_ELx */
    if (ret & CPSR_DIT) {
        ret &= ~CPSR_DIT;
        ret |= PSTATE_DIT;
    }
    /* Merge PSTATE.SS into SPSR_ELx */
    ret |= env->pstate & PSTATE_SS;

    return ret;
}

static bool syndrome_is_sync_extabt(uint32_t syndrome)
{
    /* Return true if this syndrome value is a synchronous external abort */
    switch (syn_get_ec(syndrome)) {
    case EC_INSNABORT:
    case EC_INSNABORT_SAME_EL:
    case EC_DATAABORT:
    case EC_DATAABORT_SAME_EL:
        /* Look at fault status code for all the synchronous ext abort cases */
        switch (syndrome & 0x3f) {
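        /*
         * FSC 0x10 is "synchronous external abort, not on translation
         * table walk"; 0x13..0x17 are synchronous external aborts on a
         * translation table walk at levels -1, 0, 1, 2 and 3.
         */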
        case 0x10:
        case 0x13:
        case 0x14:
        case 0x15:
        case 0x16:
        case 0x17:
            return true;
        default:
            return false;
        }
    default:
        return false;
    }
}

/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    vaddr addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

    if (tcg_enabled()) {
        /*
         * Note that new_el can never be 0. If cur_el is 0, then
         * el0_a64 is is_a64(), else el0_a64 is ignored.
         */
        aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
    }

    if (cur_el < new_el) {
        /*
         * Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = arm_scr_rw_eff(env);
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_GPC:
        qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
                      env->cp15.mfar_el3);
        /* fall through */
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * FEAT_DoubleFault allows synchronous external aborts taken to EL3
         * to be taken to the SError vector entrypoint.
         */
        if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
            syndrome_is_sync_extabt(env->exception.syndrome)) {
            addr += 0x180;
        }
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
             * the raw register field from the insn; when taking this to
             * AArch64 we must convert it to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP15RRTTRAP:
        case EC_CP14RRTTRAP:
            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
    case EXCP_NMI:
    case EXCP_VINMI:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
    case EXCP_VFNMI:
        addr += 0x100;
        break;
    case EXCP_VSERR:
        addr += 0x180;
        /* Construct the SError syndrome from IDS and ISS fields. */
        env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;

        if (cur_el == 1 && new_el == 1) {
            uint64_t hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
                (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
                /*
                 * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
                 * by setting M[3:2] to 0b10.
                 * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
                 * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
                 */
                old_mode = deposit32(old_mode, 2, 2, 2);
            }
        }
    } else {
        old_mode = cpsr_read_for_spsr_elx(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        new_mode |= PSTATE_TCO;
    }

    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
            new_mode |= PSTATE_SSBS;
        } else {
            new_mode &= ~PSTATE_SSBS;
        }
    }

    if (cpu_isar_feature(aa64_nmi, cpu)) {
        if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
            new_mode |= PSTATE_ALLINT;
        } else {
            new_mode &= ~PSTATE_ALLINT;
        }
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = true;
    aarch64_restore_sp(env, new_el);

    if (tcg_enabled()) {
        helper_rebuild_hflags_a64(env, new_el);
    }

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
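
/*
 * For reference, the vector offsets applied above follow the AArch64
 * vector table layout: +0x000/+0x200 for the current EL with SP0/SPx,
 * +0x400/+0x600 for a lower EL using AArch64/AArch32, and within each
 * group +0x00 synchronous, +0x80 IRQ/vIRQ, +0x100 FIQ/vFIQ and
 * +0x180 SError.
 */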

/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not trapped
 * to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void tcg_handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif

/*
 * Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        tcg_handle_semihosting(cs);
        return;
    }
#endif

    /*
     * Hooks may change global state, so the BQL should be held; the
     * BQL also needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(bql_locked());
9138
9139 arm_call_pre_el_change_hook(cpu);
9140
9141 assert(!excp_is_internal(cs->exception_index));
9142 if (arm_el_is_aa64(env, new_el)) {
9143 arm_cpu_do_interrupt_aarch64(cs);
9144 } else {
9145 arm_cpu_do_interrupt_aarch32(cs);
9146 }
9147
9148 arm_call_el_change_hook(cpu);
9149
9150 if (!kvm_enabled()) {
9151 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
9152 }
9153 }
9154 #endif /* !CONFIG_USER_ONLY */
9155
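/*
 * Return the SCTLR value in effect at the given exception level,
 * resolving EL0 to the EL that owns its translation regime.
 */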
uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        switch (mmu_idx) {
        case ARMMMUIdx_E20_0:
            el = 2;
            break;
        case ARMMMUIdx_E30_0:
            el = 3;
            break;
        default:
            el = 1;
            break;
        }
    }
    return env->cp15.sctlr_el[el];
}

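/*
 * Return the two TBI (Top Byte Ignore) bits for this translation
 * regime: bit 0 covers the lower (TTBR0) address range, bit 1 the
 * upper (TTBR1) range.
 */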
int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (regime_is_stage2(mmu_idx)) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits. */
        return extract32(tcr, 20, 1) * 3;
    }
}

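/*
 * Return the two TBID bits, laid out as for aa64_va_parameter_tbi().
 * When a TBID bit is set, TBI applies only to data accesses in that
 * range, not to instruction fetches.
 */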
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (regime_is_stage2(mmu_idx)) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits. */
        return extract32(tcr, 29, 1) * 3;
    }
}

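/*
 * Return the two TCMA bits, laid out as above. When set, MTE accesses
 * using the "match all" logical address tag (0b0000 for the lower
 * range, 0b1111 for the upper) are not tag-checked.
 */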
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits. */
        return extract32(tcr, 30, 1) * 3;
    }
}

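/*
 * Decode the TG0/TG1 granule size fields. Note that the architecture
 * encodes the same three granule sizes differently in TG1 than in TG0.
 */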
static ARMGranuleSize tg0_to_gran_size(int tg)
{
    switch (tg) {
    case 0:
        return Gran4K;
    case 1:
        return Gran64K;
    case 2:
        return Gran16K;
    default:
        return GranInvalid;
    }
}

static ARMGranuleSize tg1_to_gran_size(int tg)
{
    switch (tg) {
    case 1:
        return Gran16K;
    case 2:
        return Gran4K;
    case 3:
        return Gran64K;
    default:
        return GranInvalid;
    }
}

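/*
 * Return whether the CPU implements the given translation granule,
 * for stage 2 if @stage2, else for stage 1.
 */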
static inline bool have4k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
                  : cpu_isar_feature(aa64_tgran4, cpu);
}

static inline bool have16k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
                  : cpu_isar_feature(aa64_tgran16, cpu);
}

static inline bool have64k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
                  : cpu_isar_feature(aa64_tgran64, cpu);
}

static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
                                         bool stage2)
{
    switch (gran) {
    case Gran4K:
        if (have4k(cpu, stage2)) {
            return gran;
        }
        break;
    case Gran16K:
        if (have16k(cpu, stage2)) {
            return gran;
        }
        break;
    case Gran64K:
        if (have64k(cpu, stage2)) {
            return gran;
        }
        break;
    case GranInvalid:
        break;
    }
    /*
     * If the guest selects a granule size that isn't implemented,
     * the architecture requires that we behave as if it selected one
     * that is (with an IMPDEF choice of which one to pick). We choose
     * to implement the smallest supported granule size.
     */
    if (have4k(cpu, stage2)) {
        return Gran4K;
    }
    if (have16k(cpu, stage2)) {
        return Gran16K;
    }
    assert(have64k(cpu, stage2));
    return Gran64K;
}

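/*
 * Decode the VA-range parameters (TxSZ, granule size, TBI, EPD, HPD,
 * shareability, PA size, HA/HD, DS) that TCR_ELx supplies for
 * @mmu_idx as they apply to address @va. @data selects a data access
 * rather than an instruction fetch, which only affects the TBI/TBID
 * composite; @el1_is_aa32 relaxes the stage 2 minimum TxSZ check for
 * an AArch32 EL1.
 */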
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    bool epd, hpd, tsz_oob, ds, ha, hd;
    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
    ARMGranuleSize gran;
    ARMCPU *cpu = env_archcpu(env);
    bool stage2 = regime_is_stage2(mmu_idx);

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
        if (stage2) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
        sh = extract32(tcr, 12, 2);
        ps = extract32(tcr, 16, 3);
        ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
        hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
        ds = extract64(tcr, 32, 1);
    } else {
        bool e0pd;

        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            gran = tg0_to_gran_size(extract32(tcr, 14, 2));
            epd = extract32(tcr, 7, 1);
            sh = extract32(tcr, 12, 2);
            hpd = extract64(tcr, 41, 1);
            e0pd = extract64(tcr, 55, 1);
        } else {
            tsz = extract32(tcr, 16, 6);
            gran = tg1_to_gran_size(extract32(tcr, 30, 2));
            epd = extract32(tcr, 23, 1);
            sh = extract32(tcr, 28, 2);
            hpd = extract64(tcr, 42, 1);
            e0pd = extract64(tcr, 56, 1);
        }
        ps = extract64(tcr, 32, 3);
        ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
        hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
        ds = extract64(tcr, 59, 1);

        if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
            regime_is_user(env, mmu_idx)) {
            epd = true;
        }
    }

    gran = sanitize_gran_size(cpu, gran, stage2);

    if (cpu_isar_feature(aa64_st, cpu)) {
        max_tsz = 48 - (gran == Gran64K);
    } else {
        max_tsz = 39;
    }

    /*
     * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
     * adjust the effective value of DS, as documented.
     */
    min_tsz = 16;
    if (gran == Gran64K) {
        if (cpu_isar_feature(aa64_lva, cpu)) {
            min_tsz = 12;
        }
        ds = false;
    } else if (ds) {
        if (regime_is_stage2(mmu_idx)) {
            if (gran == Gran16K) {
                ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
            }
        } else {
            if (gran == Gran16K) {
                ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
            }
        }
        if (ds) {
            min_tsz = 12;
        }
    }

    if (stage2 && el1_is_aa32) {
        /*
         * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
         * are loosened: a configured IPA of 40 bits is permitted even if
         * the implemented PA is less than that (and so a 40 bit IPA would
         * fault for an AArch64 EL1). See R_DTLMN.
         */
        min_tsz = MIN(min_tsz, 24);
    }

    if (tsz > max_tsz) {
        tsz = max_tsz;
        tsz_oob = true;
    } else if (tsz < min_tsz) {
        tsz = min_tsz;
        tsz_oob = true;
    } else {
        tsz_oob = false;
    }

    /* Present TBI as a composite with TBID. */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;
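    /*
     * For example, an instruction fetch (!data) from the upper range
     * of a regime with both TBI1 and TBID1 set has had bit 1 of tbi
     * cleared above, so the top byte of the address is not ignored.
     */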

    return (ARMVAParameters) {
        .tsz = tsz,
        .ps = ps,
        .sh = sh,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .tsz_oob = tsz_oob,
        .ds = ds,
        .ha = ha,
        .hd = ha && hd,
        .gran = gran,
    };
}
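
/*
 * Callers typically derive the input-address size for a range from
 * .tsz; as an illustrative sketch (not a copy of any specific caller):
 *
 *   ARMVAParameters p = aa64_va_parameters(env, addr, mmu_idx,
 *                                          true, false);
 *   int inputsize = 64 - p.tsz;   // number of valid VA bits
 */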

/*
 * Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2;

    /*
     * CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    /*
     * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);

        switch (fpen) {
        case 1:
            if (cur_el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            /* Trap from Secure PL0 or PL1 to Secure PL1. */
            if (!arm_el_is_aa64(env, 3)
                && (cur_el == 3 || arm_is_secure_below_el3(env))) {
                return 3;
            }
            if (cur_el <= 1) {
                return 1;
            }
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        cur_el <= 2 && !arm_is_secure_below_el3(env)) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /*
     * CPTR_EL2 is present in v7VE or v8, and changes format
     * with HCR_EL2.E2H (regardless of TGE).
     */
    if (cur_el <= 2) {
        if (hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
            case 1:
                if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else if (arm_is_el2_enabled(env)) {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3 : present in v8 */
    if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}

/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

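/* Return the MMU index for execution at exception level @el. */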
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    ARMMMUIdx idx;
    uint64_t hcr;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost. */
    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            idx = ARMMMUIdx_E20_0;
        } else if (arm_is_secure_below_el3(env) &&
                   !arm_el_is_aa64(env, 3)) {
            idx = ARMMMUIdx_E30_0;
        } else {
            idx = ARMMMUIdx_E10_0;
        }
        break;
    case 1:
        if (arm_pan_enabled(env)) {
            idx = ARMMMUIdx_E10_1_PAN;
        } else {
            idx = ARMMMUIdx_E10_1;
        }
        break;
    case 2:
        /* Note that TGE does not apply at EL2. */
        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            if (arm_pan_enabled(env)) {
                idx = ARMMMUIdx_E20_2_PAN;
            } else {
                idx = ARMMMUIdx_E20_2;
            }
        } else {
            idx = ARMMMUIdx_E2;
        }
        break;
    case 3:
        if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) {
            return ARMMMUIdx_E30_3_PAN;
        }
        return ARMMMUIdx_E3;
    default:
        g_assert_not_reached();
    }

    return idx;
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers. The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs. */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr. */
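    /*
     * Illustrative example: for vq == 5, a predicate holds 16 bits per
     * 128-bit quantum, so quantum 4 occupies the low 16 bits of p[1];
     * the first loop iteration below keeps those bits and clears the
     * rest of p[1], and later iterations clear p[2] and p[3] entirely.
     */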
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}

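/*
 * Return the effective vector length minus 1 for @el (in streaming
 * mode if @sm), or 0 to indicate that the relevant instructions are
 * disabled or trapped at @el.
 */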
static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
{
    int exc_el;

    if (sm) {
        exc_el = sme_exception_el(env, el);
    } else {
        exc_el = sve_exception_el(env, el);
    }
    if (exc_el) {
        return 0; /* disabled */
    }
    return sve_vqm1_for_el_sm(env, el, sm);
}

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64, sm;

    /* Nothing to do if no SVE. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL. */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;

    /*
     * Both AArch64.TakeException and AArch64.ExceptionReturn
     * invoke ResetSVEState when taking an exception from, or
     * returning to, AArch32 state when PSTATE.SM is enabled.
     */
    sm = FIELD_EX64(env->svcr, SVCR, SM);
    if (old_a64 != new_a64 && sm) {
        arm_reset_sve_state(env);
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_len = new_len = 0;
    if (old_a64) {
        old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
    }
    if (new_a64) {
        new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
    }

    /* When changing vector length, clear inaccessible state. */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

#ifndef CONFIG_USER_ONLY
ARMSecuritySpace arm_security_space(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_secure_to_space(env->v7m.secure);
    }

    /*
     * If EL3 is not supported then the secure state is implementation
     * defined, in which case QEMU defaults to non-secure.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        return ARMSS_NonSecure;
    }

    /* Check for AArch64 EL3 or AArch32 Mon. */
    if (is_a64(env)) {
        if (extract32(env->pstate, 2, 2) == 3) {
            if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
                return ARMSS_Root;
            } else {
                return ARMSS_Secure;
            }
        }
    } else {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            return ARMSS_Secure;
        }
    }

    return arm_security_space_below_el3(env);
}

ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
{
    assert(!arm_feature(env, ARM_FEATURE_M));

    /*
     * If EL3 is not supported then the secure state is implementation
     * defined, in which case QEMU defaults to non-secure.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        return ARMSS_NonSecure;
    }

    /*
     * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
     * Ignoring NSE when !NS retains consistency without having to
     * modify other predicates.
     */
    if (!(env->cp15.scr_el3 & SCR_NS)) {
        return ARMSS_Secure;
    } else if (env->cp15.scr_el3 & SCR_NSE) {
        return ARMSS_Realm;
    } else {
        return ARMSS_NonSecure;
    }
}
#endif /* !CONFIG_USER_ONLY */