/*
 * QEMU ARM CP Register access and descriptions
 *
 * Copyright (c) 2022 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#ifndef TARGET_ARM_CPREGS_H
#define TARGET_ARM_CPREGS_H

#include "hw/registerfields.h"
#include "target/arm/kvm-consts.h"

/*
 * ARMCPRegInfo type field bits:
 */
enum {
    /*
     * Register must be handled specially during translation.
     * The method is one of the values below:
     */
    ARM_CP_SPECIAL_MASK = 0x000f,
    /* Special: no change to PE state: writes ignored, reads ignored. */
    ARM_CP_NOP = 0x0001,
    /* Special: sysreg is WFI, for v5 and v6. */
    ARM_CP_WFI = 0x0002,
    /* Special: sysreg is NZCV. */
    ARM_CP_NZCV = 0x0003,
    /* Special: sysreg is CURRENTEL. */
    ARM_CP_CURRENTEL = 0x0004,
    /* Special: sysreg is DC ZVA or similar. */
    ARM_CP_DC_ZVA = 0x0005,
    ARM_CP_DC_GVA = 0x0006,
    ARM_CP_DC_GZVA = 0x0007,

    /* Flag: reads produce resetvalue; writes ignored. */
    ARM_CP_CONST = 1 << 4,
    /* Flag: For ARM_CP_STATE_AA32, sysreg is 64-bit. */
    ARM_CP_64BIT = 1 << 5,
    /*
     * Flag: TB should not be ended after a write to this register
     * (the default is that the TB ends after cp writes).
     */
    ARM_CP_SUPPRESS_TB_END = 1 << 6,
    /*
     * Flag: Permit a register definition to override a previous definition
     * for the same (cp, is64, crn, crm, opc1, opc2) tuple: either the new
     * or the old must have the ARM_CP_OVERRIDE bit set.
     */
    ARM_CP_OVERRIDE = 1 << 7,
    /*
     * Flag: Register is an alias view of some underlying state which is also
     * visible via another register, and that the other register is handling
     * migration and reset; registers marked ARM_CP_ALIAS will not be migrated
     * but may have their state set by syncing of register state from KVM.
     */
    ARM_CP_ALIAS = 1 << 8,
    /*
     * Flag: Register does I/O and therefore its accesses need to be marked
     * with translator_io_start() and also end the TB. In particular,
     * registers which implement clocks or timers require this.
     */
    ARM_CP_IO = 1 << 9,
    /*
     * Flag: Register has no underlying state and does not support raw access
     * for state saving/loading; it will not be used for either migration or
     * KVM state synchronization. Typically this is for "registers" which are
     * actually used as instructions for cache maintenance and so on.
     */
    ARM_CP_NO_RAW = 1 << 10,
    /*
     * Flag: The read or write hook might raise an exception; the generated
     * code will synchronize the CPU state before calling the hook so that it
     * is safe for the hook to call raise_exception().
     */
    ARM_CP_RAISES_EXC = 1 << 11,
    /*
     * Flag: Writes to the sysreg might change the exception level - typically
     * on older ARM chips. For those cases we need to re-read the new EL when
     * recomputing the translation flags.
     */
    ARM_CP_NEWEL = 1 << 12,
    /*
     * Flag: Access check for this sysreg is identical to accessing FPU state
     * from an instruction: use translation fp_access_check().
     */
    ARM_CP_FPU = 1 << 13,
    /*
     * Flag: Access check for this sysreg is identical to accessing SVE state
     * from an instruction: use translation sve_access_check().
     */
    ARM_CP_SVE = 1 << 14,
    /* Flag: Do not expose in gdb sysreg xml. */
    ARM_CP_NO_GDB = 1 << 15,
    /*
     * Flags: If EL3 but not EL2...
     * - UNDEF: discard the cpreg,
     * - KEEP: retain the cpreg as is,
     * - C_NZ: set const on the cpreg, but retain resetvalue,
     * - else: set const on the cpreg, zero resetvalue, aka RES0.
     * See rule RJFFP in section D1.1.3 of DDI0487H.a.
     */
    ARM_CP_EL3_NO_EL2_UNDEF = 1 << 16,
    ARM_CP_EL3_NO_EL2_KEEP = 1 << 17,
    ARM_CP_EL3_NO_EL2_C_NZ = 1 << 18,
    /*
     * Flag: Access check for this sysreg is constrained by the
     * ARM pseudocode function CheckSMEAccess().
     */
    ARM_CP_SME = 1 << 19,
    /*
     * Flag: one of the four EL2 registers which redirect to the
     * equivalent EL1 register when FEAT_NV2 is enabled.
     */
    ARM_CP_NV2_REDIRECT = 1 << 20,
};

/*
 * Interface for defining coprocessor registers.
 * Registers are defined in tables of arm_cp_reginfo structs
 * which are passed to define_arm_cp_regs().
 */

/*
 * When looking up a coprocessor register we look for it
 * via an integer which encodes all of:
 *  coprocessor number
 *  Crn, Crm, opc1, opc2 fields
 *  32 or 64 bit register (ie is it accessed via MRC/MCR
 *    or via MRRC/MCRR?)
 *  non-secure/secure bank (AArch32 only)
 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
 * (In this case crn and opc2 should be zero.)
 * For AArch64, there is no 32/64 bit size distinction;
 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
 * and 4 bit CRn and CRm. The encoding patterns are chosen
 * to be easy to convert to and from the KVM encodings, and also
 * so that the hashtable can contain both AArch32 and AArch64
 * registers (to allow for interprocessing where we might run
 * 32 bit code on a 64 bit core).
 */
/*
 * This bit is private to our hashtable cpreg; in KVM register
 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
 * in the upper bits of the 64 bit ID.
 */
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)

/*
 * To enable banking of coprocessor registers depending on ns-bit we
 * add a bit to distinguish between secure and non-secure cpregs in the
 * hashtable.
 */
#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)

#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)       \
    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) |  \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))

#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
    (CP_REG_AA64_MASK |                                 \
     ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
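
/*
 * Worked example (illustrative only): the hashtable key for the AArch32
 * non-secure SCTLR (cp15, opc1=0, crn=1, crm=0, opc2=0, accessed via
 * MRC/MCR) would be
 *     ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0)
 * and the key for the AArch64 MIDR_EL1 (op0=3, op1=0, crn=0, crm=0, op2=0),
 * whose reginfo is registered with cp==0x13 as described for
 * ARMCPRegInfo::cp below, would be
 *     ENCODE_AA64_CP_REG(0x13, 0, 0, 3, 0, 0)
 */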

/*
 * Convert a full 64 bit KVM register ID to the truncated 32 bit
 * version used as a key for the coprocessor register hashtable
 */
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else {
        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
            cpregid |= (1 << 15);
        }

        /*
         * KVM is always non-secure so add the NS flag on AArch32 register
         * entries.
         */
        cpregid |= 1 << CP_REG_NS_SHIFT;
    }
    return cpregid;
}

/*
 * Convert a truncated 32 bit hashtable key into the full
 * 64 bit KVM register ID.
 */
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}
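
/*
 * A minimal sketch of how the two conversions above pair up, shown for an
 * AArch64 system register ID (the kvmid value is a placeholder):
 *
 *     uint64_t kvmid = ...;                  // e.g. from KVM_GET_REG_LIST
 *     uint32_t key = kvm_to_cpreg_id(kvmid); // truncated hashtable key
 *     uint64_t back = cpreg_to_kvm_id(key);  // full 64 bit KVM ID again
 *
 * For AArch64 sysregs the truncation is lossless: the only information
 * dropped (the size and KVM_REG_ARM64 prefix in the upper bits) is implied
 * by CP_REG_AA64_MASK and restored by cpreg_to_kvm_id().
 */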

/*
 * Valid values for ARMCPRegInfo state field, indicating which of
 * the AArch32 and AArch64 execution states this register is visible in.
 * If the reginfo doesn't explicitly specify then it is AArch32 only.
 * If the reginfo is declared to be visible in both states then a second
 * reginfo is synthesised for the AArch32 view of the AArch64 register,
 * such that the AArch32 view is the lower 32 bits of the AArch64 one.
 * Note that we rely on the values of these enums as we iterate through
 * the various states in some places.
 */
typedef enum {
    ARM_CP_STATE_AA32 = 0,
    ARM_CP_STATE_AA64 = 1,
    ARM_CP_STATE_BOTH = 2,
} CPState;

/*
 * ARM CP register secure state flags. These flags identify security state
 * attributes for a given CP register entry.
 * The existence of both or neither secure and non-secure flags indicates that
 * the register has both a secure and non-secure hash entry. A single one of
 * these flags causes the register to only be hashed for the specified
 * security state.
 * Although definitions may have any combination of the S/NS bits, each
 * registered entry will only have one to identify whether the entry is secure
 * or non-secure.
 */
typedef enum {
    ARM_CP_SECSTATE_BOTH = 0,       /* define one cpreg for each secstate */
    ARM_CP_SECSTATE_S = (1 << 0),   /* bit[0]: Secure state register */
    ARM_CP_SECSTATE_NS = (1 << 1),  /* bit[1]: Non-secure state register */
} CPSecureState;

/*
 * Access rights:
 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
 * (ie any of the privileged modes in Secure state, or Monitor mode).
 * If a register is accessible in one privilege level it's always accessible
 * in higher privilege levels too. Since "Secure PL1" also follows this rule
 * (ie anything visible in PL2 is visible in S-PL1, some things are only
 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
 * terminology a little and call this PL3.
 * In AArch64 things are somewhat simpler as the PLx bits line up exactly
 * with the ELx exception levels.
 *
 * If access permissions for a register are more complex than can be
 * described with these bits, then use a laxer set of restrictions, and
 * do the more restrictive/complex check inside a helper function.
 */
typedef enum {
    PL3_R = 0x80,
    PL3_W = 0x40,
    PL2_R = 0x20 | PL3_R,
    PL2_W = 0x10 | PL3_W,
    PL1_R = 0x08 | PL2_R,
    PL1_W = 0x04 | PL2_W,
    PL0_R = 0x02 | PL1_R,
    PL0_W = 0x01 | PL1_W,

    /*
     * For user-mode some registers are accessible to EL0 via a kernel
     * trap-and-emulate ABI. In this case we define the read permissions
     * as actually being PL0_R. However some bits of any given register
     * may still be masked.
     */
#ifdef CONFIG_USER_ONLY
    PL0U_R = PL0_R,
#else
    PL0U_R = PL1_R,
#endif

    PL3_RW = PL3_R | PL3_W,
    PL2_RW = PL2_R | PL2_W,
    PL1_RW = PL1_R | PL1_W,
    PL0_RW = PL0_R | PL0_W,
} CPAccessRights;
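
/*
 * Because each PLn_R/W constant includes the access bits of all higher
 * privilege levels, the runtime check (see cp_access_ok() further down)
 * is a single shift-and-mask of bit (2 * EL + isread). For example
 * (illustrative only), for a register with .access = PL1_RW (0xfc):
 *
 *     cp_access_ok(0, ri, 1)  ->  (0xfc >> 1) & 1 == 0   EL0 read denied
 *     cp_access_ok(2, ri, 0)  ->  (0xfc >> 4) & 1 == 1   EL2 write allowed
 */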

typedef enum CPAccessResult {
    /* Access is permitted */
    CP_ACCESS_OK = 0,

    /*
     * Combined with one of the following, the low 2 bits indicate the
     * target exception level. If 0, the exception is taken to the usual
     * target EL (EL1 or PL1 if in EL0, otherwise to the current EL).
     */
    CP_ACCESS_EL_MASK = 3,

    /*
     * Access fails due to a configurable trap or enable which would
     * result in a categorized exception syndrome giving information about
     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
     * 0xc or 0x18).
     */
    CP_ACCESS_TRAP = (1 << 2),
    CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP | 2,
    CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP | 3,

    /*
     * Access fails and results in an exception syndrome 0x0 ("uncategorized").
     * Note that this is not a catch-all case -- the set of cases which may
     * result in this failure is specifically defined by the architecture.
     * This trap is always to the usual target EL, never directly to a
     * specified target EL.
     */
    CP_ACCESS_TRAP_UNCATEGORIZED = (2 << 2),
} CPAccessResult;
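
/*
 * An access-check hook (CPAccessFn, declared further down) returns one of
 * these values. A hedged sketch, not any real register's rule -- the
 * condition tested here is purely illustrative:
 *
 *     static CPAccessResult access_demo(CPUARMState *env,
 *                                       const ARMCPRegInfo *ri, bool isread)
 *     {
 *         if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TVM)) {
 *             return CP_ACCESS_TRAP_EL2;
 *         }
 *         return CP_ACCESS_OK;
 *     }
 */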

/* Indexes into fgt_read[] */
#define FGTREG_HFGRTR 0
#define FGTREG_HDFGRTR 1
/* Indexes into fgt_write[] */
#define FGTREG_HFGWTR 0
#define FGTREG_HDFGWTR 1
/* Indexes into fgt_exec[] */
#define FGTREG_HFGITR 0

FIELD(HFGRTR_EL2, AFSR0_EL1, 0, 1)
FIELD(HFGRTR_EL2, AFSR1_EL1, 1, 1)
FIELD(HFGRTR_EL2, AIDR_EL1, 2, 1)
FIELD(HFGRTR_EL2, AMAIR_EL1, 3, 1)
FIELD(HFGRTR_EL2, APDAKEY, 4, 1)
FIELD(HFGRTR_EL2, APDBKEY, 5, 1)
FIELD(HFGRTR_EL2, APGAKEY, 6, 1)
FIELD(HFGRTR_EL2, APIAKEY, 7, 1)
FIELD(HFGRTR_EL2, APIBKEY, 8, 1)
FIELD(HFGRTR_EL2, CCSIDR_EL1, 9, 1)
FIELD(HFGRTR_EL2, CLIDR_EL1, 10, 1)
FIELD(HFGRTR_EL2, CONTEXTIDR_EL1, 11, 1)
FIELD(HFGRTR_EL2, CPACR_EL1, 12, 1)
FIELD(HFGRTR_EL2, CSSELR_EL1, 13, 1)
FIELD(HFGRTR_EL2, CTR_EL0, 14, 1)
FIELD(HFGRTR_EL2, DCZID_EL0, 15, 1)
FIELD(HFGRTR_EL2, ESR_EL1, 16, 1)
FIELD(HFGRTR_EL2, FAR_EL1, 17, 1)
FIELD(HFGRTR_EL2, ISR_EL1, 18, 1)
FIELD(HFGRTR_EL2, LORC_EL1, 19, 1)
FIELD(HFGRTR_EL2, LOREA_EL1, 20, 1)
FIELD(HFGRTR_EL2, LORID_EL1, 21, 1)
FIELD(HFGRTR_EL2, LORN_EL1, 22, 1)
FIELD(HFGRTR_EL2, LORSA_EL1, 23, 1)
FIELD(HFGRTR_EL2, MAIR_EL1, 24, 1)
FIELD(HFGRTR_EL2, MIDR_EL1, 25, 1)
FIELD(HFGRTR_EL2, MPIDR_EL1, 26, 1)
FIELD(HFGRTR_EL2, PAR_EL1, 27, 1)
FIELD(HFGRTR_EL2, REVIDR_EL1, 28, 1)
FIELD(HFGRTR_EL2, SCTLR_EL1, 29, 1)
FIELD(HFGRTR_EL2, SCXTNUM_EL1, 30, 1)
FIELD(HFGRTR_EL2, SCXTNUM_EL0, 31, 1)
FIELD(HFGRTR_EL2, TCR_EL1, 32, 1)
FIELD(HFGRTR_EL2, TPIDR_EL1, 33, 1)
FIELD(HFGRTR_EL2, TPIDRRO_EL0, 34, 1)
FIELD(HFGRTR_EL2, TPIDR_EL0, 35, 1)
FIELD(HFGRTR_EL2, TTBR0_EL1, 36, 1)
FIELD(HFGRTR_EL2, TTBR1_EL1, 37, 1)
FIELD(HFGRTR_EL2, VBAR_EL1, 38, 1)
FIELD(HFGRTR_EL2, ICC_IGRPENN_EL1, 39, 1)
FIELD(HFGRTR_EL2, ERRIDR_EL1, 40, 1)
FIELD(HFGRTR_EL2, ERRSELR_EL1, 41, 1)
FIELD(HFGRTR_EL2, ERXFR_EL1, 42, 1)
FIELD(HFGRTR_EL2, ERXCTLR_EL1, 43, 1)
FIELD(HFGRTR_EL2, ERXSTATUS_EL1, 44, 1)
FIELD(HFGRTR_EL2, ERXMISCN_EL1, 45, 1)
FIELD(HFGRTR_EL2, ERXPFGF_EL1, 46, 1)
FIELD(HFGRTR_EL2, ERXPFGCTL_EL1, 47, 1)
FIELD(HFGRTR_EL2, ERXPFGCDN_EL1, 48, 1)
FIELD(HFGRTR_EL2, ERXADDR_EL1, 49, 1)
FIELD(HFGRTR_EL2, NACCDATA_EL1, 50, 1)
/* 51-53: RES0 */
FIELD(HFGRTR_EL2, NSMPRI_EL1, 54, 1)
FIELD(HFGRTR_EL2, NTPIDR2_EL0, 55, 1)
/* 56-63: RES0 */

/* These match HFGRTR but bits for RO registers are RES0 */
FIELD(HFGWTR_EL2, AFSR0_EL1, 0, 1)
FIELD(HFGWTR_EL2, AFSR1_EL1, 1, 1)
FIELD(HFGWTR_EL2, AMAIR_EL1, 3, 1)
FIELD(HFGWTR_EL2, APDAKEY, 4, 1)
FIELD(HFGWTR_EL2, APDBKEY, 5, 1)
FIELD(HFGWTR_EL2, APGAKEY, 6, 1)
FIELD(HFGWTR_EL2, APIAKEY, 7, 1)
FIELD(HFGWTR_EL2, APIBKEY, 8, 1)
FIELD(HFGWTR_EL2, CONTEXTIDR_EL1, 11, 1)
FIELD(HFGWTR_EL2, CPACR_EL1, 12, 1)
FIELD(HFGWTR_EL2, CSSELR_EL1, 13, 1)
FIELD(HFGWTR_EL2, ESR_EL1, 16, 1)
FIELD(HFGWTR_EL2, FAR_EL1, 17, 1)
FIELD(HFGWTR_EL2, LORC_EL1, 19, 1)
FIELD(HFGWTR_EL2, LOREA_EL1, 20, 1)
FIELD(HFGWTR_EL2, LORN_EL1, 22, 1)
FIELD(HFGWTR_EL2, LORSA_EL1, 23, 1)
FIELD(HFGWTR_EL2, MAIR_EL1, 24, 1)
FIELD(HFGWTR_EL2, PAR_EL1, 27, 1)
FIELD(HFGWTR_EL2, SCTLR_EL1, 29, 1)
FIELD(HFGWTR_EL2, SCXTNUM_EL1, 30, 1)
FIELD(HFGWTR_EL2, SCXTNUM_EL0, 31, 1)
FIELD(HFGWTR_EL2, TCR_EL1, 32, 1)
FIELD(HFGWTR_EL2, TPIDR_EL1, 33, 1)
FIELD(HFGWTR_EL2, TPIDRRO_EL0, 34, 1)
FIELD(HFGWTR_EL2, TPIDR_EL0, 35, 1)
FIELD(HFGWTR_EL2, TTBR0_EL1, 36, 1)
FIELD(HFGWTR_EL2, TTBR1_EL1, 37, 1)
FIELD(HFGWTR_EL2, VBAR_EL1, 38, 1)
FIELD(HFGWTR_EL2, ICC_IGRPENN_EL1, 39, 1)
FIELD(HFGWTR_EL2, ERRSELR_EL1, 41, 1)
FIELD(HFGWTR_EL2, ERXCTLR_EL1, 43, 1)
FIELD(HFGWTR_EL2, ERXSTATUS_EL1, 44, 1)
FIELD(HFGWTR_EL2, ERXMISCN_EL1, 45, 1)
FIELD(HFGWTR_EL2, ERXPFGCTL_EL1, 47, 1)
FIELD(HFGWTR_EL2, ERXPFGCDN_EL1, 48, 1)
FIELD(HFGWTR_EL2, ERXADDR_EL1, 49, 1)
FIELD(HFGWTR_EL2, NACCDATA_EL1, 50, 1)
FIELD(HFGWTR_EL2, NSMPRI_EL1, 54, 1)
FIELD(HFGWTR_EL2, NTPIDR2_EL0, 55, 1)

FIELD(HFGITR_EL2, ICIALLUIS, 0, 1)
FIELD(HFGITR_EL2, ICIALLU, 1, 1)
FIELD(HFGITR_EL2, ICIVAU, 2, 1)
FIELD(HFGITR_EL2, DCIVAC, 3, 1)
FIELD(HFGITR_EL2, DCISW, 4, 1)
FIELD(HFGITR_EL2, DCCSW, 5, 1)
FIELD(HFGITR_EL2, DCCISW, 6, 1)
FIELD(HFGITR_EL2, DCCVAU, 7, 1)
FIELD(HFGITR_EL2, DCCVAP, 8, 1)
FIELD(HFGITR_EL2, DCCVADP, 9, 1)
FIELD(HFGITR_EL2, DCCIVAC, 10, 1)
FIELD(HFGITR_EL2, DCZVA, 11, 1)
FIELD(HFGITR_EL2, ATS1E1R, 12, 1)
FIELD(HFGITR_EL2, ATS1E1W, 13, 1)
FIELD(HFGITR_EL2, ATS1E0R, 14, 1)
FIELD(HFGITR_EL2, ATS1E0W, 15, 1)
FIELD(HFGITR_EL2, ATS1E1RP, 16, 1)
FIELD(HFGITR_EL2, ATS1E1WP, 17, 1)
FIELD(HFGITR_EL2, TLBIVMALLE1OS, 18, 1)
FIELD(HFGITR_EL2, TLBIVAE1OS, 19, 1)
FIELD(HFGITR_EL2, TLBIASIDE1OS, 20, 1)
FIELD(HFGITR_EL2, TLBIVAAE1OS, 21, 1)
FIELD(HFGITR_EL2, TLBIVALE1OS, 22, 1)
FIELD(HFGITR_EL2, TLBIVAALE1OS, 23, 1)
FIELD(HFGITR_EL2, TLBIRVAE1OS, 24, 1)
FIELD(HFGITR_EL2, TLBIRVAAE1OS, 25, 1)
FIELD(HFGITR_EL2, TLBIRVALE1OS, 26, 1)
FIELD(HFGITR_EL2, TLBIRVAALE1OS, 27, 1)
FIELD(HFGITR_EL2, TLBIVMALLE1IS, 28, 1)
FIELD(HFGITR_EL2, TLBIVAE1IS, 29, 1)
FIELD(HFGITR_EL2, TLBIASIDE1IS, 30, 1)
FIELD(HFGITR_EL2, TLBIVAAE1IS, 31, 1)
FIELD(HFGITR_EL2, TLBIVALE1IS, 32, 1)
FIELD(HFGITR_EL2, TLBIVAALE1IS, 33, 1)
FIELD(HFGITR_EL2, TLBIRVAE1IS, 34, 1)
FIELD(HFGITR_EL2, TLBIRVAAE1IS, 35, 1)
FIELD(HFGITR_EL2, TLBIRVALE1IS, 36, 1)
FIELD(HFGITR_EL2, TLBIRVAALE1IS, 37, 1)
FIELD(HFGITR_EL2, TLBIRVAE1, 38, 1)
FIELD(HFGITR_EL2, TLBIRVAAE1, 39, 1)
FIELD(HFGITR_EL2, TLBIRVALE1, 40, 1)
FIELD(HFGITR_EL2, TLBIRVAALE1, 41, 1)
FIELD(HFGITR_EL2, TLBIVMALLE1, 42, 1)
FIELD(HFGITR_EL2, TLBIVAE1, 43, 1)
FIELD(HFGITR_EL2, TLBIASIDE1, 44, 1)
FIELD(HFGITR_EL2, TLBIVAAE1, 45, 1)
FIELD(HFGITR_EL2, TLBIVALE1, 46, 1)
FIELD(HFGITR_EL2, TLBIVAALE1, 47, 1)
FIELD(HFGITR_EL2, CFPRCTX, 48, 1)
FIELD(HFGITR_EL2, DVPRCTX, 49, 1)
FIELD(HFGITR_EL2, CPPRCTX, 50, 1)
FIELD(HFGITR_EL2, ERET, 51, 1)
FIELD(HFGITR_EL2, SVC_EL0, 52, 1)
FIELD(HFGITR_EL2, SVC_EL1, 53, 1)
FIELD(HFGITR_EL2, DCCVAC, 54, 1)
FIELD(HFGITR_EL2, NBRBINJ, 55, 1)
FIELD(HFGITR_EL2, NBRBIALL, 56, 1)

FIELD(HDFGRTR_EL2, DBGBCRN_EL1, 0, 1)
FIELD(HDFGRTR_EL2, DBGBVRN_EL1, 1, 1)
FIELD(HDFGRTR_EL2, DBGWCRN_EL1, 2, 1)
FIELD(HDFGRTR_EL2, DBGWVRN_EL1, 3, 1)
FIELD(HDFGRTR_EL2, MDSCR_EL1, 4, 1)
FIELD(HDFGRTR_EL2, DBGCLAIM, 5, 1)
FIELD(HDFGRTR_EL2, DBGAUTHSTATUS_EL1, 6, 1)
FIELD(HDFGRTR_EL2, DBGPRCR_EL1, 7, 1)
/* 8: RES0: OSLAR_EL1 is WO */
FIELD(HDFGRTR_EL2, OSLSR_EL1, 9, 1)
FIELD(HDFGRTR_EL2, OSECCR_EL1, 10, 1)
FIELD(HDFGRTR_EL2, OSDLR_EL1, 11, 1)
FIELD(HDFGRTR_EL2, PMEVCNTRN_EL0, 12, 1)
FIELD(HDFGRTR_EL2, PMEVTYPERN_EL0, 13, 1)
FIELD(HDFGRTR_EL2, PMCCFILTR_EL0, 14, 1)
FIELD(HDFGRTR_EL2, PMCCNTR_EL0, 15, 1)
FIELD(HDFGRTR_EL2, PMCNTEN, 16, 1)
FIELD(HDFGRTR_EL2, PMINTEN, 17, 1)
FIELD(HDFGRTR_EL2, PMOVS, 18, 1)
FIELD(HDFGRTR_EL2, PMSELR_EL0, 19, 1)
/* 20: RES0: PMSWINC_EL0 is WO */
/* 21: RES0: PMCR_EL0 is WO */
FIELD(HDFGRTR_EL2, PMMIR_EL1, 22, 1)
FIELD(HDFGRTR_EL2, PMBLIMITR_EL1, 23, 1)
FIELD(HDFGRTR_EL2, PMBPTR_EL1, 24, 1)
FIELD(HDFGRTR_EL2, PMBSR_EL1, 25, 1)
FIELD(HDFGRTR_EL2, PMSCR_EL1, 26, 1)
FIELD(HDFGRTR_EL2, PMSEVFR_EL1, 27, 1)
FIELD(HDFGRTR_EL2, PMSFCR_EL1, 28, 1)
FIELD(HDFGRTR_EL2, PMSICR_EL1, 29, 1)
FIELD(HDFGRTR_EL2, PMSIDR_EL1, 30, 1)
FIELD(HDFGRTR_EL2, PMSIRR_EL1, 31, 1)
FIELD(HDFGRTR_EL2, PMSLATFR_EL1, 32, 1)
FIELD(HDFGRTR_EL2, TRC, 33, 1)
FIELD(HDFGRTR_EL2, TRCAUTHSTATUS, 34, 1)
FIELD(HDFGRTR_EL2, TRCAUXCTLR, 35, 1)
FIELD(HDFGRTR_EL2, TRCCLAIM, 36, 1)
FIELD(HDFGRTR_EL2, TRCCNTVRn, 37, 1)
/* 38, 39: RES0 */
FIELD(HDFGRTR_EL2, TRCID, 40, 1)
FIELD(HDFGRTR_EL2, TRCIMSPECN, 41, 1)
/* 42: RES0: TRCOSLAR is WO */
FIELD(HDFGRTR_EL2, TRCOSLSR, 43, 1)
FIELD(HDFGRTR_EL2, TRCPRGCTLR, 44, 1)
FIELD(HDFGRTR_EL2, TRCSEQSTR, 45, 1)
FIELD(HDFGRTR_EL2, TRCSSCSRN, 46, 1)
FIELD(HDFGRTR_EL2, TRCSTATR, 47, 1)
FIELD(HDFGRTR_EL2, TRCVICTLR, 48, 1)
/* 49: RES0: TRFCR_EL1 is WO */
FIELD(HDFGRTR_EL2, TRBBASER_EL1, 50, 1)
FIELD(HDFGRTR_EL2, TRBIDR_EL1, 51, 1)
FIELD(HDFGRTR_EL2, TRBLIMITR_EL1, 52, 1)
FIELD(HDFGRTR_EL2, TRBMAR_EL1, 53, 1)
FIELD(HDFGRTR_EL2, TRBPTR_EL1, 54, 1)
FIELD(HDFGRTR_EL2, TRBSR_EL1, 55, 1)
FIELD(HDFGRTR_EL2, TRBTRG_EL1, 56, 1)
FIELD(HDFGRTR_EL2, PMUSERENR_EL0, 57, 1)
FIELD(HDFGRTR_EL2, PMCEIDN_EL0, 58, 1)
FIELD(HDFGRTR_EL2, NBRBIDR, 59, 1)
FIELD(HDFGRTR_EL2, NBRBCTL, 60, 1)
FIELD(HDFGRTR_EL2, NBRBDATA, 61, 1)
FIELD(HDFGRTR_EL2, NPMSNEVFR_EL1, 62, 1)
FIELD(HDFGRTR_EL2, PMBIDR_EL1, 63, 1)

/*
 * These match HDFGRTR_EL2, but bits for RO registers are RES0.
 * A few bits are for WO registers, where the HDFGRTR_EL2 bit is RES0.
 */
FIELD(HDFGWTR_EL2, DBGBCRN_EL1, 0, 1)
FIELD(HDFGWTR_EL2, DBGBVRN_EL1, 1, 1)
FIELD(HDFGWTR_EL2, DBGWCRN_EL1, 2, 1)
FIELD(HDFGWTR_EL2, DBGWVRN_EL1, 3, 1)
FIELD(HDFGWTR_EL2, MDSCR_EL1, 4, 1)
FIELD(HDFGWTR_EL2, DBGCLAIM, 5, 1)
FIELD(HDFGWTR_EL2, DBGPRCR_EL1, 7, 1)
FIELD(HDFGWTR_EL2, OSLAR_EL1, 8, 1)
FIELD(HDFGWTR_EL2, OSLSR_EL1, 9, 1)
FIELD(HDFGWTR_EL2, OSECCR_EL1, 10, 1)
FIELD(HDFGWTR_EL2, OSDLR_EL1, 11, 1)
FIELD(HDFGWTR_EL2, PMEVCNTRN_EL0, 12, 1)
FIELD(HDFGWTR_EL2, PMEVTYPERN_EL0, 13, 1)
FIELD(HDFGWTR_EL2, PMCCFILTR_EL0, 14, 1)
FIELD(HDFGWTR_EL2, PMCCNTR_EL0, 15, 1)
FIELD(HDFGWTR_EL2, PMCNTEN, 16, 1)
FIELD(HDFGWTR_EL2, PMINTEN, 17, 1)
FIELD(HDFGWTR_EL2, PMOVS, 18, 1)
FIELD(HDFGWTR_EL2, PMSELR_EL0, 19, 1)
FIELD(HDFGWTR_EL2, PMSWINC_EL0, 20, 1)
FIELD(HDFGWTR_EL2, PMCR_EL0, 21, 1)
FIELD(HDFGWTR_EL2, PMBLIMITR_EL1, 23, 1)
FIELD(HDFGWTR_EL2, PMBPTR_EL1, 24, 1)
FIELD(HDFGWTR_EL2, PMBSR_EL1, 25, 1)
FIELD(HDFGWTR_EL2, PMSCR_EL1, 26, 1)
FIELD(HDFGWTR_EL2, PMSEVFR_EL1, 27, 1)
FIELD(HDFGWTR_EL2, PMSFCR_EL1, 28, 1)
FIELD(HDFGWTR_EL2, PMSICR_EL1, 29, 1)
FIELD(HDFGWTR_EL2, PMSIRR_EL1, 31, 1)
FIELD(HDFGWTR_EL2, PMSLATFR_EL1, 32, 1)
FIELD(HDFGWTR_EL2, TRC, 33, 1)
FIELD(HDFGWTR_EL2, TRCAUXCTLR, 35, 1)
FIELD(HDFGWTR_EL2, TRCCLAIM, 36, 1)
FIELD(HDFGWTR_EL2, TRCCNTVRn, 37, 1)
FIELD(HDFGWTR_EL2, TRCIMSPECN, 41, 1)
FIELD(HDFGWTR_EL2, TRCOSLAR, 42, 1)
FIELD(HDFGWTR_EL2, TRCPRGCTLR, 44, 1)
FIELD(HDFGWTR_EL2, TRCSEQSTR, 45, 1)
FIELD(HDFGWTR_EL2, TRCSSCSRN, 46, 1)
FIELD(HDFGWTR_EL2, TRCVICTLR, 48, 1)
FIELD(HDFGWTR_EL2, TRFCR_EL1, 49, 1)
FIELD(HDFGWTR_EL2, TRBBASER_EL1, 50, 1)
FIELD(HDFGWTR_EL2, TRBLIMITR_EL1, 52, 1)
FIELD(HDFGWTR_EL2, TRBMAR_EL1, 53, 1)
FIELD(HDFGWTR_EL2, TRBPTR_EL1, 54, 1)
FIELD(HDFGWTR_EL2, TRBSR_EL1, 55, 1)
FIELD(HDFGWTR_EL2, TRBTRG_EL1, 56, 1)
FIELD(HDFGWTR_EL2, PMUSERENR_EL0, 57, 1)
FIELD(HDFGWTR_EL2, NBRBCTL, 60, 1)
FIELD(HDFGWTR_EL2, NBRBDATA, 61, 1)
FIELD(HDFGWTR_EL2, NPMSNEVFR_EL1, 62, 1)

/* Which fine-grained trap bit register to check, if any */
FIELD(FGT, TYPE, 10, 3)
FIELD(FGT, REV, 9, 1) /* Is bit sense reversed? */
FIELD(FGT, IDX, 6, 3) /* Index within a uint64_t[] array */
FIELD(FGT, BITPOS, 0, 6) /* Bit position within the uint64_t */

/*
 * Macros to define FGT_##bitname enum constants to use in ARMCPRegInfo::fgt
 * fields. We assume for brevity's sake that there are no duplicated
 * bit names across the various FGT registers.
 */
#define DO_BIT(REG, BITNAME) \
    FGT_##BITNAME = FGT_##REG | R_##REG##_EL2_##BITNAME##_SHIFT

/* Some bits have reversed sense, so 0 means trap and 1 means not */
#define DO_REV_BIT(REG, BITNAME) \
    FGT_##BITNAME = FGT_##REG | FGT_REV | R_##REG##_EL2_##BITNAME##_SHIFT
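
/*
 * For example (expansion shown for illustration only),
 *     DO_BIT(HFGRTR, TTBR0_EL1)
 * defines
 *     FGT_TTBR0_EL1 = FGT_HFGRTR | R_HFGRTR_EL2_TTBR0_EL1_SHIFT
 * i.e. "check the TTBR0_EL1 bit of HFGRTR_EL2 for reads and of
 * HFGWTR_EL2 for writes".
 */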

typedef enum FGTBit {
    /*
     * These bits tell us which register arrays to use:
     * if FGT_R is set then reads are checked against fgt_read[];
     * if FGT_W is set then writes are checked against fgt_write[];
     * if FGT_EXEC is set then all accesses are checked against fgt_exec[].
     *
     * For almost all bits in the R/W register pairs, the bit exists in
     * both registers for a RW register, in HFGRTR/HDFGRTR for a RO register
     * with the corresponding HFGWTR/HDFGWTR bit being RES0, and vice-versa
     * for a WO register. There are unfortunately a couple of exceptions
     * (PMCR_EL0, TRFCR_EL1) where the register being trapped is RW but
     * the FGT system only allows trapping of writes, not reads.
     *
     * Note that we arrange these bits so that a 0 FGTBit means "no trap".
     */
    FGT_R = 1 << R_FGT_TYPE_SHIFT,
    FGT_W = 2 << R_FGT_TYPE_SHIFT,
    FGT_EXEC = 4 << R_FGT_TYPE_SHIFT,
    FGT_RW = FGT_R | FGT_W,
    /* Bit to identify whether trap bit is reversed sense */
    FGT_REV = R_FGT_REV_MASK,

    /*
     * If a bit exists in HFGRTR/HDFGRTR then either the register being
     * trapped is RO or the bit also exists in HFGWTR/HDFGWTR, so we either
     * want to trap for both reads and writes or else it's harmless to mark
     * it as trap-on-writes.
     * If a bit exists only in HFGWTR/HDFGWTR then either the register being
     * trapped is WO, or else it is one of the two oddball special cases
     * which are RW but have only a write trap. We mark these as only
     * FGT_W so we get the right behaviour for those special cases.
     * (If a bit was added in future that provided only a read trap for an
     * RW register we'd need to do something special to get the FGT_R bit
     * only. But this seems unlikely to happen.)
     *
     * So for the DO_BIT/DO_REV_BIT macros: use FGT_HFGRTR/FGT_HDFGRTR if
     * the bit exists in that register. Otherwise use FGT_HFGWTR/FGT_HDFGWTR.
     */
    FGT_HFGRTR = FGT_RW | (FGTREG_HFGRTR << R_FGT_IDX_SHIFT),
    FGT_HFGWTR = FGT_W | (FGTREG_HFGWTR << R_FGT_IDX_SHIFT),
    FGT_HDFGRTR = FGT_RW | (FGTREG_HDFGRTR << R_FGT_IDX_SHIFT),
    FGT_HDFGWTR = FGT_W | (FGTREG_HDFGWTR << R_FGT_IDX_SHIFT),
    FGT_HFGITR = FGT_EXEC | (FGTREG_HFGITR << R_FGT_IDX_SHIFT),

    /* Trap bits in HFGRTR_EL2 / HFGWTR_EL2, starting from bit 0. */
    DO_BIT(HFGRTR, AFSR0_EL1),
    DO_BIT(HFGRTR, AFSR1_EL1),
    DO_BIT(HFGRTR, AIDR_EL1),
    DO_BIT(HFGRTR, AMAIR_EL1),
    DO_BIT(HFGRTR, APDAKEY),
    DO_BIT(HFGRTR, APDBKEY),
    DO_BIT(HFGRTR, APGAKEY),
    DO_BIT(HFGRTR, APIAKEY),
    DO_BIT(HFGRTR, APIBKEY),
    DO_BIT(HFGRTR, CCSIDR_EL1),
    DO_BIT(HFGRTR, CLIDR_EL1),
    DO_BIT(HFGRTR, CONTEXTIDR_EL1),
    DO_BIT(HFGRTR, CPACR_EL1),
    DO_BIT(HFGRTR, CSSELR_EL1),
    DO_BIT(HFGRTR, CTR_EL0),
    DO_BIT(HFGRTR, DCZID_EL0),
    DO_BIT(HFGRTR, ESR_EL1),
    DO_BIT(HFGRTR, FAR_EL1),
    DO_BIT(HFGRTR, ISR_EL1),
    DO_BIT(HFGRTR, LORC_EL1),
    DO_BIT(HFGRTR, LOREA_EL1),
    DO_BIT(HFGRTR, LORID_EL1),
    DO_BIT(HFGRTR, LORN_EL1),
    DO_BIT(HFGRTR, LORSA_EL1),
    DO_BIT(HFGRTR, MAIR_EL1),
    DO_BIT(HFGRTR, MIDR_EL1),
    DO_BIT(HFGRTR, MPIDR_EL1),
    DO_BIT(HFGRTR, PAR_EL1),
    DO_BIT(HFGRTR, REVIDR_EL1),
    DO_BIT(HFGRTR, SCTLR_EL1),
    DO_BIT(HFGRTR, SCXTNUM_EL1),
    DO_BIT(HFGRTR, SCXTNUM_EL0),
    DO_BIT(HFGRTR, TCR_EL1),
    DO_BIT(HFGRTR, TPIDR_EL1),
    DO_BIT(HFGRTR, TPIDRRO_EL0),
    DO_BIT(HFGRTR, TPIDR_EL0),
    DO_BIT(HFGRTR, TTBR0_EL1),
    DO_BIT(HFGRTR, TTBR1_EL1),
    DO_BIT(HFGRTR, VBAR_EL1),
    DO_BIT(HFGRTR, ICC_IGRPENN_EL1),
    DO_BIT(HFGRTR, ERRIDR_EL1),
    DO_REV_BIT(HFGRTR, NSMPRI_EL1),
    DO_REV_BIT(HFGRTR, NTPIDR2_EL0),

    /* Trap bits in HDFGRTR_EL2 / HDFGWTR_EL2, starting from bit 0. */
    DO_BIT(HDFGRTR, DBGBCRN_EL1),
    DO_BIT(HDFGRTR, DBGBVRN_EL1),
    DO_BIT(HDFGRTR, DBGWCRN_EL1),
    DO_BIT(HDFGRTR, DBGWVRN_EL1),
    DO_BIT(HDFGRTR, MDSCR_EL1),
    DO_BIT(HDFGRTR, DBGCLAIM),
    DO_BIT(HDFGWTR, OSLAR_EL1),
    DO_BIT(HDFGRTR, OSLSR_EL1),
    DO_BIT(HDFGRTR, OSECCR_EL1),
    DO_BIT(HDFGRTR, OSDLR_EL1),
    DO_BIT(HDFGRTR, PMEVCNTRN_EL0),
    DO_BIT(HDFGRTR, PMEVTYPERN_EL0),
    DO_BIT(HDFGRTR, PMCCFILTR_EL0),
    DO_BIT(HDFGRTR, PMCCNTR_EL0),
    DO_BIT(HDFGRTR, PMCNTEN),
    DO_BIT(HDFGRTR, PMINTEN),
    DO_BIT(HDFGRTR, PMOVS),
    DO_BIT(HDFGRTR, PMSELR_EL0),
    DO_BIT(HDFGWTR, PMSWINC_EL0),
    DO_BIT(HDFGWTR, PMCR_EL0),
    DO_BIT(HDFGRTR, PMMIR_EL1),
    DO_BIT(HDFGRTR, PMCEIDN_EL0),

    /* Trap bits in HFGITR_EL2, starting from bit 0 */
    DO_BIT(HFGITR, ICIALLUIS),
    DO_BIT(HFGITR, ICIALLU),
    DO_BIT(HFGITR, ICIVAU),
    DO_BIT(HFGITR, DCIVAC),
    DO_BIT(HFGITR, DCISW),
    DO_BIT(HFGITR, DCCSW),
    DO_BIT(HFGITR, DCCISW),
    DO_BIT(HFGITR, DCCVAU),
    DO_BIT(HFGITR, DCCVAP),
    DO_BIT(HFGITR, DCCVADP),
    DO_BIT(HFGITR, DCCIVAC),
    DO_BIT(HFGITR, DCZVA),
    DO_BIT(HFGITR, ATS1E1R),
    DO_BIT(HFGITR, ATS1E1W),
    DO_BIT(HFGITR, ATS1E0R),
    DO_BIT(HFGITR, ATS1E0W),
    DO_BIT(HFGITR, ATS1E1RP),
    DO_BIT(HFGITR, ATS1E1WP),
    DO_BIT(HFGITR, TLBIVMALLE1OS),
    DO_BIT(HFGITR, TLBIVAE1OS),
    DO_BIT(HFGITR, TLBIASIDE1OS),
    DO_BIT(HFGITR, TLBIVAAE1OS),
    DO_BIT(HFGITR, TLBIVALE1OS),
    DO_BIT(HFGITR, TLBIVAALE1OS),
    DO_BIT(HFGITR, TLBIRVAE1OS),
    DO_BIT(HFGITR, TLBIRVAAE1OS),
    DO_BIT(HFGITR, TLBIRVALE1OS),
    DO_BIT(HFGITR, TLBIRVAALE1OS),
    DO_BIT(HFGITR, TLBIVMALLE1IS),
    DO_BIT(HFGITR, TLBIVAE1IS),
    DO_BIT(HFGITR, TLBIASIDE1IS),
    DO_BIT(HFGITR, TLBIVAAE1IS),
    DO_BIT(HFGITR, TLBIVALE1IS),
    DO_BIT(HFGITR, TLBIVAALE1IS),
    DO_BIT(HFGITR, TLBIRVAE1IS),
    DO_BIT(HFGITR, TLBIRVAAE1IS),
    DO_BIT(HFGITR, TLBIRVALE1IS),
    DO_BIT(HFGITR, TLBIRVAALE1IS),
    DO_BIT(HFGITR, TLBIRVAE1),
    DO_BIT(HFGITR, TLBIRVAAE1),
    DO_BIT(HFGITR, TLBIRVALE1),
    DO_BIT(HFGITR, TLBIRVAALE1),
    DO_BIT(HFGITR, TLBIVMALLE1),
    DO_BIT(HFGITR, TLBIVAE1),
    DO_BIT(HFGITR, TLBIASIDE1),
    DO_BIT(HFGITR, TLBIVAAE1),
    DO_BIT(HFGITR, TLBIVALE1),
    DO_BIT(HFGITR, TLBIVAALE1),
    DO_BIT(HFGITR, CFPRCTX),
    DO_BIT(HFGITR, DVPRCTX),
    DO_BIT(HFGITR, CPPRCTX),
    DO_BIT(HFGITR, DCCVAC),
} FGTBit;

#undef DO_BIT
#undef DO_REV_BIT

typedef struct ARMCPRegInfo ARMCPRegInfo;

/*
 * Access functions for coprocessor registers. These cannot fail and
 * may not raise exceptions.
 */
typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
                       uint64_t value);
/* Access permission check functions for coprocessor registers. */
typedef CPAccessResult CPAccessFn(CPUARMState *env,
                                  const ARMCPRegInfo *opaque,
                                  bool isread);
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);

#define CP_ANY 0xff

/* Flags in the high bits of nv2_redirect_offset */
#define NV2_REDIR_NV1 0x4000 /* Only redirect when HCR_EL2.NV1 == 1 */
#define NV2_REDIR_NO_NV1 0x8000 /* Only redirect when HCR_EL2.NV1 == 0 */
#define NV2_REDIR_FLAG_MASK 0xc000

/* Definition of an ARM coprocessor register */
struct ARMCPRegInfo {
    /* Name of register (useful mainly for debugging, need not be unique) */
    const char *name;
    /*
     * Location of register: coprocessor number and (crn,crm,opc1,opc2)
     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
     * 'wildcard' field -- any value of that field in the MRC/MCR insn
     * will be decoded to this register. The register read and write
     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
     * used by the program, so it is possible to register a wildcard and
     * then behave differently on read/write if necessary.
     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
     * must both be zero.
     * For AArch64-visible registers, opc0 is also used.
     * Since there are no "coprocessors" in AArch64, cp is purely used as a
     * way to distinguish (for KVM's benefit) guest-visible system registers
     * from demuxed ones provided to preserve the "no side effects on
     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
     * visible (to match KVM's encoding); cp==0 will be converted to
     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
     */
    uint8_t cp;
    uint8_t crn;
    uint8_t crm;
    uint8_t opc0;
    uint8_t opc1;
    uint8_t opc2;
    /* Execution state in which this register is visible: ARM_CP_STATE_* */
    CPState state;
    /* Register type: ARM_CP_* bits/values */
    int type;
    /* Access rights: PL*_[RW] */
    CPAccessRights access;
    /* Security state: ARM_CP_SECSTATE_* bits/values */
    CPSecureState secure;
    /*
     * Which fine-grained trap register bit to check, if any. This
     * value encodes both the trap register and bit within it.
     */
    FGTBit fgt;

    /*
     * Offset from VNCR_EL2 when FEAT_NV2 redirects access to memory;
     * may include an NV2_REDIR_* flag.
     */
    uint32_t nv2_redirect_offset;

    /*
     * The opaque pointer passed to define_arm_cp_regs_with_opaque() when
     * this register was defined: can be used to hand data through to the
     * register read/write functions, since they are passed the ARMCPRegInfo*.
     */
    void *opaque;
    /*
     * Value of this register, if it is ARM_CP_CONST. Otherwise, if
     * fieldoffset is non-zero, the reset value of the register.
     */
    uint64_t resetvalue;
    /*
     * Offset of the field in CPUARMState for this register.
     * This is not needed if either:
     *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
     *  2. both readfn and writefn are specified
     */
    ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */

    /*
     * Offsets of the secure and non-secure fields in CPUARMState for the
     * register if it is banked. These fields are only used during the static
     * registration of a register. During hashing the bank associated
     * with a given security state is copied to fieldoffset which is used from
     * there on out.
     *
     * It is expected that register definitions use either fieldoffset or
     * bank_fieldoffsets in the definition but not both. It is also expected
     * that both bank offsets are set when defining a banked register. This
     * use indicates that a register is banked.
     */
    ptrdiff_t bank_fieldoffsets[2];

    /*
     * Function for making any access checks for this register in addition to
     * those specified by the 'access' permissions bits. If NULL, no extra
     * checks required. The access check is performed at runtime, not at
     * translate time.
     */
    CPAccessFn *accessfn;
    /*
     * Function for handling reads of this register. If NULL, then reads
     * will be done by loading from the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPReadFn *readfn;
    /*
     * Function for handling writes of this register. If NULL, then writes
     * will be done by writing to the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPWriteFn *writefn;
    /*
     * Function for doing a "raw" read; used when we need to copy
     * coprocessor state to the kernel for KVM or out for
     * migration. This only needs to be provided if there is also a
     * readfn and it has side effects (for instance clear-on-read bits).
     */
    CPReadFn *raw_readfn;
    /*
     * Function for doing a "raw" write; used when we need to copy KVM
     * kernel coprocessor state into userspace, or for inbound
     * migration. This only needs to be provided if there is also a
     * writefn and it masks out "unwritable" bits or has write-one-to-clear
     * or similar behaviour.
     */
    CPWriteFn *raw_writefn;
    /*
     * Function for resetting the register. If NULL, then reset will be done
     * by writing resetvalue to the field specified in fieldoffset. If
     * fieldoffset is 0 then no reset will be done.
     */
    CPResetFn *resetfn;

    /*
     * "Original" readfn, writefn, accessfn.
     * For ARMv8.1-VHE register aliases, we overwrite the read/write
     * accessor functions of various EL1/EL0 registers to perform the
     * runtime check for which sysreg should actually be modified, and
     * then forward the operation. Before overwriting the accessors,
     * the original function is copied here, so that accesses that
     * really do go to the EL1/EL0 version proceed normally.
     * (The corresponding EL2 register is linked via opaque.)
     */
    CPReadFn *orig_readfn;
    CPWriteFn *orig_writefn;
    CPAccessFn *orig_accessfn;
};

/*
 * Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))

void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *reg,
                                       void *opaque);

static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_one_arm_cp_reg_with_opaque(cpu, regs, NULL);
}

void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
                                        void *opaque, size_t len);

#define define_arm_cp_regs_with_opaque(CPU, REGS, OPAQUE)               \
    do {                                                                \
        QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0);                       \
        define_arm_cp_regs_with_opaque_len(CPU, REGS, OPAQUE,           \
                                           ARRAY_SIZE(REGS));           \
    } while (0)

#define define_arm_cp_regs(CPU, REGS) \
    define_arm_cp_regs_with_opaque(CPU, REGS, NULL)
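
/*
 * Typical usage, shown as a hedged sketch: the register name, encoding
 * and reset value here are invented for illustration, not a real register.
 *
 *     static const ARMCPRegInfo demo_reginfo[] = {
 *         { .name = "DEMOREG", .state = ARM_CP_STATE_BOTH,
 *           .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0,
 *           .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
 *     };
 *
 *     static void demo_cpu_register_cp_regs(ARMCPU *cpu)
 *     {
 *         define_arm_cp_regs(cpu, demo_reginfo);
 *     }
 */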

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);

/*
 * Definition of an ARM co-processor register as viewed from
 * userspace. This is used for presenting sanitised versions of
 * registers to userspace when emulating the Linux AArch64 CPU
 * ID/feature ABI (advertised as HWCAP_CPUID).
 */
typedef struct ARMCPRegUserSpaceInfo {
    /* Name of register */
    const char *name;

    /* Is the name actually a glob pattern */
    bool is_glob;

    /* Only some bits are exported to user space */
    uint64_t exported_bits;

    /* Fixed bits are applied after the mask */
    uint64_t fixed_bits;
} ARMCPRegUserSpaceInfo;

void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
                                 const ARMCPRegUserSpaceInfo *mods,
                                 size_t mods_len);

#define modify_arm_cp_regs(REGS, MODS)                                  \
    do {                                                                \
        QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0);                       \
        QEMU_BUILD_BUG_ON(ARRAY_SIZE(MODS) == 0);                       \
        modify_arm_cp_regs_with_len(REGS, ARRAY_SIZE(REGS),             \
                                    MODS, ARRAY_SIZE(MODS));            \
    } while (0)
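
/*
 * A hedged sketch of how the user-space view is typically set up (the
 * names and mask below are illustrative, not the real exported set):
 *
 *     static const ARMCPRegUserSpaceInfo demo_user_cp_reginfo[] = {
 *         { .name = "MIDR_EL1", .exported_bits = 0xffffffffULL },
 *         { .name = "ID_AA64*_EL1", .is_glob = true },
 *     };
 *
 *     modify_arm_cp_regs(some_reginfo, demo_user_cp_reginfo);
 *
 * A matched register exports only the bits in exported_bits (with
 * fixed_bits applied afterwards), so an entry with neither field set
 * exposes none of the matched register's bits.
 */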

/* CPWriteFn that can be used to implement writes-ignored behaviour */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value);
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);

/* CPWriteFn that just writes the value to ri->fieldoffset */
void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value);

/*
 * CPResetFn that does nothing, for use if no reset is required even
 * if fieldoffset is non zero.
 */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);

/*
 * Return true if this reginfo struct's field in the cpu state struct
 * is 64 bits wide.
 */
static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
{
    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}

static inline bool cp_access_ok(int current_el,
                                const ARMCPRegInfo *ri, int isread)
{
    return (ri->access >> ((current_el * 2) + isread)) & 1;
}

/* Raw read of a coprocessor register (as needed for migration, etc) */
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);

/*
 * Return true if the cp register encoding is in the "feature ID space" as
 * defined by FEAT_IDST (and thus should be reported with ESR_ELx.EC
 * as EC_SYSTEMREGISTERTRAP rather than EC_UNCATEGORIZED).
 */
static inline bool arm_cpreg_encoding_in_idspace(uint8_t opc0, uint8_t opc1,
                                                 uint8_t opc2,
                                                 uint8_t crn, uint8_t crm)
{
    return opc0 == 3 && (opc1 == 0 || opc1 == 1 || opc1 == 3) &&
        crn == 0 && crm < 8;
}

/*
 * As arm_cpreg_encoding_in_idspace(), but take the encoding from an
 * ARMCPRegInfo.
 */
static inline bool arm_cpreg_in_idspace(const ARMCPRegInfo *ri)
{
    return ri->state == ARM_CP_STATE_AA64 &&
           arm_cpreg_encoding_in_idspace(ri->opc0, ri->opc1, ri->opc2,
                                         ri->crn, ri->crm);
}

#ifdef CONFIG_USER_ONLY
static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
#else
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
#endif

CPAccessResult access_tvm_trvm(CPUARMState *, const ARMCPRegInfo *, bool);

/**
 * arm_cpreg_traps_in_nv: Return true if cpreg traps in nested virtualization
 *
 * Return true if this cpreg is one which should be trapped to EL2 if
 * it is executed at EL1 when nested virtualization is enabled via HCR_EL2.NV.
 */
static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri)
{
    /*
     * The Arm ARM defines the registers to be trapped in terms of
     * their names (I_TZTZL). However the underlying principle is "if
     * it would UNDEF at EL1 but work at EL2 then it should trap", and
     * the way the encoding of sysregs and system instructions is done
     * means that the right set of registers is exactly those where
     * the opc1 field is 4 or 5. (You can see this also in the assert
     * we do that the opc1 field and the permissions mask line up in
     * define_one_arm_cp_reg_with_opaque().)
     * Checking the opc1 field is easier for us and avoids the problem
     * that we do not consistently use the right architectural names
     * for all sysregs, since we treat the name field as largely for debug.
     *
     * However we do this check, it is going to be at least potentially
     * fragile to future new sysregs, but this seems the least likely
     * to break.
     *
     * In particular, note that the released sysreg XML defines that
     * the FEAT_MEC sysregs and instructions do not follow this FEAT_NV
     * trapping rule, so we will need to add an ARM_CP_* flag to indicate
     * "register does not trap on NV" to handle those if/when we implement
     * FEAT_MEC.
     */
    return ri->opc1 == 4 || ri->opc1 == 5;
}

#endif /* TARGET_ARM_CPREGS_H */