/*
 * System instructions for address translation
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-features.h"
#include "internals.h"
#include "cpregs.h"


static int par_el1_shareability(GetPhysAddrResult *res)
{
    /*
     * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
     * memory -- see pseudocode PAREncodeShareability().
     */
    if (((res->cacheattrs.attrs & 0xf0) == 0) ||
        res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
        return 2;
    }
    return res->cacheattrs.shareability;
}
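
/*
 * Worked example (illustrative note, not part of the original source): a
 * walk that resolves to Device-nGnRE memory has cacheattrs.attrs == 0x04,
 * so (attrs & 0xf0) == 0 and SH is reported as 0b10; a Normal Write-Back
 * cacheable result such as attrs == 0xff instead reports whatever
 * shareability the translation table walk recorded.
 */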

static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             ARMSecuritySpace ss)
{
    bool ret;
    uint64_t par64;
    bool format64 = false;
    ARMMMUFaultInfo fi = {};
    GetPhysAddrResult res = {};

    /*
     * I_MXTJT: Granule protection checks are not performed on the final
     * address of a successful translation.  This is a translation not a
     * memory reference, so "memop = none = 0".
     */
    ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
                                         mmu_idx, ss, &res, &fi);

    /*
     * ATS operations only do S1 or S1+S2 translations, so we never
     * have to deal with the ARMCacheAttrs format for S2 only.
     */
    assert(!res.cacheattrs.is_s2_format);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                if (arm_is_secure_below_el3(env) && fi.s1ns) {
                    env->cp15.hpfar_el2 |= HPFAR_NS;
                }
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el, 0,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

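    /*
     * Example of the rules above (illustrative only): an ATS1CPR executed
     * from Hyp mode reports in the 64-bit format regardless of TTBCR.EAE,
     * and an ATS12NSOPR does so whenever HCR.VM or HCR.DC is set.
     */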
    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= res.f.phys_addr & ~0xfffULL;
            if (!res.f.attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
            par64 |= par_el1_shareability(&res) << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /*
         * fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (res.f.lg_page_size == 24
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = res.f.phys_addr & 0xfffff000;
            }
            if (!res.f.attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
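
/*
 * Summary of the 64-bit PAR layout produced above (descriptive note, not
 * from the original source): on success F (bit 0) is clear, SH is in
 * bits [8:7], NS in bit 9, bit 11 is set, the output address occupies
 * bits [47:12] and the memory attributes bits [63:56]; on a fault F is
 * set, the long-descriptor fault status code is in bits [6:1], PTW in
 * bit 8 and S (stage 2 fault) in bit 9.
 */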

static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    ARMSecuritySpace ss = arm_security_space(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 3:
            if (ri->crm == 9 && arm_pan_enabled(env)) {
                mmu_idx = ARMMMUIdx_E30_3_PAN;
            } else {
                mmu_idx = ARMMMUIdx_E3;
            }
            break;
        case 2:
            g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            /* fall through */
        case 1:
            if (ri->crm == 9 && arm_pan_enabled(env)) {
                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
            } else {
                mmu_idx = ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_E30_0;
            break;
        case 2:
            g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        ss = ARMSS_NonSecure;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        ss = ARMSS_NonSecure;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx, ss);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
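
/*
 * Illustrative example (not part of the original source): an AArch32
 * "MCR p15, 0, <Rt>, c7, c8, 0" (ATS1CPR) executed at EL1 arrives here
 * with opc2 == 0 and crm == 8, so it performs a Stage1_E1 read translation
 * of <Rt> and writes the result to the current security state's banked PAR.
 */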

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    /* There is no SecureEL2 for AArch32. */
    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
                         ARMSS_NonSecure);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /*
     * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
     * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
     * only happen when executing at EL3 because that combination also causes an
     * illegal exception return. We don't need to check FEAT_RME either, because
     * scr_write() ensures that the NSE bit is not set otherwise.
     */
    if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 &&
        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
        return CP_ACCESS_UNDEFINED;
    }
    return at_e012_access(env, ri, isread);
}

static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return at_e012_access(env, ri, isread);
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
    bool for_el3 = false;
    ARMSecuritySpace ss;

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
            if (ri->crm == 9 && arm_pan_enabled(env)) {
                mmu_idx = regime_e20 ?
                          ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
            } else {
                mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
            }
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_E3;
            for_el3 = true;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
}
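
/*
 * Illustrative example (not part of the original source): "AT S1E0R, X0"
 * (opc1 == 0, crm == 8, opc2 == 2) executed at EL1 with HCR_EL2.{E2H,TGE}
 * != {1,1} translates X0 through the Stage1_E0 regime, while "AT S1E2R"
 * (opc1 == 4, opc2 == 0) uses E20_2 or E2 depending on HCR_EL2.E2H; in
 * every case the result ends up in PAR_EL1.
 */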

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /*
         * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                if (env->cp15.scr_el3 & SCR_EEL2) {
                    return CP_ACCESS_TRAP_EL2;
                }
                return CP_ACCESS_TRAP_EL3;
            }
            return CP_ACCESS_UNDEFINED;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo vapa_ats_reginfo[] = {
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
};

static const ARMCPRegInfo v8_ats_reginfo[] = {
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E1R,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E1W,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E0R,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E0W,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_e012_access, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_e012_access, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_e012_access, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_e012_access, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
};

static const ARMCPRegInfo el2_ats_reginfo[] = {
    /*
     * Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    /*
     * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
};

static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E1RP,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E1WP,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
};

void define_at_insn_regs(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_ats_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        define_arm_cp_regs(cpu, v8_ats_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && arm_feature(env, ARM_FEATURE_V8))) {
        define_arm_cp_regs(cpu, el2_ats_reginfo);
    }
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
}
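
/*
 * Note (assumption, not stated in this file): this is expected to be
 * called once per CPU from the cpreg registration path (e.g.
 * register_cp_regs_for_features() in helper.c), so the AT operations
 * above are only exposed when the corresponding features are present.
 */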