/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc. which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, i.e. number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
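
/*
 * Worked example (illustrative, not from the QEMU sources): with one
 * tick every GTIMER_SCALE ns, the effective counter frequency is
 *
 *     uint64_t hz = 1000000000 / GTIMER_SCALE;    // 62500000, i.e. 62.5MHz
 *
 * which is the rate a CNTFRQ-style register would need to advertise.
 */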

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
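
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a v8M branch target at or above these thresholds is a magic return
 * address rather than a real PC value.
 *
 *     static bool addr_is_magic_return(uint32_t addr, bool v8m_secext)
 *     {
 *         if (addr >= EXC_RETURN_MIN_MAGIC) {
 *             return true;                       // exception return payload
 *         }
 *         // FNC_RETURN values only exist with the security extension
 *         return v8m_secext && addr >= FNC_RETURN_MIN_MAGIC;
 *     }
 */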

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
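
/*
 * For example (illustrative), Hyp mode banks R13 and SPSR but shares
 * R14 with User/System:
 *
 *     bank_number(ARM_CPU_MODE_HYP)     == BANK_HYP
 *     r14_bank_number(ARM_CPU_MODE_HYP) == BANK_USRSYS
 *
 * so Hyp state lives in env->banked_r13[BANK_HYP] and
 * env->banked_spsr[BANK_HYP], but in env->banked_r14[BANK_USRSYS].
 */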

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPSel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.  */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
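
/*
 * For instance (illustrative), a CPU whose ID_AA64MMFR0.PARANGE field
 * reads as 5 implements 48-bit physical addressing:
 *
 *     unsigned int pamax = arm_pamax(cpu);    // 48
 *     uint64_t pa_space = 1ULL << pamax;      // 256TiB of physical space
 */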

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Valid Syndrome Register EC field values */
enum arm_exception_class {
    EC_UNCATEGORIZED          = 0x00,
    EC_WFX_TRAP               = 0x01,
    EC_CP15RTTRAP             = 0x03,
    EC_CP15RRTTRAP            = 0x04,
    EC_CP14RTTRAP             = 0x05,
    EC_CP14DTTRAP             = 0x06,
    EC_ADVSIMDFPACCESSTRAP    = 0x07,
    EC_FPIDTRAP               = 0x08,
    EC_PACTRAP                = 0x09,
    EC_CP14RRTTRAP            = 0x0c,
    EC_BTITRAP                = 0x0d,
    EC_ILLEGALSTATE           = 0x0e,
    EC_AA32_SVC               = 0x11,
    EC_AA32_HVC               = 0x12,
    EC_AA32_SMC               = 0x13,
    EC_AA64_SVC               = 0x15,
    EC_AA64_HVC               = 0x16,
    EC_AA64_SMC               = 0x17,
    EC_SYSTEMREGISTERTRAP     = 0x18,
    EC_SVEACCESSTRAP          = 0x19,
    EC_INSNABORT              = 0x20,
    EC_INSNABORT_SAME_EL      = 0x21,
    EC_PCALIGNMENT            = 0x22,
    EC_DATAABORT              = 0x24,
    EC_DATAABORT_SAME_EL      = 0x25,
    EC_SPALIGNMENT            = 0x26,
    EC_AA32_FPTRAP            = 0x28,
    EC_AA64_FPTRAP            = 0x2c,
    EC_SERROR                 = 0x2f,
    EC_BREAKPOINT             = 0x30,
    EC_BREAKPOINT_SAME_EL     = 0x31,
    EC_SOFTWARESTEP           = 0x32,
    EC_SOFTWARESTEP_SAME_EL   = 0x33,
    EC_WATCHPOINT             = 0x34,
    EC_WATCHPOINT_SAME_EL     = 0x35,
    EC_AA32_BKPT              = 0x38,
    EC_VECTORCATCH            = 0x3a,
    EC_AA64_BKPT              = 0x3c,
};

#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)

static inline uint32_t syn_get_ec(uint32_t syn)
{
    return syn >> ARM_EL_EC_SHIFT;
}
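
/*
 * Illustrative round trip: every syn_*() constructor below yields a
 * value whose EC field decodes back with syn_get_ec(), e.g.
 *
 *     uint32_t syn = syn_aa64_svc(0x123);
 *     assert(syn_get_ec(syn) == EC_AA64_SVC);
 *     assert((syn & 0xffff) == 0x123);    // imm16 payload
 */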

/* Utility functions for constructing various kinds of syndrome value.
 * Note that in general we follow the AArch64 syndrome values; in a
 * few cases the value in HSR for exceptions taken to AArch32 Hyp
 * mode differs slightly, and we fix this up when populating HSR in
 * arm_cpu_do_interrupt_aarch32_hyp().
 * The exception is FP/SIMD access traps -- these report extra information
 * when taking an exception to AArch32. For those we include the extra coproc
 * and TA fields, and mask them out when taking the exception to AArch64.
 */
static inline uint32_t syn_uncategorized(void)
{
    return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_svc(uint32_t imm16)
{
    return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_hvc(uint32_t imm16)
{
    return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_smc(uint32_t imm16)
{
    return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa32_hvc(uint32_t imm16)
{
    return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_smc(void)
{
    return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
    return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
                                           int crn, int crm, int rt,
                                           int isread)
{
    return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
        | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
        | (crm << 1) | isread;
}
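
/*
 * Sketch (illustrative values, not taken from a real trap path): a
 * trapped 64-bit system register read into x1 might be reported as
 *
 *     uint32_t syn = syn_aa64_sysregtrap(3, 3, 0, 4, 2, 1, 1);
 *
 * i.e. op0=3, op1=3, op2=0, crn=4, crm=2, rt=1, isread=1, giving a
 * syndrome with EC == EC_SYSTEMREGISTERTRAP and the IL bit set.
 */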

static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
    /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | 0xa;
}

static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
{
    /* AArch32 SIMD trap: TA == 1 coproc == 0 */
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (1 << 5);
}

static inline uint32_t syn_sve_access_trap(void)
{
    return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
}

static inline uint32_t syn_pactrap(void)
{
    return EC_PACTRAP << ARM_EL_EC_SHIFT;
}

static inline uint32_t syn_btitrap(int btype)
{
    return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
}

static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
{
    return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
}

static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
                                             int ea, int cm, int s1ptw,
                                             int wnr, int fsc)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
           | ARM_EL_IL
           | (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
           | (wnr << 6) | fsc;
}

static inline uint32_t syn_data_abort_with_iss(int same_el,
                                               int sas, int sse, int srt,
                                               int sf, int ar,
                                               int ea, int cm, int s1ptw,
                                               int wnr, int fsc,
                                               bool is_16bit)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
           | (is_16bit ? 0 : ARM_EL_IL)
           | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
           | (sf << 15) | (ar << 14)
           | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}

static inline uint32_t syn_swstep(int same_el, int isv, int ex)
{
    return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
}

static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
{
    return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
}

static inline uint32_t syn_breakpoint(int same_el)
{
    return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | 0x22;
}

static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
{
    return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
           (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
           (cv << 24) | (cond << 20) | ti;
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
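
/*
 * Worked example (illustrative): a level 2 translation fault with
 * domain 3 encodes as short-format FSC 0x7 plus the domain field:
 *
 *     ARMMMUFaultInfo fi = {
 *         .type = ARMFault_Translation, .level = 2, .domain = 3,
 *     };
 *     uint32_t fsr = arm_fi_to_sfsc(&fi);    // 0x7 | (3 << 4) == 0x37
 */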

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges.  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat it like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
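
/*
 * For example (illustrative), only PSR[3:0] select the name:
 *
 *     aarch32_mode_name(0x800001d3);    // "svc" (mode bits 0x13)
 *     aarch32_mode_name(0x10);          // "usr"
 */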

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
 */
#define GMID_EL1_BS  6

/* We associate one allocation tag per 16 bytes, the minimum.  */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
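
/*
 * Worked example (illustrative): one 4-bit tag covers each 16-byte
 * granule, so the granule (and tag) index for an address is
 *
 *     uint64_t tag_index = addr >> LOG2_TAG_GRANULE;
 *
 * and the maximal GMID_EL1.BS block of 2^6 = 64 words (256 bytes)
 * spans 16 granules, i.e. 64 bits of tag storage.
 */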

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ESIZE, 9, 5)
FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */

bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
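
/*
 * Round-trip example (illustrative): the logical tag lives in
 * bits [59:56] of a tagged pointer, so
 *
 *     uint64_t p = address_with_allocation_tag(ptr, 0xa);
 *     assert(allocation_tag_from_addr(p) == 0xa);
 *
 * leaves the remaining 60 bits of ptr unchanged.
 */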

/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
    /* TBI is known to be enabled. */
#ifdef CONFIG_USER_ONLY
    ptr = sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

#endif /* TARGET_ARM_INTERNALS_H */
1435