/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
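
/*
 * Worked numbers (illustrative): QEMU_CLOCK_VIRTUAL counts in nanoseconds,
 * so one tick every GTIMER_SCALE ns is
 *   1,000,000,000 ns/s / 16 ns/tick = 62,500,000 ticks/s = 62.5 MHz.
 */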

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
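
/*
 * Illustrative sketch (not the actual exception-return code): a value
 * loaded into the PC is treated as an exception return when it is at or
 * above EXC_RETURN_MIN_MAGIC, and its payload is then decoded with the
 * V7M_EXCRET fields, e.g.:
 *
 *   uint32_t excret = 0xfffffffd;                     // Thread mode, PSP
 *   if (excret >= EXC_RETURN_MIN_MAGIC) {
 *       bool spsel = FIELD_EX32(excret, V7M_EXCRET, SPSEL); // 1: use PSP
 *       bool mode = FIELD_EX32(excret, V7M_EXCRET, MODE);   // 1: Thread mode
 *       ...
 *   }
 *
 * FNC_RETURN_MIN_MAGIC plays the same role for v8M function returns.
 */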

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);
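
/*
 * Usage sketch (hypothetical helper, for illustration only): a helper
 * that detects a guest error builds a syndrome and never returns, e.g.
 *
 *   void HELPER(check_something)(CPUARMState *env)
 *   {
 *       if (error_detected) {
 *           raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(),
 *                              exception_target_el(env), GETPC());
 *       }
 *   }
 *
 * The _ra variant first unwinds CPU state using the host return address
 * from GETPC(); plain raise_exception() assumes the state is already
 * up to date.
 */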

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
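
/*
 * Illustrative sketch (roughly the save half of switch_mode() in
 * helper.c): given a valid AArch32 'mode', the banked copies are indexed
 * with bank_number() for R13/SPSR but r14_bank_number() for R14:
 *
 *   env->banked_r13[bank_number(mode)] = env->regs[13];
 *   env->banked_spsr[bank_number(mode)] = env->spsr;
 *   env->banked_r14[r14_bank_number(mode)] = env->regs[14];
 */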

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */


enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
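
/*
 * Illustrative trace: "MSR SPSel, #1" at EL1 reaches this function as
 * update_spsel(env, 1); the current xregs[31] is saved to sp_el[0]
 * (since PSTATE.SP was 0), PSTATE.SP is set, and xregs[31] is reloaded
 * from sp_el[1].
 */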

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.  */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
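
/*
 * Worked example: a CPU with ID_AA64MMFR0_EL1.PARange == 5 has
 * arm_pamax() == 48, i.e. physical addresses cover [0, 2^48), so callers
 * can, for instance, mask an address with MAKE_64BIT_MASK(0, arm_pamax(cpu)).
 */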

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
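
/*
 * Worked example: a second-level translation fault in domain 1 encodes as
 *
 *   ARMMMUFaultInfo fi = {
 *       .type = ARMFault_Translation, .level = 2, .domain = 1,
 *   };
 *   uint32_t fsc = arm_fi_to_sfsc(&fi);   // 0x7 | (1 << 4) == 0x17
 */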

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
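
/*
 * Worked example: a level-3 permission fault encodes as
 *
 *   ARMMMUFaultInfo fi = { .type = ARMFault_Permission, .level = 3 };
 *   uint32_t fsc = arm_fi_to_lfsc(&fi);  // (3 | (0x3 << 2)) | (1 << 9) == 0x20f
 */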

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
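
/*
 * Illustrative round trip (A-profile CPU assumed): the softmmu TLB works
 * on the small "core" index, the rest of the ARM code on ARMMMUIdx:
 *
 *   int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_1);
 *   ARMMMUIdx idx = core_to_arm_mmu_idx(env, core);   // ARMMMUIdx_E10_1 again
 *
 * core_to_aa64_mmu_idx() can skip the ARM_FEATURE_M check because AArch64
 * state implies A-profile.
 */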

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges.  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
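
/*
 * Example: for ARMMMUIdx_E10_1 (the non-secure EL1&0 regime, privileged),
 * regime_el() returns 1, so regime_tcr() returns &env->cp15.tcr_el[1],
 * i.e. the TCR_EL1/TTBCR value controlling that regime.
 */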

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat it as 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
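
/*
 * Example: a CPSR value of 0x600001d3 has mode bits 0x13 (Supervisor),
 * so aarch32_mode_name(0x600001d3) returns "svc".
 */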

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
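
/*
 * Usage sketch (illustrative, not the exact exception-return code): the
 * masks above are used to squash bits that are unallocated for this CPU
 * when an SPSR-format value is consumed, e.g.
 *
 *   uint32_t spsr = env->banked_spsr[aarch64_banked_spsr_index(cur_el)];
 *   spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
 *
 * so a bit such as PSTATE.TCO only survives when FEAT_MTE is implemented.
 */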

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
 */
#define GMID_EL1_BS  6

/* We associate one allocation tag per 16 bytes, the minimum.  */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
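
/*
 * Worked numbers: 2^GMID_EL1_BS == 64 words == 256 bytes per LDGM/STGM
 * block; with one 4-bit tag per TAG_GRANULE (16) bytes that is
 * 256 / 16 == 16 tags == 64 bits of tags, matching the comment above.
 */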

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ESIZE, 9, 5)
FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */

bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
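
/*
 * Usage sketch (hypothetical values, only some fields shown): callers
 * pack the descriptor with the registerfields helpers before invoking a
 * check, e.g.
 *
 *   uint32_t desc = 0;
 *   desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *   desc = FIELD_DP32(desc, MTEDESC, WRITE, true);
 *   desc = FIELD_DP32(desc, MTEDESC, ESIZE, 8);        // one 8-byte access
 *   uint64_t clean = mte_check1(env, desc, ptr, GETPC());
 *
 * mte_check1/mte_checkN return the address to use for the access (raising
 * a tag check fault if required), while mte_probe1 only reports whether
 * the access would pass.
 */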

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
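
/*
 * Worked example: for ptr == 0x0c00123456789000 (tag field in bits [59:56]):
 *   allocation_tag_from_addr(ptr)         == 0xc
 *   address_with_allocation_tag(ptr, 0x3) == 0x0300123456789000
 */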

/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
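
/*
 * Worked example of the fold above: a "kernel-style" pointer with
 * bit55 == 1 and tag 0xf gives (0xf + 1) & 0xf == 0, so it matches;
 * a "user-style" pointer with bit55 == 0 and tag 0x0 matches likewise.
 * Any other tag value leaves the access subject to checking.
 */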

/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

#endif