/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"


/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || fi->s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}
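
/*
 * A worked example of the above, assuming the arm_fi_to_lfsc() encodings
 * in internals.h (informative only): a stage 1 translation fault at
 * level 2 under an LPAE regime encodes as 0b000110, so *ret_fsc is 0x06
 * and the returned fsr carries the same code in its low six bits
 * alongside the long-descriptor format bits. In the short-descriptor
 * case the FS field is laid out differently (an alignment fault, say,
 * is FS = 0b00001), so the syndrome's fsc is pinned to the reserved
 * value 0x3f instead of reusing those bits.
 */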

static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = cpu_mmu_index(env, true);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment, not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising an external
 * abort exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
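
/*
 * A sketch of the calling contract below, summarising the TCG softmmu
 * core rather than anything defined in this file: arm_cpu_tlb_fill() is
 * the tlb_fill hook invoked on a softmmu TLB miss. Returning true means
 * a translation was installed via tlb_set_page_full() and the access is
 * retried; returning false is permitted only when probing; otherwise the
 * function does not return, because arm_deliver_fault() raises the guest
 * exception and exits to the main loop through raise_exception().
 */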

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;
    int ret;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &res, fi);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
            res.f.phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }

        res.f.pte_attrs = res.cacheattrs.attrs;
        res.f.shareability = res.cacheattrs.shareability;

        tlb_set_page_full(cs, mmu_idx, address, &res.f);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
    }
}
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */
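
/*
 * Usage note for the user-only handlers above, summarising behaviour
 * that lives in the callers rather than here: arm_cpu_record_sigsegv()
 * and arm_cpu_record_sigbus() run from the host signal path, and the
 * abort they raise through arm_deliver_fault() is converted by the
 * user-mode main loop into a guest SIGSEGV or SIGBUS, with
 * env->exception.vaddress supplying the reported fault address.
 */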