/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"


/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for stage-2 data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2
     * or for stage-1 faults.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     *
     * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_LS64_ACCDATA: Translation,
     * Access Flag, and Permission faults caused by LD64B, ST64B,
     * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
     * faults and regardless of the target EL.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2
        || fi->s1ptw || !fi->stage2) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    /*
     * For M-profile there is no guest-facing FSR. We compute a
     * short-form value for env->exception.fsr which we will then
     * examine in arm_v7m_cpu_do_interrupt(). In theory we could
     * use the LPAE format instead as long as both bits of code agree
     * (and arm_fi_to_lfsc() handled the M-profile specific
     * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases).
     */
    if (!arm_feature(env, ARM_FEATURE_M) &&
        (target_el == 2 || arm_el_is_aa64(env, target_el) ||
         arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) {
        /*
         * LPAE format fault status register: bottom 6 bits are
         * status code in the same form as needed for syndrome.
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR: this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}
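
/*
 * Illustrative sketch of the split that compute_fsr_fsc() above hides
 * from its callers (the encodings quoted here are an assumption based
 * on the architected fault status codes, not derived from this file):
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
 *     fsr = arm_fi_to_lfsc(&fi);   // LPAE: low 6 bits are the fault
 *                                  // status code (0b100001, alignment)
 *     fsr = arm_fi_to_sfsc(&fi);   // short format: different layout,
 *                                  // no syndrome-compatible fsc
 *
 * Callers consume only the (fsr, fsc) pair and never need to know
 * which descriptor format was in use.
 */
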
static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
                                    ARMMMUFaultInfo *fi)
{
    bool ret;

    switch (fi->gpcf) {
    case GPCF_None:
        return false;
    case GPCF_AddressSize:
    case GPCF_Walk:
    case GPCF_EABT:
        /* R_PYTGX: GPT faults are reported as GPC. */
        ret = true;
        break;
    case GPCF_Fail:
        /*
         * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
         * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
         * if SCR_EL3.GPF is set, otherwise an insn or data abort.
         */
        ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
        break;
    default:
        g_assert_not_reached();
    }

    assert(cpu_isar_feature(aa64_rme, cpu));
    assert(fi->type == ARMFault_GPCFOnWalk ||
           fi->type == ARMFault_GPCFOnOutput);
    if (fi->gpcf == GPCF_AddressSize) {
        assert(fi->level == 0);
    } else {
        assert(fi->level >= 0 && fi->level <= 1);
    }

    return ret;
}

static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
{
    static uint8_t const gpcsc[] = {
        [GPCF_AddressSize] = 0b000000,
        [GPCF_Walk]        = 0b000100,
        [GPCF_Fail]        = 0b001100,
        [GPCF_EABT]        = 0b010100,
    };

    /* Note that we've validated fi->gpcf and fi->level above. */
    return gpcsc[fi->gpcf] | fi->level;
}
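
/*
 * Worked example, derived directly from the table above: a granule
 * protection fault (GPCF_Fail) discovered at GPT walk level 1 encodes
 * as 0b001100 | 1 == 0b001101, which arm_deliver_fault() below feeds
 * into syn_gpc() as the GPCSC field of the syndrome.
 */
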
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el = exception_target_el(env);
    int current_el = arm_current_el(env);
    bool same_el;
    uint32_t syn, exc, fsr, fsc;

    if (report_as_gpc_exception(cpu, current_el, fi)) {
        target_el = 3;

        fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

        syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
                      access_type == MMU_INST_FETCH,
                      encode_gpcsc(fi), 0, fi->s1ptw,
                      access_type == MMU_DATA_STORE, fsc);

        env->cp15.mfar_el3 = fi->paddr;
        switch (fi->paddr_space) {
        case ARMSS_Secure:
            break;
        case ARMSS_NonSecure:
            env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
            break;
        case ARMSS_Root:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
            break;
        case ARMSS_Realm:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
            break;
        default:
            g_assert_not_reached();
        }

        exc = EXCP_GPC;
        goto do_raise;
    }

    /* If SCR_EL3.GPF is unset, GPF may still be routed to EL2. */
    if (fi->gpcf == GPCF_Fail && target_el < 2) {
        if (arm_hcr_el2_eff(env) & HCR_GPF) {
            target_el = 2;
        }
    }

    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }

    same_el = current_el == target_el;
    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

 do_raise:
    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
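
/*
 * Informal usage sketch for arm_deliver_fault() above (hypothetical
 * caller, mirroring arm_cpu_do_unaligned_access() below): callers
 * describe the fault in an ARMMMUFaultInfo and never regain control,
 * since raise_exception() exits the CPU loop:
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
 *     arm_deliver_fault(cpu, addr, MMU_DATA_LOAD, mmu_idx, &fi);
 *     g_assert_not_reached();
 */
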
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = cpu_mmu_index(env, true);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since the syndrome is pcalignment, not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;
    int ret;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &res, fi);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
            res.f.phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }

        res.f.pte_attrs = res.cacheattrs.attrs;
        res.f.shareability = res.cacheattrs.shareability;

        tlb_set_page_full(cs, mmu_idx, address, &res.f);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
    }
}
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */