/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for qemu
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-csr.h"

static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Remove sw bit between bit12 -- bit PS */
    tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1));

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}
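
/*
 * Worked example for loongarch_map_tlb_entry() above, assuming 16KiB pages
 * (tlb_ps = 14): vaddr bit 14 selects tlb_entry0 (even page) or tlb_entry1
 * (odd page), and the physical address is assembled as
 * (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | vaddr[13:0], with the PPN kept in
 * 4KiB units. Because the low (tlb_ps - 12) bits of the stored PPN were
 * cleared above, bits [13:12] of the result also come from the virtual
 * address via the page-offset mask.
 */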

/*
 * One TLB entry holds an adjacent odd/even page pair, so the VPN used for
 * comparison is the virtual page number divided by 2: bit[47:15] for a
 * 16KiB page. The VPPN field in the TLB entry contains bit[47:13], so it
 * needs to be adjusted before the comparison.
 * virt_vpn = vaddr[47:13]
 */
bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                          int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}
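
/*
 * Worked example for the STLB lookup above, assuming stlb_ps = 14 (16KiB
 * pages): vpn = vaddr[47:15] is the odd/even pair number, the set index is
 * vpn & 0xff, and compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT =
 * 15 - 13 = 2, so the stored VPPN (vaddr[47:13]) is shifted right by two
 * bits before it is compared against vpn. The STLB is organised as 8 ways
 * of 256 sets, hence the env->tlb[i * 256 + stlb_idx] indexing.
 */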

static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}

static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
                        target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) |
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}

int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                         int *prot, target_ulong address,
                         MMUAccessType access_type, int mmu_idx)
{
    int user_mode = mmu_idx == MMU_IDX_USER;
    int kernel_mode = mmu_idx == MMU_IDX_KERNEL;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /* Check PG and DA */
    if (da & !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check direct map window */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /* Check valid extension */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx);
}

hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}
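
/*
 * Note on the direct-map window check in get_physical_address() above:
 * plv is a one-hot mask (bit 0, PLV0, in kernel mode; bit
 * R_CSR_DMW_PLV3_SHIFT, PLV3, in user mode), so "plv & env->CSR_DMW[i]"
 * tests the window's PLVx enable bit for the current privilege level.
 * Illustrative LA64 example: a window with VSEG = 0x9 and PLV0 enabled
 * maps 0x9000_0000_0000_0000 + x to physical address x, since dmw_va2pa()
 * masks the address with TARGET_VIRT_MASK and thus drops the window tag
 * held in the upper bits.
 */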