/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"

/*
 * Result of a software TLB/address lookup; converted to an
 * architectural exception by raise_mmu_exception().
 */
enum {
    TLBRET_MATCH = 0,
    TLBRET_BADADDR = 1,
    TLBRET_NOMATCH = 2,
    TLBRET_INVALID = 3,
    TLBRET_DIRTY = 4,
    TLBRET_RI = 5,
    TLBRET_XI = 6,
    TLBRET_PE = 7,
};

/*
 * Translate @address through the TLB entry at @index.
 *
 * @mmu_idx is used as the current privilege level (PLV) for the
 * permission check.  On TLBRET_MATCH, *physical and *prot are filled
 * in; on any other return value they are left untouched.
 */
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    /* MTLB entries carry a per-entry page size; STLB uses the global one. */
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even page of the pair */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        /* LA32 entries have no NX/NR/RPLV bits. */
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    /* RPLV set: PLV must match exactly; clear: PLV must not exceed entry's. */
    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    /*
     * tlb_entry contains ppn[47:12] while 16KiB ppn is [47:15]
     * need adjust.
     */
    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}

/*
 * One tlb entry holds an adjacent odd/even pair, the vpn is the
 * content of the virtual page number divided by 2. So the
 * compare vpn is bit[47:15] for 16KiB page. while the vppn
 * field in tlb entry contains bit[47:13], so need adjust.
 * virt_vpn = vaddr[47:13]
 */
static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                                 int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB: 8 ways of 256 sets each, indexed by the VA. */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            /* Global entries match regardless of ASID. */
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB: fully associative, per-entry page size. */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}

/* Translate a mapped @address via the TLB; TLBRET_NOMATCH if absent. */
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}

/*
 * Direct-map window VA->PA: LA64 just strips the window tag; LA32
 * substitutes the PSEG field for the VSEG bits.
 */
static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
                        target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) | \
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}

/*
 * Full VA->PA translation: direct address mode first, then the
 * direct map windows, finally the TLB.
 */
static int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                                int *prot, target_ulong address,
                                MMUAccessType access_type, int mmu_idx)
{
    int user_mode = mmu_idx == MMU_IDX_USER;
    int kernel_mode = mmu_idx == MMU_IDX_KERNEL;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /*
     * Check PG and DA.
     * NOTE(review): bitwise '&' with '!pg' only behaves like '&&'
     * because da/pg are one-bit field extracts (0 or 1) — confirm
     * intent before changing either operand.
     */
    if (da & !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    /* PLV bit mask checked against the window's PLV0..PLV3 enable bits. */
    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check direct map window */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /* Check valid extension: bits above the VA space must be a sign copy. */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped
address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx);
}

/* gdbstub/debug physical address lookup; returns -1 if unmapped. */
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}

/*
 * Convert a TLBRET_* failure into the matching exception index and
 * update the bad-address CSRs.
 */
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        /* Enter TLB-refill handling mode. */
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

301 if (tlb_error == TLBRET_NOMATCH) { 302 env->CSR_TLBRBADV = address; 303 if (is_la64(env)) { 304 env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64, 305 VPPN, extract64(address, 13, 35)); 306 } else { 307 env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32, 308 VPPN, extract64(address, 13, 19)); 309 } 310 } else { 311 if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) { 312 env->CSR_BADV = address; 313 } 314 env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1); 315 } 316 } 317 318 static void invalidate_tlb_entry(CPULoongArchState *env, int index) 319 { 320 target_ulong addr, mask, pagesize; 321 uint8_t tlb_ps; 322 LoongArchTLB *tlb = &env->tlb[index]; 323 324 int mmu_idx = cpu_mmu_index(env, false); 325 uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V); 326 uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V); 327 uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); 328 329 if (index >= LOONGARCH_STLB) { 330 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); 331 } else { 332 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); 333 } 334 pagesize = MAKE_64BIT_MASK(tlb_ps, 1); 335 mask = MAKE_64BIT_MASK(0, tlb_ps + 1); 336 337 if (tlb_v0) { 338 addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask; /* even */ 339 tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, 340 mmu_idx, TARGET_LONG_BITS); 341 } 342 343 if (tlb_v1) { 344 addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize; /* odd */ 345 tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, 346 mmu_idx, TARGET_LONG_BITS); 347 } 348 } 349 350 static void invalidate_tlb(CPULoongArchState *env, int index) 351 { 352 LoongArchTLB *tlb; 353 uint16_t csr_asid, tlb_asid, tlb_g; 354 355 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); 356 tlb = &env->tlb[index]; 357 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 358 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 359 if (tlb_g == 0 && tlb_asid != csr_asid) { 360 return; 361 } 362 invalidate_tlb_entry(env, 
index); 363 } 364 365 static void fill_tlb_entry(CPULoongArchState *env, int index) 366 { 367 LoongArchTLB *tlb = &env->tlb[index]; 368 uint64_t lo0, lo1, csr_vppn; 369 uint16_t csr_asid; 370 uint8_t csr_ps; 371 372 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { 373 csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS); 374 if (is_la64(env)) { 375 csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN); 376 } else { 377 csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN); 378 } 379 lo0 = env->CSR_TLBRELO0; 380 lo1 = env->CSR_TLBRELO1; 381 } else { 382 csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS); 383 if (is_la64(env)) { 384 csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN); 385 } else { 386 csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN); 387 } 388 lo0 = env->CSR_TLBELO0; 389 lo1 = env->CSR_TLBELO1; 390 } 391 392 if (csr_ps == 0) { 393 qemu_log_mask(CPU_LOG_MMU, "page size is 0\n"); 394 } 395 396 /* Only MTLB has the ps fields */ 397 if (index >= LOONGARCH_STLB) { 398 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps); 399 } 400 401 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn); 402 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1); 403 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); 404 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid); 405 406 tlb->tlb_entry0 = lo0; 407 tlb->tlb_entry1 = lo1; 408 } 409 410 /* Return an random value between low and high */ 411 static uint32_t get_random_tlb(uint32_t low, uint32_t high) 412 { 413 uint32_t val; 414 415 qemu_guest_getrandom_nofail(&val, sizeof(val)); 416 return val % (high - low + 1) + low; 417 } 418 419 void helper_tlbsrch(CPULoongArchState *env) 420 { 421 int index, match; 422 423 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { 424 match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index); 425 } else { 426 match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index); 427 } 428 429 if 
(match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

/* TLBRD: read the entry selected by CSR_TLBIDX.INDEX back into the CSRs. */
void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry: clear all readout CSRs and flag NE. */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

/* TLBWR: write the CSRs into the entry selected by CSR_TLBIDX.INDEX. */
void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    /* Drop softmmu mappings of the entry being replaced. */
    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        /* NE set: just mark the entry not-existent. */
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

/*
 * TLBFILL: write the CSRs into a randomly chosen entry — an STLB set
 * when the page size matches the STLB's, otherwise an MTLB slot.
 */
void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);

    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

/*
 * TLBCLR: invalidate non-global entries carrying the current ASID —
 * either one STLB line (chosen by CSR_TLBIDX.INDEX) or the whole MTLB.
 */
void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB.
One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

/*
 * TLBFLUSH: invalidate entries regardless of ASID and G bit; same
 * line/MTLB selection by CSR_TLBIDX.INDEX as TLBCLR.
 */
void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

/* INVTLB helper: invalidate every TLB entry. */
void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

/* INVTLB helper: invalidate every entry whose G bit equals @g. */
void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

/* INVTLB helper: invalidate non-global entries whose ASID matches @info. */
void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

/*
 * INVTLB helper: invalidate non-global entries matching both the
 * ASID in @info and the page containing @addr.
 * NOTE(review): uses literal 0x3ff here vs R_CSR_ASID_ASID_MASK in
 * helper_invtlb_all_asid — presumably the same 10-bit field; confirm.
 */
void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;

        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

/*
 * INVTLB helper: invalidate entries matching the page of @addr that
 * are either global or carry the ASID in @info.
 */
void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;

        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

/*
 * Softmmu fault hook: translate @address; install the page on
 * success, return false (probe) or raise the MMU exception otherwise.
 */
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

/*
 * LDDIR: one software page-walk step — load the level-@level
 * directory entry for CSR_TLBRBADV from the table at @base.
 */
target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys, ret;
    int shift;
    uint64_t dir_base, dir_width;
    bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;

    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;

    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
    shift = (shift + 1) * 3;

    if (huge) {
        /* A huge-page leaf was already reached: pass it through. */
        return base;
    }
    switch
(level) {
    case 1:
        dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
        dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
        break;
    case 2:
        dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
        dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
        break;
    case 3:
        dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
        dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
        break;
    case 4:
        dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
        dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
        break;
    default:
        /* Unsupported level: reserved-instruction exception. */
        do_raise_exception(env, EXCCODE_INE, GETPC());
        return 0;
    }
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << shift;
    ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
    return ret;
}

/*
 * LDPTE: load the even (@odd == 0) or odd (@odd != 0) PTE for
 * CSR_TLBRBADV into CSR_TLBRELO0/1 and record the page size in
 * CSR_TLBREHI.PS.
 */
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    int shift;
    bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);

    base = base & TARGET_PHYS_MASK;

    if (huge) {
        /* Huge Page. base is paddr */
        tmp0 = base ^ (1 << LOONGARCH_PAGE_HUGE_SHIFT);
        /* Move Global bit */
        tmp0 = ((tmp0 & (1 << LOONGARCH_HGLOBAL_SHIFT)) >>
                LOONGARCH_HGLOBAL_SHIFT) << R_TLBENTRY_G_SHIFT |
               (tmp0 & (~(1 << R_TLBENTRY_G_SHIFT)));
        /* Huge page size spans the whole last-level table. */
        ps = ptbase + ptwidth - 1;
        if (odd) {
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
        shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
        shift = (shift + 1) * 3;
        badv = env->CSR_TLBRBADV;

        /* Round down to the even slot of the PTE pair. */
        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1; /* clear bit 0 */
        ptoffset0 = ptindex << shift;
        ptoffset1 = (ptindex + 1) << shift;

        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}