1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 /* 3 * QEMU LoongArch TLB helpers 4 * 5 * Copyright (c) 2021 Loongson Technology Corporation Limited 6 * 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/guest-random.h" 11 12 #include "cpu.h" 13 #include "internals.h" 14 #include "exec/helper-proto.h" 15 #include "exec/exec-all.h" 16 #include "exec/cpu_ldst.h" 17 #include "exec/log.h" 18 #include "cpu-csr.h" 19 20 enum { 21 TLBRET_MATCH = 0, 22 TLBRET_BADADDR = 1, 23 TLBRET_NOMATCH = 2, 24 TLBRET_INVALID = 3, 25 TLBRET_DIRTY = 4, 26 TLBRET_RI = 5, 27 TLBRET_XI = 6, 28 TLBRET_PE = 7, 29 }; 30 31 static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, 32 int *prot, target_ulong address, 33 int access_type, int index, int mmu_idx) 34 { 35 LoongArchTLB *tlb = &env->tlb[index]; 36 uint64_t plv = mmu_idx; 37 uint64_t tlb_entry, tlb_ppn; 38 uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; 39 40 if (index >= LOONGARCH_STLB) { 41 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); 42 } else { 43 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); 44 } 45 n = (address >> tlb_ps) & 0x1;/* Odd or even */ 46 47 tlb_entry = n ? 
tlb->tlb_entry1 : tlb->tlb_entry0; 48 tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); 49 tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); 50 tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); 51 tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY, PPN); 52 tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY, NX); 53 tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY, NR); 54 tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY, RPLV); 55 56 /* Check access rights */ 57 if (!tlb_v) { 58 return TLBRET_INVALID; 59 } 60 61 if (access_type == MMU_INST_FETCH && tlb_nx) { 62 return TLBRET_XI; 63 } 64 65 if (access_type == MMU_DATA_LOAD && tlb_nr) { 66 return TLBRET_RI; 67 } 68 69 if (((tlb_rplv == 0) && (plv > tlb_plv)) || 70 ((tlb_rplv == 1) && (plv != tlb_plv))) { 71 return TLBRET_PE; 72 } 73 74 if ((access_type == MMU_DATA_STORE) && !tlb_d) { 75 return TLBRET_DIRTY; 76 } 77 78 /* 79 * tlb_entry contains ppn[47:12] while 16KiB ppn is [47:15] 80 * need adjust. 81 */ 82 *physical = (tlb_ppn << R_TLBENTRY_PPN_SHIFT) | 83 (address & MAKE_64BIT_MASK(0, tlb_ps)); 84 *prot = PAGE_READ; 85 if (tlb_d) { 86 *prot |= PAGE_WRITE; 87 } 88 if (!tlb_nx) { 89 *prot |= PAGE_EXEC; 90 } 91 return TLBRET_MATCH; 92 } 93 94 /* 95 * One tlb entry holds an adjacent odd/even pair, the vpn is the 96 * content of the virtual page number divided by 2. So the 97 * compare vpn is bit[47:15] for 16KiB page. while the vppn 98 * field in tlb entry contains bit[47:13], so need adjust. 
99 * virt_vpn = vaddr[47:13] 100 */ 101 static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, 102 int *index) 103 { 104 LoongArchTLB *tlb; 105 uint16_t csr_asid, tlb_asid, stlb_idx; 106 uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; 107 int i, compare_shift; 108 uint64_t vpn, tlb_vppn; 109 110 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); 111 stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); 112 vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); 113 stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ 114 compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; 115 116 /* Search STLB */ 117 for (i = 0; i < 8; ++i) { 118 tlb = &env->tlb[i * 256 + stlb_idx]; 119 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); 120 if (tlb_e) { 121 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); 122 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 123 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 124 125 if ((tlb_g == 1 || tlb_asid == csr_asid) && 126 (vpn == (tlb_vppn >> compare_shift))) { 127 *index = i * 256 + stlb_idx; 128 return true; 129 } 130 } 131 } 132 133 /* Search MTLB */ 134 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) { 135 tlb = &env->tlb[i]; 136 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); 137 if (tlb_e) { 138 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); 139 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); 140 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 141 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 142 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; 143 vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); 144 if ((tlb_g == 1 || tlb_asid == csr_asid) && 145 (vpn == (tlb_vppn >> compare_shift))) { 146 *index = i; 147 return true; 148 } 149 } 150 } 151 return false; 152 } 153 154 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, 155 int *prot, target_ulong address, 156 MMUAccessType access_type, int mmu_idx) 157 { 158 int index, match; 159 160 match = 
loongarch_tlb_search(env, address, &index); 161 if (match) { 162 return loongarch_map_tlb_entry(env, physical, prot, 163 address, access_type, index, mmu_idx); 164 } 165 166 return TLBRET_NOMATCH; 167 } 168 169 static int get_physical_address(CPULoongArchState *env, hwaddr *physical, 170 int *prot, target_ulong address, 171 MMUAccessType access_type, int mmu_idx) 172 { 173 int user_mode = mmu_idx == MMU_IDX_USER; 174 int kernel_mode = mmu_idx == MMU_IDX_KERNEL; 175 uint32_t plv, base_c, base_v; 176 int64_t addr_high; 177 uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA); 178 uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG); 179 180 /* Check PG and DA */ 181 if (da & !pg) { 182 *physical = address & TARGET_PHYS_MASK; 183 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 184 return TLBRET_MATCH; 185 } 186 187 plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT); 188 base_v = address >> R_CSR_DMW_VSEG_SHIFT; 189 /* Check direct map window */ 190 for (int i = 0; i < 4; i++) { 191 base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW, VSEG); 192 if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) { 193 *physical = dmw_va2pa(address); 194 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 195 return TLBRET_MATCH; 196 } 197 } 198 199 /* Check valid extension */ 200 addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16); 201 if (!(addr_high == 0 || addr_high == -1)) { 202 return TLBRET_BADADDR; 203 } 204 205 /* Mapped address */ 206 return loongarch_map_address(env, physical, prot, address, 207 access_type, mmu_idx); 208 } 209 210 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 211 { 212 LoongArchCPU *cpu = LOONGARCH_CPU(cs); 213 CPULoongArchState *env = &cpu->env; 214 hwaddr phys_addr; 215 int prot; 216 217 if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, 218 cpu_mmu_index(env, false)) != 0) { 219 return -1; 220 } 221 return phys_addr; 222 } 223 224 static void raise_mmu_exception(CPULoongArchState *env, target_ulong address, 225 
MMUAccessType access_type, int tlb_error) 226 { 227 CPUState *cs = env_cpu(env); 228 229 switch (tlb_error) { 230 default: 231 case TLBRET_BADADDR: 232 cs->exception_index = access_type == MMU_INST_FETCH 233 ? EXCCODE_ADEF : EXCCODE_ADEM; 234 break; 235 case TLBRET_NOMATCH: 236 /* No TLB match for a mapped address */ 237 if (access_type == MMU_DATA_LOAD) { 238 cs->exception_index = EXCCODE_PIL; 239 } else if (access_type == MMU_DATA_STORE) { 240 cs->exception_index = EXCCODE_PIS; 241 } else if (access_type == MMU_INST_FETCH) { 242 cs->exception_index = EXCCODE_PIF; 243 } 244 env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1); 245 break; 246 case TLBRET_INVALID: 247 /* TLB match with no valid bit */ 248 if (access_type == MMU_DATA_LOAD) { 249 cs->exception_index = EXCCODE_PIL; 250 } else if (access_type == MMU_DATA_STORE) { 251 cs->exception_index = EXCCODE_PIS; 252 } else if (access_type == MMU_INST_FETCH) { 253 cs->exception_index = EXCCODE_PIF; 254 } 255 break; 256 case TLBRET_DIRTY: 257 /* TLB match but 'D' bit is cleared */ 258 cs->exception_index = EXCCODE_PME; 259 break; 260 case TLBRET_XI: 261 /* Execute-Inhibit Exception */ 262 cs->exception_index = EXCCODE_PNX; 263 break; 264 case TLBRET_RI: 265 /* Read-Inhibit Exception */ 266 cs->exception_index = EXCCODE_PNR; 267 break; 268 case TLBRET_PE: 269 /* Privileged Exception */ 270 cs->exception_index = EXCCODE_PPI; 271 break; 272 } 273 274 if (tlb_error == TLBRET_NOMATCH) { 275 env->CSR_TLBRBADV = address; 276 env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN, 277 extract64(address, 13, 35)); 278 } else { 279 if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) { 280 env->CSR_BADV = address; 281 } 282 env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1); 283 } 284 } 285 286 static void invalidate_tlb_entry(CPULoongArchState *env, int index) 287 { 288 target_ulong addr, mask, pagesize; 289 uint8_t tlb_ps; 290 LoongArchTLB *tlb = &env->tlb[index]; 291 292 int mmu_idx = 
cpu_mmu_index(env, false); 293 uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V); 294 uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V); 295 uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); 296 297 if (index >= LOONGARCH_STLB) { 298 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); 299 } else { 300 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); 301 } 302 pagesize = MAKE_64BIT_MASK(tlb_ps, 1); 303 mask = MAKE_64BIT_MASK(0, tlb_ps + 1); 304 305 if (tlb_v0) { 306 addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask; /* even */ 307 tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, 308 mmu_idx, TARGET_LONG_BITS); 309 } 310 311 if (tlb_v1) { 312 addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize; /* odd */ 313 tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, 314 mmu_idx, TARGET_LONG_BITS); 315 } 316 } 317 318 static void invalidate_tlb(CPULoongArchState *env, int index) 319 { 320 LoongArchTLB *tlb; 321 uint16_t csr_asid, tlb_asid, tlb_g; 322 323 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); 324 tlb = &env->tlb[index]; 325 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 326 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 327 if (tlb_g == 0 && tlb_asid != csr_asid) { 328 return; 329 } 330 invalidate_tlb_entry(env, index); 331 } 332 333 static void fill_tlb_entry(CPULoongArchState *env, int index) 334 { 335 LoongArchTLB *tlb = &env->tlb[index]; 336 uint64_t lo0, lo1, csr_vppn; 337 uint16_t csr_asid; 338 uint8_t csr_ps; 339 340 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { 341 csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS); 342 csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN); 343 lo0 = env->CSR_TLBRELO0; 344 lo1 = env->CSR_TLBRELO1; 345 } else { 346 csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS); 347 csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI, VPPN); 348 lo0 = env->CSR_TLBELO0; 349 lo1 = env->CSR_TLBELO1; 350 } 351 352 if (csr_ps == 0) { 353 
        qemu_log_mask(CPU_LOG_MMU, "page size is 0\n");
    }

    /* Only MTLB has the ps fields */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value between low and high (both inclusive). */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    /* NOTE(review): "val %" has slight modulo bias; acceptable here */
    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

/*
 * TLBSRCH: look up CSR_TLBEHI (or CSR_TLBREHI while handling a
 * TLB-refill exception) and record the hit index in CSR_TLBIDX,
 * or set TLBIDX.NE on a miss.
 */
void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

/* TLBRD: read TLB entry TLBIDX.INDEX back into the TLB CSRs. */
void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    /* Only MTLB entries carry a private page size */
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry: clear the CSR image and flag NE */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

/* TLBWR: write the TLB CSRs into entry TLBIDX.INDEX. */
void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    /* Drop any softmmu mappings backed by the entry being replaced */
    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        /* NE set: the written entry is marked invalid */
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

/*
 * TLBFILL: write the TLB CSRs into a randomly chosen entry — an STLB
 * way when the page size matches CSR_STLBPS, an MTLB slot otherwise.
 */
void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);

    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

/*
 * TLBCLR: invalidate entries matching the current ASID — one STLB
 * line or the whole MTLB depending on TLBIDX.INDEX.
 */
void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
uint16_t csr_asid, tlb_asid, tlb_g; 490 491 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); 492 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX); 493 494 if (index < LOONGARCH_STLB) { 495 /* STLB. One line per operation */ 496 for (i = 0; i < 8; i++) { 497 tlb = &env->tlb[i * 256 + (index % 256)]; 498 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 499 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 500 if (!tlb_g && tlb_asid == csr_asid) { 501 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); 502 } 503 } 504 } else if (index < LOONGARCH_TLB_MAX) { 505 /* All MTLB entries */ 506 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) { 507 tlb = &env->tlb[i]; 508 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 509 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 510 if (!tlb_g && tlb_asid == csr_asid) { 511 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); 512 } 513 } 514 } 515 516 tlb_flush(env_cpu(env)); 517 } 518 519 void helper_tlbflush(CPULoongArchState *env) 520 { 521 int i, index; 522 523 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX); 524 525 if (index < LOONGARCH_STLB) { 526 /* STLB. 
One line per operation */ 527 for (i = 0; i < 8; i++) { 528 int s_idx = i * 256 + (index % 256); 529 env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc, 530 TLB_MISC, E, 0); 531 } 532 } else if (index < LOONGARCH_TLB_MAX) { 533 /* All MTLB entries */ 534 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) { 535 env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc, 536 TLB_MISC, E, 0); 537 } 538 } 539 540 tlb_flush(env_cpu(env)); 541 } 542 543 void helper_invtlb_all(CPULoongArchState *env) 544 { 545 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { 546 env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc, 547 TLB_MISC, E, 0); 548 } 549 tlb_flush(env_cpu(env)); 550 } 551 552 void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g) 553 { 554 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { 555 LoongArchTLB *tlb = &env->tlb[i]; 556 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 557 558 if (tlb_g == g) { 559 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); 560 } 561 } 562 tlb_flush(env_cpu(env)); 563 } 564 565 void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info) 566 { 567 uint16_t asid = info & R_CSR_ASID_ASID_MASK; 568 569 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { 570 LoongArchTLB *tlb = &env->tlb[i]; 571 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 572 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 573 574 if (!tlb_g && (tlb_asid == asid)) { 575 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); 576 } 577 } 578 tlb_flush(env_cpu(env)); 579 } 580 581 void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info, 582 target_ulong addr) 583 { 584 uint16_t asid = info & 0x3ff; 585 586 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { 587 LoongArchTLB *tlb = &env->tlb[i]; 588 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 589 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 590 uint64_t vpn, tlb_vppn; 591 uint8_t tlb_ps, compare_shift; 592 593 if (i 
>= LOONGARCH_STLB) { 594 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); 595 } else { 596 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); 597 } 598 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); 599 vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1); 600 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; 601 602 if (!tlb_g && (tlb_asid == asid) && 603 (vpn == (tlb_vppn >> compare_shift))) { 604 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); 605 } 606 } 607 tlb_flush(env_cpu(env)); 608 } 609 610 void helper_invtlb_page_asid_or_g(CPULoongArchState *env, 611 target_ulong info, target_ulong addr) 612 { 613 uint16_t asid = info & 0x3ff; 614 615 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { 616 LoongArchTLB *tlb = &env->tlb[i]; 617 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); 618 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); 619 uint64_t vpn, tlb_vppn; 620 uint8_t tlb_ps, compare_shift; 621 622 if (i >= LOONGARCH_STLB) { 623 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); 624 } else { 625 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); 626 } 627 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); 628 vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1); 629 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; 630 631 if ((tlb_g || (tlb_asid == asid)) && 632 (vpn == (tlb_vppn >> compare_shift))) { 633 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); 634 } 635 } 636 tlb_flush(env_cpu(env)); 637 } 638 639 bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 640 MMUAccessType access_type, int mmu_idx, 641 bool probe, uintptr_t retaddr) 642 { 643 LoongArchCPU *cpu = LOONGARCH_CPU(cs); 644 CPULoongArchState *env = &cpu->env; 645 hwaddr physical; 646 int prot; 647 int ret; 648 649 /* Data access */ 650 ret = get_physical_address(env, &physical, &prot, address, 651 access_type, mmu_idx); 652 653 if (ret == TLBRET_MATCH) { 654 tlb_set_page(cs, address & TARGET_PAGE_MASK, 655 physical & 
TARGET_PAGE_MASK, prot, 656 mmu_idx, TARGET_PAGE_SIZE); 657 qemu_log_mask(CPU_LOG_MMU, 658 "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx 659 " prot %d\n", __func__, address, physical, prot); 660 return true; 661 } else { 662 qemu_log_mask(CPU_LOG_MMU, 663 "%s address=%" VADDR_PRIx " ret %d\n", __func__, address, 664 ret); 665 } 666 if (probe) { 667 return false; 668 } 669 raise_mmu_exception(env, address, access_type, ret); 670 cpu_loop_exit_restore(cs, retaddr); 671 } 672 673 target_ulong helper_lddir(CPULoongArchState *env, target_ulong base, 674 target_ulong level, uint32_t mem_idx) 675 { 676 CPUState *cs = env_cpu(env); 677 target_ulong badvaddr, index, phys, ret; 678 int shift; 679 uint64_t dir_base, dir_width; 680 bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1; 681 682 badvaddr = env->CSR_TLBRBADV; 683 base = base & TARGET_PHYS_MASK; 684 685 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */ 686 shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH); 687 shift = (shift + 1) * 3; 688 689 if (huge) { 690 return base; 691 } 692 switch (level) { 693 case 1: 694 dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE); 695 dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH); 696 break; 697 case 2: 698 dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE); 699 dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH); 700 break; 701 case 3: 702 dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE); 703 dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH); 704 break; 705 case 4: 706 dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE); 707 dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH); 708 break; 709 default: 710 do_raise_exception(env, EXCCODE_INE, GETPC()); 711 return 0; 712 } 713 index = (badvaddr >> dir_base) & ((1 << dir_width) - 1); 714 phys = base | index << shift; 715 ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; 716 return ret; 717 } 718 719 void helper_ldpte(CPULoongArchState *env, target_ulong base, 
target_ulong odd, 720 uint32_t mem_idx) 721 { 722 CPUState *cs = env_cpu(env); 723 target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv; 724 int shift; 725 bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1; 726 uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE); 727 uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH); 728 729 base = base & TARGET_PHYS_MASK; 730 731 if (huge) { 732 /* Huge Page. base is paddr */ 733 tmp0 = base ^ (1 << LOONGARCH_PAGE_HUGE_SHIFT); 734 /* Move Global bit */ 735 tmp0 = ((tmp0 & (1 << LOONGARCH_HGLOBAL_SHIFT)) >> 736 LOONGARCH_HGLOBAL_SHIFT) << R_TLBENTRY_G_SHIFT | 737 (tmp0 & (~(1 << R_TLBENTRY_G_SHIFT))); 738 ps = ptbase + ptwidth - 1; 739 if (odd) { 740 tmp0 += MAKE_64BIT_MASK(ps, 1); 741 } 742 } else { 743 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */ 744 shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH); 745 shift = (shift + 1) * 3; 746 badv = env->CSR_TLBRBADV; 747 748 ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1); 749 ptindex = ptindex & ~0x1; /* clear bit 0 */ 750 ptoffset0 = ptindex << shift; 751 ptoffset1 = (ptindex + 1) << shift; 752 753 phys = base | (odd ? ptoffset1 : ptoffset0); 754 tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; 755 ps = ptbase; 756 } 757 758 if (odd) { 759 env->CSR_TLBRELO1 = tmp0; 760 } else { 761 env->CSR_TLBRELO0 = tmp0; 762 } 763 env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps); 764 } 765