/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"

void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
                        uint64_t *dir_width, target_ulong level)
{
    switch (level) {
    case 1:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
        break;
    case 2:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
        break;
    case 3:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
        break;
    case 4:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
        break;
    default:
        /* level may be zero for ldpte */
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
        break;
    }
}

static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                          VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                          VPPN, extract64(address, 13, 19));
        }
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}

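/*
 * Flush the QEMU softmmu mappings covered by one guest TLB entry.
 * The even and odd halves of the entry are flushed independently,
 * and only when their respective V bits are set.
 */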
static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}

static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb = &env->tlb[index];
    tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
    if (tlb_g == 0 && tlb_asid != csr_asid) {
        return;
    }
    invalidate_tlb_entry(env, index);
}

static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t lo0, lo1, csr_vppn;
    uint16_t csr_asid;
    uint8_t csr_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        }
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        }
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    if (csr_ps == 0) {
        qemu_log_mask(CPU_LOG_MMU, "page size is 0\n");
    }

    /* Only MTLB has the ps fields */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value between low and high, inclusive */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

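/*
 * TLBRD: read the TLB entry selected by CSR.TLBIDX.INDEX back into the
 * TLBEHI/TLBELO0/TLBELO1 CSRs.  If the entry is not valid, the NE bit is
 * set and the other CSR fields are cleared instead.
 */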
void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);

    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

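/*
 * TLBCLR: invalidate non-global entries whose ASID matches CSR.ASID.
 * If CSR.TLBIDX.INDEX points into the STLB, the eight entries sharing
 * the indexed STLB line are examined; otherwise the whole MTLB is scanned.
 */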
void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;

        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;

        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

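/*
 * TCG softmmu slow-path handler for a missing mapping: walk the guest MMU
 * state via get_physical_address() and either install the translation with
 * tlb_set_page() or raise the corresponding guest exception (unless this is
 * only a probe).
 */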
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx, 0);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys, ret;
    uint64_t dir_base, dir_width;

    if (unlikely((level == 0) || (level > 4))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Attempted LDDIR with level %"PRId64"\n", level);
        return base;
    }

    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        if (unlikely(level == 4)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Attempted use of level 4 huge page\n");
            return base;
        }

        if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
            return base;
        } else {
            return FIELD_DP64(base, TLBENTRY, LEVEL, level);
        }
    }

    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;
    get_dir_base_width(env, &dir_base, &dir_width, level);
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << 3;
    ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
    return ret;
}

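/*
 * LDPTE: load the page table entry for the faulting address (or derive it
 * from a huge page directory entry) into CSR.TLBRELO0/CSR.TLBRELO1, and
 * record the resulting page size in CSR.TLBREHI.PS.
 */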
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    uint64_t dir_base, dir_width;

    /*
     * The parameter "base" takes one of two forms:
     * either a page table base address, whose bit 6 should be 0,
     * or a huge page entry, whose bit 6 should be 1.
     */
    base = base & TARGET_PHYS_MASK;
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Get the huge page level and the huge page size.
         * Clear the huge page level information in the entry.
         * Clear the huge page bit.
         * Move the HGLOBAL bit to the GLOBAL bit.
         */
        get_dir_base_width(env, &dir_base, &dir_width,
                           FIELD_EX64(base, TLBENTRY, LEVEL));

        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        ps = dir_base + dir_width - 1;
        /*
         * A huge page is evenly split into an even/odd pair of pages
         * when it is loaded into the TLB, so the TLB page size is half
         * the huge page size.
         */
        tmp0 = base;
        if (odd) {
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1;   /* clear bit 0 */
        ptoffset0 = ptindex << 3;
        ptoffset1 = (ptindex + 1) << 3;
        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}