/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"

/*
 * Translate a software TLB lookup failure (a TLBRET_* code) into the
 * corresponding LoongArch exception and record the faulting address in
 * the architectural CSRs.
 *
 * TLBRET_NOMATCH additionally enters TLB-refill context: it sets
 * CSR_TLBRERA.ISTLBR and latches the VA into CSR_TLBRBADV/CSR_TLBREHI
 * (35-bit VPPN on LA64, 19-bit on LA32).  All other faults latch the VA
 * into CSR_BADV (unless debug single-step, CSR_DBG.DST, is active) and
 * CSR_TLBEHI.
 */
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Address error: ADEF for fetches, ADEM for data accesses */
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address: page-invalid by access type */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        /* Enter TLB-refill exception context */
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit: same codes, but no refill context */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                          VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                          VPPN, extract64(address, 13, 19));
        }
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        /* Keep the even/odd page pair base (one extra low bit cleared) */
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}

/*
 * Flush QEMU's softmmu TLB for the guest virtual range covered by a
 * guest TLB entry that is about to be overwritten or invalidated.
 *
 * Each LoongArch TLB entry maps an even/odd page pair; the even and odd
 * halves are flushed independently, gated on their V bits.  MTLB entries
 * (index >= LOONGARCH_STLB) carry a per-entry page size; STLB entries
 * all use the global CSR_STLBPS.PS.
 */
static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);     /* one page: 1 << tlb_ps */
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);     /* even/odd pair mask */

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        /*
         * NOTE(review): '& pagesize' keeps only bit tlb_ps of the VA,
         * discarding the upper VPPN bits; the odd-page base would be
         * expected to be '(vppn << SHIFT & ~mask) | pagesize'.  Flushing
         * a stale range here is conservative-looking but worth
         * confirming against the upstream history before changing.
         */
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}

/*
 * Flush the softmmu TLB for entry @index, but only if that entry could
 * currently produce translations: global entries always match, non-global
 * entries only when their ASID equals the current CSR_ASID.
 */
static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb = &env->tlb[index];
    tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
    if (tlb_g == 0 && tlb_asid != csr_asid) {
        return;
    }
    invalidate_tlb_entry(env, index);
}

/*
 * Write guest TLB entry @index from the architectural CSRs.
 *
 * Source CSRs depend on context: inside a TLB-refill exception
 * (CSR_TLBRERA.ISTLBR set) the TLBR* CSRs are used, otherwise the
 * normal TLBEHI/TLBELO0/TLBELO1/TLBIDX set.  The entry is marked
 * existing (E=1) and tagged with the current ASID.
 */
static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t lo0, lo1, csr_vppn;
    uint16_t csr_asid;
    uint8_t csr_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        }
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        }
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    /* A zero page size is architecturally dubious; log but proceed */
    if (csr_ps == 0) {
        qemu_log_mask(CPU_LOG_MMU, "page size is 0\n");
    }

    /* Only MTLB has the ps fields */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value between low and high (inclusive) */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

/*
 * TLBSRCH: search the TLB for the entry matching TLB(R)EHI.
 * On a hit, store its index in CSR_TLBIDX and clear NE; on a miss,
 * set CSR_TLBIDX.NE.
 */
void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    /* In refill context the search key comes from CSR_TLBREHI */
    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

/*
 * TLBRD: read the TLB entry selected by CSR_TLBIDX.INDEX back into the
 * TLBEHI/TLBELO0/TLBELO1 CSRs.  A non-existing entry (E=0) zeroes the
 * CSRs and sets CSR_TLBIDX.NE.
 */
void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    /* MTLB entries carry their own PS; STLB entries use CSR_STLBPS */
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

/*
 * TLBWR: write the CSRs into the entry at CSR_TLBIDX.INDEX.  The old
 * mapping is flushed from the softmmu TLB first; if CSR_TLBIDX.NE is
 * set the entry is only marked non-existing instead of being filled.
 */
void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    /* Drop any cached translations for the entry being overwritten */
    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

/*
 * TLBFILL: write the CSRs into a hardware-chosen entry.  When the page
 * size matches CSR_STLBPS the entry goes into the set-associative STLB
 * (random set, index derived from the VA); otherwise into a random
 * MTLB slot.
 */
void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    /* Source CSRs depend on whether we are in TLB-refill context */
    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);

    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

/*
 * TLBCLR: clear (E=0) entries matching the current ASID.  An index in
 * STLB range clears one line across all 8 STLB sets; an index in MTLB
 * range clears every MTLB entry.  Global entries are spared.
 */
void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

/*
 * TLBFLUSH: like TLBCLR but unconditional — clears the selected STLB
 * line across all sets, or all MTLB entries, regardless of ASID or G.
 */
void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

/* INVTLB op 0/1: invalidate every TLB entry */
void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

/* INVTLB op 2/3: invalidate all entries whose G bit equals @g */
void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

/* INVTLB op 4: invalidate all non-global entries with the given ASID */
void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

/*
 * INVTLB op 5: invalidate the non-global entry matching both the given
 * ASID and virtual address @addr.  The VA is compared at the entry's own
 * page-pair granularity (tlb_ps + 1 bits dropped).
 */
void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;

        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        /* Align the stored VPPN to the same granularity before comparing */
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

/*
 * INVTLB op 6: invalidate entries matching @addr that are either global
 * or tagged with the given ASID.
 */
void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;

        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

/*
 * Softmmu fault hook: translate @address and install the mapping in
 * QEMU's TLB on success.  On failure, return false when probing;
 * otherwise raise the architectural exception and longjmp back to the
 * guest via cpu_loop_exit_restore (does not return).
 */
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

/*
 * LDDIR: hardware-assisted page walk, load a directory entry.
 * Reads the level-@level directory slot for CSR_TLBRBADV from the table
 * at physical address @base.  A base with the huge-page flag set is a
 * leaf and is returned unchanged.  An unknown @level raises INE.
 * (@mem_idx is currently unused by this implementation.)
 */
target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys, ret;
    int shift;
    uint64_t dir_base, dir_width;
    bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;

    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;

    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
    shift = (shift + 1) * 3;

    if (huge) {
        /* Huge-page leaf: no further walk, hand it back as-is */
        return base;
    }
    switch (level) {
    case 1:
        dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
        dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
        break;
    case 2:
        dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
        dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
        break;
    case 3:
        dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
        dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
        break;
    case 4:
        dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
        dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
        break;
    default:
        do_raise_exception(env, EXCCODE_INE, GETPC());
        return 0;
    }
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << shift;
    ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
    return ret;
}

/*
 * LDPTE: hardware-assisted page walk, load a PTE into CSR_TLBRELO0/1.
 * @base is either a last-level table address, or (with the huge-page
 * flag set) a huge-page leaf whose physical address is synthesized
 * directly; @odd selects which half of the even/odd pair to fill.
 * Also writes the resulting page size into CSR_TLBREHI.PS.
 * (@mem_idx is currently unused by this implementation.)
 */
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    int shift;
    bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);

    base = base & TARGET_PHYS_MASK;

    if (huge) {
        /* Huge Page. base is paddr; clear the huge flag */
        tmp0 = base ^ (1 << LOONGARCH_PAGE_HUGE_SHIFT);
        /* Move Global bit from its huge-page position to TLBENTRY.G */
        tmp0 = ((tmp0 & (1 << LOONGARCH_HGLOBAL_SHIFT)) >>
                LOONGARCH_HGLOBAL_SHIFT) << R_TLBENTRY_G_SHIFT |
               (tmp0 & (~(1 << LOONGARCH_HGLOBAL_SHIFT)));
        ps = ptbase + ptwidth - 1;
        if (odd) {
            /* Odd half maps one huge page above the even half */
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
        shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
        shift = (shift + 1) * 3;
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1;   /* clear bit 0: even/odd pair base */
        ptoffset0 = ptindex << shift;
        ptoffset1 = (ptindex + 1) << shift;

        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}