/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"

bool check_ps(CPULoongArchState *env, uint8_t tlb_ps)
{
    if (tlb_ps >= 64) {
        return false;
    }
    return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
}

void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
                        uint64_t *dir_width, target_ulong level)
{
    switch (level) {
    case 1:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
        break;
    case 2:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
        break;
    case 3:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
        break;
    case 4:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
        break;
    default:
        /* level may be zero for ldpte */
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
        break;
    }
}

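/*
 * Map a failed TLB lookup to the LoongArch exception it raises and latch
 * the faulting address into the CSRs: for a refill (no match) ISTLBR is
 * set and the address goes to TLBRBADV/TLBREHI, otherwise to BADV (unless
 * in debug mode) and TLBEHI.
 */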
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                          VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                          VPPN, extract64(address, 13, 19));
        }
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}

static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
    uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        return;
    }
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}

static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb = &env->tlb[index];
    tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
    if (tlb_g == 0 && tlb_asid != csr_asid) {
        return;
    }
    invalidate_tlb_entry(env, index);
}

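/*
 * Program one TLB entry from the CSRs.  During a TLB refill exception
 * (CSR_TLBRERA.ISTLBR set) the data comes from TLBREHI/TLBRELO0/TLBRELO1,
 * otherwise from TLBEHI/TLBELO0/TLBELO1 and the PS field of TLBIDX.
 */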
static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t lo0, lo1, csr_vppn;
    uint16_t csr_asid;
    uint8_t csr_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        }
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        }
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    /* Check csr_ps */
    if (!check_ps(env, csr_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "csr_ps %d is illegal\n", csr_ps);
        return;
    }

    /* Only MTLB has the ps fields */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value between low and high */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

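/*
 * TLBFILL chooses the destination slot itself: when the requested page
 * size equals the STLB page size, one of the eight 256-entry STLB sets is
 * chosen at random and the index within the set is derived from the
 * virtual page number; otherwise a random MTLB entry is used.
 */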
void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    if (!check_ps(env, pagesize)) {
        qemu_log_mask(LOG_GUEST_ERROR, "pagesize %d is illegal\n", pagesize);
        return;
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    if (!check_ps(env, stlb_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "stlb_ps %d is illegal\n", stlb_ps);
        return;
    }

    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff;    /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

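/*
 * Unlike TLBCLR above, TLBFLUSH ignores the ASID and G bits: it clears the
 * selected STLB line in each of the eight sets, or every MTLB entry,
 * depending on the index held in CSR_TLBIDX.
 */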
void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

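/*
 * TCG softmmu fault handler: translate @address and install the mapping
 * in QEMU's TLB on success; on failure either report it to the caller
 * (probe) or raise the corresponding guest exception and unwind.
 */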
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx, 0);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys;
    uint64_t dir_base, dir_width;

    if (unlikely((level == 0) || (level > 4))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Attempted LDDIR with level %"PRId64"\n", level);
        return base;
    }

    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        if (unlikely(level == 4)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Attempted use of level 4 huge page\n");
            return base;
        }

        if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
            return base;
        } else {
            return FIELD_DP64(base, TLBENTRY, LEVEL, level);
        }
    }

    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;
    get_dir_base_width(env, &dir_base, &dir_width, level);
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << 3;
    return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
}

void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    uint64_t dir_base, dir_width;

    /*
     * The parameter "base" comes in two forms: either the page table base
     * address, whose bit 6 should be 0, or a huge page entry, whose bit 6
     * should be 1.
     */
    base = base & TARGET_PHYS_MASK;
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Get the huge page level and the huge page size.
         * Clear the huge page level information in the entry.
         * Clear the huge page bit.
         * Move the HGLOBAL bit to the GLOBAL bit.
         */
        get_dir_base_width(env, &dir_base, &dir_width,
                           FIELD_EX64(base, TLBENTRY, LEVEL));

        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        ps = dir_base + dir_width - 1;
        /*
         * Huge pages are evenly split into parity pages when loaded
         * into the TLB, so the TLB page size needs to be divided by 2.
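         * For example (illustrative PWCL values, not taken from this
         * file): with 4KiB base pages, PTBASE = 12 and PTWIDTH = 9, a
         * huge page entry reaching LDPTE with LEVEL still 0 gives
         * ps = 12 + 9 - 1 = 20, i.e. a pair of 1MiB even/odd halves
         * covering one 2MiB huge page.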
         */
        tmp0 = base;
        if (odd) {
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1;    /* clear bit 0 */
        ptoffset0 = ptindex << 3;
        ptoffset1 = (ptindex + 1) << 3;
        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}