/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"

bool check_ps(CPULoongArchState *env, int tlb_ps)
{
    if (tlb_ps > 64) {
        return false;
    }
    return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
}

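/*
 * Fetch the page-walk configuration for one directory level from
 * CSR.PWCL/CSR.PWCH: dir_base is the lowest virtual-address bit of that
 * level's index and dir_width is the number of index bits.  The default
 * case (level 0, used by ldpte) returns the last-level page table
 * described by PTBASE/PTWIDTH.
 */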
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
                        uint64_t *dir_width, target_ulong level)
{
    switch (level) {
    case 1:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
        break;
    case 2:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
        break;
    case 3:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
        break;
    case 4:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
        break;
    default:
        /* level may be zero for ldpte */
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
        break;
    }
}

static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                          VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                          VPPN, extract64(address, 13, 19));
        }
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}

static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
    uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        return;
    }
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}

static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb = &env->tlb[index];
    tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
    if (tlb_g == 0 && tlb_asid != csr_asid) {
        return;
    }
    invalidate_tlb_entry(env, index);
}

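/*
 * Write one TLB entry from CSR state.  Inside a TLB refill exception
 * (CSR_TLBRERA.ISTLBR set) the source is TLBREHI/TLBRELO0/TLBRELO1;
 * otherwise it is TLBEHI/TLBELO0/TLBELO1 with the page size taken from
 * CSR_TLBIDX.
 */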
static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t lo0, lo1, csr_vppn;
    uint16_t csr_asid;
    uint8_t csr_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        }
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        }
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    /* check csr_ps */
    if (!check_ps(env, csr_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "csr_ps %d is illegal\n", csr_ps);
        return;
    }

    /* Only MTLB has the ps fields */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value between low and high inclusive */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

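/*
 * TLBFILL picks a victim entry at random.  If the requested page size
 * matches CSR_STLBPS the entry goes into the set-associative STLB: a
 * random set (0..7) is chosen and the index inside the set comes from
 * the virtual address, as in "set * 256 + stlb_idx" below.  Otherwise a
 * random MTLB slot is used.
 */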
void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    if (!check_ps(env, pagesize)) {
        qemu_log_mask(LOG_GUEST_ERROR, "pagesize %d is illegal\n", pagesize);
        return;
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    if (!check_ps(env, stlb_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "stlb_ps %d is illegal\n", stlb_ps);
        return;
    }

    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

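/*
 * Unlike TLBCLR above, TLBFLUSH clears the selected STLB index in all
 * 8 sets (or all MTLB entries) without checking the G bit or the ASID.
 */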
void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

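/*
 * The page-based INVTLB helpers below match an address against the VPPN
 * stored in an entry.  One entry maps an even/odd pair of pages of size
 * 1 << tlb_ps, i.e. 1 << (tlb_ps + 1) bytes of virtual address space,
 * so the address is shifted right by (tlb_ps + 1).  The VPPN field
 * already omits the low R_TLB_MISC_VPPN_SHIFT address bits, hence the
 * additional shift by compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT.
 */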
void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx, 0);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys, ret;
    uint64_t dir_base, dir_width;

    if (unlikely((level == 0) || (level > 4))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Attempted LDDIR with level %"PRId64"\n", level);
        return base;
    }

    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        if (unlikely(level == 4)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Attempted use of level 4 huge page\n");
            return base;
        }

        if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
            return base;
        } else {
            return FIELD_DP64(base, TLBENTRY, LEVEL, level);
        }
    }

    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;
    get_dir_base_width(env, &dir_base, &dir_width, level);
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << 3;
    ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
    return ret;
}

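/*
 * LDPTE loads the even (odd == 0) or odd (odd == 1) half of the PTE
 * pair for the faulting address into CSR_TLBRELO0/CSR_TLBRELO1, ready
 * for a later TLBFILL in the TLB refill handler.  For a huge page entry
 * no memory access is needed: the pair is synthesized by splitting the
 * huge page in two.
 */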
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    uint64_t dir_base, dir_width;

    /*
     * The "base" parameter has only two forms: either it is a page table
     * base address, in which case bit 6 is 0, or it is a huge page
     * entry, in which case bit 6 is 1.
     */
    base = base & TARGET_PHYS_MASK;
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Get the huge page level and huge page size.
         * Clear the huge page level information in the entry.
         * Clear the huge page bit.
         * Move the HGLOBAL bit to the GLOBAL bit.
         */
        get_dir_base_width(env, &dir_base, &dir_width,
                           FIELD_EX64(base, TLBENTRY, LEVEL));

        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        ps = dir_base + dir_width - 1;
        /*
         * Huge pages are evenly split into an even/odd pair of pages
         * when loaded into the TLB, so the TLB page size is halved.
         */
        tmp0 = base;
        if (odd) {
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1;   /* clear bit 0 */
        ptoffset0 = ptindex << 3;
        ptoffset1 = (ptindex + 1) << 3;
        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}