Lines matching "tlb"
MIPS TLB (translation lookaside buffer) helpers. The matches below are
grouped by function; "..." marks lines elided by the match filter.

/* TLB management */
In r4k_mips_tlb_flush_extra():

    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
In r4k_fill_tlb():

    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);
    ...
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
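The EntryHi/EntryLo decode above implies the shape of the software TLB entry.
The following is a minimal compilable sketch of r4k_tlb_t reconstructed from
the fields used in this file; the widths, the ordering and the target_ulong
stand-in are assumptions, not QEMU's actual definition:

    #include <stdint.h>

    typedef uint64_t target_ulong;  /* stand-in; QEMU picks this per target */

    /* Sketch of r4k_tlb_t as implied by r4k_fill_tlb()/r4k_helper_tlbr(). */
    typedef struct {
        target_ulong VPN;       /* virtual page pair number (VPN2) */
        uint32_t PageMask;      /* variable page size mask */
        uint16_t ASID;          /* address space ID */
        uint32_t MMID;          /* memory map ID (when Config5.MI is set) */
        unsigned G:1;           /* global: match regardless of ASID/MMID */
        unsigned EHINV:1;       /* entry invalidated via EntryHi.EHINV */
        unsigned V0:1, V1:1;    /* valid bits, even/odd page */
        unsigned D0:1, D1:1;    /* dirty (write-enable) bits */
        unsigned C0:3, C1:3;    /* cache coherency attributes */
        unsigned XI0:1, XI1:1;  /* execute-inhibit bits */
        unsigned RI0:1, RI1:1;  /* read-inhibit bits */
        uint64_t PFN[2];        /* physical addresses of even/odd pages */
    } r4k_tlb_t;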
In r4k_helper_tlbinv():

    r4k_tlb_t *tlb;
    ...
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
In r4k_helper_tlbinvf():

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
    }
In r4k_helper_tlbwi():

    r4k_tlb_t *tlb;
    ...
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    ...     /* VPN, MMID, G, EHINV and the V/D/XI/RI bits for both pages
               are decoded from CP0 EntryHi/EntryLo0/EntryLo1 here */
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }
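The flush test reads: discard QEMU's cached translations unless tlbwi only
upgrades permissions (V or D going 0 to 1, XI or RI going 1 to 0), since
granting more access can never make a cached translation stale. A standalone
sketch of the permission half of that predicate; needs_flush() is a
hypothetical name, not a QEMU function:

    #include <stdbool.h>
    #include <stdio.h>

    /* old_*: bits cached in the TLB entry; new_*: bits being written. */
    static bool needs_flush(bool oldV, bool newV, bool oldD, bool newD,
                            bool oldXI, bool newXI, bool oldRI, bool newRI)
    {
        return (oldV && !newV) || (oldD && !newD) ||
               (!oldXI && newXI) || (!oldRI && newRI);
    }

    int main(void)
    {
        /* Upgrade: read-only page made writable (D 0 -> 1): no flush. */
        printf("%d\n", needs_flush(true, true, false, true,
                                   false, false, false, false));
        /* Downgrade: writable page made read-only (D 1 -> 0): flush. */
        printf("%d\n", needs_flush(true, true, true, false,
                                   false, false, false, false));
        return 0;
    }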
In r4k_helper_tlbp():

    r4k_tlb_t *tlb;
    ...
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        ...
        VPN = tlb->VPN & ~mask;
        ...     /* tag: the probed EntryHi address, masked the same way */
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        ...
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            ...
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match among the architectural entries; probe the shadows. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            ...
            VPN = tlb->VPN & ~mask;
            ...
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            ...
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                ...
            }
        }
    }
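The probe arithmetic can be checked numerically. A minimal sketch, assuming
4 KiB target pages (TARGET_PAGE_MASK == ~0xfff) and a CP0 PageMask of 0x6000
(16 KiB pages); the constants are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t target_page_mask = ~(uint64_t)0xfff;  /* 4 KiB (assumed) */
        uint64_t page_mask = 0x6000;                   /* 16 KiB pages */
        uint64_t mask = page_mask | ~(target_page_mask << 1); /* 0x7fff */

        uint64_t address = 0x12345678;
        uint64_t tag = address & ~mask;  /* compared to tlb->VPN & ~mask */

        printf("mask = 0x%llx, tag = 0x%llx\n",
               (unsigned long long)mask, (unsigned long long)tag);
        return 0;
    }

With these values mask == 0x7fff, so each entry covers a 32 KiB even/odd
page pair and the tag compare ignores the low 15 bits of the address.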
In r4k_helper_tlbr():

    r4k_tlb_t *tlb;
    ...
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    ...
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB. */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        ...     /* report an invalidated entry via EntryHi.EHINV */
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                            ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                            ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                            get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                            ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                            ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                            get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
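This packing is the exact inverse of r4k_fill_tlb()'s decode, which a
self-contained round trip can verify; the CP0EnLo_RI/XI positions below
(63/62) are the MIPS64 EntryLo layout and are assumptions here:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed bit positions; they differ between MIPS32 and MIPS64. */
    enum { CP0EnLo_XI = 62, CP0EnLo_RI = 63 };

    int main(void)
    {
        unsigned G = 1, V0 = 1, D0 = 0, C0 = 3, XI0 = 1, RI0 = 0;

        /* Pack the way tlbr does... */
        uint64_t entrylo = G | (V0 << 1) | (D0 << 2) | (C0 << 3) |
                           ((uint64_t)RI0 << CP0EnLo_RI) |
                           ((uint64_t)XI0 << CP0EnLo_XI);

        /* ...and decode the way r4k_fill_tlb does. */
        assert((entrylo & 1) == G);
        assert(((entrylo & 2) != 0) == V0);
        assert(((entrylo & 4) != 0) == D0);
        assert(((entrylo >> 3) & 0x7) == C0);
        assert(((entrylo >> CP0EnLo_XI) & 1) == XI0);
        assert(((entrylo >> CP0EnLo_RI) & 1) == RI0);
        return 0;
    }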
The architectural TLB instructions dispatch through function pointers that
the MMU init code installs in env->tlb (see r4k_mmu_init() below):

In helper_tlbwi():    env->tlb->helper_tlbwi(env);
In helper_tlbwr():    env->tlb->helper_tlbwr(env);
In helper_tlbp():     env->tlb->helper_tlbp(env);
In helper_tlbr():     env->tlb->helper_tlbr(env);
In helper_tlbinv():   env->tlb->helper_tlbinv(env);
In helper_tlbinvf():  env->tlb->helper_tlbinvf(env);
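Read together with r4k_mmu_init() below, this implies a TLB context roughly
like the sketch that follows (pair it with the r4k_tlb_t sketch above to
compile). The real CPUMIPSTLBContext lives in QEMU's headers and may declare
more members and different types; the stand-in typedefs are assumptions:

    #include <stdint.h>

    typedef struct CPUMIPSState CPUMIPSState;  /* opaque here */
    typedef uint64_t target_ulong, hwaddr;     /* stand-ins (assumed) */
    #define MIPS_TLB_MAX 128                   /* assumed capacity */

    typedef struct {
        uint32_t nb_tlb;        /* architectural entries */
        uint32_t tlb_in_use;    /* architectural + shadow entries */
        int (*map_address)(CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, int access_type);
        void (*helper_tlbwi)(CPUMIPSState *env);
        void (*helper_tlbwr)(CPUMIPSState *env);
        void (*helper_tlbp)(CPUMIPSState *env);
        void (*helper_tlbr)(CPUMIPSState *env);
        void (*helper_tlbinv)(CPUMIPSState *env);
        void (*helper_tlbinvf)(CPUMIPSState *env);
        union {
            struct {
                r4k_tlb_t tlb[MIPS_TLB_MAX];
            } r4k;
        } mmu;
    } CPUMIPSTLBContext;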
In global_invalidate_tlb():

    r4k_tlb_t *tlb;
    ...
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
             ...);
        ...
        MMidMatch = tlb->MMID == invMsgMMid;
        if (...
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            ...
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
In r4k_map_address():

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        ...
        target_ulong VPN = tlb->VPN & ~mask;
        ...
        /* Check ASID/MMID, then the virtual page number and size. */
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match; n selects the even or odd page of the pair. */
            ...
            if (!(n ? tlb->V1 : tlb->V0)) {
                ...                        /* not valid: TLB Invalid */
            }
            if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                ...                        /* execute-inhibit */
            }
            if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                ...                        /* read-inhibit */
            }
            if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                ...
                if (n ? tlb->D1 : tlb->D0) {
                    ...                    /* dirty: page is writable */
                }
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    ...                    /* no XI: page is executable */
                }
            }
            ...
        }
    }
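A standalone sketch of the even/odd page selection and physical-address
assembly, under the same 4 KiB / 16 KiB assumptions as the probe example;
the expression for n is reconstructed from the uses above and may not match
the source verbatim:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t target_page_mask = ~(uint64_t)0xfff;       /* assumed */
        uint64_t mask = 0x6000 | ~(target_page_mask << 1);  /* 0x7fff */
        uint64_t PFN[2] = { 0x40000000, 0x50000000 };       /* even, odd */

        uint64_t address = 0x12345678;
        /* The bit just above the page offset picks odd (1) or even (0). */
        int n = !!(address & mask & ~(mask >> 1));
        uint64_t physical = PFN[n] | (address & (mask >> 1));

        printf("n = %d, physical = 0x%llx\n",
               n, (unsigned long long)physical);
        /* Bit 14 of the address is set, so n == 1 and
         * physical == 0x50001678. */
        return 0;
    }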
In no_mmu_init():

    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &no_mmu_map_address;

In fixed_mmu_init():

    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &fixed_mmu_map_address;

In r4k_mmu_init():

    env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
    env->tlb->map_address = &r4k_map_address;
    env->tlb->helper_tlbwi = r4k_helper_tlbwi;
    env->tlb->helper_tlbwr = r4k_helper_tlbwr;
    env->tlb->helper_tlbp = r4k_helper_tlbp;
    env->tlb->helper_tlbr = r4k_helper_tlbr;
    env->tlb->helper_tlbinv = r4k_helper_tlbinv;
    env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;

In mmu_init():

    env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));
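nb_tlb decodes the Config1 MMU-size field, which holds the number of TLB
entries minus one. A quick self-check, assuming CP0C1_MMU == 25 (the field
occupies Config1 bits 30..25):

    #include <assert.h>

    enum { CP0C1_MMU = 25 };  /* assumed shift for the MMU-size field */

    int main(void)
    {
        unsigned config1 = 15u << CP0C1_MMU;  /* MMU-size field = 15 */
        unsigned nb_tlb = 1 + ((config1 >> CP0C1_MMU) & 63);
        assert(nb_tlb == 16); /* 16-entry TLB; field value 63 gives 64 */
        return 0;
    }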
In cpu_mips_tlb_flush():

    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
In raise_mmu_exception(), the lookup result selects the exception:

    /* No TLB match for a mapped address */      (TLB Refill)
    ...
    /* TLB match with no valid bit */            (TLB Invalid)
    ...
    /* TLB match but 'D' bit is cleared */       (TLB Modified)
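For reference, a tiny hypothetical helper pairing those comments with the
architectural fault names. Only TLBRET_NOMATCH is visible in these excerpts
(in mips_cpu_tlb_fill() below); the other enum values are assumptions:

    #include <stdio.h>

    /* Assumed result codes; not copied from QEMU. */
    enum { TLBRET_DIRTY = -4, TLBRET_INVALID = -3, TLBRET_NOMATCH = -2 };

    /* Hypothetical helper, not a QEMU function. */
    static const char *mips_tlb_fault_name(int ret)
    {
        switch (ret) {
        case TLBRET_NOMATCH: return "TLB Refill";   /* no TLB match */
        case TLBRET_INVALID: return "TLB Invalid";  /* match, V clear */
        case TLBRET_DIRTY:   return "TLB Modified"; /* store, D clear */
        default:             return "other";
        }
    }

    int main(void)
    {
        printf("%s\n", mips_tlb_fault_name(TLBRET_DIRTY));
        return 0;
    }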
From the block comment on the hardware page table walker (three excerpts):

    ... from page table walking, resulting in a TLB or XTLB Refill exception.
    ... attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
        exception.
    ... resulting in a TLB or XTLB Refill exception.
In walk_directory():

    /* Generate adjacent page from same PTE for odd TLB page */
In page_table_walk_refill():

    /*
     * The hardware page walker inserts a page into the TLB in a manner
     * identical to a TLBWR instruction as executed by the software refill
     * handler.
     */
In mips_cpu_tlb_fill():

    /* On a miss, try the hardware page table walker before raising
     * the Refill exception. */
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        ...
    }
In r4k_invalidate_tlb():

    r4k_tlb_t *tlb;
    ...
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /*
     * The qemu TLB is flushed when the ASID/MMID changes, so no need to
     * flush these entries again.
     */
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    if (tlb->G == 0 && tlb_mmid != MMID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /*
         * For tlbwr, we can actually avoid flushing the whole TLB.
         * Instead, translate the invalidated entry into
         * a new (fake) TLB entry, as long as the guest can not
         * distinguish it from a real entry.
         */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* Otherwise flush the pages this entry mapped from qemu's TLB. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;                        /* even page */
        ...
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);  /* odd page */
        ...
    }
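The shadow-entry counters tie r4k_invalidate_tlb(),
r4k_mips_tlb_flush_extra() and cpu_mips_tlb_flush() together: tlbwr may
append a copy past nb_tlb, and flushes truncate back to nb_tlb. A toy model
of just that bookkeeping (no QEMU code involved):

    #include <assert.h>

    int main(void)
    {
        const unsigned MIPS_TLB_MAX = 128;  /* assumed capacity */
        unsigned nb_tlb = 16;               /* architectural entries */
        unsigned tlb_in_use = nb_tlb;       /* grows as shadows are kept */

        /* r4k_invalidate_tlb(..., use_extra = 1): keep a shadow copy. */
        if (tlb_in_use < MIPS_TLB_MAX) {
            tlb_in_use++;
        }
        assert(tlb_in_use == 17);

        /* cpu_mips_tlb_flush() / r4k_mips_tlb_flush_extra(env, nb_tlb):
         * discard everything past the architectural entries. */
        tlb_in_use = nb_tlb;
        assert(tlb_in_use == 16);
        return 0;
    }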