Lines matching full:tlb (each entry: source line number, matched line, enclosing function):
107 is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)]; in hppa_flush_tlb_ent()
146 if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) { in hppa_alloc_tlb_ent()
151 ent = &env->tlb[i]; in hppa_alloc_tlb_ent()
230 /* Find a valid tlb entry that matches the virtual address. */ in hppa_get_physical_address()
242 /* Map TLB access_rights field to QEMU protection. */ in hppa_get_physical_address()
297 * architectural TLB entry for the next access. in hppa_get_physical_address()
309 /* The D bit is not set -- TLB Dirty Bit Fault. */ in hppa_get_physical_address()
467 * Success! Store the translation into the QEMU TLB. in hppa_cpu_tlb_fill_align()
471 * because we record the large page here in the hppa tlb. in hppa_cpu_tlb_fill_align()
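
The matches above trace the translation path: find a valid TLB entry covering the virtual address, map its access_rights field to QEMU protection bits, raise a TLB Dirty Bit Fault when a store hits a page whose D bit is clear, and finally record the translation. A minimal standalone sketch of that flow follows; it is not the hppa code, and every name in it (soft_tlb_entry, tlb_lookup, rights_to_prot, PROT_*) is invented for illustration.

/*
 * Sketch only: linear software-TLB lookup, access-rights mapping, and
 * dirty-bit check.  Assumed types and names, not the QEMU implementation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

enum { PROT_R = 1, PROT_W = 2, PROT_X = 4 };

typedef struct {
    bool     valid;
    bool     dirty;           /* D bit: page has already been written */
    uint64_t va_start, va_end;
    uint64_t pa_base;
    unsigned access_rights;   /* illustrative R/W/X permission encoding */
} soft_tlb_entry;

/* Find a valid entry whose range covers va; NULL means a TLB miss. */
static soft_tlb_entry *tlb_lookup(soft_tlb_entry *tlb, size_t n, uint64_t va)
{
    for (size_t i = 0; i < n; i++) {
        if (tlb[i].valid && va >= tlb[i].va_start && va <= tlb[i].va_end) {
            return &tlb[i];
        }
    }
    return NULL;
}

/* Map the illustrative access_rights encoding to PROT_* bits. */
static int rights_to_prot(unsigned access_rights)
{
    int prot = 0;
    if (access_rights & 1) { prot |= PROT_R; }
    if (access_rights & 2) { prot |= PROT_W; }
    if (access_rights & 4) { prot |= PROT_X; }
    return prot;
}

int main(void)
{
    soft_tlb_entry tlb[4] = {
        { .valid = true, .va_start = 0x1000, .va_end = 0x1fff,
          .pa_base = 0x80000, .access_rights = 3 /* R+W, D bit clear */ },
    };
    uint64_t va = 0x1234;

    /* Simulate a store: miss, dirty-bit fault, or a successful translation. */
    soft_tlb_entry *ent = tlb_lookup(tlb, 4, va);
    if (!ent) {
        puts("TLB miss");
    } else if ((rights_to_prot(ent->access_rights) & PROT_W) && !ent->dirty) {
        puts("dirty-bit fault: D bit not yet set for this page");
    } else {
        printf("phys %#llx\n",
               (unsigned long long)(ent->pa_base + (va - ent->va_start)));
    }
    return 0;
}
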
482 /* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
522 /* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
594 /* Purge (Insn/Data) TLB. */
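
The two "Insert ... TLB Address/Protection" comments reflect PA-RISC 1.1's split insertion: one instruction installs the address portion of an entry, a second installs the protection, and only then does the entry become usable. A hedged sketch of that two-phase pattern, reusing the illustrative soft_tlb_entry type from the sketch above (itlb_addr and itlb_prot are invented names, not the QEMU helpers):

/* Step 1: stage the address/physical mapping; entry is not yet usable. */
static void itlb_addr(soft_tlb_entry *ent,
                      uint64_t va_start, uint64_t va_end, uint64_t pa_base)
{
    ent->valid = false;          /* incomplete until protection arrives */
    ent->dirty = false;
    ent->va_start = va_start;
    ent->va_end = va_end;
    ent->pa_base = pa_base;
}

/* Step 2: add protection and make the entry live. */
static void itlb_prot(soft_tlb_entry *ent, unsigned access_rights)
{
    ent->access_rights = access_rights;
    ent->valid = true;
}
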
648 /* Zap the (non-btlb) tlb entries themselves. */ in hppa_ptlbe()
649 memset(&env->tlb[btlb_entries], 0, in hppa_ptlbe()
650 sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0])); in hppa_ptlbe()
655 env->tlb_unused = &env->tlb[btlb_entries]; in hppa_ptlbe()
656 for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) { in hppa_ptlbe()
657 env->tlb[i].unused_next = &env->tlb[i + 1]; in hppa_ptlbe()
663 if (env->tlb[i].entry_valid) { in hppa_ptlbe()
664 interval_tree_insert(&env->tlb[i].itree, &env->tlb_root); in hppa_ptlbe()
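
The hppa_ptlbe matches show the purge-everything path: the non-BTLB tail of the fixed entry array is cleared with a single memset, the cleared slots are chained back onto a free list via unused_next, and any entries still valid are re-inserted into the interval tree used for lookup. Below is a standalone sketch of the zap-and-relink part under assumed names (tlb_state, tlb_ent, BTLB_N, TLB_N are invented; the interval-tree rebuild is elided):

/* Sketch only: clear the non-BTLB slots and rebuild the free list. */
#include <string.h>
#include <stddef.h>

enum { BTLB_N = 16, TLB_N = 256 };

typedef struct tlb_ent {
    int entry_valid;
    struct tlb_ent *unused_next;
    /* ... translation fields elided ... */
} tlb_ent;

typedef struct {
    tlb_ent tlb[TLB_N];
    tlb_ent *tlb_unused;          /* head of the free list */
} tlb_state;

static void purge_non_btlb(tlb_state *s)
{
    /* Clear everything past the BTLB slots in one shot. */
    memset(&s->tlb[BTLB_N], 0,
           sizeof(s->tlb) - BTLB_N * sizeof(s->tlb[0]));

    /* Chain the cleared slots into a free list ending in NULL. */
    s->tlb_unused = &s->tlb[BTLB_N];
    for (size_t i = BTLB_N; i < TLB_N - 1; i++) {
        s->tlb[i].unused_next = &s->tlb[i + 1];
    }
    s->tlb[TLB_N - 1].unused_next = NULL;
}
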
671 /* Purge (Insn/Data) TLB entry. This affects an implementation-defined
676 qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n"); in HELPER()
710 * allow operating systems to modify the Block TLB (BTLB) entries.
762 btlb = &env->tlb[slot]; in HELPER()
784 btlb = &env->tlb[slot]; in HELPER()
794 btlb = &env->tlb[slot]; in HELPER()
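
The last group comes from the diagnose helper that lets firmware or the operating system manage Block TLB entries; each operation validates a slot number and then works on the corresponding entry at the front of the unified array. A sketch of that slot handling, reusing the invented tlb_state type from the previous sketch (btlb_insert and btlb_purge are illustrative names, not the actual firmware/PDC interface):

/* Sketch only: bounds-checked insert and purge of a BTLB slot. */
static int btlb_insert(tlb_state *s, unsigned slot)
{
    if (slot >= BTLB_N) {
        return -1;                       /* bad slot: caller reports the error */
    }
    tlb_ent *btlb = &s->tlb[slot];       /* BTLB entries live at the array head */
    memset(btlb, 0, sizeof(*btlb));      /* start from a clean entry */
    btlb->entry_valid = 1;               /* real code fills translation fields here */
    return 0;
}

static int btlb_purge(tlb_state *s, unsigned slot)
{
    if (slot >= BTLB_N) {
        return -1;
    }
    s->tlb[slot].entry_valid = 0;        /* drop the mapping */
    return 0;
}
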