Lines Matching full:tlb

2  *  Common CPU TLB handling
34 #include "exec/tlb-common.h"
40 #include "exec/tlb-flags.h"
47 #include "tlb-bounds.h"
128 /* Find the TLB index corresponding to the mmu_idx + address pair. */
132 uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS; in tlb_index()
137 /* Find the TLB entry corresponding to the mmu_idx + address pair. */
141 return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)]; in tlb_entry()
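
Lines 128-141 above are the whole fast-path lookup: the per-MMU-index table is a power of two, its pre-shifted mask lives in the CPUTLBDescFast structure, and the page number selects a slot directly. A minimal standalone sketch of the same indexing scheme follows; the type layout and the two constants are simplified stand-ins rather than QEMU's real definitions (QEMU keeps the mask pre-scaled by CPU_TLB_ENTRY_BITS so the generated fast path can mask a byte offset without an extra shift).

#include <stdint.h>

#define TARGET_PAGE_BITS   12   /* illustrative; target dependent */
#define CPU_TLB_ENTRY_BITS  5   /* log2 of the entry size; illustrative */

typedef struct {
    uint64_t  addr_read, addr_write, addr_code;
    uintptr_t addend;
} TLBEntrySketch;               /* simplified stand-in for CPUTLBEntry */

typedef struct {
    uintptr_t mask;             /* (n_entries - 1) << CPU_TLB_ENTRY_BITS */
    TLBEntrySketch *table;
} TLBFastSketch;                /* simplified stand-in for CPUTLBDescFast */

/* Hash the page number into the power-of-two sized table. */
static inline uintptr_t sketch_tlb_index(const TLBFastSketch *f, uint64_t addr)
{
    uintptr_t size_mask = f->mask >> CPU_TLB_ENTRY_BITS;
    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

static inline TLBEntrySketch *sketch_tlb_entry(TLBFastSketch *f, uint64_t addr)
{
    return &f->table[sketch_tlb_index(f, addr)];
}
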
167 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
168 * @desc: The CPUTLBDesc portion of the TLB
169 * @fast: The CPUTLBDescFast portion of the same TLB
173 * We have two main constraints when resizing a TLB: (1) we only resize it
174 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
181 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
182 * we just have to make the TLB as large as possible; while an oversized TLB
183 * results in minimal TLB miss rates, it also takes longer to be flushed
189 * 1. Aggressively increase the size of the TLB when the use rate of the
190 * TLB being flushed is high, since it is likely that in the near future this
194 * 2. Slowly reduce the size of the TLB as the use rate declines over a
196 * we have not observed a high TLB use rate, it is likely that we won't observe
198 * the TLB to match the maximum use rate observed in the window.
201 * since in that range performance is likely near-optimal. Recall that the TLB
264 * size, aborting if we cannot even allocate the smallest TLB we support. in tlb_mmu_resize_locked()
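
The block comment above (lines 167-201) lays out the dynamic sizing policy: grow aggressively when the table being flushed was heavily used, shrink only after a whole observation window has passed with low use, and leave mid-range use rates alone. The sketch below is one way that policy can be written down; the descriptor fields are simplified, and the window length, percentage thresholds and size bounds are placeholders, the authoritative constants living in tlb_mmu_resize_locked().

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

typedef struct {
    size_t  n_used_entries;     /* entries used since the last flush */
    size_t  window_max_entries; /* peak use seen in the current window */
    int64_t window_begin_ns;
} TLBDescSketch;                /* simplified stand-in for CPUTLBDesc */

/* Pick a new power-of-two size for a table that is about to be flushed. */
static size_t resize_policy(TLBDescSketch *d, size_t old_size, int64_t now_ns)
{
    const int64_t window_ns = 100 * 1000 * 1000;  /* placeholder 100 ms window */
    const size_t  min_size  = 1 << 6;             /* placeholder size bounds */
    const size_t  max_size  = 1 << 16;
    size_t new_size = old_size;

    if (d->n_used_entries > d->window_max_entries) {
        d->window_max_entries = d->n_used_entries;
    }
    size_t rate = d->window_max_entries * 100 / old_size;
    bool window_expired = now_ns > d->window_begin_ns + window_ns;

    if (rate > 70) {
        /* High use rate: double, so near-future misses stay cheap. */
        new_size = old_size * 2 < max_size ? old_size * 2 : max_size;
    } else if (rate < 30 && window_expired) {
        /* A whole window of low use: halve, keeping flushes cheap. */
        new_size = old_size / 2 > min_size ? old_size / 2 : min_size;
    }

    if (window_expired || new_size != old_size) {
        d->window_begin_ns = now_ns;        /* start a fresh observation window */
        d->window_max_entries = 0;
    }
    return new_size;
}

If the reallocation at the chosen size fails, the real code retries at progressively smaller sizes and only aborts when even the minimum supported table cannot be allocated, which is what line 264 refers to.
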
294 CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; in tlb_flush_one_mmuidx_locked()
295 CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; in tlb_flush_one_mmuidx_locked()
315 cpu->neg.tlb.d[mmu_idx].n_used_entries++; in tlb_n_used_entries_inc()
320 cpu->neg.tlb.d[mmu_idx].n_used_entries--; in tlb_n_used_entries_dec()
328 qemu_spin_init(&cpu->neg.tlb.c.lock); in tlb_init()
331 cpu->neg.tlb.c.dirty = 0; in tlb_init()
334 tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now); in tlb_init()
342 qemu_spin_destroy(&cpu->neg.tlb.c.lock); in tlb_destroy()
344 CPUTLBDesc *desc = &cpu->neg.tlb.d[i]; in tlb_destroy()
345 CPUTLBDescFast *fast = &cpu->neg.tlb.f[i]; in tlb_destroy()
381 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_by_mmuidx_async_work()
383 all_dirty = cpu->neg.tlb.c.dirty; in tlb_flush_by_mmuidx_async_work()
386 cpu->neg.tlb.c.dirty = all_dirty; in tlb_flush_by_mmuidx_async_work()
393 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_by_mmuidx_async_work()
398 qatomic_set(&cpu->neg.tlb.c.full_flush_count, in tlb_flush_by_mmuidx_async_work()
399 cpu->neg.tlb.c.full_flush_count + 1); in tlb_flush_by_mmuidx_async_work()
401 qatomic_set(&cpu->neg.tlb.c.part_flush_count, in tlb_flush_by_mmuidx_async_work()
402 cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean)); in tlb_flush_by_mmuidx_async_work()
404 qatomic_set(&cpu->neg.tlb.c.elide_flush_count, in tlb_flush_by_mmuidx_async_work()
405 cpu->neg.tlb.c.elide_flush_count + in tlb_flush_by_mmuidx_async_work()
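
Lines 381-405 above show the flush work function consulting the per-TLB dirty bitmap under the lock: an MMU index that has not been touched since its last flush is skipped, and the three counters record whether a request ended up as a full, partial or entirely elided flush. A simplified sketch of that bookkeeping, with a plain uint16_t bitmap and a hypothetical flush_one callback standing in for tlb_flush_one_mmuidx_locked():

#include <stdint.h>

/* Hypothetical stand-in for tlb_flush_one_mmuidx_locked(). */
typedef void (*flush_one_fn)(void *tlb, int mmu_idx);

/* Flush only the MMU indexes that are both requested and dirty; the caller
 * holds the per-CPU TLB lock, matching lines 381-393 above. */
static void flush_requested(void *tlb, uint16_t *dirty, uint16_t asked,
                            uint16_t all_mmuidx_bits, flush_one_fn flush_one,
                            uint64_t *full_cnt, uint64_t *part_cnt,
                            uint64_t *elide_cnt)
{
    uint16_t to_clean = asked & *dirty;     /* skip already-clean indexes   */
    *dirty &= ~to_clean;                    /* flushed indexes become clean */

    for (uint16_t work = to_clean; work != 0; work &= work - 1) {
        flush_one(tlb, __builtin_ctz(work));        /* GCC/Clang builtin */
    }

    /* Statistics mirror lines 398-405: full, partial or elided flushes. */
    if (to_clean == all_mmuidx_bits) {
        (*full_cnt)++;
    } else {
        *part_cnt += __builtin_popcount(to_clean);
        if (to_clean != asked) {
            *elide_cnt += __builtin_popcount(asked & ~to_clean);
        }
    }
}

The qatomic_set() calls in the fragments exist because the counters can be read concurrently for statistics reporting.
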
487 CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx]; in tlb_flush_vtlb_page_mask_locked()
506 vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr; in tlb_flush_page_locked()
507 vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask; in tlb_flush_page_locked()
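
Lines 506-507 load the large-page bookkeeping for this MMU index before flushing a single page: if the page falls inside the recorded large-page region, one entry cannot be trusted to cover the whole mapping and the entire MMU index is flushed; otherwise only the matching slot and its victim-TLB copy are dropped. The predicate at the heart of that decision is tiny (sketched with simplified types):

#include <stdint.h>
#include <stdbool.h>

/* True if flushing @page requires dropping the whole MMU index because it
 * lies inside the region recorded by tlb_add_large_page(). */
static inline bool page_in_large_page_region(uint64_t page,
                                             uint64_t lp_addr, uint64_t lp_mask)
{
    return (page & lp_mask) == lp_addr;
}
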
542 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_page_by_mmuidx_async_0()
548 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_page_by_mmuidx_async_0()
669 CPUTLBDesc *d = &cpu->neg.tlb.d[midx]; in tlb_flush_range_locked()
670 CPUTLBDescFast *f = &cpu->neg.tlb.f[midx]; in tlb_flush_range_locked()
674 * If @bits is smaller than the tlb size, there may be multiple entries in tlb_flush_range_locked()
675 * within the TLB; otherwise all addresses that match under @mask hit in tlb_flush_range_locked()
676 * the same TLB entry. in tlb_flush_range_locked()
678 * For now, just flush the entire TLB. in tlb_flush_range_locked()
680 * If @len is larger than the tlb size, then it will take longer to in tlb_flush_range_locked()
681 * test all of the entries in the TLB than it will to flush it all. in tlb_flush_range_locked()
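
The comment above (lines 674-681) gives the two conditions under which a ranged flush degenerates into flushing the whole MMU index; when neither holds, the descriptors loaded at lines 669-670 are used for a per-page walk that drops each matching entry and its victim-TLB copy. Below is a literal rendering of those two conditions; the page-size constant is illustrative and the real code performs a cheaper comparison against the descriptor's pre-scaled mask field.

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define TARGET_PAGE_BITS 12                       /* illustrative */

/* True when a range/mask flush should fall back to a full flush of the
 * MMU index, following the two cases described in the comment above. */
static bool range_flush_falls_back(uint64_t len, unsigned bits, size_t n_entries)
{
    uint64_t tlb_span = (uint64_t)n_entries << TARGET_PAGE_BITS;

    /* Few significant bits: addresses matching under the mask can land in
     * many different TLB entries, so testing one slot is not enough. */
    if (bits < 64 && (UINT64_C(1) << bits) < tlb_span) {
        return true;
    }
    /* Range longer than the table: walking it page by page costs more
     * than rebuilding the table from scratch. */
    return len > tlb_span;
}
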
732 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_range_by_mmuidx_async_0()
738 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_range_by_mmuidx_async_0()
866 /* update the TLB so that writes in physical page 'phys_addr' are no longer
878 * the TLB and uses that data to compute the final address. If any of
907 * Called only from the vCPU context, i.e. the TLB's owner thread.
917 * thing actually updated is the target TLB entry ->addr_write flags.
923 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_reset_dirty()
925 CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; in tlb_reset_dirty()
926 CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; in tlb_reset_dirty()
940 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_reset_dirty()
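
tlb_reset_dirty() (lines 923-940) walks every main and victim entry under the lock and, as the note at line 917 says, only touches the addr_write word: setting TLB_NOTDIRTY forces the next write to that page out of the fast path so dirty tracking can see it. A simplified per-entry helper in the spirit of the real code; the flag values and struct layout are stand-ins:

#include <stdint.h>

#define TARGET_PAGE_MASK (~(uint64_t)0xfff)   /* illustrative 4 KiB pages */
#define TLB_INVALID      (1u << 0)            /* stand-in flag bits; the   */
#define TLB_NOTDIRTY     (1u << 1)            /* real masks differ         */
#define TLB_MMIO         (1u << 2)

typedef struct {
    uint64_t  addr_read, addr_write, addr_code;
    uintptr_t addend;                    /* guest page -> host page delta */
} TLBEntrySketch;

/* Force writes through the slow path for pages inside [start, start+length). */
static void reset_dirty_range(TLBEntrySketch *e, uintptr_t start, uintptr_t length)
{
    uint64_t a = e->addr_write;

    if ((a & (TLB_INVALID | TLB_NOTDIRTY | TLB_MMIO)) == 0) {
        uintptr_t host = (uintptr_t)(a & TARGET_PAGE_MASK) + e->addend;
        if (host - start < length) {
            e->addr_write |= TLB_NOTDIRTY;
        }
    }
}
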
952 /* update the TLB corresponding to virtual page vaddr
961 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_set_dirty()
969 tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr); in tlb_set_dirty()
972 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_set_dirty()
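
tlb_set_dirty() (lines 961-972) is the inverse step, run once the page really has been dirtied: the TLB_NOTDIRTY flag is stripped from the matching entry in every MMU index, and from the victim table, so later writes stay on the fast path. Per entry it is just a compare-and-clear, sketched here with the stand-in types and flags from the previous sketch:

/* Clear the stand-in TLB_NOTDIRTY flag if this entry caches exactly @addr. */
static void set_dirty_one(TLBEntrySketch *e, uint64_t addr)
{
    if (e->addr_write == (addr | TLB_NOTDIRTY)) {
        e->addr_write = addr;
    }
}
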
975 /* Our TLB does not support large pages, so remember the area covered by
976 large pages and trigger a full TLB flush if these are invalidated. */
980 vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr; in tlb_add_large_page()
989 the cost of maintaining a full variable size TLB. */ in tlb_add_large_page()
990 lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask; in tlb_add_large_page()
995 cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask; in tlb_add_large_page()
996 cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask; in tlb_add_large_page()
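
Lines 975-996 keep exactly one (address, mask) pair per MMU index for all large pages seen so far: when a new large page arrives, the mask is widened until a single aligned region covers both the old range and the new page, which is the compromise line 989 describes. A standalone sketch of that widening step, assuming, as the surrounding code does, that an address of -1 means no large page has been recorded yet:

#include <stdint.h>

/* Record a large page of @size bytes at @addr into (*lp_addr, *lp_mask). */
static void add_large_page(uint64_t *lp_addr, uint64_t *lp_mask,
                           uint64_t addr, uint64_t size)
{
    uint64_t lp = *lp_addr;
    uint64_t mask = ~(size - 1);

    if (lp == (uint64_t)-1) {
        /* First large page for this MMU index. */
        lp = addr;
    } else {
        /* Widen the mask until one aligned region covers both the old
         * range and the new page; cheaper than tracking each page. */
        mask &= *lp_mask;
        while (((lp ^ addr) & mask) != 0) {
            mask <<= 1;
        }
    }
    *lp_addr = lp & mask;
    *lp_mask = mask;
}
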
1018 * Add a new TLB entry. At most one entry for a given virtual address
1028 CPUTLB *tlb = &cpu->neg.tlb; in tlb_set_page_full() local
1029 CPUTLBDesc *desc = &tlb->d[mmu_idx]; in tlb_set_page_full()
1062 /* Repeat the MMU check and TLB fill on every access. */ in tlb_set_page_full()
1113 * Hold the TLB lock for the rest of the function. We could acquire/release in tlb_set_page_full()
1116 * a longer critical section, but this is not a concern since the TLB lock in tlb_set_page_full()
1119 qemu_spin_lock(&tlb->c.lock); in tlb_set_page_full()
1121 /* Note that the tlb is no longer clean. */ in tlb_set_page_full()
1122 tlb->c.dirty |= 1 << mmu_idx; in tlb_set_page_full()
1128 * Only evict the old entry to the victim tlb if it's for a in tlb_set_page_full()
1135 /* Evict the old entry into the victim tlb. */ in tlb_set_page_full()
1141 /* refill the tlb */ in tlb_set_page_full()
1184 qemu_spin_unlock(&tlb->c.lock); in tlb_set_page_full()
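
Lines 1018-1184 outline tlb_set_page_full(): take the TLB lock, mark the MMU index dirty so a later flush request is not elided (line 1122), and, if the chosen slot already holds a live translation for some other page, push it into the victim table before overwriting the slot. A compressed sketch of the eviction-and-refill part; the types, the victim-table size and the hit/empty tests are simplified stand-ins:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define VTLB_SIZE        8                     /* illustrative victim-table size */
#define TARGET_PAGE_MASK (~(uint64_t)0xfff)    /* illustrative 4 KiB pages */

typedef struct {
    uint64_t  addr_read, addr_write, addr_code;
    uintptr_t addend;
} TLBEntrySketch;

typedef struct {
    TLBEntrySketch *table;                 /* direct-mapped main table */
    TLBEntrySketch vtable[VTLB_SIZE];      /* small fully associative victim TLB */
    size_t vindex;                         /* round-robin victim slot */
} TLBSketch;

/* Crude "does this slot already map @page for any access type?" test. */
static bool slot_maps_page(const TLBEntrySketch *e, uint64_t page)
{
    return (e->addr_read  & TARGET_PAGE_MASK) == page ||
           (e->addr_write & TARGET_PAGE_MASK) == page ||
           (e->addr_code  & TARGET_PAGE_MASK) == page;
}

/* Empty slots are filled with all-ones in this sketch, as after a flush. */
static bool slot_is_empty(const TLBEntrySketch *e)
{
    return e->addr_read == (uint64_t)-1 &&
           e->addr_write == (uint64_t)-1 &&
           e->addr_code == (uint64_t)-1;
}

/* Install @new_entry for @page into slot @index; caller holds the TLB lock. */
static void install_entry(TLBSketch *tlb, size_t index, uint64_t page,
                          const TLBEntrySketch *new_entry)
{
    TLBEntrySketch *te = &tlb->table[index];

    /* Only evict to the victim table if the old entry is a live translation
     * for a different page; overwriting the same page gains nothing. */
    if (!slot_is_empty(te) && !slot_maps_page(te, page)) {
        tlb->vtable[tlb->vindex++ % VTLB_SIZE] = *te;    /* evict old entry */
    }
    *te = *new_entry;                                    /* refill the tlb */
}
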
1212 * TLB entry @tlb_addr
1215 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
1223 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
1226 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
1234 * Note: tlb_fill_align() can trigger a resize of the TLB.
1235 * This means that all of the caller's prior references to the TLB table
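
The two comment headers above (lines 1212-1226) describe the hit test used throughout this file: the lookup address, page-aligned, must equal the entry's stored comparator once the flag bits below the page size are masked out, with TLB_INVALID deliberately kept in the comparison so an invalidated entry can never match. A sketch with simplified masks is below. The note at lines 1234-1235 is the companion rule: after any tlb_fill_align() the index and entry pointer must be recomputed, because a resize may have reallocated the table.

#include <stdint.h>
#include <stdbool.h>

#define TARGET_PAGE_MASK (~(uint64_t)0xfff)  /* illustrative 4 KiB pages */
#define TLB_INVALID      (1u << 0)           /* stand-in for TLB_INVALID_MASK */

/* True if page-aligned @addr matches the TLB comparator word @tlb_addr.
 * Keeping TLB_INVALID in the comparison means an invalidated entry never
 * hits, because @addr never has that bit set. */
static inline bool sketch_tlb_hit(uint64_t tlb_addr, uint64_t addr)
{
    return (addr & TARGET_PAGE_MASK) ==
           (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID));
}
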
1305 /* Return true if ADDR is present in the victim tlb, and has been copied
1306 back to the main tlb. */
1314 CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx]; in victim_tlb_hit()
1318 /* Found entry in victim tlb, swap tlb and iotlb. */ in victim_tlb_hit()
1319 CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index]; in victim_tlb_hit() local
1321 qemu_spin_lock(&cpu->neg.tlb.c.lock); in victim_tlb_hit()
1322 copy_tlb_helper_locked(&tmptlb, tlb); in victim_tlb_hit()
1323 copy_tlb_helper_locked(tlb, vtlb); in victim_tlb_hit()
1325 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in victim_tlb_hit()
1327 CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in victim_tlb_hit()
1328 CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx]; in victim_tlb_hit()
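
Lines 1305-1328 are the victim-TLB promotion path: when the direct-mapped table misses but one of the victim slots matches, the two CPUTLBEntry structures are swapped under the lock (and the corresponding CPUTLBEntryFull descriptors with them), so the hot translation lands back in the fast table while the evicted one is kept rather than lost. A simplified sketch of the swap, with a pthread mutex standing in for QEMU's per-CPU TLB spinlock and the caller assumed to have already located the matching victim slot:

#include <stdint.h>
#include <pthread.h>

typedef struct {
    uint64_t  addr_read, addr_write, addr_code;
    uintptr_t addend;
} TLBEntrySketch;

/* Swap a victim-TLB hit back into the main table so later lookups on the
 * same page take the direct-mapped fast path. */
static void promote_victim(TLBEntrySketch *main_slot,
                           TLBEntrySketch *victim_slot,
                           pthread_mutex_t *lock)
{
    pthread_mutex_lock(lock);
    TLBEntrySketch tmp = *main_slot;   /* the displaced main entry keeps       */
    *main_slot   = *victim_slot;       /* living in the victim slot, so the    */
    *victim_slot = tmp;                /* swap never throws a translation away */
    pthread_mutex_unlock(lock);
}
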
1385 /* TLB resize via tlb_fill_align may have moved the entry. */ in probe_access_internal()
1400 *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in probe_access_internal()
1569 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1575 * address space. Those changes, and the corresponding tlb flush,
1592 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in tlb_plugin_lookup()
1640 * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
1654 /* If the TLB entry is for a different page, reload and try again. */ in mmu_lookup1()
1667 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in mmu_lookup1()
1786 l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index]; in mmu_lookup()
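
mmu_lookup1() (lines 1640-1667) and its caller at line 1786 show the canonical lookup-retry shape used by the load/store helpers: probe the fast table, fall back to the victim TLB, and only call tlb_fill_align() when both miss; because the fill may resize the table, the index and entry are looked up again before the flags and the fulltlb descriptor are read. The sketch below keeps only that control flow; every helper name and signature here is hypothetical, standing in for tlb_index/tlb_entry/tlb_hit/victim_tlb_hit/tlb_fill_align.

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical primitives; only the control flow is the point of the sketch. */
typedef struct LookupCtx LookupCtx;
uint64_t probe_fast(LookupCtx *c, int mmu_idx, uint64_t addr, size_t *index);
bool     page_hit(uint64_t tlb_addr, uint64_t addr);
bool     probe_victim(LookupCtx *c, int mmu_idx, uint64_t addr);
void     fill_align(LookupCtx *c, int mmu_idx, uint64_t addr); /* may resize */

/* Return the comparator word to use; *index is valid on return even if the
 * table was resized by fill_align(). */
static uint64_t lookup_one(LookupCtx *c, int mmu_idx, uint64_t addr,
                           size_t *index)
{
    uint64_t tlb_addr = probe_fast(c, mmu_idx, addr, index);

    if (!page_hit(tlb_addr, addr)) {
        if (!probe_victim(c, mmu_idx, addr)) {
            /* Walks the guest page table and installs a new entry; it may
             * also grow or shrink the TLB, invalidating any previously
             * computed index or entry pointer. */
            fill_align(c, mmu_idx, addr);
        }
        /* Re-probe: the victim swap or the fill has put the translation
         * into the main table, possibly at a different index. */
        tlb_addr = probe_fast(c, mmu_idx, addr, index);
    }
    return tlb_addr;
}
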
1830 /* Check TLB entry and enforce page permissions. */ in atomic_mmu_lookup()
1877 /* Finish collecting tlb flags for both read and write. */ in atomic_mmu_lookup()
1878 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in atomic_mmu_lookup()