/openbmc/linux/drivers/gpu/drm/i915/gt/ |
H A D | intel_gt_types.h |
  110  /* Serialize global tlb invalidations */
  114  * Batch TLB invalidations
  119  * so we track how many TLB invalidations have been
|
H A D | intel_tlb.c | 105 * invalidations so it is good to avoid paying the forcewake cost and in mmio_invalidate_full()
|
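The intel_gt_types.h comments above hint at batching TLB invalidations behind a per-GT sequence number, so that one global invalidation can cover many stale objects. Below is a minimal userspace C sketch of that seqno-batching idea; the names (gt_tlb_seqno, obj_mark_dirty, and so on) are illustrative assumptions, not i915's actual fields or functions.

/* Sketch: skip redundant TLB invalidations by comparing sequence numbers.
 * Illustrative only; not the i915 implementation. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint gt_tlb_seqno = 1;     /* bumped on every full invalidation */

struct obj {
    unsigned int tlb_seqno;              /* seqno recorded when this object last needed a flush */
};

/* Called when an object's mappings are torn down: remember the current seqno. */
static void obj_mark_dirty(struct obj *o)
{
    o->tlb_seqno = atomic_load(&gt_tlb_seqno);
}

/* One full invalidation covers every object marked before it. */
static void gt_invalidate_tlbs(void)
{
    atomic_fetch_add(&gt_tlb_seqno, 1);
    /* ...issue the (expensive) global TLB invalidation here... */
}

static bool obj_needs_invalidate(const struct obj *o)
{
    /* Still dirty only if no invalidation has happened since it was marked. */
    return o->tlb_seqno == atomic_load(&gt_tlb_seqno);
}

int main(void)
{
    struct obj a;

    obj_mark_dirty(&a);
    printf("needs invalidate: %d\n", obj_needs_invalidate(&a)); /* 1 */
    gt_invalidate_tlbs();
    printf("needs invalidate: %d\n", obj_needs_invalidate(&a)); /* 0 */
    return 0;
}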
/openbmc/linux/Documentation/arch/x86/ |
H A D | tlb.rst |
  41  You may be doing too many individual invalidations if you see the
  43  profiles. If you believe that individual invalidations being
|
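The tlb.rst lines above describe the trade-off between flushing pages individually and flushing the whole TLB once the count gets large. A hedged plain-C sketch of that heuristic follows; the ceiling variable mirrors the tunable the document describes, while the flush helpers are placeholders rather than kernel APIs.

/* Sketch of the individual-vs-full TLB flush decision described in tlb.rst.
 * Not kernel code; the helpers below only print what they would do. */
#include <stdio.h>

static unsigned long tlb_single_page_flush_ceiling = 33; /* tunable; see tlb.rst */

static void flush_one_page(unsigned long addr) { printf("invlpg %#lx\n", addr); }
static void flush_everything(void)             { printf("full TLB flush\n"); }

static void flush_range(unsigned long start, unsigned long end, unsigned long page_size)
{
    unsigned long pages = (end - start) / page_size;

    if (pages > tlb_single_page_flush_ceiling) {
        /* Too many individual invalidations would show up in profiles;
         * one full flush is cheaper. */
        flush_everything();
    } else {
        for (unsigned long addr = start; addr < end; addr += page_size)
            flush_one_page(addr);
    }
}

int main(void)
{
    flush_range(0x1000, 0x5000, 0x1000);   /* few pages: individual flushes */
    flush_range(0x0, 0x100000, 0x1000);    /* many pages: full flush */
    return 0;
}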
/openbmc/linux/arch/powerpc/include/asm/ |
H A D | mmu_context.h |
  138  * in order to force TLB invalidations to be global as to in mm_context_add_copro()
  162  * for the time being. Invalidations will remain global if in mm_context_remove_copro()
  164  * it could make some invalidations local with no flush in mm_context_remove_copro()
|
H A D | mmu.h |
  77  /* Enable use of broadcast TLB invalidations. We don't always set it
  79  * use of such invalidations
|
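The powerpc mm_context_{add,remove}_copro comments above are about keeping TLB invalidations global (broadcast) while a coprocessor is attached to a context. A rough userspace C sketch of that bookkeeping follows; the structure and function names are hypothetical, not the powerpc implementation.

/* Sketch: while any coprocessor uses the context, invalidations must be
 * broadcast; once the last one is removed they may become local again. */
#include <stdatomic.h>
#include <stdio.h>

struct mm_ctx {
    atomic_int copros;   /* number of coprocessors attached to this context */
};

static void ctx_add_copro(struct mm_ctx *ctx)    { atomic_fetch_add(&ctx->copros, 1); }
static void ctx_remove_copro(struct mm_ctx *ctx) { atomic_fetch_sub(&ctx->copros, 1); }

static void ctx_invalidate(struct mm_ctx *ctx)
{
    if (atomic_load(&ctx->copros) > 0)
        printf("broadcast (global) TLB invalidation\n");
    else
        printf("local TLB invalidation\n");
}

int main(void)
{
    struct mm_ctx ctx = { .copros = 0 };

    ctx_invalidate(&ctx);   /* local */
    ctx_add_copro(&ctx);
    ctx_invalidate(&ctx);   /* global while the copro is attached */
    ctx_remove_copro(&ctx);
    ctx_invalidate(&ctx);   /* local again */
    return 0;
}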
/openbmc/linux/arch/sh/mm/ |
H A D | cache-shx3.c | 38 * Broadcast I-cache block invalidations by default. in shx3_cache_init()
|
/openbmc/linux/arch/openrisc/include/asm/ |
H A D | cacheflush.h | 29 * invalidations need to be broadcasted to all other cpu in the system in
|
/openbmc/linux/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/ |
H A D | l2_cache.json | 12 …h return data even if the snoops cause an invalidation. L2 cache line invalidations which do not w…
|
H A D | l1d_cache.json | 12 …nce operations. The following cache operations are not counted:\n\n1. Invalidations which do not r…
|
/openbmc/linux/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ |
H A D | l2_cache.json | 12 …h return data even if the snoops cause an invalidation. L2 cache line invalidations which do not w…
|
H A D | l1d_cache.json | 12 …nce operations. The following cache operations are not counted:\n\n1. Invalidations which do not r…
|
/openbmc/linux/include/uapi/rdma/ |
H A D | rdma_user_ioctl.h | 80 /* read TID cache invalidations */
|
/openbmc/linux/drivers/gpu/drm/i915/gem/ |
H A D | i915_gem_clflush.c | 100 * flushed/invalidated. As we always have to emit invalidations in i915_gem_clflush_object()
|
/openbmc/qemu/include/hw/i386/ |
H A D | intel_iommu.h | 124 * Invalidations) or DSI (Domain Selective Invalidations) event is an
|
H A D | x86-iommu.h | 148 * x86_iommu_iec_notify_all - Notify IEC invalidations
|
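x86_iommu_iec_notify_all in the QEMU headers above notifies registered listeners about interrupt entry cache invalidation events (global or selective). The small C sketch below shows that observer pattern in a hedged, self-contained form; the notifier list and types here are illustrative, not QEMU's API.

/* Sketch: a list of IEC-invalidation observers, all notified when a global
 * or index-selective invalidation event occurs. Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

typedef void (*iec_notify_fn)(void *opaque, bool global,
                              unsigned int index, unsigned int mask);

struct iec_notifier {
    iec_notify_fn        fn;
    void                *opaque;
    struct iec_notifier *next;
};

static struct iec_notifier *iec_notifiers;

static void iec_register(struct iec_notifier *n)
{
    n->next = iec_notifiers;
    iec_notifiers = n;
}

static void iec_notify_all(bool global, unsigned int index, unsigned int mask)
{
    for (struct iec_notifier *n = iec_notifiers; n; n = n->next)
        n->fn(n->opaque, global, index, mask);
}

static void print_event(void *opaque, bool global, unsigned int index,
                        unsigned int mask)
{
    printf("%s: %s invalidation, index=%u mask=%u\n",
           (const char *)opaque, global ? "global" : "selective", index, mask);
}

int main(void)
{
    struct iec_notifier n = { .fn = print_event, .opaque = "irqchip" };

    iec_register(&n);
    iec_notify_all(true, 0, 0);     /* a global invalidation event */
    iec_notify_all(false, 8, 2);    /* an index-selective event */
    return 0;
}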
/openbmc/linux/drivers/infiniband/hw/mlx5/ |
H A D | restrack.c | 100 atomic64_read(&mr->odp_stats.invalidations))) in fill_stat_mr_entry()
|
H A D | odp.c |
  235  u64 invalidations = 0; in mlx5_ib_invalidate_range() local
  260  * overwrite the same MTTs. Concurent invalidations might race us, in mlx5_ib_invalidate_range()
  278  /* Count page invalidations */ in mlx5_ib_invalidate_range()
  279  invalidations += idx - blk_start_idx + 1; in mlx5_ib_invalidate_range()
  298  mlx5_update_odp_stats(mr, invalidations, invalidations); in mlx5_ib_invalidate_range()
|
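The odp.c snippet above counts how many pages each range invalidation covers and folds the total into the MR's ODP statistics, which restrack.c later reads back with atomic64_read. A minimal userspace C sketch of that counting pattern follows; the struct and function names are illustrative, not the mlx5 driver's.

/* Sketch: accumulate per-range page invalidation counts into a 64-bit stat
 * that can be reported later. Illustrative only. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct mr_stats {
    atomic_uint_fast64_t invalidations;
};

/* Invalidate pages [blk_start_idx, idx] of a region and count them, mirroring
 * "invalidations += idx - blk_start_idx + 1" in the snippet above. */
static void invalidate_block(struct mr_stats *stats,
                             uint64_t blk_start_idx, uint64_t idx)
{
    uint64_t invalidations = idx - blk_start_idx + 1;

    /* ...zap the device mappings for the block here... */

    atomic_fetch_add(&stats->invalidations, invalidations);
}

int main(void)
{
    struct mr_stats stats = { .invalidations = 0 };

    invalidate_block(&stats, 0, 15);     /* 16 pages */
    invalidate_block(&stats, 64, 95);    /* 32 pages */

    printf("odp invalidations: %llu\n",
           (unsigned long long)atomic_load(&stats.invalidations));
    return 0;
}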
/openbmc/linux/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/ |
H A D | cache.json | 39 … which return data, regardless of whether they cause an invalidation. Invalidations from the L2 wh…
|
/openbmc/linux/drivers/gpu/drm/nouveau/include/nvkm/subdev/ |
H A D | mmu.h | 137 struct mutex mutex; /* serialises mmu invalidations */
|
/openbmc/linux/fs/xfs/scrub/ |
H A D | reap.c |
  162  * of buffer invalidations to 2048.
  389  * buffer invalidations, so we need to return early so that we can in xreap_agextent_iter()
|
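The reap.c lines above cap the number of buffer invalidations per transaction at 2048 and return early so the caller can roll the transaction. A hedged C sketch of that throttling pattern follows; the cap comes from the comment, while the helper names are assumptions rather than XFS functions.

/* Sketch: invalidate buffers in batches, stopping once a per-transaction cap
 * is reached so the transaction can be rolled. Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEFER_INVALIDATIONS 2048   /* cap mentioned in the reap.c comment */

struct reap_state {
    unsigned int invalidated;          /* invalidations in the current transaction */
};

static void invalidate_buffer(unsigned long blkno)
{
    /* ...mark the buffer stale so it is never written back... */
    (void)blkno;
}

/* Returns true if the caller must roll the transaction before continuing. */
static bool reap_extent(struct reap_state *rs, unsigned long start,
                        unsigned long count)
{
    for (unsigned long i = 0; i < count; i++) {
        invalidate_buffer(start + i);
        if (++rs->invalidated >= MAX_DEFER_INVALIDATIONS)
            return true;               /* too much dirty metadata pinned */
    }
    return false;
}

int main(void)
{
    struct reap_state rs = { 0 };

    if (reap_extent(&rs, 0, 5000))
        printf("cap hit after %u invalidations: roll the transaction\n",
               rs.invalidated);
    return 0;
}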
/openbmc/linux/arch/mips/kernel/ |
H A D | vdso.c | 141 * will observe it without requiring cache invalidations. in arch_setup_additional_pages()
|
/openbmc/linux/Documentation/devicetree/bindings/arm/ |
H A D | arm,cci-400.yaml | 15 and manage coherency, TLB invalidations and memory barriers.
|
/openbmc/linux/include/linux/ |
H A D | migrate.h | 207 * callbacks to avoid device MMU invalidations for device private
|
/openbmc/linux/drivers/misc/cxl/ |
H A D | cxllib.c | 123 * However, we'll turn the invalidations off, so that in cxllib_switch_phb_mode()
|
/openbmc/linux/Documentation/mm/ |
H A D | hmm.rst |
  116  specific commands in it to perform the update (unmap, cache invalidations, and
  357  handles CPU page table invalidations so the device driver only has to
|
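hmm.rst, quoted above, explains that the core handles CPU page-table invalidations and the device driver only reacts to them, typically by invalidating any mirror snapshot that was in flight. The simplified userspace C sketch below shows that invalidate/retry handshake with a sequence counter; it illustrates the idea only and is not the mmu_interval_notifier API.

/* Sketch: a mirror takes a snapshot under a sequence number; an invalidation
 * bumps the sequence, forcing the snapshot to be retried. Illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint mirror_seq = 0;

/* Driver-side invalidation callback: just bump the sequence. */
static void mirror_invalidate(void)
{
    atomic_fetch_add(&mirror_seq, 1);
}

static unsigned int mirror_read_begin(void)
{
    return atomic_load(&mirror_seq);
}

static bool mirror_read_retry(unsigned int seq)
{
    /* Retry if an invalidation raced with the snapshot. */
    return atomic_load(&mirror_seq) != seq;
}

int main(void)
{
    unsigned int seq;

    do {
        seq = mirror_read_begin();
        /* ...snapshot CPU page-table state and program the device... */
    } while (mirror_read_retry(seq));

    printf("device mirror is consistent at seq %u\n", seq);

    mirror_invalidate();   /* a later CPU invalidation would force a new round */
    return 0;
}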