/openbmc/qemu/hw/virtio/ |
virtio.c
    112  VRingMemoryRegionCaches *caches;  [member]
    209  static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)  [in virtio_free_region_cache(), argument]
    211  assert(caches != NULL);  [in virtio_free_region_cache()]
    212  address_space_cache_destroy(&caches->desc);  [in virtio_free_region_cache()]
    213  address_space_cache_destroy(&caches->avail);  [in virtio_free_region_cache()]
    214  address_space_cache_destroy(&caches->used);  [in virtio_free_region_cache()]
    215  g_free(caches);  [in virtio_free_region_cache()]
    220  VRingMemoryRegionCaches *caches;  [in virtio_virtqueue_reset_region_cache(), local]
    222  caches = qatomic_read(&vq->vring.caches);  [in virtio_virtqueue_reset_region_cache()]
    223  qatomic_rcu_set(&vq->vring.caches, NULL);  [in virtio_virtqueue_reset_region_cache()]
    [all …]
|
/openbmc/linux/tools/cgroup/ |
memcg_slabinfo.py
    184  caches = {}
    203  caches[addr] = cache
    215  for addr in caches:
    217  cache_show(caches[addr], cfg, stats[addr])
|
/openbmc/linux/kernel/bpf/ |
memalloc.c
    558  ma->caches = pcc;  [in bpf_mem_alloc_init()]
    605  if (ma->caches) {  [in check_leaked_objs()]
    607  cc = per_cpu_ptr(ma->caches, cpu);  [in check_leaked_objs()]
    620  free_percpu(ma->caches);  [in free_mem_alloc_no_barrier()]
    622  ma->caches = NULL;  [in free_mem_alloc_no_barrier()]
    698  if (ma->caches) {  [in bpf_mem_alloc_destroy()]
    701  cc = per_cpu_ptr(ma->caches, cpu);  [in bpf_mem_alloc_destroy()]
    829  ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);  [in bpf_mem_alloc()]
    846  unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);  [in bpf_mem_free()]
    862  unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);  [in bpf_mem_free_rcu()]
|
/openbmc/linux/Documentation/block/ |
writeback_cache_control.rst
     9  write back caches. That means the devices signal I/O completion to the
    60  devices with volatile caches need to implement the support for these
    67  For devices that do not support volatile write caches there is no driver
    70  requests that have a payload. For devices with volatile write caches the
    71  driver needs to tell the block layer that it supports flushing caches by
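The writeback_cache_control.rst match above breaks off at "supports flushing caches by". As a hedged sketch of the mechanism that sentence is describing (not the document's own continuation), a block driver for a device with a volatile write cache typically advertises flush support through the blk_queue_write_cache() helper available in kernels of this vintage; the driver name and queue below are placeholders.

    #include <linux/blkdev.h>

    /*
     * Illustrative sketch only, for a hypothetical driver: tell the block
     * layer the device has a volatile write cache, so REQ_PREFLUSH (and,
     * if supported, REQ_FUA) requests are handled correctly.
     */
    static void mydrv_setup_write_cache(struct request_queue *q, bool has_fua)
    {
            /* arg 2: device has a volatile write cache that needs flushing;
             * arg 3: device also honours Forced Unit Access writes */
            blk_queue_write_cache(q, true, has_fua);
    }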
|
/openbmc/linux/Documentation/filesystems/ |
9p.rst
     80  cache=mode specifies a caching policy. By default, no caches are used.
     86  0b00000000 all caches disabled, mmap disabled
     87  0b00000001 file caches enabled
     88  0b00000010 meta-data caches enabled
     90  0b00001000 loose caches (no explicit consistency with server)
    100  loose 0b00001111 (non-coherent file and meta-data caches)
    108  IMPORTANT: loose caches (and by extension at the moment fscache)
    184  /sys/fs/9p/caches. (applies only to cache=fscache)
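The cache=mode bits listed above are passed as an ordinary 9p mount option. A hedged example of selecting the "loose" policy from C via mount(2) follows; the share name, mount point, and transport options are placeholders, not taken from the document.

    #include <stdio.h>
    #include <sys/mount.h>

    /* Mount a 9p export with non-coherent file and meta-data caches
     * (cache=loose, i.e. the 0b00001111 combination shown above). */
    int main(void)
    {
            const char *opts = "trans=virtio,version=9p2000.L,cache=loose";

            if (mount("hostshare", "/mnt/9p", "9p", 0, opts) != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }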
|
/openbmc/linux/arch/arm/boot/compressed/ |
head-xscale.S
    28  mcr p15, 0, r0, c7, c7, 0 @ flush I & D caches
    30  @ disabling MMU and caches
|
head-sa1100.S
    38  mcr p15, 0, r0, c7, c7, 0 @ flush I & D caches
    40  @ disabling MMU and caches
|
/openbmc/linux/arch/arm/mm/ |
proc-arm720.S
     46  mcr p15, 0, r0, c1, c0, 0 @ disable caches
    108  mcr p15, 0, r0, c7, c7, 0 @ invalidate caches
    136  mcr p15, 0, r0, c7, c7, 0 @ invalidate caches
|
proc-sa110.S
     49  mcr p15, 0, r0, c1, c0, 0 @ disable caches
     65  mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
    162  mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
|
proc-fa526.S
     39  mcr p15, 0, r0, c1, c0, 0 @ disable caches
     58  mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
    137  mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
|
proc-arm926.S
     53  mcr p15, 0, r0, c1, c0, 0 @ disable caches
     69  mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
    404  mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
    417  mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
    425  mov r0, #4 @ disable write-back on caches explicitly
|
proc-mohawk.S
     44  mcr p15, 0, r0, c1, c0, 0 @ disable caches
     62  mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
    359  mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
    378  mcr p15, 0, r0, c7, c7 @ invalidate I,D caches
|
proc-arm920.S
     61  mcr p15, 0, r0, c1, c0, 0 @ disable caches
     77  mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
    389  mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
    402  mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
|
proc-sa1100.S
     57  mcr p15, 0, r0, c1, c0, 0 @ disable caches
     73  mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
    201  mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
|
proc-arm925.S
     84  mcr p15, 0, r0, c1, c0, 0 @ disable caches
    109  mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
    436  mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
    443  mov r0, #4 @ disable write-back on caches explicitly
|
proc-arm740.S
    40  mcr p15, 0, r0, c1, c0, 0 @ disable caches
    62  mcr p15, 0, r0, c7, c0, 0 @ invalidate caches
|
/openbmc/openbmc/poky/meta/classes-recipe/ |
manpages.bbclass
    22  # only update manual page index caches when manual files are built and installed
    37  # only update manual page index caches when manual files are built and installed
|
/openbmc/u-boot/doc/ |
README.mips
    32  or override do_bootelf_exec() not to disable I-/D-caches, because most
    33  Linux/MIPS ports don't re-enable caches after entering kernel_entry.
|
/openbmc/linux/tools/perf/ |
builtin-stat.c
    1363  struct cpu_cache_level caches[MAX_CACHE_LVL];  [in cpu__get_cache_details(), local]
    1369  ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt);  [in cpu__get_cache_details()]
    1393  if (caches[i].level > caches[max_level_index].level)  [in cpu__get_cache_details()]
    1397  cache->cache_lvl = caches[max_level_index].level;  [in cpu__get_cache_details()]
    1398  cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);  [in cpu__get_cache_details()]
    1406  if (caches[i].level == cache_level) {  [in cpu__get_cache_details()]
    1408  cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);  [in cpu__get_cache_details()]
    1411  cpu_cache_level__free(&caches[i]);  [in cpu__get_cache_details()]
    1419  cpu_cache_level__free(&caches[i++]);  [in cpu__get_cache_details()]
    1712  struct cpu_cache_level *caches = env->caches;  [in perf_env__get_cache_id_for_cpu(), local]
    [all …]
|
/openbmc/qemu/contrib/plugins/ |
cache.c
    287  Cache **caches;  [in caches_init(), local]
    294  caches = g_new(Cache *, cores);  [in caches_init()]
    297  caches[i] = cache_init(blksize, assoc, cachesize);  [in caches_init()]
    300  return caches;  [in caches_init()]
    529  static void caches_free(Cache **caches)  [in caches_free(), argument]
    534  cache_free(caches[i]);  [in caches_free()]
|
/openbmc/linux/arch/openrisc/ |
Kconfig
    84  bool "Have write through data caches"
    87  Select this if your implementation features write through data caches.
    89  caches at relevant times. Most OpenRISC implementations support write-
    90  through data caches.
|
/openbmc/qemu/docs/ |
qcow2-cache.txt
     11  The QEMU qcow2 driver has two caches that can improve the I/O
     16  caches, and how to configure them.
     83  caches (in bytes) is:
    121  "cache-size": maximum size of both caches combined
    125  - Both caches must have a size that is a multiple of the cluster size
    155  L2 cache size. This resulted in unnecessarily large caches, so now the
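The formula that qcow2-cache.txt introduces at its line 83 is elided from the excerpt above. As a rough, hedged illustration based on the qcow2 on-disk layout (each L2 table entry is 8 bytes and maps one cluster of guest data), the L2 cache needed to cover a whole image can be estimated as below; the helper name and sample sizes are made up for the example.

    #include <inttypes.h>
    #include <stdio.h>

    /* Estimate the L2 cache (in bytes) needed to map disk_size bytes of
     * virtual disk when the image uses cluster_size-byte clusters:
     * one 8-byte L2 entry per cluster. */
    static uint64_t l2_cache_needed(uint64_t disk_size, uint64_t cluster_size)
    {
            return disk_size / cluster_size * 8;
    }

    int main(void)
    {
            /* Example: fully covering a 50 GB image with 64 KB clusters. */
            printf("L2 cache needed: %" PRIu64 " bytes\n",
                   l2_cache_needed(50ULL << 30, 64 << 10));
            return 0;
    }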
|
/openbmc/linux/drivers/acpi/numa/ |
hmat.c
     66  struct list_head caches;  [member]
    141  INIT_LIST_HEAD(&target->caches);  [in alloc_memory_target()]
    414  list_add_tail(&tcache->node, &target->caches);  [in hmat_parse_cache()]
    697  list_for_each_entry(tcache, &target->caches, node)  [in hmat_register_target_cache()]
    793  list_for_each_entry_safe(tcache, cnext, &target->caches, node) {  [in hmat_free_structures()]
|
/openbmc/qemu/qapi/ |
machine-common.json
    107  # @caches: the list of SmpCacheProperties.
    112  'data': { 'caches': ['SmpCacheProperties'] } }
|
/openbmc/linux/include/linux/ |
bpf_mem_alloc.h
    12  struct bpf_mem_caches __percpu *caches;  [member]
|