/openbmc/linux/tools/bpf/bpftool/

pids.c
    34  struct obj_refs *refs;  in add_ref() local
    40  refs = entry->pvalue;  in add_ref()
    42  for (i = 0; i < refs->ref_cnt; i++) {  in add_ref()
    43  if (refs->refs[i].pid == e->pid)  in add_ref()
    47  tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));  in add_ref()
    53  refs->refs = tmp;  in add_ref()
    54  ref = &refs->refs[refs->ref_cnt];  in add_ref()
    57  refs->ref_cnt++;  in add_ref()
    63  refs = calloc(1, sizeof(*refs));  in add_ref()
    64  if (!refs) {  in add_ref()
    [all …]
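
add_ref() grows the per-object refs array one element at a time, reallocating into a temporary so a failed realloc() neither leaks nor clobbers the existing array. A minimal standalone sketch of the same pattern (struct fields trimmed to what the matches above show):

    #include <stdio.h>
    #include <stdlib.h>

    struct ref { int pid; };

    struct obj_refs {
        struct ref *refs;
        int ref_cnt;
    };

    /* Mirrors add_ref(): dedupe by pid, then realloc into a temporary
     * so a failure leaves the old array intact. */
    static struct ref *add_ref(struct obj_refs *r, int pid)
    {
        struct ref *tmp;

        for (int i = 0; i < r->ref_cnt; i++)
            if (r->refs[i].pid == pid)
                return &r->refs[i]; /* already tracked */

        tmp = realloc(r->refs, (r->ref_cnt + 1) * sizeof(*tmp));
        if (!tmp)
            return NULL;
        r->refs = tmp;
        r->refs[r->ref_cnt].pid = pid;
        return &r->refs[r->ref_cnt++];
    }

    int main(void)
    {
        struct obj_refs r = { 0 };
        add_ref(&r, 42);
        add_ref(&r, 42); /* deduplicated */
        printf("tracked %d pid(s)\n", r.ref_cnt); /* 1 */
        free(r.refs);
        return 0;
    }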

/openbmc/linux/include/linux/

freelist.h
    19  atomic_t refs;  member
    50  atomic_set_release(&node->refs, 1);  in __freelist_add()
    57  if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)  in __freelist_add()
    70  if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {  in freelist_add()
    82  unsigned int refs;  in freelist_try_get() local
    86  refs = atomic_read(&head->refs);  in freelist_try_get()
    87  if ((refs & REFS_MASK) == 0 ||  in freelist_try_get()
    88  !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {  in freelist_try_get()
   106  WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);  in freelist_try_get()
   112  atomic_fetch_add(-2, &head->refs);  in freelist_try_get()
    [all …]
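
The lock-free freelist packs two facts into one atomic word: an on-the-freelist flag bit (REFS_ON_FREELIST) and a count of threads currently inspecting the node in the remaining bits (REFS_MASK). freelist_try_get() skips a node whose count is zero and otherwise registers itself with an acquire CAS. A minimal userspace analogue of that guard, assuming the flag occupies the top bit as the mask names suggest:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Assumed bit split mirroring freelist.h: top bit is the
     * on-freelist flag, low 31 bits count in-flight readers. */
    #define REFS_ON_FREELIST 0x80000000u
    #define REFS_MASK        0x7FFFFFFFu

    struct node { _Atomic unsigned int refs; };

    /* Analogue of the freelist_try_get() fast path: a node whose
     * reader count is zero is being torn down and must be skipped;
     * otherwise claim a reader slot with an acquire CAS. */
    static int try_get(struct node *n)
    {
        unsigned int refs =
            atomic_load_explicit(&n->refs, memory_order_relaxed);

        if ((refs & REFS_MASK) == 0 ||
            !atomic_compare_exchange_strong_explicit(&n->refs, &refs,
                                                     refs + 1,
                                                     memory_order_acquire,
                                                     memory_order_relaxed))
            return 0;
        return 1;
    }

    int main(void)
    {
        struct node n;
        atomic_init(&n.refs, REFS_ON_FREELIST | 1); /* on list, one ref */
        printf("got: %d, refs now 0x%x\n", try_get(&n),
               atomic_load(&n.refs));
        return 0;
    }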

refcount.h
   112  atomic_t refs;  member
   115  #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
   136  atomic_set(&r->refs, n);  in refcount_set()
   147  return atomic_read(&r->refs);  in refcount_read()
   157  } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));  in __refcount_add_not_zero()
   193  int old = atomic_fetch_add_relaxed(i, &r->refs);  in __refcount_add()
   272  int old = atomic_fetch_sub_release(i, &r->refs);  in __refcount_sub_and_test()
   338  int old = atomic_fetch_sub_release(1, &r->refs);  in __refcount_dec()
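
The loop at line 157 is the core of refcount_inc_not_zero(): retry a compare-and-swap until the increment lands on the value last observed, and refuse if the count ever reads zero (the object is already being freed). A standalone sketch of that loop with C11 atomics; the kernel version additionally saturates at REFCOUNT_SATURATED, which is omitted here:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { _Atomic int refs; } refcount_t;

    /* Sketch of __refcount_add_not_zero(): bump only while non-zero. */
    static bool refcount_add_not_zero(int i, refcount_t *r)
    {
        int old = atomic_load_explicit(&r->refs, memory_order_relaxed);

        do {
            if (old == 0)
                return false; /* object already dying, take no ref */
        } while (!atomic_compare_exchange_weak_explicit(
                     &r->refs, &old, old + i,
                     memory_order_relaxed, memory_order_relaxed));
        return true;
    }

    int main(void)
    {
        refcount_t r;
        atomic_init(&r.refs, 1);
        bool ok = refcount_add_not_zero(1, &r);
        printf("%d %d\n", ok, atomic_load(&r.refs)); /* 1 2 */
        return 0;
    }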

/openbmc/linux/drivers/media/v4l2-core/

v4l2-h264.c
    60  b->refs[i].longterm = true;  in v4l2_h264_init_reflist_builder()
    68  if (!b->refs[i].longterm && dpb[i].frame_num > cur_frame_num)  in v4l2_h264_init_reflist_builder()
    69  b->refs[i].frame_num = (int)dpb[i].frame_num -  in v4l2_h264_init_reflist_builder()
    72  b->refs[i].frame_num = dpb[i].frame_num;  in v4l2_h264_init_reflist_builder()
    74  b->refs[i].top_field_order_cnt = dpb[i].top_field_order_cnt;  in v4l2_h264_init_reflist_builder()
    75  b->refs[i].bottom_field_order_cnt = dpb[i].bottom_field_order_cnt;  in v4l2_h264_init_reflist_builder()
   113  return min(b->refs[ref->index].top_field_order_cnt,  in v4l2_h264_get_poc()
   114  b->refs[ref->index].bottom_field_order_cnt);  in v4l2_h264_get_poc()
   116  return b->refs[ref->index].top_field_order_cnt;  in v4l2_h264_get_poc()
   118  return b->refs[ref->index].bottom_field_order_cnt;  in v4l2_h264_get_poc()
    [all …]
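
Lines 113-118 show how v4l2_h264_get_poc() selects a picture order count per reference: a whole frame is ordered by the earlier of its two fields, while a single field uses its own count. A compact sketch of that selection (the enum values are hypothetical stand-ins for the V4L2 field constants):

    #include <stdio.h>

    enum field_ref { REF_FRAME, REF_TOP_FIELD, REF_BOTTOM_FIELD };

    struct ref_pic { int top_foc, bottom_foc; };

    /* Mirrors the selection in v4l2_h264_get_poc(): frames sort by
     * the minimum of their two field order counts. */
    static int get_poc(const struct ref_pic *p, enum field_ref fr)
    {
        switch (fr) {
        case REF_TOP_FIELD:
            return p->top_foc;
        case REF_BOTTOM_FIELD:
            return p->bottom_foc;
        case REF_FRAME:
        default:
            return p->top_foc < p->bottom_foc ? p->top_foc
                                              : p->bottom_foc;
        }
    }

    int main(void)
    {
        struct ref_pic p = { .top_foc = 6, .bottom_foc = 7 };
        printf("%d %d %d\n", get_poc(&p, REF_FRAME),
               get_poc(&p, REF_TOP_FIELD),
               get_poc(&p, REF_BOTTOM_FIELD)); /* 6 6 7 */
        return 0;
    }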

/openbmc/openbmc/poky/bitbake/bin/

git-make-shallow
    48  make_shallow(shallow_file, args.revisions, args.refs)
    50  ref_revs = check_output(git_cmd + ['rev-list'] + args.refs).splitlines()
    56  filter_refs(args.refs)
    76  if args.refs:
    77  … args.refs = check_output(git_cmd + ['rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
    79  args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')
    81  args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
    90  def make_shallow(shallow_file, revisions, refs):  argument
    92  for rev in follow_history_intersections(revisions, refs):
   104  refs = [r[0] for r in ref_split]
    [all …]

/openbmc/linux/tools/include/linux/

refcount.h
    53  atomic_t refs;  member
    56  #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
    60  atomic_set(&r->refs, n);  in refcount_set()
    65  return atomic_read(&r->refs);  in refcount_read()
    78  unsigned int old, new, val = atomic_read(&r->refs);  in refcount_inc_not_zero()
    89  old = atomic_cmpxchg_relaxed(&r->refs, val, new);  in refcount_inc_not_zero()
   123  unsigned int old, new, val = atomic_read(&r->refs);  in refcount_sub_and_test()
   135  old = atomic_cmpxchg_release(&r->refs, val, new);  in refcount_sub_and_test()

/openbmc/linux/drivers/xen/

gntdev-dmabuf.c
    44  grant_ref_t *refs;  member
   437  int count, u32 domid, u32 *refs, u32 *fd)  in dmabuf_exp_from_refs() argument
   449  map->grants[i].ref = refs[i];  in dmabuf_exp_from_refs()
   488  dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,  in dmabuf_imp_grant_foreign_access() argument
   512  refs[i] = cur_ref;  in dmabuf_imp_grant_foreign_access()
   522  static void dmabuf_imp_end_foreign_access(u32 *refs, int count)  in dmabuf_imp_end_foreign_access() argument
   527  if (refs[i] != INVALID_GRANT_REF)  in dmabuf_imp_end_foreign_access()
   528  gnttab_end_foreign_access(refs[i], NULL);  in dmabuf_imp_end_foreign_access()
   533  kfree(gntdev_dmabuf->u.imp.refs);  in dmabuf_imp_free_storage()
   546  gntdev_dmabuf->u.imp.refs = kcalloc(count,  in dmabuf_imp_alloc_storage()
    [all …]

/openbmc/linux/io_uring/

refs.h
    12  ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
    17  return atomic_inc_not_zero(&req->refs);  in req_ref_inc_not_zero()
    26  return atomic_dec_and_test(&req->refs);  in req_ref_put_and_test()
    33  atomic_inc(&req->refs);  in req_ref_get()
    40  atomic_set(&req->refs, nr);  in __io_req_set_refcount()
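
The predicate at line 12 is a branch-free check that flags both a zero count and a count that has gone slightly negative after an over-put: reinterpreting the signed count as unsigned and adding 127u maps exactly the values in [-127, 0] into [0, 127]. A tiny demonstration of why it works:

    #include <stdio.h>

    /* Mirror of the io_uring macro: true when the signed count is
     * anywhere in [-127, 0], i.e. zero or "close to overflow". */
    static int zero_or_close_to_overflow(int refs)
    {
        return (unsigned int)refs + 127u <= 127u;
    }

    int main(void)
    {
        int samples[] = { 2, 1, 0, -1, -127, -128 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("refs=%4d -> %d\n", samples[i],
                   zero_or_close_to_overflow(samples[i]));
        return 0; /* prints 0 0 1 1 1 0 */
    }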

/openbmc/linux/drivers/gpu/drm/nouveau/nvkm/core/

event.c
    34  if (--event->refs[index * event->types_nr + type] == 0) {  in nvkm_event_put()
    51  if (++event->refs[index * event->types_nr + type] == 1) {  in nvkm_event_get()
   175  if (!event->refs || WARN_ON(id >= event->index_nr))  in nvkm_event_ntfy()
   194  if (event->refs) {  in nvkm_event_fini()
   195  kfree(event->refs);  in nvkm_event_fini()
   196  event->refs = NULL;  in nvkm_event_fini()
   204  event->refs = kzalloc(array3_size(index_nr, types_nr, sizeof(*event->refs)), GFP_KERNEL);  in __nvkm_event_init()
   205  if (!event->refs)  in __nvkm_event_init()
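
event->refs is a flat index_nr x types_nr counter array, and lines 34/51 are the classic enable-on-first-user / disable-on-last-user pattern. A minimal sketch of that bookkeeping (the struct and printf bodies are hypothetical stand-ins for nvkm's hardware hooks):

    #include <stdio.h>
    #include <stdlib.h>

    struct event_sketch {
        int index_nr, types_nr;
        int *refs; /* index_nr * types_nr counters */
    };

    /* First reference arms the source, as in nvkm_event_get(). */
    static void event_get(struct event_sketch *ev, int index, int type)
    {
        if (++ev->refs[index * ev->types_nr + type] == 1)
            printf("init hw event idx=%d type=%d\n", index, type);
    }

    /* Last reference disarms it, as in nvkm_event_put(). */
    static void event_put(struct event_sketch *ev, int index, int type)
    {
        if (--ev->refs[index * ev->types_nr + type] == 0)
            printf("fini hw event idx=%d type=%d\n", index, type);
    }

    int main(void)
    {
        struct event_sketch ev = { .index_nr = 4, .types_nr = 2 };

        ev.refs = calloc(ev.index_nr * ev.types_nr, sizeof(*ev.refs));
        event_get(&ev, 1, 0); /* arms */
        event_get(&ev, 1, 0); /* counted, no hw work */
        event_put(&ev, 1, 0); /* counted, no hw work */
        event_put(&ev, 1, 0); /* disarms */
        free(ev.refs);
        return 0;
    }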

/openbmc/linux/fs/smb/client/

dfs_cache.c
   250  static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)  in dump_refs() argument
   256  const struct dfs_info3_param *ref = &refs[i];  in dump_refs()
   382  static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,  in copy_ref_data() argument
   388  ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);  in copy_ref_data()
   390  ce->srvtype = refs[0].server_type;  in copy_ref_data()
   391  ce->hdr_flags = refs[0].flags;  in copy_ref_data()
   392  ce->ref_flags = refs[0].ref_flag;  in copy_ref_data()
   393  ce->path_consumed = refs[0].path_consumed;  in copy_ref_data()
   398  t = alloc_target(refs[i].node_name, refs[i].path_consumed);  in copy_ref_data()
   420  static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)  in alloc_cache_entry() argument
    [all …]

dfs.h
    28  struct dfs_ref refs[MAX_NESTED_LINKS];  member
    31  #define ref_walk_start(w) ((w)->refs)
    32  #define ref_walk_end(w) (&(w)->refs[ARRAY_SIZE((w)->refs) - 1])

/openbmc/linux/mm/

gup.c
    71  static inline struct folio *try_get_folio(struct page *page, int refs)  in try_get_folio() argument
    79  if (unlikely(!folio_ref_try_add(folio, refs)))  in try_get_folio()
    92  if (!put_devmap_managed_page_refs(&folio->page, refs))  in try_get_folio()
    93  folio_put_refs(folio, refs);  in try_get_folio()
   100  static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)  in gup_put_folio() argument
   105  node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);  in gup_put_folio()
   107  atomic_sub(refs, &folio->_pincount);  in gup_put_folio()
   109  refs *= GUP_PIN_COUNTING_BIAS;  in gup_put_folio()
   112  if (!put_devmap_managed_page_refs(&folio->page, refs))  in gup_put_folio()
   113  folio_put_refs(folio, refs);  in gup_put_folio()
    [all …]
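
gup_put_folio() shows the FOLL_PIN accounting trick: large folios track pins in a dedicated _pincount field, while small folios record each pin as GUP_PIN_COUNTING_BIAS ordinary references (assumed here to be 1024, its mainline value), so pins can be told apart from plain gets. A hedged userspace analogue of the release side; the struct and flag value are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdio.h>

    #define GUP_PIN_COUNTING_BIAS 1024 /* assumed mainline value */
    #define FOLL_PIN 0x1               /* illustrative flag value */

    struct folio_sketch {
        _Atomic int refcount;
        _Atomic int pincount;
        int is_large;
    };

    /* Analogue of gup_put_folio(): large folios drop dedicated pin
     * counts plus `refs` plain references; small folios pay the
     * bias-multiplied reference count instead. */
    static void put_folio(struct folio_sketch *f, int refs,
                          unsigned int flags)
    {
        if (flags & FOLL_PIN) {
            if (f->is_large)
                atomic_fetch_sub(&f->pincount, refs);
            else
                refs *= GUP_PIN_COUNTING_BIAS;
        }
        atomic_fetch_sub(&f->refcount, refs);
    }

    int main(void)
    {
        struct folio_sketch f = { .is_large = 0 };

        atomic_init(&f.refcount, 1 + 2 * GUP_PIN_COUNTING_BIAS);
        atomic_init(&f.pincount, 0);
        put_folio(&f, 2, FOLL_PIN); /* release two pins */
        printf("refcount now %d\n", atomic_load(&f.refcount)); /* 1 */
        return 0;
    }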

workingset.c
   240  int refs = folio_lru_refs(folio);  in lru_gen_eviction() local
   241  int tier = lru_tier_from_refs(refs);  in lru_gen_eviction()
   250  token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);  in lru_gen_eviction()
   255  return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);  in lru_gen_eviction()
   282  int hist, tier, refs;  in lru_gen_refault() local
   305  refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;  in lru_gen_refault()
   306  tier = lru_tier_from_refs(refs);  in lru_gen_refault()
   318  if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {  in lru_gen_refault()
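
Lines 250 and 305 pack the eviction generation and the access count into one shadow-entry token: the sequence number goes in the high bits and refs - 1 in the low LRU_REFS_WIDTH bits. A small round-trip sketch; the width value is an assumption for illustration (the real one is config-dependent), and the refault side's `+ workingset` adjustment is simplified to `+ 1`:

    #include <stdio.h>

    #define LRU_REFS_WIDTH 2 /* illustrative width */

    int main(void)
    {
        unsigned long min_seq = 5;
        int refs = 3;

        /* Pack, as in lru_gen_eviction(): generation in the high
         * bits, (refs - 1) clamped to zero in the low bits. */
        unsigned long token = (min_seq << LRU_REFS_WIDTH) |
                              (refs - 1 > 0 ? refs - 1 : 0);

        /* Unpack, as in lru_gen_refault() (simplified). */
        int got_refs =
            (int)(token & ((1ul << LRU_REFS_WIDTH) - 1)) + 1;
        unsigned long got_seq = token >> LRU_REFS_WIDTH;

        printf("token=%lu seq=%lu refs=%d\n", token, got_seq,
               got_refs); /* token=22 seq=5 refs=3 */
        return 0;
    }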

/openbmc/qemu/hw/i386/kvm/

xen_gnttab.c
   328  uint32_t *refs, int prot)  in xen_be_gnttab_map_refs() argument
   363  act = g_hash_table_lookup(xgt->active_maps, GINT_TO_POINTER(refs[0]));  in xen_be_gnttab_map_refs()
   366  if (gnt_ref(s, refs[0], prot) == INVALID_GPA) {  in xen_be_gnttab_map_refs()
   373  uint64_t gpa = gnt_ref(s, refs[0], prot);  in xen_be_gnttab_map_refs()
   391  gnt_unref(s, refs[0], &act->mrs, 0);  in xen_be_gnttab_map_refs()
   397  s->map_track[refs[0]]++;  in xen_be_gnttab_map_refs()
   398  g_hash_table_insert(xgt->active_maps, GINT_TO_POINTER(refs[0]), act);  in xen_be_gnttab_map_refs()
   416  void *start_address, uint32_t *refs,  in xen_be_gnttab_unmap() argument
   432  act = g_hash_table_lookup(xgt->active_maps, GINT_TO_POINTER(refs[0]));  in xen_be_gnttab_unmap()
   442  do_unmap(GINT_TO_POINTER(refs[0]), act, s);  in xen_be_gnttab_unmap()
    [all …]

/openbmc/linux/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3-sva.c
    20  refcount_t refs;  member
    32  refcount_t refs;  member
    61  refcount_inc(&cd->refs);  in arm_smmu_share_asid()
   115  refcount_set(&cd->refs, 1);  in arm_smmu_alloc_shared_cd()
   281  refcount_inc(&smmu_mn->refs);  in arm_smmu_mmu_notifier_get()
   296  refcount_set(&smmu_mn->refs, 1);  in arm_smmu_mmu_notifier_get()
   328  if (!refcount_dec_and_test(&smmu_mn->refs))  in arm_smmu_mmu_notifier_put()
   363  refcount_inc(&bond->refs);  in __arm_smmu_sva_bind()
   374  refcount_set(&bond->refs, 1);  in __arm_smmu_sva_bind()
   553  if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {  in arm_smmu_sva_remove_dev_pasid()

/openbmc/linux/drivers/thermal/

k3_j72xx_bandgap.c
    78  int refs[4];  member
    90  idx1 = err_vals->refs[seg];  in create_table_segments()
    92  idx2 = err_vals->refs[seg + 1];  in create_table_segments()
    95  ref1 = err_vals->refs[seg];  in create_table_segments()
    96  ref2 = err_vals->refs[seg + 1];  in create_table_segments()
   461  err_vals.refs[0] = MINUS40CREF;  in k3_j72xx_bandgap_probe()
   462  err_vals.refs[1] = PLUS30CREF;  in k3_j72xx_bandgap_probe()
   463  err_vals.refs[2] = PLUS125CREF;  in k3_j72xx_bandgap_probe()
   464  err_vals.refs[3] = PLUS150CREF;  in k3_j72xx_bandgap_probe()
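
err_vals.refs[] holds four calibration reference points (the names suggest -40 °C, +30 °C, +125 °C and +150 °C), and create_table_segments() builds the lookup table segment by segment between consecutive pairs. A sketch of one segment's linear interpolation, under the assumption that the per-segment work is a straight-line fit between the two endpoints; all names and values below are hypothetical:

    #include <stdio.h>

    /* Four reference indices with their measured errors, loosely
     * modelled on k3_j72xx_bandgap's err_vals. */
    struct err_values {
        int refs[4];
        int errs[4];
    };

    /* Linearly interpolate the error across the segment
     * [refs[seg], refs[seg + 1]]. */
    static int interp_err(const struct err_values *ev, int seg, int idx)
    {
        int ref1 = ev->refs[seg], ref2 = ev->refs[seg + 1];
        int err1 = ev->errs[seg], err2 = ev->errs[seg + 1];

        return err1 + (err2 - err1) * (idx - ref1) / (ref2 - ref1);
    }

    int main(void)
    {
        struct err_values ev = {
            .refs = { 0, 8, 24, 32 }, /* stand-ins for MINUS40CREF etc. */
            .errs = { -4, 0, 6, 8 },
        };

        printf("err at idx 16: %d\n", interp_err(&ev, 1, 16)); /* 3 */
        return 0;
    }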

/openbmc/linux/fs/btrfs/

delayed-inode.c
    52  refcount_set(&delayed_node->refs, 0);  in btrfs_init_delayed_node()
    69  refcount_inc(&node->refs);  in btrfs_get_delayed_node()
    78  refcount_inc(&node->refs);  /* can be accessed */  in btrfs_get_delayed_node()
   100  if (refcount_inc_not_zero(&node->refs)) {  in btrfs_get_delayed_node()
   101  refcount_inc(&node->refs);  in btrfs_get_delayed_node()
   135  refcount_set(&node->refs, 2);  in btrfs_get_or_create_delayed_node()
   176  refcount_inc(&node->refs);  /* inserted into list */  in btrfs_queue_delayed_node()
   190  refcount_dec(&node->refs);  /* not in the list */  in btrfs_dequeue_delayed_node()
   211  refcount_inc(&node->refs);  in btrfs_first_delayed_node()
   238  refcount_inc(&next->refs);  in btrfs_next_delayed_node()
    [all …]

delayed-ref.h
    36  refcount_t refs;  member
    82  refcount_t refs;  member
   341  WARN_ON(refcount_read(&ref->refs) == 0);  in btrfs_put_delayed_ref()
   342  if (refcount_dec_and_test(&ref->refs)) {  in btrfs_put_delayed_ref()
   371  if (refcount_dec_and_test(&head->refs))  in btrfs_put_delayed_ref_head()
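
btrfs_put_delayed_ref() at lines 341-342 is the canonical release helper: warn if the count is already zero (an over-put), then free only on the 1 -> 0 transition. A generic sketch of that shape with C11 atomics, with the btrfs-specific teardown elided:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ref_obj {
        _Atomic int refs;
        /* payload ... */
    };

    /* Shape of btrfs_put_delayed_ref(): complain about over-puts,
     * and only the caller dropping the last reference frees. */
    static void put_ref(struct ref_obj *obj)
    {
        if (atomic_load(&obj->refs) == 0)
            fprintf(stderr, "WARN: put on zero refcount\n");
        if (atomic_fetch_sub(&obj->refs, 1) == 1) {
            printf("last ref dropped, freeing\n");
            free(obj);
        }
    }

    int main(void)
    {
        struct ref_obj *obj = malloc(sizeof(*obj));

        atomic_init(&obj->refs, 2);
        put_ref(obj); /* 2 -> 1 */
        put_ref(obj); /* 1 -> 0, frees */
        return 0;
    }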

/openbmc/linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/

cgrp.c
    39  if (refcount_dec_and_test(&ectx->refs)) {  in nvkm_cgrp_ectx_put()
    65  refcount_inc(&ectx->refs);  in nvkm_cgrp_ectx_get()
    76  refcount_set(&ectx->refs, 1);  in nvkm_cgrp_ectx_get()
   100  if (refcount_dec_and_test(&vctx->refs)) {  in nvkm_cgrp_vctx_put()
   130  refcount_inc(&vctx->refs);  in nvkm_cgrp_vctx_get()
   151  refcount_set(&vctx->refs, 1);  in nvkm_cgrp_vctx_get()

/openbmc/linux/lib/

refcount.c
    59  return atomic_try_cmpxchg_release(&r->refs, &val, 0);  in refcount_dec_if_one()
    76  unsigned int new, val = atomic_read(&r->refs);  in refcount_dec_not_one()
    91  } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));  in refcount_dec_not_one()

/openbmc/openbmc/poky/bitbake/lib/bb/

codeparser.py
   123  def __init__(self, refs, execs, contains, extra):  argument
   124  self.refs = codecache.internSet(refs)
   132  return (self.refs, self.execs, self.contains, self.extra)
   135  (refs, execs, contains, extra) = state
   136  self.__init__(refs, execs, contains, extra)
   138  l = (hash(self.refs), hash(self.execs), hash(self.extra))
   143  return " ".join([str(self.refs), str(self.execs), str(self.contains)])
   181  def newPythonCacheLine(self, refs, execs, contains, extra):  argument
   182  cacheline = pythonCacheLine(refs, execs, contains, extra)
   353  self.references = set(codeparsercache.pythoncache[h].refs)
    [all …]

/openbmc/webui-vue/src/views/Operations/Kvm/

KvmConsole.vue
   116  this.$refs.panel,
   143  this.$refs.panel.children &&
   144  this.$refs.panel.children.length > 0 &&
   145  this.$refs.panel.children[0].children.length > 0
   147  this.$refs.toolbar.style.width =
   148  this.$refs.panel.children[0].children[0].clientWidth - 10 + 'px';

/openbmc/linux/include/uapi/xen/

gntdev.h
    70  struct ioctl_gntdev_grant_ref refs[1];  member
   256  __u32 refs[1];  member
   298  __u32 refs[1];  member
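
The refs[1] members are the pre-C99 trailing-array idiom for a variable-length ioctl payload: userspace allocates the struct plus count - 1 extra elements, since one element already lives inside the struct. A hedged allocation sketch with the struct abbreviated to the fields relevant here:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Abbreviated stand-in for the gntdev ioctl structs that end
     * in a one-element refs[] array. */
    struct map_grant_refs {
        uint32_t count;
        uint32_t refs[1]; /* really `count` elements long */
    };

    int main(void)
    {
        uint32_t count = 8;

        /* One element is already in the struct, so allocate
         * space for count - 1 more. */
        struct map_grant_refs *m =
            malloc(sizeof(*m) + (count - 1) * sizeof(m->refs[0]));
        if (!m)
            return 1;

        m->count = count;
        for (uint32_t i = 0; i < count; i++)
            m->refs[i] = i + 100; /* illustrative grant references */

        printf("last ref: %u\n", m->refs[m->count - 1]);
        free(m);
        return 0;
    }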

/openbmc/linux/tools/testing/selftests/bpf/progs/

map_kptr.c
   308  if (p_st->cnt.refs.counter != ref) {  in test_map_kptr_ref_pre()
   318  if (p_st->cnt.refs.counter != ref)  in test_map_kptr_ref_pre()
   326  if (p_st->cnt.refs.counter != ref)  in test_map_kptr_ref_pre()
   338  if (p_st->cnt.refs.counter != ref)  in test_map_kptr_ref_pre()
   355  if (!p_st || p_st->cnt.refs.counter != ref)  in test_map_kptr_ref_post()
   361  if (p_st->cnt.refs.counter != ref) {  in test_map_kptr_ref_post()
   371  if (p_st->cnt.refs.counter != ref)  in test_map_kptr_ref_post()
   471  if (p->cnt.refs.counter != ref) {  in test_map_kptr_ref3()

/openbmc/qemu/include/hw/xen/

xen_backend_ops.h
   160  uint32_t *refs, int prot);
   161  int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs,
   213  uint32_t *refs, int prot)  in qemu_xen_gnttab_map_refs() argument
   218  return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);  in qemu_xen_gnttab_map_refs()
   222  void *start_address, uint32_t *refs,  in qemu_xen_gnttab_unmap() argument
   228  return xen_gnttab_ops->unmap(xgt, start_address, refs, count);  in qemu_xen_gnttab_unmap()
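
xen_backend_ops.h routes every grant-table call through a function-pointer ops table, so the same QEMU code can drive either the real libxengnttab or the built-in emulation. The dispatch shape in miniature; every name below is a hypothetical stand-in, and the NULL-ops error handling of the real wrappers is omitted:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct handle handle_t;

    /* Backend-switchable operations, as in xen_gnttab_ops. */
    struct gnttab_ops {
        void *(*map_refs)(handle_t *h, uint32_t count, uint32_t domid,
                          uint32_t *refs, int prot);
    };

    static void *emu_map_refs(handle_t *h, uint32_t count,
                              uint32_t domid, uint32_t *refs, int prot)
    {
        (void)h; (void)domid; (void)prot;
        printf("emulated map of %u grant refs, first=%u\n",
               count, refs[0]);
        return NULL; /* a real backend would return the mapping */
    }

    static const struct gnttab_ops emu_ops = {
        .map_refs = emu_map_refs,
    };
    static const struct gnttab_ops *gnttab_ops = &emu_ops;

    /* Wrapper mirroring qemu_xen_gnttab_map_refs(): callers never
     * see which backend is installed. */
    static void *map_refs(handle_t *h, uint32_t count, uint32_t domid,
                          uint32_t *refs, int prot)
    {
        return gnttab_ops->map_refs(h, count, domid, refs, prot);
    }

    int main(void)
    {
        uint32_t refs[2] = { 7, 8 };

        map_refs(NULL, 2, 1, refs, 0);
        return 0;
    }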