
Searched for refs:buckets (results 1 – 25 of 116), sorted by relevance


/openbmc/qemu/util/
throttle.c
73 throttle_leak_bucket(&ts->cfg.buckets[i], delta_ns); in throttle_do_leak()
159 wait = throttle_compute_wait(&ts->cfg.buckets[index]); in throttle_compute_wait_for()
223 cfg->buckets[i].burst_length = 1; in throttle_config_init()
303 if (cfg->buckets[i].avg > 0) { in throttle_enabled()
322 bps_flag = cfg->buckets[THROTTLE_BPS_TOTAL].avg && in throttle_is_valid()
323 (cfg->buckets[THROTTLE_BPS_READ].avg || in throttle_is_valid()
324 cfg->buckets[THROTTLE_BPS_WRITE].avg); in throttle_is_valid()
326 ops_flag = cfg->buckets[THROTTLE_OPS_TOTAL].avg && in throttle_is_valid()
327 (cfg->buckets[THROTTLE_OPS_READ].avg || in throttle_is_valid()
328 cfg->buckets[THROTTLE_OPS_WRITE].avg); in throttle_is_valid()
[all …]
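
The throttle.c hits above are QEMU's leaky-bucket I/O throttling: each entry of cfg->buckets[] carries an average rate (avg), a burst allowance (max), and a current fill level that drains as time passes. As a rough orientation only, here is a minimal self-contained sketch of that idea; the struct and function names are invented for illustration and are not QEMU's actual API:

    #include <stdint.h>

    /* Hypothetical, simplified bucket; QEMU's LeakyBucket carries more state. */
    typedef struct {
        double avg;    /* allowed units per second (0 = unlimited) */
        double max;    /* burst capacity in units */
        double level;  /* current fill level in units */
    } Bucket;

    /* Drain the bucket according to the time elapsed since the last call. */
    static void bucket_leak(Bucket *b, int64_t delta_ns)
    {
        if (b->avg > 0 && delta_ns > 0) {
            b->level -= b->avg * delta_ns / 1e9;
            if (b->level < 0) {
                b->level = 0;
            }
        }
    }

    /* Account for 'units' of work; returns how long the caller should wait, in ns. */
    static int64_t bucket_account(Bucket *b, double units)
    {
        if (b->avg <= 0) {
            return 0;                /* throttling disabled for this bucket */
        }
        b->level += units;
        if (b->level <= b->max) {
            return 0;                /* still within the burst allowance */
        }
        /* Time needed for the excess to leak away at the average rate. */
        return (int64_t)((b->level - b->max) / b->avg * 1e9);
    }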
qht.c
185 struct qht_bucket *buckets; member
233 qht_bucket_debug__locked(&map->buckets[i]); in qht_map_debug__all_locked()
262 unsigned long bucket_idx = b - map->buckets; in qht_do_if_first_in_stripe()
278 unsigned long bucket_idx = b - map->buckets; in qht_bucket_lock_do()
308 return &map->buckets[hash & (map->n_buckets - 1)]; in qht_map_to_bucket()
317 struct qht_bucket *b = &map->buckets[i]; in qht_map_lock_buckets()
328 struct qht_bucket *b = &map->buckets[i]; in qht_map_unlock_buckets()
434 qht_chain_destroy(map, &map->buckets[i]); in qht_map_destroy()
436 qemu_vfree(map->buckets); in qht_map_destroy()
457 map->buckets = qemu_memalign(QHT_BUCKET_ALIGN, in qht_map_create()
[all …]
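
qht.c sizes its bucket array to a power of two, so a hash maps to a bucket with a simple mask (hash & (n_buckets - 1)), and operations that touch the whole table take every per-bucket lock. A small sketch of those two patterns, with invented names and a pthread mutex standing in for QEMU's per-bucket lock:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>

    struct bucket {
        pthread_mutex_t lock;   /* initialized when the map is created */
        /* chained entries would live here */
    };

    struct map {
        struct bucket *buckets;
        size_t n_buckets;       /* always a power of two */
    };

    /* Power-of-two sizing turns the modulo into a mask. */
    static struct bucket *map_to_bucket(struct map *m, uint32_t hash)
    {
        return &m->buckets[hash & (m->n_buckets - 1)];
    }

    /* Lock every bucket, e.g. before resizing or inspecting the whole table. */
    static void map_lock_all(struct map *m)
    {
        for (size_t i = 0; i < m->n_buckets; i++) {
            pthread_mutex_lock(&m->buckets[i].lock);
        }
    }

    static void map_unlock_all(struct map *m)
    {
        for (size_t i = 0; i < m->n_buckets; i++) {
            pthread_mutex_unlock(&m->buckets[i].lock);
        }
    }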
/openbmc/qemu/tests/unit/
test-throttle.c
43 bkt = cfg.buckets[THROTTLE_BPS_TOTAL]; in test_leak_bucket()
92 bkt = cfg.buckets[THROTTLE_BPS_TOTAL]; in test_compute_wait()
179 g_assert(!ts.cfg.buckets[i].avg); in test_init()
180 g_assert(!ts.cfg.buckets[i].max); in test_init()
181 g_assert(!ts.cfg.buckets[i].level); in test_init()
211 g_assert(!ts.cfg.buckets[i].avg); in test_init_readonly()
212 g_assert(!ts.cfg.buckets[i].max); in test_init_readonly()
213 g_assert(!ts.cfg.buckets[i].level); in test_init_readonly()
243 g_assert(!ts.cfg.buckets[i].avg); in test_init_writeonly()
244 g_assert(!ts.cfg.buckets[i].max); in test_init_writeonly()
[all …]
/openbmc/qemu/fsdev/
qemu-fsdev-throttle.c
37 fst->cfg.buckets[THROTTLE_BPS_TOTAL].avg = in fsdev_throttle_parse_opts()
39 fst->cfg.buckets[THROTTLE_BPS_READ].avg = in fsdev_throttle_parse_opts()
41 fst->cfg.buckets[THROTTLE_BPS_WRITE].avg = in fsdev_throttle_parse_opts()
43 fst->cfg.buckets[THROTTLE_OPS_TOTAL].avg = in fsdev_throttle_parse_opts()
45 fst->cfg.buckets[THROTTLE_OPS_READ].avg = in fsdev_throttle_parse_opts()
47 fst->cfg.buckets[THROTTLE_OPS_WRITE].avg = in fsdev_throttle_parse_opts()
50 fst->cfg.buckets[THROTTLE_BPS_TOTAL].max = in fsdev_throttle_parse_opts()
52 fst->cfg.buckets[THROTTLE_BPS_READ].max = in fsdev_throttle_parse_opts()
54 fst->cfg.buckets[THROTTLE_BPS_WRITE].max = in fsdev_throttle_parse_opts()
56 fst->cfg.buckets[THROTTLE_OPS_TOTAL].max = in fsdev_throttle_parse_opts()
[all …]
/openbmc/qemu/block/
qapi-sysemu.c
439 cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps; in qmp_block_set_io_throttle()
440 cfg.buckets[THROTTLE_BPS_READ].avg = arg->bps_rd; in qmp_block_set_io_throttle()
441 cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr; in qmp_block_set_io_throttle()
443 cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops; in qmp_block_set_io_throttle()
444 cfg.buckets[THROTTLE_OPS_READ].avg = arg->iops_rd; in qmp_block_set_io_throttle()
445 cfg.buckets[THROTTLE_OPS_WRITE].avg = arg->iops_wr; in qmp_block_set_io_throttle()
448 cfg.buckets[THROTTLE_BPS_TOTAL].max = arg->bps_max; in qmp_block_set_io_throttle()
451 cfg.buckets[THROTTLE_BPS_READ].max = arg->bps_rd_max; in qmp_block_set_io_throttle()
454 cfg.buckets[THROTTLE_BPS_WRITE].max = arg->bps_wr_max; in qmp_block_set_io_throttle()
457 cfg.buckets[THROTTLE_OPS_TOTAL].max = arg->iops_max; in qmp_block_set_io_throttle()
[all …]
qapi.c
97 info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg; in bdrv_block_device_info()
98 info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg; in bdrv_block_device_info()
99 info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg; in bdrv_block_device_info()
101 info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg; in bdrv_block_device_info()
102 info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg; in bdrv_block_device_info()
103 info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg; in bdrv_block_device_info()
105 info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max; in bdrv_block_device_info()
106 info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max; in bdrv_block_device_info()
107 info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max; in bdrv_block_device_info()
108 info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max; in bdrv_block_device_info()
[all …]
/openbmc/linux/drivers/s390/scsi/
zfcp_reqlist.h
26 struct list_head buckets[ZFCP_REQ_LIST_BUCKETS]; member
52 INIT_LIST_HEAD(&rl->buckets[i]); in zfcp_reqlist_alloc()
68 if (!list_empty(&rl->buckets[i])) in zfcp_reqlist_isempty()
92 list_for_each_entry(req, &rl->buckets[i], list) in _zfcp_reqlist_find()
165 list_add_tail(&req->list, &rl->buckets[i]); in zfcp_reqlist_add()
182 list_splice_init(&rl->buckets[i], list); in zfcp_reqlist_move()
209 list_for_each_entry(req, &rl->buckets[i], list) in zfcp_reqlist_apply_for_all()
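
zfcp_reqlist.h spreads outstanding requests over a small fixed array of list buckets keyed by request ID, so a lookup only walks one short chain. A stand-alone approximation of that pattern using a plain singly linked list (the real code uses the kernel's list_head under a spinlock); the names and bucket count here are hypothetical:

    #include <stddef.h>
    #include <stdint.h>

    #define REQ_LIST_BUCKETS 8   /* hypothetical, sized like ZFCP_REQ_LIST_BUCKETS */

    struct req {
        uint64_t req_id;
        struct req *next;
    };

    struct reqlist {
        struct req *buckets[REQ_LIST_BUCKETS];   /* zero-initialize before use */
    };

    static size_t req_bucket(uint64_t req_id)
    {
        return req_id % REQ_LIST_BUCKETS;
    }

    static void reqlist_add(struct reqlist *rl, struct req *req)
    {
        size_t i = req_bucket(req->req_id);

        req->next = rl->buckets[i];
        rl->buckets[i] = req;
    }

    /* Only the one bucket selected by the ID has to be searched. */
    static struct req *reqlist_find(struct reqlist *rl, uint64_t req_id)
    {
        for (struct req *r = rl->buckets[req_bucket(req_id)]; r; r = r->next) {
            if (r->req_id == req_id) {
                return r;
            }
        }
        return NULL;
    }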
/openbmc/linux/Documentation/networking/
nexthop-group-resilient.rst
54 continuous. With a hash table, mapping between the hash table buckets and
56 the buckets that held it are simply reassigned to other next hops::
70 choose a subset of buckets that are currently not used for forwarding
72 keeping the "busy" buckets intact. This way, established flows are ideally
80 certain number of buckets, according to its weight and the number of
81 buckets in the hash table. In accordance with the source code, we will call
86 Next hops that have fewer buckets than their wants count, are called
98 buckets:
105 underweight next hops. If, after considering all buckets in this manner,
109 There may not be enough "idle" buckets to satisfy the updated wants counts
[all …]
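
The nexthop-group-resilient.rst fragments describe how each next hop "wants" a number of hash-table buckets proportional to its weight, and how only idle buckets are migrated from overweight to underweight next hops so that established flows keep their bucket. A toy calculation of those wants counts, with names invented here rather than taken from the kernel:

    #include <stdio.h>

    /* Split n_buckets among the next hops in proportion to their weights. */
    static void compute_wants(const unsigned int *weight, unsigned int *wants,
                              int n_nexthops, unsigned int n_buckets)
    {
        unsigned int total = 0;

        for (int i = 0; i < n_nexthops; i++) {
            total += weight[i];
        }
        for (int i = 0; i < n_nexthops; i++) {
            /* The kernel distributes rounding so the counts sum to n_buckets;
             * plain truncation is enough to show the idea. */
            wants[i] = (unsigned long long)weight[i] * n_buckets / total;
        }
    }

    int main(void)
    {
        /* Two next hops with weights 1 and 3 sharing 8 buckets: wants 2 and 6.
         * Only buckets that have gone idle are then moved toward next hops
         * holding fewer buckets than they want. */
        unsigned int weight[] = { 1, 3 };
        unsigned int wants[2];

        compute_wants(weight, wants, 2, 8);
        printf("wants: %u %u\n", wants[0], wants[1]);
        return 0;
    }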
/openbmc/linux/tools/lib/bpf/
hashmap.h
77 struct hashmap_entry **buckets; member
170 for (cur = map->buckets[bkt]; cur; cur = cur->next)
182 for (cur = map->buckets[bkt]; \
193 for (cur = map->buckets \
194 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
201 for (cur = map->buckets \
202 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
H A Dhashmap.c45 map->buckets = NULL; in hashmap__init()
71 free(map->buckets); in hashmap__clear()
72 map->buckets = NULL; in hashmap__clear()
124 free(map->buckets); in hashmap_grow()
125 map->buckets = new_buckets; in hashmap_grow()
137 if (!map->buckets) in hashmap_find_entry()
140 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry()
200 hashmap_add_entry(&map->buckets[h], entry); in hashmap_insert()
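
hashmap.c keeps chained entries in map->buckets and, when the table gets crowded, allocates a bucket array twice as large and re-adds every entry. A rough stand-alone sketch of that grow-and-rehash step; it caches and masks the hash for brevity, whereas the libbpf code re-hashes the key through hash_fn/hash_bits:

    #include <stdlib.h>
    #include <stddef.h>

    struct entry {
        size_t hash;            /* full hash of the key, cached at insert time */
        struct entry *next;
    };

    struct hashmap {
        struct entry **buckets;
        size_t cap_bits;        /* table size is 1 << cap_bits */
    };

    /* Double the bucket array and re-link every entry under the wider mask. */
    static int hashmap_grow(struct hashmap *map)
    {
        size_t new_bits = map->cap_bits + 1;
        size_t new_cap = (size_t)1 << new_bits;
        struct entry **new_buckets = calloc(new_cap, sizeof(*new_buckets));

        if (!new_buckets) {
            return -1;
        }
        for (size_t i = 0; i < ((size_t)1 << map->cap_bits); i++) {
            struct entry *cur = map->buckets ? map->buckets[i] : NULL;

            while (cur) {
                struct entry *next = cur->next;
                size_t b = cur->hash & (new_cap - 1);

                cur->next = new_buckets[b];
                new_buckets[b] = cur;
                cur = next;
            }
        }
        free(map->buckets);
        map->buckets = new_buckets;
        map->cap_bits = new_bits;
        return 0;
    }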
/openbmc/linux/tools/perf/util/
hashmap.h
77 struct hashmap_entry **buckets; member
170 for (cur = map->buckets[bkt]; cur; cur = cur->next)
182 for (cur = map->buckets[bkt]; \
193 for (cur = map->buckets \
194 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
201 for (cur = map->buckets \
202 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
hashmap.c
45 map->buckets = NULL; in hashmap__init()
71 free(map->buckets); in hashmap__clear()
72 map->buckets = NULL; in hashmap__clear()
124 free(map->buckets); in hashmap_grow()
125 map->buckets = new_buckets; in hashmap_grow()
137 if (!map->buckets) in hashmap_find_entry()
140 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry()
200 hashmap_add_entry(&map->buckets[h], entry); in hashmap_insert()
/openbmc/linux/block/
blk-stat.c
86 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn()
93 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn()
105 unsigned int buckets, void *data) in blk_stat_alloc_callback() argument
113 cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat), in blk_stat_alloc_callback()
119 cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat), in blk_stat_alloc_callback()
130 cb->buckets = buckets; in blk_stat_alloc_callback()
147 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_add_callback()
/openbmc/linux/tools/perf/
builtin-ftrace.c
685 static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf, in make_histogram() argument
742 buckets[i]++; in make_histogram()
753 static void display_histogram(int buckets[], bool use_nsec) in display_histogram() argument
762 total += buckets[i]; in display_histogram()
772 bar_len = buckets[0] * bar_total / total; in display_histogram()
774 0, 1, use_nsec ? "ns" : "us", buckets[0], bar_len, bar, bar_total - bar_len, ""); in display_histogram()
786 bar_len = buckets[i] * bar_total / total; in display_histogram()
788 start, stop, unit, buckets[i], bar_len, bar, in display_histogram()
792 bar_len = buckets[NUM_BUCKET - 1] * bar_total / total; in display_histogram()
794 1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1], in display_histogram()
[all …]
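
builtin-ftrace.c accumulates function latencies into a fixed set of buckets and then prints one scaled bar per bucket. A compact stand-alone version of that display logic, with bucket boundaries and constants invented for illustration:

    #include <stdio.h>

    #define NUM_BUCKET 8
    #define BAR_WIDTH  40

    /* Print one bar per latency bucket, scaled against the total sample count. */
    static void display_histogram(const unsigned int buckets[NUM_BUCKET])
    {
        unsigned int total = 0;

        for (int i = 0; i < NUM_BUCKET; i++) {
            total += buckets[i];
        }
        if (total == 0) {
            return;
        }
        for (int i = 0; i < NUM_BUCKET; i++) {
            int bar_len = buckets[i] * BAR_WIDTH / total;

            /* Bucket i covers latencies in [2^i, 2^(i+1)) microseconds. */
            printf("%5u - %5u us: %6u |", 1u << i, 1u << (i + 1), buckets[i]);
            for (int j = 0; j < BAR_WIDTH; j++) {
                putchar(j < bar_len ? '#' : ' ');
            }
            printf("|\n");
        }
    }

    int main(void)
    {
        unsigned int buckets[NUM_BUCKET] = { 1, 4, 9, 20, 9, 4, 1, 0 };

        display_histogram(buckets);
        return 0;
    }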
/openbmc/linux/tools/testing/selftests/drivers/net/netdevsim/
nexthop.sh
213 $IP nexthop add id 10 group 1/2 type resilient buckets 4
229 $IP nexthop add id 10 group 1,3/2,2 type resilient buckets 5
259 $IP nexthop add id 10 group 1/2 type resilient buckets 4 &> /dev/null
325 $IP nexthop add id 10 group 1/2 type resilient buckets 6
353 $IP nexthop add id 10 group 1/2 type resilient buckets 6
408 $IP nexthop add id 10 group 1/2 type resilient buckets 8 idle_timer 4
434 type resilient buckets 8 idle_timer 6
469 $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 4
504 $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 8
535 type resilient buckets 8 $timer 4
[all …]
/openbmc/linux/lib/
hashtable_test.c
189 int buckets[2]; in hashtable_test_hash_for_each_possible() local
223 buckets[y->key] = bkt; in hashtable_test_hash_for_each_possible()
230 if (buckets[0] == buckets[1]) { in hashtable_test_hash_for_each_possible()
244 int buckets[2]; in hashtable_test_hash_for_each_possible_safe() local
281 buckets[y->key] = bkt; in hashtable_test_hash_for_each_possible_safe()
288 if (buckets[0] == buckets[1]) { in hashtable_test_hash_for_each_possible_safe()
/openbmc/linux/net/ceph/crush/
crush.c
111 if (map->buckets) { in crush_destroy()
114 if (map->buckets[b] == NULL) in crush_destroy()
116 crush_destroy_bucket(map->buckets[b]); in crush_destroy()
118 kfree(map->buckets); in crush_destroy()
mapper.c
527 itemtype = map->buckets[-1-item]->type; in crush_choose_firstn()
540 in = map->buckets[-1-item]; in crush_choose_firstn()
564 map->buckets[-1-item], in crush_choose_firstn()
741 itemtype = map->buckets[-1-item]->type; in crush_choose_indep()
758 in = map->buckets[-1-item]; in crush_choose_indep()
778 map->buckets[-1-item], in crush_choose_indep()
865 if (!map->buckets[b]) in crush_init_workspace()
869 switch (map->buckets[b]->alg) { in crush_init_workspace()
877 v += map->buckets[b]->size * sizeof(__u32); in crush_init_workspace()
948 map->buckets[-1-curstep->arg1])) { in crush_do_rule()
[all …]
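
In mapper.c, the recurring expression map->buckets[-1-item] reflects CRUSH's ID convention: non-negative items are devices, negative items are buckets, and bucket ID b is stored at index -1-b of the buckets array. A tiny illustration of that mapping with a simplified struct (not the real crush_map):

    #include <stdio.h>

    struct bucket {
        int id;
    };

    /* CRUSH convention: device items are >= 0, bucket items are < 0,
     * and bucket ID b is stored at buckets[-1 - b]. */
    static struct bucket *item_to_bucket(struct bucket *buckets, int item)
    {
        return item < 0 ? &buckets[-1 - item] : NULL;
    }

    int main(void)
    {
        struct bucket buckets[3] = { { -1 }, { -2 }, { -3 } };

        /* Item -2 maps to index -1 - (-2) = 1. */
        printf("item -2 -> index %td\n", item_to_bucket(buckets, -2) - buckets);
        return 0;
    }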
/openbmc/linux/net/netfilter/ipvs/
ip_vs_sh.c
70 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; member
108 struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get()
130 dest = rcu_dereference(s->buckets[ihash].dest); in ip_vs_sh_get_fallback()
145 dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get_fallback()
172 b = &s->buckets[0]; in ip_vs_sh_reassign()
216 b = &s->buckets[0]; in ip_vs_sh_flush()
ip_vs_dh.c
64 struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE]; member
90 return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest); in ip_vs_dh_get()
106 b = &s->buckets[0]; in ip_vs_dh_reassign()
140 b = &s->buckets[0]; in ip_vs_dh_flush()
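
ip_vs_sh.c and ip_vs_dh.c both keep a fixed table of buckets, each pointing at a real server, and pick a bucket by hashing the source or destination address, so a given address keeps landing on the same server while the table is unchanged. A stripped-down sketch of that selection with a deliberately trivial hash and invented types:

    #include <stdint.h>
    #include <stddef.h>

    #define SH_TAB_BITS 8
    #define SH_TAB_SIZE (1 << SH_TAB_BITS)   /* plays the role of IP_VS_SH_TAB_SIZE */

    struct dest {
        int id;               /* stands in for the real-server state */
    };

    struct sh_bucket {
        struct dest *dest;
    };

    struct sh_state {
        struct sh_bucket buckets[SH_TAB_SIZE];
    };

    /* Deliberately trivial hash over an IPv4 address; the kernel mixes harder. */
    static unsigned int sh_hashkey(uint32_t addr)
    {
        return (addr ^ (addr >> 16)) & (SH_TAB_SIZE - 1);
    }

    /* Same source address, same bucket, same destination: that stability is
     * what keeps a flow pinned to one real server. */
    static struct dest *sh_get(struct sh_state *s, uint32_t saddr)
    {
        return s->buckets[sh_hashkey(saddr)].dest;
    }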
/openbmc/linux/fs/nfs/
pnfs_nfs.c
102 p = kmalloc(struct_size(p, buckets, n), gfp_flags); in pnfs_alloc_commit_array()
109 for (b = &p->buckets[0]; n != 0; b++, n--) { in pnfs_alloc_commit_array()
259 struct pnfs_commit_bucket *buckets, in pnfs_bucket_scan_array() argument
267 cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max); in pnfs_bucket_scan_array()
288 cnt = pnfs_bucket_scan_array(cinfo, array->buckets, in pnfs_generic_scan_commit_lists()
304 struct pnfs_commit_bucket *buckets, in pnfs_bucket_recover_commit_reqs() argument
314 for (i = 0, b = buckets; i < nbuckets; i++, b++) { in pnfs_bucket_recover_commit_reqs()
343 array->buckets, in pnfs_generic_recover_commit_reqs()
355 pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets, in pnfs_bucket_search_commit_reqs() argument
364 for (i = 0, b = buckets; i < nbuckets; i++, b++) { in pnfs_bucket_search_commit_reqs()
[all …]
nfs42xattr.c
70 struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; member
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist); in nfs4_xattr_hash_init()
112 spin_lock_init(&cache->buckets[i].lock); in nfs4_xattr_hash_init()
113 cache->buckets[i].cache = cache; in nfs4_xattr_hash_init()
114 cache->buckets[i].draining = false; in nfs4_xattr_hash_init()
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) in nfs4_xattr_free_cache_cb()
278 cache->buckets[i].draining = false; in nfs4_xattr_free_cache_cb()
394 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache()
506 return &cache->buckets[jhash(name, strlen(name), 0) & in nfs4_xattr_hash_bucket()
507 (ARRAY_SIZE(cache->buckets) - 1)]; in nfs4_xattr_hash_bucket()
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
ipoib_vlan.c
45 struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP]; member
71 static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets, in mlx5i_find_qpn_to_netdev_node() argument
74 struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)]; in mlx5i_find_qpn_to_netdev_node()
99 hlist_add_head(&new_node->hlist, &ht->buckets[key]); in mlx5i_pkey_add_qpn()
112 node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn); in mlx5i_pkey_del_qpn()
131 node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn); in mlx5i_pkey_get_netdev()
/openbmc/linux/drivers/md/
dm-region-hash.c
73 struct list_head *buckets; member
209 rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets))); in dm_region_hash_create()
210 if (!rh->buckets) { in dm_region_hash_create()
217 INIT_LIST_HEAD(rh->buckets + i); in dm_region_hash_create()
231 vfree(rh->buckets); in dm_region_hash_create()
247 list_for_each_entry_safe(reg, nreg, rh->buckets + h, in dm_region_hash_destroy()
258 vfree(rh->buckets); in dm_region_hash_destroy()
277 struct list_head *bucket = rh->buckets + rh_hash(rh, region); in __rh_lookup()
288 list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key)); in __rh_insert()
/openbmc/linux/kernel/bpf/
bpf_local_storage.c
24 return &smap->buckets[hash_ptr(selem, smap->bucket_log)]; in select_bucket()
777 usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log); in bpf_local_storage_map_mem_usage()
811 smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets, in bpf_local_storage_map_alloc()
812 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN); in bpf_local_storage_map_alloc()
813 if (!smap->buckets) { in bpf_local_storage_map_alloc()
819 INIT_HLIST_HEAD(&smap->buckets[i].list); in bpf_local_storage_map_alloc()
820 raw_spin_lock_init(&smap->buckets[i].lock); in bpf_local_storage_map_alloc()
847 kvfree(smap->buckets); in bpf_local_storage_map_alloc()
880 b = &smap->buckets[i]; in bpf_local_storage_map_free()
919 kvfree(smap->buckets); in bpf_local_storage_map_free()
