/openbmc/linux/net/mptcp/token.c

    111  struct token_bucket *bucket;    in mptcp_token_new_request() local
    122  bucket = token_bucket(token);    in mptcp_token_new_request()
    123  spin_lock_bh(&bucket->lock);    in mptcp_token_new_request()
    130  bucket->chain_len++;    in mptcp_token_new_request()
    157  struct token_bucket *bucket;    in mptcp_token_new_connect() local
    164  spin_lock_bh(&bucket->lock);    in mptcp_token_new_connect()
    177  bucket->chain_len++;    in mptcp_token_new_connect()
    196  struct token_bucket *bucket;    in mptcp_token_accept() local
    200  spin_lock_bh(&bucket->lock);    in mptcp_token_accept()
    364  bucket->chain_len--;    in mptcp_token_destroy_request()
    [all …]
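The pattern visible in token.c is a fixed-size hash table whose buckets each carry their own lock and a chain-length counter, so insertions only serialize within one bucket. A minimal userspace sketch of that pattern, with pthread mutexes standing in for spin_lock_bh() (names hypothetical, not the kernel code itself):

    #include <pthread.h>
    #include <stdlib.h>

    #define NBUCKETS 1024

    struct node { unsigned int token; struct node *next; };

    struct token_bucket {
        pthread_mutex_t lock;   /* per-bucket lock, standing in for spin_lock_bh() */
        struct node *chain;     /* collision chain */
        unsigned int chain_len; /* chain length, as token.c maintains */
    };

    static struct token_bucket table[NBUCKETS];

    static void table_init(void)
    {
        for (int i = 0; i < NBUCKETS; i++)
            pthread_mutex_init(&table[i].lock, NULL);
    }

    /* Insert a token: only its own bucket is locked, so inserts into
     * different buckets never contend with each other. */
    static void token_insert(unsigned int token)
    {
        struct token_bucket *b = &table[token % NBUCKETS];
        struct node *n = malloc(sizeof(*n));

        if (!n)
            return;
        n->token = token;
        pthread_mutex_lock(&b->lock);
        n->next = b->chain;
        b->chain = n;
        b->chain_len++;
        pthread_mutex_unlock(&b->lock);
    }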
/openbmc/linux/net/ceph/crush/mapper.c

    88   s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %    in bucket_perm_choose()
    89   bucket->size;    in bucket_perm_choose()
    113  i = crush_hash32_3(bucket->hash, x, bucket->id, p) %    in bucket_perm_choose()
    114  (bucket->size - p);    in bucket_perm_choose()
    131  return bucket->items[s];    in bucket_perm_choose()
    148  __u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],    in bucket_list_choose()
    149  r, bucket->h.id);    in bucket_list_choose()
    153  i, x, r, bucket->h.items[i], bucket->item_weights[i],    in bucket_list_choose()
    164  return bucket->h.items[0];    in bucket_list_choose()
    237  draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);    in bucket_straw_choose()
    [all …]
/openbmc/linux/drivers/interconnect/qcom/bcm-voter.c

    65   int bucket, i;    in bcm_aggregate_mask() local
    67   for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {    in bcm_aggregate_mask()
    68   bcm->vote_x[bucket] = 0;    in bcm_aggregate_mask()
    69   bcm->vote_y[bucket] = 0;    in bcm_aggregate_mask()
    75   if (node->sum_avg[bucket] || node->max_peak[bucket]) {    in bcm_aggregate_mask()
    76   bcm->vote_x[bucket] = 0;    in bcm_aggregate_mask()
    77   bcm->vote_y[bucket] = bcm->enable_mask;    in bcm_aggregate_mask()
    94   size_t i, bucket;    in bcm_aggregate() local
    99   for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {    in bcm_aggregate()
    104  agg_avg[bucket] = max(agg_avg[bucket], temp);    in bcm_aggregate()
    [all …]
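bcm_aggregate() folds every node's per-bucket bandwidth votes into one vote per bucket. A hedged sketch of that reduction, summing the averages and taking the max of the peaks (field names modeled on the snippet, not the full driver):

    #include <stddef.h>

    #define NUM_BUCKETS 3   /* e.g. the AMC / WAKE / SLEEP voting buckets */

    struct icc_node_votes {
        unsigned long long sum_avg[NUM_BUCKETS];
        unsigned long long max_peak[NUM_BUCKETS];
    };

    /* Aggregate all nodes into one vote per bucket:
     * sum the average-bandwidth votes, max the peak votes. */
    static void aggregate(const struct icc_node_votes *nodes, size_t n_nodes,
                          unsigned long long agg_avg[NUM_BUCKETS],
                          unsigned long long agg_peak[NUM_BUCKETS])
    {
        for (size_t bucket = 0; bucket < NUM_BUCKETS; bucket++) {
            agg_avg[bucket] = 0;
            agg_peak[bucket] = 0;
            for (size_t i = 0; i < n_nodes; i++) {
                agg_avg[bucket] += nodes[i].sum_avg[bucket];
                if (nodes[i].max_peak[bucket] > agg_peak[bucket])
                    agg_peak[bucket] = nodes[i].max_peak[bucket];
            }
        }
    }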
/openbmc/linux/block/blk-stat.c

    55   int bucket, cpu;    in blk_stat_add() local
    69   bucket = cb->bucket_fn(rq);    in blk_stat_add()
    70   if (bucket < 0)    in blk_stat_add()
    83   unsigned int bucket;    in blk_stat_timer_fn() local
    86   for (bucket = 0; bucket < cb->buckets; bucket++)    in blk_stat_timer_fn()
    87   blk_rq_stat_init(&cb->stat[bucket]);    in blk_stat_timer_fn()
    93   for (bucket = 0; bucket < cb->buckets; bucket++) {    in blk_stat_timer_fn()
    94   blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);    in blk_stat_timer_fn()
    95   blk_rq_stat_init(&cpu_stat[bucket]);    in blk_stat_timer_fn()
    139  unsigned int bucket;    in blk_stat_add_callback() local
    [all …]
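blk_stat_timer_fn() shows the classic per-CPU histogram flush: each CPU accumulates into its own buckets, and a timer periodically folds them into one global array and re-zeroes the per-CPU copies. A single-threaded sketch of just the fold step, assuming count-only stats (the real blk_rq_stat carries mean/min/max as well):

    #define NCPUS    4
    #define NBUCKETS 8

    static unsigned long cpu_stat[NCPUS][NBUCKETS]; /* each CPU writes its own row */
    static unsigned long stat[NBUCKETS];            /* global, owned by the timer */

    static void timer_fn(void)
    {
        for (int bucket = 0; bucket < NBUCKETS; bucket++)
            stat[bucket] = 0;                       /* blk_rq_stat_init() */

        for (int cpu = 0; cpu < NCPUS; cpu++) {
            for (int bucket = 0; bucket < NBUCKETS; bucket++) {
                stat[bucket] += cpu_stat[cpu][bucket];  /* blk_rq_stat_sum() */
                cpu_stat[cpu][bucket] = 0;              /* reset for next window */
            }
        }
    }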
/openbmc/linux/block/kyber-iosched.c

    216  unsigned int bucket;    in flush_latency_buckets() local
    218  for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)    in flush_latency_buckets()
    219  buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);    in flush_latency_buckets()
    233  for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)    in calculate_percentile()
    234  samples += buckets[bucket];    in calculate_percentile()
    252  for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {    in calculate_percentile()
    253  if (buckets[bucket] >= percentile_samples)    in calculate_percentile()
    255  percentile_samples -= buckets[bucket];    in calculate_percentile()
    263  return bucket;    in calculate_percentile()
    625  unsigned int bucket;    in add_latency_sample() local
    [all …]
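calculate_percentile() walks the histogram once to count samples, computes how many samples the requested percentile must cover, then walks again subtracting bucket counts until it lands in the right bucket. A standalone sketch of that logic (the bucket boundaries themselves are defined elsewhere in kyber):

    /* Return the index of the bucket in which the requested percentile of
     * all samples falls, or -1 if there are no samples. */
    static int calculate_percentile(const unsigned int *buckets, int nbuckets,
                                    unsigned int percentile)
    {
        unsigned long samples = 0;

        for (int b = 0; b < nbuckets; b++)
            samples += buckets[b];
        if (!samples)
            return -1;

        /* Number of samples the percentile must cover, rounding up. */
        unsigned long needed = (samples * percentile + 99) / 100;

        int bucket = 0;
        for (; bucket < nbuckets - 1; bucket++) {
            if (buckets[bucket] >= needed)
                break;              /* this bucket contains the percentile */
            needed -= buckets[bucket];
        }
        return bucket;              /* overflow lands in the last bucket */
    }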
/openbmc/linux/net/sched/sch_hhf.c

    333  bucket->head = skb->next;    in dequeue_head()
    341  if (bucket->head == NULL)    in bucket_add()
    342  bucket->head = skb;    in bucket_add()
    344  bucket->tail->next = skb;    in bucket_add()
    345  bucket->tail = skb;    in bucket_add()
    352  struct wdrr_bucket *bucket;    in hhf_drop() local
    356  if (!bucket->head)    in hhf_drop()
    359  if (bucket->head) {    in hhf_drop()
    381  bucket = &q->buckets[idx];    in hhf_enqueue()
    382  bucket_add(bucket, skb);    in hhf_enqueue()
    [all …]
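Each WDRR bucket here is a plain head/tail singly linked FIFO of packets. A self-contained sketch of the two operations visible in the snippet (struct pkt is a stand-in for sk_buff):

    #include <stddef.h>

    struct pkt { struct pkt *next; };

    struct wdrr_bucket {
        struct pkt *head;   /* oldest packet, dequeued first */
        struct pkt *tail;   /* newest packet, appended after */
    };

    static void bucket_add(struct wdrr_bucket *bucket, struct pkt *skb)
    {
        skb->next = NULL;
        if (bucket->head == NULL)
            bucket->head = skb;        /* empty: new packet is both ends */
        else
            bucket->tail->next = skb;  /* append behind the current tail */
        bucket->tail = skb;
    }

    static struct pkt *dequeue_head(struct wdrr_bucket *bucket)
    {
        struct pkt *skb = bucket->head;

        if (skb)
            bucket->head = skb->next;  /* tail goes stale when the queue empties;
                                          bucket_add only tests head, so that is safe */
        return skb;
    }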
/openbmc/linux/drivers/infiniband/sw/rdmavt/trace_qp.h

    18   TP_PROTO(struct rvt_qp *qp, u32 bucket),
    19   TP_ARGS(qp, bucket),
    23   __field(u32, bucket)
    28   __entry->bucket = bucket;
    34   __entry->bucket
    39   TP_PROTO(struct rvt_qp *qp, u32 bucket),
    40   TP_ARGS(qp, bucket));
    43   TP_PROTO(struct rvt_qp *qp, u32 bucket),
    44   TP_ARGS(qp, bucket));
/openbmc/linux/net/9p/error.c

    179  int bucket;    in p9_error_init() local
    182  for (bucket = 0; bucket < ERRHASHSZ; bucket++)    in p9_error_init()
    183  INIT_HLIST_HEAD(&hash_errmap[bucket]);    in p9_error_init()
    188  bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;    in p9_error_init()
    190  hlist_add_head(&c->list, &hash_errmap[bucket]);    in p9_error_init()
    208  int bucket;    in p9_errstr2errno() local
    212  bucket = jhash(errstr, len, 0) % ERRHASHSZ;    in p9_errstr2errno()
    213  hlist_for_each_entry(c, &hash_errmap[bucket], list) {    in p9_errstr2errno()
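p9_errstr2errno() maps a 9P error string to an errno by hashing the string, picking a bucket, and walking that bucket's chain. A self-contained userspace sketch of the lookup side, with a simple string hash standing in for jhash():

    #include <stddef.h>
    #include <string.h>

    #define ERRHASHSZ 32

    struct errmap { const char *name; int errno_val; struct errmap *next; };

    static struct errmap *hash_errmap[ERRHASHSZ];

    /* Stand-in for jhash(); any decent string hash works for the sketch. */
    static unsigned int str_hash(const char *s, size_t len)
    {
        unsigned int h = 5381;
        while (len--)
            h = h * 33 + (unsigned char)*s++;
        return h;
    }

    static int errstr2errno(const char *errstr, size_t len)
    {
        unsigned int bucket = str_hash(errstr, len) % ERRHASHSZ;

        for (struct errmap *c = hash_errmap[bucket]; c; c = c->next)
            if (strlen(c->name) == len && !memcmp(c->name, errstr, len))
                return c->errno_val;
        return 0; /* not found */
    }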
/openbmc/linux/fs/nfs/nfs42xattr.c

    238  entry->bucket = NULL;    in nfs4_xattr_alloc_entry()
    396  spin_lock(&bucket->lock);    in nfs4_xattr_discard_cache()
    397  bucket->draining = true;    in nfs4_xattr_discard_cache()
    534  entry->bucket = bucket;    in nfs4_xattr_hash_add()
    536  spin_lock(&bucket->lock);    in nfs4_xattr_hash_add()
    538  if (bucket->draining) {    in nfs4_xattr_hash_add()
    555  spin_unlock(&bucket->lock);    in nfs4_xattr_hash_add()
    571  spin_lock(&bucket->lock);    in nfs4_xattr_hash_remove()
    594  spin_lock(&bucket->lock);    in nfs4_xattr_hash_find()
    897  bucket = entry->bucket;    in entry_lru_isolate()
    [all …]
/openbmc/linux/fs/nfs/pnfs_nfs.c

    65   if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {    in pnfs_free_bucket_lseg()
    67   bucket->lseg = NULL;    in pnfs_free_bucket_lseg()
    91   if (bucket)    in pnfs_generic_clear_request_commit()
    414  lseg = pnfs_get_lseg(bucket->lseg);    in pnfs_bucket_get_committing()
    435  struct pnfs_commit_bucket *bucket;    in pnfs_generic_retry_commit() local
    439  for (bucket = buckets; idx < nbuckets; bucket++, idx++) {    in pnfs_generic_retry_commit()
    456  struct pnfs_commit_bucket *bucket;    in pnfs_bucket_alloc_ds_commits() local
    461  for (i = 0, bucket = buckets; i < nbuckets; i++, bucket++) {    in pnfs_bucket_alloc_ds_commits()
    1207 struct pnfs_commit_bucket *bucket;    in pnfs_layout_mark_request_commit() local
    1214 list = &bucket->written;    in pnfs_layout_mark_request_commit()
    [all …]
/openbmc/linux/net/vmw_vsock/diag.c

    52   unsigned int bucket;    in vsock_diag_dump() local
    63   bucket = cb->args[1];    in vsock_diag_dump()
    72   while (bucket < ARRAY_SIZE(vsock_bind_table)) {    in vsock_diag_dump()
    73   struct list_head *head = &vsock_bind_table[bucket];    in vsock_diag_dump()
    94   bucket++;    in vsock_diag_dump()
    98   bucket = 0;    in vsock_diag_dump()
    102  while (bucket < ARRAY_SIZE(vsock_connected_table)) {    in vsock_diag_dump()
    103  struct list_head *head = &vsock_connected_table[bucket];    in vsock_diag_dump()
    128  bucket++;    in vsock_diag_dump()
    135  cb->args[1] = bucket;    in vsock_diag_dump()
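vsock_diag_dump() is a resumable iteration: the current bucket (and how many entries were already emitted) is parked in cb->args[] so the next netlink dump call can pick up where the previous batch stopped. A sketch of that cursor logic over a generic chained table (all names hypothetical):

    #define NBUCKETS 16

    struct node { int val; struct node *next; };

    static struct node *table[NBUCKETS];

    static void emit(struct node *n) { (void)n; /* netlink fill in the real code */ }

    struct cursor { unsigned int bucket; unsigned int skipped; };

    /* Emit up to `budget` entries, resuming where the last call stopped. */
    static int dump(struct cursor *cur, int budget)
    {
        int emitted = 0;

        for (; cur->bucket < NBUCKETS; cur->bucket++) {
            unsigned int seen = 0;

            for (struct node *n = table[cur->bucket]; n; n = n->next) {
                if (seen++ < cur->skipped)
                    continue;            /* sent in a previous batch; skip */
                if (emitted == budget)
                    return emitted;      /* stop mid-bucket; cursor remembers */
                emit(n);
                emitted++;
                cur->skipped++;
            }
            cur->skipped = 0;            /* next bucket starts at its head */
        }
        return emitted;
    }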
/openbmc/linux/drivers/cpuidle/governors/menu.c

    116  unsigned int bucket;    member
    124  int bucket = 0;    in which_bucket() local
    133  bucket = BUCKETS/2;    in which_bucket()
    136  return bucket;    in which_bucket()
    138  return bucket + 1;    in which_bucket()
    140  return bucket + 2;    in which_bucket()
    142  return bucket + 3;    in which_bucket()
    144  return bucket + 4;    in which_bucket()
    145  return bucket + 5;    in which_bucket()
    298  data->correction_factor[data->bucket],    in menu_select()
    [all …]
/openbmc/linux/kernel/dma/debug.c

    266  __releases(&bucket->lock)    in put_hash_bucket()
    592  struct hash_bucket *bucket;    in add_dma_entry() local
    597  hash_bucket_add(bucket, entry);    in add_dma_entry()
    598  put_hash_bucket(bucket, flags);    in add_dma_entry()
    966  struct hash_bucket *bucket;    in check_unmap() local
    1056 put_hash_bucket(bucket, flags);    in check_unmap()
    1099 struct hash_bucket *bucket;    in check_sync() local
    1166 put_hash_bucket(bucket, flags);    in check_sync()
    1251 struct hash_bucket *bucket;    in debug_dma_mapping_error() local
    1281 put_hash_bucket(bucket, flags);    in debug_dma_mapping_error()
    [all …]
/openbmc/linux/fs/dlm/debug_fs.c

    417  unsigned bucket;    member
    487  bucket = n >> 32;    in table_seq_start()
    509  tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;    in table_seq_start()
    518  ri->bucket = bucket;    in table_seq_start()
    534  bucket++;    in table_seq_start()
    541  tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;    in table_seq_start()
    549  ri->bucket = bucket;    in table_seq_start()
    566  unsigned bucket;    in table_seq_next() local
    569  bucket = n >> 32;    in table_seq_next()
    599  bucket++;    in table_seq_next()
    [all …]
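table_seq_start()/table_seq_next() pack the whole iterator state into the single 64-bit seq_file position: the bucket index lives in the high 32 bits and the offset within the bucket in the low 32 (the rxrpc entry below uses the same trick with a smaller shift). A sketch of the encode/decode pair:

    #include <stdint.h>

    /* seq_file hands back one 64-bit position; split it into (bucket, offset). */
    static inline uint64_t pos_encode(uint32_t bucket, uint32_t n)
    {
        return ((uint64_t)bucket << 32) | n;
    }

    static inline void pos_decode(uint64_t pos, uint32_t *bucket, uint32_t *n)
    {
        *bucket = pos >> 32;        /* as in: bucket = n >> 32 */
        *n = (uint32_t)pos;
    }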
/openbmc/linux/Documentation/userspace-api/media/v4l/metafmt-vsp1-hgt.rst

    28   The Saturation position **n** (0 - 31) of the bucket in the matrix is
    33   The Hue position **m** (0 - 5) of the bucket in the matrix depends on
    101  - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
    103  - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
    107  - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
    109  - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
    113  - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
    117  - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
    121  - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
    125  - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
    [all …]
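Reading the table fragment above, the 6x32 bucket matrix appears to be laid out hue-major, one 32-bit word per bucket, so the word index of bucket (m, n) would be m * 32 + n. A hedged accessor under that assumption (hist and hgt_bucket are hypothetical names, not part of the V4L2 API):

    #include <stdint.h>

    /* hist points at the first bucket word (m=0, n=0) of the HGT payload. */
    static inline uint32_t hgt_bucket(const uint32_t *hist,
                                      unsigned int m, unsigned int n)
    {
        /* m: hue position 0-5, n: saturation position 0-31, hue-major order,
         * byte offset 4 * (m * 32 + n) from the start of the bucket area */
        return hist[m * 32u + n];
    }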
/openbmc/linux/fs/ocfs2/xattr.c

    324  if (bucket) {    in ocfs2_xattr_bucket_new()
    329  return bucket;    in ocfs2_xattr_bucket_new()
    344  if (bucket) {    in ocfs2_xattr_bucket_free()
    347  kfree(bucket);    in ocfs2_xattr_bucket_free()
    363  bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,    in ocfs2_init_xattr_bucket()
    396  bucket->bu_blocks, bucket->bu_bhs, 0,    in ocfs2_read_xattr_bucket()
    440  bucket->bu_bhs, bucket->bu_blocks,    in ocfs2_xattr_bucket_journal_dirty()
    3981 if (!bucket) {    in ocfs2_iterate_xattr_buckets()
    5570 xs->bucket);    in ocfs2_xattr_set_entry_bucket()
    5814 bucket = xbs->bucket;    in ocfs2_prepare_refcount_xattr()
    [all …]
/openbmc/linux/drivers/md/bcache/alloc.c

    76   uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)    in bch_inc_gen()
    89   struct bucket *b;    in bch_rescale_priorities()
    181  struct bucket *b;    in invalidate_buckets_lru()
    218  struct bucket *b;    in invalidate_buckets_fifo()
    241  struct bucket *b;    in invalidate_buckets_random()
    307  if (fifo_push(&ca->free[RESERVE_PRIO], bucket))    in bch_allocator_push()
    311  if (fifo_push(&ca->free[i], bucket))    in bch_allocator_push()
    330  long bucket;    in bch_allocator_thread() local
    332  if (!fifo_pop(&ca->free_inc, bucket))    in bch_allocator_thread()
    338  bucket_to_sector(ca->set, bucket),    in bch_allocator_thread()
    [all …]
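The allocator thread moves bucket indices between fixed-size FIFOs (free_inc feeding the free lists); fifo_push() failing when full is what drives the back-pressure in bch_allocator_thread(). A minimal ring-buffer FIFO of bucket numbers in that spirit (a sketch, not bcache's fifo macros):

    #include <stdbool.h>
    #include <stddef.h>

    #define FIFO_SIZE 64             /* must be a power of two for the mask trick */

    struct fifo {
        long data[FIFO_SIZE];
        size_t front, back;          /* back - front == number of queued entries */
    };

    static bool fifo_push(struct fifo *f, long bucket)
    {
        if (f->back - f->front == FIFO_SIZE)
            return false;            /* full: caller must wait, as the allocator does */
        f->data[f->back++ & (FIFO_SIZE - 1)] = bucket;
        return true;
    }

    static bool fifo_pop(struct fifo *f, long *bucket)
    {
        if (f->back == f->front)
            return false;            /* empty */
        *bucket = f->data[f->front++ & (FIFO_SIZE - 1)];
        return true;
    }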
/openbmc/linux/drivers/md/dm-clone-target.c

    588  bucket = clone->ht + i;    in hash_table_init()
    883  bucket_lock_irq(bucket);    in hydrate_bio_region()
    889  bucket_unlock_irq(bucket);    in hydrate_bio_region()
    895  bucket_unlock_irq(bucket);    in hydrate_bio_region()
    904  bucket_unlock_irq(bucket);    in hydrate_bio_region()
    909  bucket_lock_irq(bucket);    in hydrate_bio_region()
    913  bucket_unlock_irq(bucket);    in hydrate_bio_region()
    923  bucket_unlock_irq(bucket);    in hydrate_bio_region()
    935  bucket_unlock_irq(bucket);    in hydrate_bio_region()
    949  bucket_unlock_irq(bucket);    in hydrate_bio_region()
    [all …]
/openbmc/qemu/qobject/qdict.c

    92   const char *key, unsigned int bucket)    in qdict_find() argument
    96   QLIST_FOREACH(entry, &qdict->table[bucket], next)    in qdict_find()
    117  unsigned int bucket;    in qdict_put_obj() local
    120  bucket = tdb_hash(key) % QDICT_BUCKET_MAX;    in qdict_put_obj()
    121  entry = qdict_find(qdict, key, bucket);    in qdict_put_obj()
    129  QLIST_INSERT_HEAD(&qdict->table[bucket], entry, next);    in qdict_put_obj()
    175  unsigned int bucket = tdb_hash(key) % QDICT_BUCKET_MAX;    in qdict_haskey() local
    176  return (qdict_find(qdict, key, bucket) == NULL ? 0 : 1);    in qdict_haskey()
    334  unsigned int bucket = tdb_hash(entry->key) % QDICT_BUCKET_MAX;    in qdict_next() local
    335  ret = qdict_next_entry(qdict, bucket + 1);    in qdict_next()
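qdict_put_obj() hashes the key once, reuses the bucket index for both the lookup and the insert, and replaces the value in place when the key already exists; qdict_haskey() is the same find with a NULL test. A self-contained sketch of that put path (a simple hash stands in for tdb_hash, plain pointers for QLIST):

    #include <stdlib.h>
    #include <string.h>

    #define BUCKET_MAX 512

    struct entry { char *key; void *value; struct entry *next; };

    static struct entry *table[BUCKET_MAX];

    static unsigned int hash_str(const char *s)  /* stand-in for tdb_hash() */
    {
        unsigned int h = 0;
        while (*s)
            h = h * 31 + (unsigned char)*s++;
        return h;
    }

    static struct entry *find(const char *key, unsigned int bucket)
    {
        for (struct entry *e = table[bucket]; e; e = e->next)
            if (!strcmp(e->key, key))
                return e;
        return NULL;
    }

    static void put(const char *key, void *value)
    {
        unsigned int bucket = hash_str(key) % BUCKET_MAX;
        struct entry *e = find(key, bucket);  /* hash computed once, bucket reused */

        if (e) {
            e->value = value;                 /* existing key: replace in place */
            return;
        }
        e = malloc(sizeof(*e));
        e->key = strdup(key);
        e->value = value;
        e->next = table[bucket];              /* new key: insert at bucket head */
        table[bucket] = e;
    }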
/openbmc/linux/net/rxrpc/proc.c

    245  unsigned int bucket, n;    in rxrpc_peer_seq_start() local
    255  bucket = *_pos >> shift;    in rxrpc_peer_seq_start()
    257  if (bucket >= HASH_SIZE(rxnet->peer_hash)) {    in rxrpc_peer_seq_start()
    262  if (bucket == 0)    in rxrpc_peer_seq_start()
    271  bucket++;    in rxrpc_peer_seq_start()
    273  *_pos = (bucket << shift) | n;    in rxrpc_peer_seq_start()
    280  unsigned int bucket, n;    in rxrpc_peer_seq_next() local
    287  bucket = *_pos >> shift;    in rxrpc_peer_seq_next()
    294  bucket++;    in rxrpc_peer_seq_next()
    296  *_pos = (bucket << shift) | n;    in rxrpc_peer_seq_next()
    [all …]
/openbmc/linux/Documentation/networking/nexthop-group-resilient.rst

    49   to choose a hash table bucket, then reads the next hop that this bucket
    92   through a bucket, this timer is updated to current jiffies value. One
    104  upkeep changes the next hop that the bucket references to one of the
    135  - Single-bucket notifications of the type
    145  hop associated with the bucket was removed, and the bucket really must be
    150  bucket should be migrated, but the HW discovers that the bucket has in fact
    153  A second way for the HW to report that a bucket is busy is through the
    212  ``NHA_RES_BUCKET_INDEX``  Index of bucket in the resilient table.
    255  Changing next-hop weights leads to change in bucket allocation::
    261  # ip nexthop bucket show id 10
    [all …]
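For context on the `ip nexthop bucket show id 10` line above: a resilient group is created with an explicit bucket count, after which the per-bucket next-hop assignment can be inspected. The commands below are illustrative only (the nexthop IDs, device, and addresses are made up; the `type resilient buckets` syntax is the one this document describes):

    # ip nexthop add id 1 via 192.0.2.2 dev eth0
    # ip nexthop add id 2 via 192.0.2.3 dev eth0
    # ip nexthop add id 10 group 1/2 type resilient buckets 8
    # ip nexthop bucket show id 10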
/openbmc/linux/include/trace/events/bcache.h

    68   __field(size_t, bucket )
    72   __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    75   TP_printk("bucket %zu", __entry->bucket)
    267  __field(size_t, bucket )
    273  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    279  __entry->bucket, __entry->block, __entry->keys)
    370  __field(size_t, bucket )
    429  TP_PROTO(struct cache *ca, size_t bucket),
    430  TP_ARGS(ca, bucket),
    450  TP_PROTO(struct cache *ca, size_t bucket),
    [all …]
/openbmc/linux/kernel/bpf/stackmap.c

    233  bucket = READ_ONCE(smap->buckets[id]);    in __bpf_get_stackid()
    235  hash_matches = bucket && bucket->hash == hash;    in __bpf_get_stackid()
    251  if (hash_matches && bucket->nr == trace_nr &&    in __bpf_get_stackid()
    261  if (hash_matches && bucket->nr == trace_nr &&    in __bpf_get_stackid()
    262  memcmp(bucket->data, ips, trace_len) == 0)    in __bpf_get_stackid()
    264  if (bucket && !(flags & BPF_F_REUSE_STACKID))    in __bpf_get_stackid()
    581  struct stack_map_bucket *bucket, *old_bucket;    in bpf_stackmap_copy() local
    587  bucket = xchg(&smap->buckets[id], NULL);    in bpf_stackmap_copy()
    588  if (!bucket)    in bpf_stackmap_copy()
    592  memcpy(value, bucket->data, trace_len);    in bpf_stackmap_copy()
    [all …]
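__bpf_get_stackid() dedups stack traces by comparing the cheap 32-bit hash and the trace length first, and only then memcmp'ing the full trace, since equal hashes do not imply equal stacks. That check in miniature (a sketch of the comparison, not the full map logic):

    #include <stdbool.h>
    #include <string.h>

    struct stack_map_bucket {
        unsigned int hash;
        unsigned int nr;        /* number of entries in data[] */
        unsigned long data[];   /* the stored instruction pointers */
    };

    static bool bucket_matches(const struct stack_map_bucket *bucket,
                               const unsigned long *ips, unsigned int trace_nr,
                               unsigned int hash)
    {
        /* Cheap rejection first: hash and length must match... */
        if (!bucket || bucket->hash != hash || bucket->nr != trace_nr)
            return false;
        /* ...then confirm byte for byte, because hashes can collide. */
        return memcmp(bucket->data, ips, trace_nr * sizeof(ips[0])) == 0;
    }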
/openbmc/linux/lib/stackdepot.c

    343  static inline struct stack_record *find_stack(struct stack_record *bucket,    in find_stack() argument
    349  for (found = bucket; found; found = found->next) {    in find_stack()
    362  struct stack_record *found = NULL, **bucket;    in __stack_depot_save() local
    383  bucket = &stack_table[hash & stack_hash_mask];    in __stack_depot_save()
    390  found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);    in __stack_depot_save()
    417  found = find_stack(*bucket, entries, nr_entries, hash);    in __stack_depot_save()
    423  new->next = *bucket;    in __stack_depot_save()
    428  smp_store_release(bucket, new);    in __stack_depot_save()
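__stack_depot_save() reads the bucket head with smp_load_acquire() so the lockless fast path sees fully initialized records, and publishes new records with smp_store_release(). A C11-atomics sketch of the same publish/consume idea (single bucket, and the insert side is assumed serialized, as the real code does with a lock):

    #include <stdatomic.h>
    #include <stddef.h>

    struct record {
        unsigned int hash;
        struct record *next;    /* chain within one bucket */
    };

    static _Atomic(struct record *) bucket;

    static struct record *find(unsigned int hash)
    {
        /* Acquire pairs with the release in publish(): if we see the new
         * head, we also see the fields written before it was published. */
        for (struct record *r = atomic_load_explicit(&bucket, memory_order_acquire);
             r; r = r->next)
            if (r->hash == hash)
                return r;
        return NULL;
    }

    static void publish(struct record *new_rec)
    {
        /* Callers must be serialized (the kernel holds a lock here);
         * readers need no lock thanks to the release store. */
        new_rec->next = atomic_load_explicit(&bucket, memory_order_relaxed);
        atomic_store_explicit(&bucket, new_rec, memory_order_release);
    }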
/openbmc/linux/arch/sparc/kernel/irq_64.c

    207  struct ino_bucket bucket;    member
    258  struct ino_bucket *bucket;    in cookie_exists() local
    270  irq = bucket->__irq;    in cookie_exists()
    279  struct ino_bucket *bucket;    in sysino_exists() local
    282  bucket = &ivector_table[sysino];    in sysino_exists()
    616  struct ino_bucket *bucket;    in build_irq() local
    623  bucket = &ivector_table[ino];    in build_irq()
    688  ihd->bucket.__irq = irq;    in cookie_assign()
    689  cookie = ~__pa(&ihd->bucket);    in cookie_assign()
    739  struct ino_bucket *bucket;    in sysino_set_bucket() local
    [all …]