Lines matching refs:htab — identifier reference search over the Linux kernel's BPF hash table implementation (kernel/bpf/hashtab.c). Numbers below are line numbers in that file; each hit is annotated with its enclosing function and the symbol's role (argument, local, member).
131 static inline bool htab_is_prealloc(const struct bpf_htab *htab) in htab_is_prealloc() argument
133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
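htab_is_prealloc() inverts BPF_F_NO_PREALLOC: hash maps preallocate all elements by default, and the flag opts out. A minimal user-space sketch (libbpf's bpf_map_create(); map name and key/value sizes are illustrative) creating a map for which this predicate would return false:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Sketch: hash map with preallocation disabled. Returns a map fd on
 * success, a negative value on failure. */
int create_no_prealloc_map(void)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);

        return bpf_map_create(BPF_MAP_TYPE_HASH, "demo_htab",
                              sizeof(__u32), sizeof(__u64),
                              1024 /* max_entries */, &opts);
}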
136 static void htab_init_buckets(struct bpf_htab *htab) in htab_init_buckets() argument
140 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
142 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
143 lockdep_set_class(&htab->buckets[i].raw_lock, in htab_init_buckets()
144 &htab->lockdep_key); in htab_init_buckets()
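htab_init_buckets() seeds each bucket's hlist_nulls head with the bucket index as its "nulls" value and gives all bucket spinlocks one lockdep class keyed to this map. A rough sketch of the nulls encoding (the kernel uses the NULLS_MARKER() macro; names here are illustrative):

/* The list terminator is an odd "pointer" that encodes the bucket
 * index instead of being NULL. A lockless reader that reaches the end
 * of a chain checks the terminator: if it names a different bucket,
 * an element it followed was recycled into another chain mid-walk,
 * and the lookup must be retried (see lookup_nulls_elem_raw()). */
static inline unsigned long demo_make_nulls(unsigned int bucket)
{
        return ((unsigned long)bucket << 1) | 1UL;
}

static inline int demo_is_nulls(unsigned long ptr)
{
        return ptr & 1UL;       /* real pointers are at least 2-byte aligned */
}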
149 static inline int htab_lock_bucket(const struct bpf_htab *htab, in htab_lock_bucket() argument
155 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_lock_bucket()
159 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { in htab_lock_bucket()
160 __this_cpu_dec(*(htab->map_locked[hash])); in htab_lock_bucket()
172 static inline void htab_unlock_bucket(const struct bpf_htab *htab, in htab_unlock_bucket() argument
176 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_unlock_bucket()
178 __this_cpu_dec(*(htab->map_locked[hash])); in htab_unlock_bucket()
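htab_lock_bucket() pairs the per-bucket raw spinlock with a per-CPU map_locked counter: if the counter is already nonzero on this CPU, a tracing program has re-entered the map on the same hashed lock slot, and the operation bails out with -EBUSY instead of deadlocking. A simplified user-space analogue, with a per-thread counter standing in for the per-CPU one:

#include <errno.h>
#include <pthread.h>

static _Thread_local int bucket_lock_depth;
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_lock_bucket(void)
{
        if (++bucket_lock_depth != 1) {
                /* re-entered on this thread: back out, report busy */
                --bucket_lock_depth;
                return -EBUSY;
        }
        pthread_mutex_lock(&bucket_lock);
        return 0;
}

static void demo_unlock_bucket(void)
{
        pthread_mutex_unlock(&bucket_lock);
        --bucket_lock_depth;
}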
185 static bool htab_is_lru(const struct bpf_htab *htab) in htab_is_lru() argument
187 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
188 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
191 static bool htab_is_percpu(const struct bpf_htab *htab) in htab_is_percpu() argument
193 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
194 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
213 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) in get_htab_elem() argument
215 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
218 static bool htab_has_extra_elems(struct bpf_htab *htab) in htab_has_extra_elems() argument
220 return !htab_is_percpu(htab) && !htab_is_lru(htab); in htab_has_extra_elems()
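get_htab_elem() indexes one flat allocation holding max_entries fixed-size elements; the (u64) cast keeps i * elem_size from overflowing 32-bit arithmetic on large maps. The layout arithmetic, with illustrative types:

#include <stdint.h>

struct demo_elem { uint32_t hash; char key_and_value[]; };

/* element i starts i * elem_size bytes into the flat buffer */
static struct demo_elem *demo_get_elem(char *elems, uint32_t elem_size, int i)
{
        return (struct demo_elem *)(elems + (uint64_t)i * elem_size);
}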
223 static void htab_free_prealloced_timers(struct bpf_htab *htab) in htab_free_prealloced_timers() argument
225 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers()
228 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_prealloced_timers()
230 if (htab_has_extra_elems(htab)) in htab_free_prealloced_timers()
236 elem = get_htab_elem(htab, i); in htab_free_prealloced_timers()
237 bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers()
242 static void htab_free_prealloced_fields(struct bpf_htab *htab) in htab_free_prealloced_fields() argument
244 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_fields()
247 if (IS_ERR_OR_NULL(htab->map.record)) in htab_free_prealloced_fields()
249 if (htab_has_extra_elems(htab)) in htab_free_prealloced_fields()
254 elem = get_htab_elem(htab, i); in htab_free_prealloced_fields()
255 if (htab_is_percpu(htab)) { in htab_free_prealloced_fields()
256 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in htab_free_prealloced_fields()
260 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in htab_free_prealloced_fields()
264 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_fields()
271 static void htab_free_elems(struct bpf_htab *htab) in htab_free_elems() argument
275 if (!htab_is_percpu(htab)) in htab_free_elems()
278 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
281 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), in htab_free_elems()
282 htab->map.key_size); in htab_free_elems()
287 bpf_map_area_free(htab->elems); in htab_free_elems()
301 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, in prealloc_lru_pop() argument
304 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
308 bpf_map_inc_elem_count(&htab->map); in prealloc_lru_pop()
310 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
317 static int prealloc_init(struct bpf_htab *htab) in prealloc_init() argument
319 u32 num_entries = htab->map.max_entries; in prealloc_init()
322 if (htab_has_extra_elems(htab)) in prealloc_init()
325 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
326 htab->map.numa_node); in prealloc_init()
327 if (!htab->elems) in prealloc_init()
330 if (!htab_is_percpu(htab)) in prealloc_init()
334 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
337 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
341 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
347 if (htab_is_lru(htab)) in prealloc_init()
348 err = bpf_lru_init(&htab->lru, in prealloc_init()
349 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
353 htab); in prealloc_init()
355 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
360 if (htab_is_lru(htab)) in prealloc_init()
361 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
363 htab->elem_size, num_entries); in prealloc_init()
365 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
366 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
367 htab->elem_size, num_entries); in prealloc_init()
372 htab_free_elems(htab); in prealloc_init()
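prealloc_init() allocates the flat element array (plus per-CPU value storage for percpu maps), then hands every element either to the LRU machinery or to a per-CPU freelist. A sketch of the populate step over the flat array; the kernel's pcpu_freelist_populate() does the same walk but also spreads elements across CPUs:

#include <stdint.h>

struct demo_node { struct demo_node *next; };

static void demo_populate(struct demo_node **head, char *elems,
                          uint32_t elem_size, uint32_t num_entries)
{
        uint32_t i;

        for (i = 0; i < num_entries; i++) {
                struct demo_node *node =
                        (struct demo_node *)(elems + (uint64_t)i * elem_size);

                node->next = *head;     /* push onto the free list */
                *head = node;
        }
}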
376 static void prealloc_destroy(struct bpf_htab *htab) in prealloc_destroy() argument
378 htab_free_elems(htab); in prealloc_destroy()
380 if (htab_is_lru(htab)) in prealloc_destroy()
381 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
383 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
386 static int alloc_extra_elems(struct bpf_htab *htab) in alloc_extra_elems() argument
392 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
398 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
405 htab->extra_elems = pptr; in alloc_extra_elems()
481 struct bpf_htab *htab; in htab_map_alloc() local
484 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE); in htab_map_alloc()
485 if (!htab) in htab_map_alloc()
488 lockdep_register_key(&htab->lockdep_key); in htab_map_alloc()
490 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
497 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
499 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
500 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
508 if (htab->map.max_entries > 1UL << 31) in htab_map_alloc()
511 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
513 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
514 round_up(htab->map.key_size, 8); in htab_map_alloc()
516 htab->elem_size += sizeof(void *); in htab_map_alloc()
518 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
521 if (htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
524 err = bpf_map_init_elem_count(&htab->map); in htab_map_alloc()
529 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
531 htab->map.numa_node); in htab_map_alloc()
532 if (!htab->buckets) in htab_map_alloc()
536 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, in htab_map_alloc()
540 if (!htab->map_locked[i]) in htab_map_alloc()
544 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
545 htab->hashrnd = 0; in htab_map_alloc()
547 htab->hashrnd = get_random_u32(); in htab_map_alloc()
549 htab_init_buckets(htab); in htab_map_alloc()
566 htab->use_percpu_counter = true; in htab_map_alloc()
568 if (htab->use_percpu_counter) { in htab_map_alloc()
569 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); in htab_map_alloc()
575 err = prealloc_init(htab); in htab_map_alloc()
583 err = alloc_extra_elems(htab); in htab_map_alloc()
588 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); in htab_map_alloc()
592 err = bpf_mem_alloc_init(&htab->pcpu_ma, in htab_map_alloc()
593 round_up(htab->map.value_size, 8), true); in htab_map_alloc()
599 return &htab->map; in htab_map_alloc()
602 prealloc_destroy(htab); in htab_map_alloc()
604 if (htab->use_percpu_counter) in htab_map_alloc()
605 percpu_counter_destroy(&htab->pcount); in htab_map_alloc()
607 free_percpu(htab->map_locked[i]); in htab_map_alloc()
608 bpf_map_area_free(htab->buckets); in htab_map_alloc()
609 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_alloc()
610 bpf_mem_alloc_destroy(&htab->ma); in htab_map_alloc()
612 bpf_map_free_elem_count(&htab->map); in htab_map_alloc()
614 lockdep_unregister_key(&htab->lockdep_key); in htab_map_alloc()
615 bpf_map_area_free(htab); in htab_map_alloc()
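htab_map_alloc() acquires many resources in sequence (lockdep key, element counter, bucket array, per-CPU lock counters, optional percpu counter, preallocated elements or mem-alloc caches), and the failure labels above release them in reverse order of acquisition. The pattern in miniature, with made-up resources:

#include <errno.h>
#include <stdlib.h>

struct demo_map { void *buckets; void *elems; };

int demo_map_alloc(struct demo_map *m, size_t n_buckets, size_t n_elems)
{
        int err = -ENOMEM;

        m->buckets = calloc(n_buckets, sizeof(void *));
        if (!m->buckets)
                return err;

        m->elems = calloc(n_elems, 64 /* illustrative elem_size */);
        if (!m->elems)
                goto free_buckets;      /* unwind in reverse order */

        return 0;

free_buckets:
        free(m->buckets);
        m->buckets = NULL;
        return err;
}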
626 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) in __select_bucket() argument
628 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
631 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) in select_bucket() argument
633 return &__select_bucket(htab, hash)->head; in select_bucket()
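Because htab_map_alloc() rounded n_buckets up to a power of two, __select_bucket() reduces the hash with a mask rather than a modulo:

#include <stdint.h>

/* hash & (n - 1) == hash % n whenever n is a power of two */
static inline uint32_t demo_bucket_index(uint32_t hash, uint32_t n_buckets)
{
        return hash & (n_buckets - 1);
}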
679 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem() local
689 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
691 head = select_bucket(htab, hash); in __htab_map_lookup_elem()
693 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
783 static void check_and_free_fields(struct bpf_htab *htab, in check_and_free_fields() argument
786 if (htab_is_percpu(htab)) { in check_and_free_fields()
787 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in check_and_free_fields()
791 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in check_and_free_fields()
793 void *map_value = elem->key + round_up(htab->map.key_size, 8); in check_and_free_fields()
795 bpf_obj_free_fields(htab->map.record, map_value); in check_and_free_fields()
804 struct bpf_htab *htab = arg; in htab_lru_map_delete_node() local
813 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
816 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); in htab_lru_map_delete_node()
823 check_and_free_fields(htab, l); in htab_lru_map_delete_node()
824 bpf_map_dec_elem_count(&htab->map); in htab_lru_map_delete_node()
828 htab_unlock_bucket(htab, b, tgt_l->hash, flags); in htab_lru_map_delete_node()
836 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key() local
849 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
851 head = select_bucket(htab, hash); in htab_map_get_next_key()
854 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
870 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
875 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
876 head = select_bucket(htab, i); in htab_map_get_next_key()
892 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) in htab_elem_free() argument
894 check_and_free_fields(htab, l); in htab_elem_free()
897 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
898 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); in htab_elem_free()
899 bpf_mem_cache_free(&htab->ma, l); in htab_elem_free()
903 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) in htab_put_fd_value() argument
905 struct bpf_map *map = &htab->map; in htab_put_fd_value()
914 static bool is_map_full(struct bpf_htab *htab) in is_map_full() argument
916 if (htab->use_percpu_counter) in is_map_full()
917 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, in is_map_full()
919 return atomic_read(&htab->count) >= htab->map.max_entries; in is_map_full()
922 static void inc_elem_count(struct bpf_htab *htab) in inc_elem_count() argument
924 bpf_map_inc_elem_count(&htab->map); in inc_elem_count()
926 if (htab->use_percpu_counter) in inc_elem_count()
927 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); in inc_elem_count()
929 atomic_inc(&htab->count); in inc_elem_count()
932 static void dec_elem_count(struct bpf_htab *htab) in dec_elem_count() argument
934 bpf_map_dec_elem_count(&htab->map); in dec_elem_count()
936 if (htab->use_percpu_counter) in dec_elem_count()
937 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); in dec_elem_count()
939 atomic_dec(&htab->count); in dec_elem_count()
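Maps large enough to set use_percpu_counter keep the element count in a batched per-CPU counter: updates accumulate locally and fold into the shared total every PERCPU_COUNTER_BATCH changes, trading exactness for far less cache-line contention (is_map_full() compares with matching batch slack). The batching idea in a user-space sketch, per-thread instead of per-CPU:

#include <stdatomic.h>

#define DEMO_BATCH 32

static atomic_long shared_count;
static _Thread_local long local_count;

static void demo_inc_elem_count(void)
{
        /* fold into the shared counter only every DEMO_BATCH updates */
        if (++local_count >= DEMO_BATCH) {
                atomic_fetch_add(&shared_count, local_count);
                local_count = 0;
        }
}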
943 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) in free_htab_elem() argument
945 htab_put_fd_value(htab, l); in free_htab_elem()
947 if (htab_is_prealloc(htab)) { in free_htab_elem()
948 bpf_map_dec_elem_count(&htab->map); in free_htab_elem()
949 check_and_free_fields(htab, l); in free_htab_elem()
950 pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
952 dec_elem_count(htab); in free_htab_elem()
953 htab_elem_free(htab, l); in free_htab_elem()
957 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_copy_value() argument
962 copy_map_value(&htab->map, this_cpu_ptr(pptr), value); in pcpu_copy_value()
964 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
968 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off); in pcpu_copy_value()
974 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_init_value() argument
988 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value); in pcpu_init_value()
990 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu)); in pcpu_init_value()
993 pcpu_copy_value(htab, pptr, value, onallcpus); in pcpu_init_value()
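pcpu_copy_value() writes either only this CPU's value slot (update from BPF program context) or, with onallcpus set, scatters a flat buffer holding one value per CPU into all slots. pcpu_init_value() additionally zeroes the other CPUs' slots on a single-CPU init, so a recycled LRU element cannot expose a previous element's per-CPU data. A user-space analogue with assumed sizes:

#include <string.h>

#define DEMO_CPUS 4     /* illustrative CPU count */
#define DEMO_VSIZE 8    /* round_up(value_size, 8), assumed */

static void demo_pcpu_init(char slots[DEMO_CPUS][DEMO_VSIZE],
                           const char *value, int this_cpu, int onallcpus)
{
        int cpu;

        for (cpu = 0; cpu < DEMO_CPUS; cpu++) {
                if (onallcpus)
                        memcpy(slots[cpu], value + cpu * DEMO_VSIZE, DEMO_VSIZE);
                else if (cpu == this_cpu)
                        memcpy(slots[cpu], value, DEMO_VSIZE);
                else
                        memset(slots[cpu], 0, DEMO_VSIZE);  /* no stale data */
        }
}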
997 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) in fd_htab_map_needs_adjust() argument
999 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
1003 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, in alloc_htab_elem() argument
1008 u32 size = htab->map.value_size; in alloc_htab_elem()
1009 bool prealloc = htab_is_prealloc(htab); in alloc_htab_elem()
1018 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
1024 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
1028 bpf_map_inc_elem_count(&htab->map); in alloc_htab_elem()
1031 if (is_map_full(htab)) in alloc_htab_elem()
1039 inc_elem_count(htab); in alloc_htab_elem()
1040 l_new = bpf_mem_cache_alloc(&htab->ma); in alloc_htab_elem()
1053 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma); in alloc_htab_elem()
1055 bpf_mem_cache_free(&htab->ma, l_new); in alloc_htab_elem()
1063 pcpu_init_value(htab, pptr, value, onallcpus); in alloc_htab_elem()
1067 } else if (fd_htab_map_needs_adjust(htab)) { in alloc_htab_elem()
1071 copy_map_value(&htab->map, in alloc_htab_elem()
1079 dec_elem_count(htab); in alloc_htab_elem()
1083 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, in check_flags() argument
1101 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem() local
1119 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1121 b = __select_bucket(htab, hash); in htab_map_update_elem()
1129 htab->n_buckets); in htab_map_update_elem()
1130 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1146 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_update_elem()
1152 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1170 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, in htab_map_update_elem()
1191 if (htab_is_prealloc(htab)) { in htab_map_update_elem()
1194 check_and_free_fields(htab, l_old); in htab_map_update_elem()
1197 htab_unlock_bucket(htab, b, hash, flags); in htab_map_update_elem()
1201 if (!htab_is_prealloc(htab)) in htab_map_update_elem()
1202 free_htab_elem(htab, l_old); in htab_map_update_elem()
1206 htab_unlock_bucket(htab, b, hash, flags); in htab_map_update_elem()
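check_flags() implements the user-visible update semantics: BPF_NOEXIST fails with -EEXIST if the key is already present, BPF_EXIST fails with -ENOENT if it is not, and BPF_ANY accepts both. Exercised from user space (libbpf; map_fd is assumed to be a BPF_MAP_TYPE_HASH with 4-byte keys and 8-byte values):

#include <bpf/bpf.h>
#include <linux/bpf.h>

int demo_update(int map_fd)
{
        __u32 key = 1;
        __u64 val = 42;

        /* insert only: -EEXIST if the key is already present */
        if (bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST))
                return -1;

        val = 43;
        /* overwrite only: -ENOENT if the key is absent */
        return bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);
}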
1210 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem) in htab_lru_push_free() argument
1212 check_and_free_fields(htab, elem); in htab_lru_push_free()
1213 bpf_map_dec_elem_count(&htab->map); in htab_lru_push_free()
1214 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1220 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem() local
1237 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1239 b = __select_bucket(htab, hash); in htab_lru_map_update_elem()
1247 l_new = prealloc_lru_pop(htab, key, hash); in htab_lru_map_update_elem()
1250 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1253 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_update_elem()
1259 ret = check_flags(htab, l_old, map_flags); in htab_lru_map_update_elem()
1274 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_update_elem()
1278 htab_lru_push_free(htab, l_new); in htab_lru_map_update_elem()
1280 htab_lru_push_free(htab, l_old); in htab_lru_map_update_elem()
1289 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem() local
1306 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1308 b = __select_bucket(htab, hash); in __htab_percpu_map_update_elem()
1311 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_percpu_map_update_elem()
1317 ret = check_flags(htab, l_old, map_flags); in __htab_percpu_map_update_elem()
1323 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_percpu_map_update_elem()
1326 l_new = alloc_htab_elem(htab, key, value, key_size, in __htab_percpu_map_update_elem()
1336 htab_unlock_bucket(htab, b, hash, flags); in __htab_percpu_map_update_elem()
1344 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem() local
1361 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1363 b = __select_bucket(htab, hash); in __htab_lru_percpu_map_update_elem()
1372 l_new = prealloc_lru_pop(htab, key, hash); in __htab_lru_percpu_map_update_elem()
1377 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_lru_percpu_map_update_elem()
1383 ret = check_flags(htab, l_old, map_flags); in __htab_lru_percpu_map_update_elem()
1391 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_lru_percpu_map_update_elem()
1394 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), in __htab_lru_percpu_map_update_elem()
1401 htab_unlock_bucket(htab, b, hash, flags); in __htab_lru_percpu_map_update_elem()
1404 bpf_map_dec_elem_count(&htab->map); in __htab_lru_percpu_map_update_elem()
1405 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1426 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem() local
1439 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1440 b = __select_bucket(htab, hash); in htab_map_delete_elem()
1443 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_delete_elem()
1453 htab_unlock_bucket(htab, b, hash, flags); in htab_map_delete_elem()
1456 free_htab_elem(htab, l); in htab_map_delete_elem()
1462 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem() local
1475 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1476 b = __select_bucket(htab, hash); in htab_lru_map_delete_elem()
1479 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_delete_elem()
1490 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_delete_elem()
1492 htab_lru_push_free(htab, l); in htab_lru_map_delete_elem()
1496 static void delete_all_elements(struct bpf_htab *htab) in delete_all_elements() argument
1504 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1505 struct hlist_nulls_head *head = select_bucket(htab, i); in delete_all_elements()
1511 htab_elem_free(htab, l); in delete_all_elements()
1517 static void htab_free_malloced_timers(struct bpf_htab *htab) in htab_free_malloced_timers() argument
1522 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers()
1523 struct hlist_nulls_head *head = select_bucket(htab, i); in htab_free_malloced_timers()
1529 bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers()
1538 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers() local
1541 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_map_free_timers()
1543 if (!htab_is_prealloc(htab)) in htab_map_free_timers()
1544 htab_free_malloced_timers(htab); in htab_map_free_timers()
1546 htab_free_prealloced_timers(htab); in htab_map_free_timers()
1552 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free() local
1564 if (!htab_is_prealloc(htab)) { in htab_map_free()
1565 delete_all_elements(htab); in htab_map_free()
1567 htab_free_prealloced_fields(htab); in htab_map_free()
1568 prealloc_destroy(htab); in htab_map_free()
1572 free_percpu(htab->extra_elems); in htab_map_free()
1573 bpf_map_area_free(htab->buckets); in htab_map_free()
1574 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_free()
1575 bpf_mem_alloc_destroy(&htab->ma); in htab_map_free()
1576 if (htab->use_percpu_counter) in htab_map_free()
1577 percpu_counter_destroy(&htab->pcount); in htab_map_free()
1579 free_percpu(htab->map_locked[i]); in htab_map_free()
1580 lockdep_unregister_key(&htab->lockdep_key); in htab_map_free()
1581 bpf_map_area_free(htab); in htab_map_free()
1609 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem() local
1619 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1620 b = __select_bucket(htab, hash); in __htab_map_lookup_and_delete_elem()
1623 ret = htab_lock_bucket(htab, b, hash, &bflags); in __htab_map_lookup_and_delete_elem()
1638 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_elem()
1639 check_and_init_map_value(&htab->map, value + off); in __htab_map_lookup_and_delete_elem()
1658 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_elem()
1661 htab_unlock_bucket(htab, b, hash, bflags); in __htab_map_lookup_and_delete_elem()
1664 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_elem()
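This is the kernel side of the BPF_MAP_LOOKUP_AND_DELETE_ELEM command: the value is copied out under the bucket lock, then the element is unlinked and freed (or, for LRU maps, pushed back to the LRU free list after the lock is dropped). From user space:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Atomically read and remove one entry; map_fd is assumed to be a hash
 * map with 4-byte keys and 8-byte values. */
int demo_pop(int map_fd, __u32 key, __u64 *value)
{
        return bpf_map_lookup_and_delete_elem(map_fd, &key, value);
}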
1706 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch() local
1743 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1746 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1747 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1748 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1775 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1779 ret = htab_lock_bucket(htab, b, batch, &flags); in __htab_map_lookup_and_delete_batch()
1802 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1813 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1834 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_batch()
1835 check_and_init_map_value(&htab->map, dst_val + off); in __htab_map_lookup_and_delete_batch()
1876 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1883 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_batch()
1885 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_batch()
1892 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1909 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
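The batch code walks whole buckets under the bucket lock, copies keys and values to user buffers, and resumes from an opaque cursor (the next bucket index); once the cursor passes n_buckets, iteration ends with -ENOENT. A hedged sketch of draining a hash map with libbpf's batch API (key/value types and the batch size of 64 are assumptions):

#include <bpf/bpf.h>
#include <errno.h>

int demo_drain(int map_fd)
{
        __u32 keys[64];
        __u64 vals[64];
        __u32 out_batch, count;
        void *in = NULL;
        LIBBPF_OPTS(bpf_map_batch_opts, opts);
        int err;

        do {
                count = 64;
                err = bpf_map_lookup_and_delete_batch(map_fd, in, &out_batch,
                                                      keys, vals, &count, &opts);
                if (err && errno != ENOENT)
                        return err;     /* real failure */
                /* ... consume "count" key/value pairs here ... */
                in = &out_batch;        /* resume from the returned cursor */
        } while (!err);

        return 0;                       /* ENOENT: whole table visited */
}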
2002 struct bpf_htab *htab; member
2012 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next() local
2021 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
2035 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
2040 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
2041 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
2159 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2188 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem() local
2203 is_percpu = htab_is_percpu(htab); in bpf_for_each_hash_elem()
2211 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2212 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
2243 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_mem_usage() local
2244 u32 value_size = round_up(htab->map.value_size, 8); in htab_map_mem_usage()
2245 bool prealloc = htab_is_prealloc(htab); in htab_map_mem_usage()
2246 bool percpu = htab_is_percpu(htab); in htab_map_mem_usage()
2247 bool lru = htab_is_lru(htab); in htab_map_mem_usage()
2251 usage += sizeof(struct bucket) * htab->n_buckets; in htab_map_mem_usage()
2255 if (htab_has_extra_elems(htab)) in htab_map_mem_usage()
2258 usage += htab->elem_size * num_entries; in htab_map_mem_usage()
2267 num_entries = htab->use_percpu_counter ? in htab_map_mem_usage()
2268 percpu_counter_sum(&htab->pcount) : in htab_map_mem_usage()
2269 atomic_read(&htab->count); in htab_map_mem_usage()
2270 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries; in htab_map_mem_usage()
2296 BATCH_OPS(htab),
2411 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update() local
2415 if (htab_is_lru(htab)) in bpf_percpu_hash_update()
2504 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free() local
2510 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2511 head = select_bucket(htab, i); in fd_htab_map_free()
2628 BATCH_OPS(htab),