Lines matching refs:smap — kernel/bpf/bpf_local_storage.c (the leading number on each entry is the line in that file)

21 select_bucket(struct bpf_local_storage_map *smap,
24 return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
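The fragment at lines 21-24 is the whole story of bucket selection: the element's own address is hashed down to bucket_log bits, so it always indexes a power-of-two bucket array. Below is a minimal userspace sketch of that idea; hash_ptr() is rewritten here as the multiply-shift hash that the kernel's hash_64() uses on 64-bit (GOLDEN_RATIO_64 is the real kernel constant, everything else is a stand-in):

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull	/* same constant the kernel uses */

static unsigned int hash_ptr(const void *ptr, unsigned int bits)
{
	/* multiply-shift hash: keep the top 'bits' bits of the product */
	return (unsigned int)(((uint64_t)(uintptr_t)ptr * GOLDEN_RATIO_64) >> (64 - bits));
}

int main(void)
{
	int dummy_elems[4];
	unsigned int bucket_log = 4;		/* 2^4 = 16 buckets */

	for (int i = 0; i < 4; i++)
		printf("elem %d -> bucket %u\n", i,
		       hash_ptr(&dummy_elems[i], bucket_log));
	return 0;
}

Hashing the element's address (rather than a key) fits this map type: lookups go through the owner, and the buckets only have to find the element again for map-side traversal and unlink.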
27 static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
29 struct bpf_map *map = &smap->map;
34 return map->ops->map_local_storage_charge(smap, owner, size);
37 static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
40 struct bpf_map *map = &smap->map;
43 map->ops->map_local_storage_uncharge(smap, owner, size);
47 owner_storage(struct bpf_local_storage_map *smap, void *owner)
49 struct bpf_map *map = &smap->map;
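mem_charge(), mem_uncharge() and owner_storage() (lines 27-49) never touch the owner directly; they bounce through the map's ops table so each storage flavor can account against its own owner type. A compile-and-run sketch of that delegation pattern, with toy struct names standing in for the kernel's bpf_map/bpf_local_storage_map:

#include <stddef.h>
#include <stdio.h>

struct owner { size_t charged; };		/* toy owner with a usage counter */

struct storage_ops {
	int  (*charge)(struct owner *owner, size_t size);
	void (*uncharge)(struct owner *owner, size_t size);
};

struct storage_map { const struct storage_ops *ops; };

static int mem_charge(struct storage_map *map, struct owner *owner, size_t size)
{
	return map->ops->charge(owner, size);	/* per-type policy decides */
}

static void mem_uncharge(struct storage_map *map, struct owner *owner, size_t size)
{
	map->ops->uncharge(owner, size);
}

/* one concrete "map type": charge against a simple counter */
static int toy_charge(struct owner *o, size_t size) { o->charged += size; return 0; }
static void toy_uncharge(struct owner *o, size_t size) { o->charged -= size; }

static const struct storage_ops toy_ops = { toy_charge, toy_uncharge };

int main(void)
{
	struct storage_map map = { &toy_ops };
	struct owner o = { 0 };

	mem_charge(&map, &o, 64);
	printf("charged: %zu\n", o.charged);
	mem_uncharge(&map, &o, 64);
	return 0;
}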
75 bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
80 if (charge_mem && mem_charge(smap, owner, smap->elem_size))
83 if (smap->bpf_ma) {
85 selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
95 memset(SDATA(selem)->data, 0, smap->map.value_size);
97 selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
103 copy_map_value(&smap->map, SDATA(selem)->data, value);
109 mem_uncharge(smap, owner, smap->elem_size);
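The bpf_selem_alloc() lines (75-109) show a strict ordering: charge the owner first, then allocate (from the BPF memory allocator or plain kzalloc), copy the value in, and uncharge on any failure so the accounting cannot leak. A simplified userspace rendering of that order, with one allocator and toy accounting:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct elem {
	size_t size;
	char   data[];				/* payload, like SDATA(selem)->data */
};

static size_t charged;				/* toy accounting counter */

static int mem_charge(size_t size)    { charged += size; return 0; }
static void mem_uncharge(size_t size) { charged -= size; }

/* charge -> allocate -> copy value; undo the charge on failure */
static struct elem *elem_alloc(size_t value_size, const void *value)
{
	struct elem *e;

	if (mem_charge(sizeof(*e) + value_size))
		return NULL;

	e = calloc(1, sizeof(*e) + value_size);	/* zeroed, like kzalloc */
	if (!e) {
		mem_uncharge(sizeof(*e) + value_size);
		return NULL;
	}
	e->size = value_size;
	if (value)
		memcpy(e->data, value, value_size);
	return e;
}

int main(void)
{
	int v = 42;
	struct elem *e = elem_alloc(sizeof(v), &v);

	printf("charged=%zu value=%d\n", charged, *(int *)e->data);
	free(e);
	return 0;
}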
157 struct bpf_local_storage_map *smap,
174 if (smap) {
176 bpf_mem_cache_free(&smap->storage_ma, local_storage);
179 /* smap could be NULL if the selem that triggered
226 struct bpf_local_storage_map *smap,
229 bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
231 if (!smap->bpf_ma) {
244 bpf_mem_cache_free(&smap->selem_ma, selem);
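Lines 226-244 are the mirror image on the free side: bpf_obj_free_fields() drops the value's special fields, then the element goes back to a per-map cache when smap->bpf_ma is set, or down the plain kfree path when it is not. The sketch below keeps only that allocator-choice skeleton; the one-slot "cache" is a toy stand-in for bpf_mem_alloc, and all RCU deferral is omitted:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mem_cache { void *slot; };		/* toy one-element cache */

static void *mem_cache_alloc(struct mem_cache *c, size_t size)
{
	void *p = c->slot;

	if (p) {
		c->slot = NULL;
		return p;			/* reuse a cached element */
	}
	return malloc(size);
}

static void mem_cache_free(struct mem_cache *c, void *p)
{
	if (!c->slot)
		c->slot = p;			/* keep for reuse */
	else
		free(p);
}

struct map {
	bool bpf_ma;				/* which allocator backs elements */
	struct mem_cache selem_cache;
};

static void elem_free(struct map *m, void *selem)
{
	if (m->bpf_ma)
		mem_cache_free(&m->selem_cache, selem);
	else
		free(selem);			/* kfree() path in the kernel */
}

int main(void)
{
	struct map m = { true, { NULL } };
	void *e = mem_cache_alloc(&m.selem_cache, 64);

	elem_free(&m, e);
	printf("cached for reuse: %s\n", m.selem_cache.slot ? "yes" : "no");
	free(m.selem_cache.slot);
	return 0;
}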
250 * The caller must ensure selem->smap is still valid to be
251 * dereferenced for its smap->elem_size and smap->cache_idx.
257 struct bpf_local_storage_map *smap;
261 smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
269 mem_uncharge(smap, owner, smap->elem_size);
274 mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
278 RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
295 if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
297 RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
299 bpf_selem_free(selem, smap, reuse_now);
301 if (rcu_access_pointer(local_storage->smap) == smap)
302 RCU_INIT_POINTER(local_storage->smap, NULL);
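Lines 269-302 show the bookkeeping bpf_selem_unlink_storage_nolock() must keep consistent: uncharge the element, uncharge and detach the whole storage if this was its last element, and invalidate the per-map cache slot if it points at the element being removed. A single-threaded toy version of exactly that bookkeeping (plain pointers instead of RCU, a singly linked list instead of hlist):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_SIZE 16

struct elem { struct elem *next; };

struct local_storage {
	struct elem *elems;			/* one element per map */
	struct elem *cache[CACHE_SIZE];		/* fast-path lookup cache */
};

struct owner {
	struct local_storage *storage;
	size_t charged;
};

static bool unlink_elem(struct owner *o, struct local_storage *ls,
			struct elem *e, size_t elem_size, int cache_idx)
{
	struct elem **pp;
	bool free_storage;

	o->charged -= elem_size;		/* mem_uncharge(smap, owner, smap->elem_size) */

	for (pp = &ls->elems; *pp; pp = &(*pp)->next)
		if (*pp == e) {
			*pp = e->next;
			break;
		}

	free_storage = !ls->elems;		/* was that the last element? */
	if (free_storage) {
		o->charged -= sizeof(*ls);	/* uncharge the storage struct too */
		o->storage = NULL;		/* RCU_INIT_POINTER(*owner_storage(...), NULL) */
	}

	if (ls->cache[cache_idx] == e)
		ls->cache[cache_idx] = NULL;	/* drop the stale cached pointer */

	return free_storage;			/* caller frees ls when true */
}

int main(void)
{
	struct elem e = { NULL };
	struct local_storage ls = { &e, { &e } };
	struct owner o = { &ls, sizeof(ls) + 32 };

	printf("free storage: %s, still charged: %zu\n",
	       unlink_elem(&o, &ls, &e, 32, 0) ? "yes" : "no", o.charged);
	return 0;
}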
314 /* local_storage->smap may be NULL. If it is, get the bpf_ma
337 selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
356 storage_smap = rcu_dereference_check(local_storage->smap,
379 struct bpf_local_storage_map *smap;
384 /* selem has already been unlinked from smap */
387 smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
388 b = select_bucket(smap, selem);
395 void bpf_selem_link_map(struct bpf_local_storage_map *smap,
398 struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
402 RCU_INIT_POINTER(SDATA(selem)->smap, smap);
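bpf_selem_link_map() (lines 395-402) combines the two earlier pieces: pick the bucket from the element's address and splice the element into that bucket's list under the per-bucket lock. A runnable sketch, with a pthread mutex standing in for the kernel's raw_spin_lock and a singly linked list for hlist:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKET_LOG 4
#define NBUCKETS   (1u << BUCKET_LOG)
#define GOLDEN_RATIO_64 0x61C8864680B583EBull

struct elem { struct elem *next; };

struct bucket {
	pthread_mutex_t lock;			/* raw_spin_lock in the kernel */
	struct elem *list;
};

static struct bucket buckets[NBUCKETS];

static struct bucket *select_bucket(struct elem *e)
{
	/* hash the element's address down to BUCKET_LOG bits */
	unsigned int idx = (unsigned int)
		(((uint64_t)(uintptr_t)e * GOLDEN_RATIO_64) >> (64 - BUCKET_LOG));
	return &buckets[idx];
}

static void link_map(struct elem *e)
{
	struct bucket *b = select_bucket(e);

	pthread_mutex_lock(&b->lock);
	e->next = b->list;			/* hlist_add_head() in the kernel */
	b->list = e;
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct elem e;

	for (unsigned int i = 0; i < NBUCKETS; i++)
		pthread_mutex_init(&buckets[i].lock, NULL);
	link_map(&e);
	printf("linked into bucket %ld\n", (long)(select_bucket(&e) - buckets));
	return 0;
}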
420 struct bpf_local_storage_map *smap,
427 sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
429 if (sdata && rcu_access_pointer(sdata->smap) == smap)
435 if (rcu_access_pointer(SDATA(selem)->smap) == smap)
452 rcu_assign_pointer(local_storage->cache[smap->cache_idx],
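The lookup at lines 420-452 is two-level: try the per-map slot in local_storage->cache[smap->cache_idx] and verify the hit still belongs to this map, only then walk the element list, and optionally refresh the cache on a slow-path hit. A single-threaded sketch of that shape (no RCU, toy types):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_SIZE 16

struct map { int id; };				/* identity stand-in for the smap */

struct elem {
	struct elem *next;
	const struct map *map;			/* SDATA(selem)->smap */
};

struct local_storage {
	struct elem *elems;
	struct elem *cache[CACHE_SIZE];
};

static struct elem *lookup(struct local_storage *ls, const struct map *map,
			   int cache_idx, bool cacheit)
{
	struct elem *e = ls->cache[cache_idx];

	if (e && e->map == map)			/* fast path: cached and still ours */
		return e;

	for (e = ls->elems; e; e = e->next)	/* slow path: scan the list */
		if (e->map == map)
			break;

	if (e && cacheit)
		ls->cache[cache_idx] = e;	/* remember for next time */
	return e;
}

int main(void)
{
	struct map m = { 1 };
	struct elem e = { NULL, &m };
	struct local_storage ls = { &e, { NULL } };

	printf("%s\n", lookup(&ls, &m, 0, true) == &e ? "hit" : "miss");
	return 0;
}

Caching the last hit per map index is what lets the common one-map case skip the list walk entirely.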
475 struct bpf_local_storage_map *smap,
483 err = mem_charge(smap, owner, sizeof(*storage));
487 if (smap->bpf_ma) {
489 storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
492 storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
501 RCU_INIT_POINTER(storage->smap, smap);
507 bpf_selem_link_map(smap, first_selem);
510 (struct bpf_local_storage **)owner_storage(smap, owner);
527 /* Note that even first_selem was linked to smap's
541 bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
542 mem_uncharge(smap, owner, sizeof(*storage));
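bpf_local_storage_alloc() (lines 475-542) has to survive two CPUs creating the owner's first storage at once: the new storage is published with a compare-and-swap on the owner's pointer, and the loser frees its copy, releases its charge, and retries against the winner. A C11-atomics sketch of that publish step (-11 mimics -EAGAIN; error handling trimmed):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct local_storage { int dummy; };

struct owner {
	_Atomic(struct local_storage *) storage;
	size_t charged;
};

static int storage_alloc(struct owner *o)
{
	struct local_storage *prev = NULL;
	struct local_storage *s;

	o->charged += sizeof(*s);		/* mem_charge() comes first */
	s = calloc(1, sizeof(*s));
	if (!s) {
		o->charged -= sizeof(*s);
		return -1;
	}

	/* publish: succeeds only if no storage existed yet */
	if (!atomic_compare_exchange_strong(&o->storage, &prev, s)) {
		free(s);			/* lost the race: undo everything */
		o->charged -= sizeof(*s);
		return -11;			/* -EAGAIN: caller retries */
	}
	return 0;
}

int main(void)
{
	struct owner o = { NULL, 0 };

	printf("first alloc: %d, second: %d\n",
	       storage_alloc(&o), storage_alloc(&o));
	free(atomic_load(&o.storage));
	return 0;
}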
552 bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
565 !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
571 local_storage = rcu_dereference_check(*owner_storage(smap, owner),
579 selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
583 err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
585 bpf_selem_free(selem, smap, true);
586 mem_uncharge(smap, owner, smap->elem_size);
599 bpf_local_storage_lookup(local_storage, smap, false);
604 copy_map_value_locked(&smap->map, old_sdata->data,
613 alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
630 old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
636 copy_map_value_locked(&smap->map, old_sdata->data, value,
644 bpf_selem_link_map(smap, selem);
659 mem_uncharge(smap, owner, smap->elem_size);
660 bpf_selem_free(alloc_selem, smap, true);
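Within bpf_local_storage_update(), lines 604 and 636 show the BPF_F_LOCK fast path: when the map value embeds a bpf_spin_lock, an existing element is updated in place by copying the payload under that lock, instead of allocating a replacement element and swapping it in. A sketch of the locked in-place copy, with a pthread mutex standing in for bpf_spin_lock:

#include <pthread.h>
#include <stdio.h>

struct value {
	pthread_mutex_t lock;			/* stand-in for the embedded bpf_spin_lock */
	int payload;
};

static void copy_value_locked(struct value *dst, const struct value *src)
{
	pthread_mutex_lock(&dst->lock);
	dst->payload = src->payload;		/* copy everything except the lock itself */
	pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
	struct value old = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct value upd = { PTHREAD_MUTEX_INITIALIZER, 2 };

	copy_value_locked(&old, &upd);
	printf("payload=%d\n", old.payload);
	return 0;
}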
738 storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
773 struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
774 u64 usage = sizeof(*smap);
777 usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
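The memory-usage accounting at lines 773-777 is just the map struct plus one bucket struct per slot, with the bucket count kept as a log2; individual elements are not counted in the lines shown. The arithmetic, runnable with stand-in struct sizes:

#include <stdio.h>

struct bucket { void *list; unsigned long lock; };	/* stand-in for the kernel bucket */
struct smap   { struct bucket *buckets; unsigned int bucket_log; };

int main(void)
{
	unsigned int bucket_log = 5;		/* 2^5 = 32 buckets */
	unsigned long long usage = sizeof(struct smap);

	usage += sizeof(struct bucket) * (1ULL << bucket_log);
	printf("approx usage: %llu bytes\n", usage);
	return 0;
}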
796 struct bpf_local_storage_map *smap;
801 smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
802 if (!smap)
804 bpf_map_init_from_attr(&smap->map, attr);
809 smap->bucket_log = ilog2(nbuckets);
811 smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
812 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
813 if (!smap->buckets) {
819 INIT_HLIST_HEAD(&smap->buckets[i].list);
820 raw_spin_lock_init(&smap->buckets[i].lock);
823 smap->elem_size = offsetof(struct bpf_local_storage_elem,
830 smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
831 if (smap->bpf_ma) {
832 err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
836 err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
838 bpf_mem_alloc_destroy(&smap->selem_ma);
843 smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
844 return &smap->map;
847 kvfree(smap->buckets);
848 bpf_map_area_free(smap);
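The allocation fragments at lines 796-844 sketch the map's whole setup: allocate the smap, record ilog2 of the bucket count, allocate and initialize each bucket's list head and lock, compute elem_size from the element header plus value size, and optionally set up the BPF memory allocators. A userspace rendering of that sequence, plus a teardown mirroring the kvfree()/bpf_map_area_free() pair at lines 847-848 (stand-in types; ilog2() is reimplemented for powers of two, and the elem_size formula only imitates the offsetof() at line 823):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bucket {
	void *list;				/* INIT_HLIST_HEAD() in the kernel */
	pthread_mutex_t lock;			/* raw_spin_lock_init() in the kernel */
};

struct smap {
	struct bucket *buckets;
	unsigned int bucket_log;
	unsigned int elem_size;
};

static unsigned int ilog2(unsigned int n)	/* n must be a power of two here */
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

static struct smap *map_alloc(unsigned int nbuckets, unsigned int value_size)
{
	struct smap *smap = calloc(1, sizeof(*smap));

	if (!smap)
		return NULL;

	smap->bucket_log = ilog2(nbuckets);
	smap->buckets = calloc(nbuckets, sizeof(*smap->buckets));
	if (!smap->buckets) {
		free(smap);
		return NULL;
	}
	for (unsigned int i = 0; i < nbuckets; i++) {
		smap->buckets[i].list = NULL;
		pthread_mutex_init(&smap->buckets[i].lock, NULL);
	}
	/* element header + value payload, imitating line 823 */
	smap->elem_size = sizeof(void *) + value_size;
	return smap;
}

static void map_free(struct smap *smap)
{
	free(smap->buckets);			/* kvfree(smap->buckets) */
	free(smap);				/* bpf_map_area_free(smap) */
}

int main(void)
{
	struct smap *smap = map_alloc(16, 64);

	printf("bucket_log=%u elem_size=%u\n", smap->bucket_log, smap->elem_size);
	map_free(smap);
	return 0;
}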
858 struct bpf_local_storage_map *smap;
861 smap = (struct bpf_local_storage_map *)map;
862 bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
879 for (i = 0; i < (1U << smap->bucket_log); i++) {
880 b = &smap->buckets[i];
908 * smap->elem_size to do the uncharging in
915 if (smap->bpf_ma) {
916 bpf_mem_alloc_destroy(&smap->selem_ma);
917 bpf_mem_alloc_destroy(&smap->storage_ma);
919 kvfree(smap->buckets);
920 bpf_map_area_free(smap);