// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

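/* The selem's kernel address is hashed to pick one of the smap's buckets,
 * so a given selem always maps to the same bucket and bucket lock.
 */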
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

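/* Memory accounting is delegated to the map-type specific ops when provided
 * (e.g. the sk storage charges the owning socket).  Map types without a
 * charge callback are simply not charged.
 */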
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

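/* An unhashed hlist node means the selem is not linked.  The _lockless
 * variants use the data-race safe check and are only meant for the
 * pre-checks done before taking the corresponding lock and re-checking.
 */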
static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

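/* Allocate a zero-initialized selem and optionally copy 'value' into it.
 * When charge_mem is true, the owner is charged smap->elem_size up front
 * and uncharged again if the allocation fails.
 */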
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	if (smap->bpf_ma) {
		migrate_disable();
		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
		migrate_enable();
		if (selem)
			/* Keep the original bpf_map_kzalloc behavior
			 * from before bpf_mem_cache_alloc was used.
			 *
			 * No need to use zero_map_value.  bpf_selem_free()
			 * only does bpf_mem_cache_free when no other
			 * bpf prog is using the selem.
			 */
			memset(SDATA(selem)->data, 0, smap->map.value_size);
	} else {
		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
					gfp_flags | __GFP_NOWARN);
	}

	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		/* No need to call check_and_init_map_value as memory is zero init */
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	/* If RCU Tasks Trace grace period implies RCU grace period, do
	 * kfree(), else do kfree_rcu().
	 */
	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(local_storage);
	else
		kfree_rcu(local_storage, rcu);
}

static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	bpf_mem_cache_raw_free(local_storage);
}

static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_local_storage_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_local_storage_free_rcu);
}

/* Handle bpf_ma == false */
static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
				     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(local_storage, rcu);
	else
		call_rcu_tasks_trace(&local_storage->rcu,
				     __bpf_local_storage_free_trace_rcu);
}

static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_map *smap,
				   bool bpf_ma, bool reuse_now)
{
	if (!local_storage)
		return;

	if (!bpf_ma) {
		__bpf_local_storage_free(local_storage, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_trace_rcu);
		return;
	}

	if (smap) {
		migrate_disable();
		bpf_mem_cache_free(&smap->storage_ma, local_storage);
		migrate_enable();
	} else {
		/* smap could be NULL if the selem that triggered
		 * this 'local_storage' creation had been long gone.
		 * In this case, directly do call_rcu().
		 */
		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
	}
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(selem);
	else
		kfree_rcu(selem, rcu);
}

/* Handle bpf_ma == false */
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
			     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(selem, rcu);
	else
		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	bpf_mem_cache_raw_free(selem);
}

static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_selem_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_selem_free_rcu);
}

void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now)
{
	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);

	if (!smap->bpf_ma) {
		__bpf_selem_free(selem, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
	} else {
		/* Unlike the vanilla call_rcu() path, bpf_mem_cache_free()
		 * allows the selem to be reused immediately.
		 */
		migrate_disable();
		bpf_mem_cache_free(&smap->selem_ma, selem);
		migrate_enable();
	}
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, bool reuse_now)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now.  local_storage->lock is
		 * still held and will be released by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after releasing local_storage->lock.
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	bpf_selem_free(selem, smap, reuse_now);

	if (rcu_access_pointer(local_storage->smap) == smap)
		RCU_INIT_POINTER(local_storage->smap, NULL);

	return free_local_storage;
}

static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
				 struct bpf_local_storage_map *storage_smap,
				 struct bpf_local_storage_elem *selem)
{

	struct bpf_local_storage_map *selem_smap;

	/* local_storage->smap may be NULL. If it is, get the bpf_ma
	 * from any selem in the local_storage->list. The bpf_ma of all
	 * local_storage and selem should have the same value
	 * for the same map type.
	 *
	 * If the local_storage->list is already empty, the caller will not
	 * care about the bpf_ma value because the caller is not
	 * responsible for freeing the local_storage.
	 */

	if (storage_smap)
		return storage_smap->bpf_ma;

	if (!selem) {
		struct hlist_node *n;

		n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
					  bpf_rcu_lock_held());
		if (!n)
			return false;

		selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
	}
	selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());

	return selem_smap->bpf_ma;
}

static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				     bool reuse_now)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage *local_storage;
	bool bpf_ma, free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from sk */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	storage_smap = rcu_dereference_check(local_storage->smap,
					     bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);

	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, reuse_now);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
}

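/* The _nolock variant: the caller must serialize access to
 * local_storage->list, either by holding local_storage->lock or because
 * the local_storage has not been published to the owner yet.
 */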
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

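/* Unlink the selem from its map bucket under the bucket lock.  The lockless
 * pre-check avoids taking the lock when the selem has already been unlinked.
 */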
static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after successfully unlinked from
	 * the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	bpf_selem_unlink_storage(selem, reuse_now);
}

/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete. Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}

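/* Validate the BPF_EXIST / BPF_NOEXIST update flags against whether an
 * old element was found for this map.
 */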
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	if (smap->bpf_ma) {
		migrate_disable();
		storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
		migrate_enable();
	} else {
		storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
					  gfp_flags | __GFP_NOWARN);
	}

	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	RCU_INIT_POINTER(storage->smap, smap);
	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless what
	 * the running context is, bh, irq...etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and cause other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			bpf_selem_free(selem, smap, true);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	/* A lookup has just been done and concluded that a new selem is
	 * needed.  An unnecessary alloc here is therefore unlikely.
	 */
	alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
	if (!alloc_selem)
		return ERR_PTR(-ENOMEM);

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away. It has just been checked before, so very
		 * unlikely. Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	alloc_selem = NULL;
	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						true, false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (alloc_selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		bpf_selem_free(alloc_selem, smap, true);
	}
	return err ? ERR_PTR(err) : SDATA(selem);
}

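/* Pick the least-used slot in the per-owner cache array for a new map.
 * The usage count is bumped here and dropped again in
 * bpf_local_storage_cache_idx_free() when the map goes away.
 */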
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

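/* Common attribute checks shared by the map-type specific local storages
 * (e.g. sk, task and cgroup storage): no preallocation, an int key with
 * BTF, no max_entries, and a bounded value size.
 */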
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

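/* Unlink and free every selem of this local_storage.  This is called from
 * the owner's destruction path (e.g. bpf_sk_storage_free()), so only
 * bpf_local_storage_map_free() can still be racing on the lists.
 */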
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage_elem *selem;
	bool bpf_ma, free_storage = false;
	struct hlist_node *n;
	unsigned long flags;

	storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);

	/* Neither the bpf_prog nor the bpf_map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		/* If local_storage list has only one element, the
		 * bpf_selem_unlink_storage_nolock() will return true.
		 * Otherwise, it will return false. The current loop iteration
		 * intends to remove all local storage. So the last iteration
		 * of the loop will set free_storage to true.
		 */
		free_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, true);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
}

u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
	u64 usage = sizeof(*smap);

	/* The dynamically allocated selems are not counted currently. */
	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
	return usage;
}

/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 * A deadlock-free allocator is useful for storage whose owner PTR_TO_BTF_ID
 * the bpf prog can easily get hold of in any context, e.g. via
 * bpf_get_current_task_btf.  The task and cgroup storage fall into this case.
 * The bpf_mem_alloc reuses memory immediately.  To be reuse-immediate safe,
 * the owner destruction code path needs to go through a rcu grace period
 * before calling bpf_local_storage_destroy().
 *
 * When bpf_ma == false, the kmalloc and kfree are used.
 */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	int err;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
					 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		err = -ENOMEM;
		goto free_smap;
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = offsetof(struct bpf_local_storage_elem,
				   sdata.data[attr->value_size]);

	smap->bpf_ma = bpf_ma;
	if (bpf_ma) {
		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
		if (err)
			goto free_smap;

		err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
		if (err) {
			bpf_mem_alloc_destroy(&smap->selem_ma);
			goto free_smap;
		}
	}

	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
	return &smap->map;

free_smap:
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}

void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_map_bucket *b;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	unsigned int i;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now. No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, true);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	if (smap->bpf_ma) {
		bpf_mem_alloc_destroy(&smap->selem_ma);
		bpf_mem_alloc_destroy(&smap->storage_ma);
	}
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}