xref: /openbmc/linux/kernel/bpf/bpf_local_storage.c (revision 7e30a8477b0bdd13dfd0b24e4f32b26d22b96e6c)
1450af8d0SKP Singh // SPDX-License-Identifier: GPL-2.0
2450af8d0SKP Singh /* Copyright (c) 2019 Facebook  */
3450af8d0SKP Singh #include <linux/rculist.h>
4450af8d0SKP Singh #include <linux/list.h>
5450af8d0SKP Singh #include <linux/hash.h>
6450af8d0SKP Singh #include <linux/types.h>
7450af8d0SKP Singh #include <linux/spinlock.h>
8450af8d0SKP Singh #include <linux/bpf.h>
9450af8d0SKP Singh #include <linux/btf_ids.h>
10450af8d0SKP Singh #include <linux/bpf_local_storage.h>
11450af8d0SKP Singh #include <net/sock.h>
12450af8d0SKP Singh #include <uapi/linux/sock_diag.h>
13450af8d0SKP Singh #include <uapi/linux/btf.h>
140fe4b381SKP Singh #include <linux/rcupdate.h>
150fe4b381SKP Singh #include <linux/rcupdate_trace.h>
160fe4b381SKP Singh #include <linux/rcupdate_wait.h>
17450af8d0SKP Singh 
18450af8d0SKP Singh #define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
19450af8d0SKP Singh 
20450af8d0SKP Singh static struct bpf_local_storage_map_bucket *
21450af8d0SKP Singh select_bucket(struct bpf_local_storage_map *smap,
22450af8d0SKP Singh 	      struct bpf_local_storage_elem *selem)
23450af8d0SKP Singh {
24450af8d0SKP Singh 	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
25450af8d0SKP Singh }
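/* The bucket is chosen by hashing the selem pointer itself (hash_ptr), so a
 * given selem always maps back to the same bucket of its map, e.g. when
 * bpf_selem_unlink_map() later needs to take the same bucket lock again.
 */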
26450af8d0SKP Singh 
27450af8d0SKP Singh static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
28450af8d0SKP Singh {
29450af8d0SKP Singh 	struct bpf_map *map = &smap->map;
30450af8d0SKP Singh 
31450af8d0SKP Singh 	if (!map->ops->map_local_storage_charge)
32450af8d0SKP Singh 		return 0;
33450af8d0SKP Singh 
34450af8d0SKP Singh 	return map->ops->map_local_storage_charge(smap, owner, size);
35450af8d0SKP Singh }
36450af8d0SKP Singh 
37450af8d0SKP Singh static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
38450af8d0SKP Singh 			 u32 size)
39450af8d0SKP Singh {
40450af8d0SKP Singh 	struct bpf_map *map = &smap->map;
41450af8d0SKP Singh 
42450af8d0SKP Singh 	if (map->ops->map_local_storage_uncharge)
43450af8d0SKP Singh 		map->ops->map_local_storage_uncharge(smap, owner, size);
44450af8d0SKP Singh }
45450af8d0SKP Singh 
46450af8d0SKP Singh static struct bpf_local_storage __rcu **
47450af8d0SKP Singh owner_storage(struct bpf_local_storage_map *smap, void *owner)
48450af8d0SKP Singh {
49450af8d0SKP Singh 	struct bpf_map *map = &smap->map;
50450af8d0SKP Singh 
51450af8d0SKP Singh 	return map->ops->map_owner_storage_ptr(owner);
52450af8d0SKP Singh }
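/* owner_storage() resolves to the address of the owner's storage pointer via
 * the map ops; for an sk storage map, for example, map_owner_storage_ptr()
 * returns &sk->sk_bpf_storage, and the task/inode/cgroup variants return the
 * corresponding pointer embedded in (or reachable from) their owner struct.
 */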
53450af8d0SKP Singh 
540a09a2f9SKumar Kartikeya Dwivedi static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
550a09a2f9SKumar Kartikeya Dwivedi {
560a09a2f9SKumar Kartikeya Dwivedi 	return !hlist_unhashed_lockless(&selem->snode);
570a09a2f9SKumar Kartikeya Dwivedi }
580a09a2f9SKumar Kartikeya Dwivedi 
59450af8d0SKP Singh static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
60450af8d0SKP Singh {
61450af8d0SKP Singh 	return !hlist_unhashed(&selem->snode);
62450af8d0SKP Singh }
63450af8d0SKP Singh 
640a09a2f9SKumar Kartikeya Dwivedi static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
650a09a2f9SKumar Kartikeya Dwivedi {
660a09a2f9SKumar Kartikeya Dwivedi 	return !hlist_unhashed_lockless(&selem->map_node);
670a09a2f9SKumar Kartikeya Dwivedi }
680a09a2f9SKumar Kartikeya Dwivedi 
69450af8d0SKP Singh static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
70450af8d0SKP Singh {
71450af8d0SKP Singh 	return !hlist_unhashed(&selem->map_node);
72450af8d0SKP Singh }
73450af8d0SKP Singh 
74450af8d0SKP Singh struct bpf_local_storage_elem *
75450af8d0SKP Singh bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
76b00fa38aSJoanne Koong 		void *value, bool charge_mem, gfp_t gfp_flags)
77450af8d0SKP Singh {
78450af8d0SKP Singh 	struct bpf_local_storage_elem *selem;
79450af8d0SKP Singh 
80450af8d0SKP Singh 	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
81450af8d0SKP Singh 		return NULL;
82450af8d0SKP Singh 
83e9aae8beSRoman Gushchin 	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
84b00fa38aSJoanne Koong 				gfp_flags | __GFP_NOWARN);
85450af8d0SKP Singh 	if (selem) {
86450af8d0SKP Singh 		if (value)
87836e49e1SXu Kuohai 			copy_map_value(&smap->map, SDATA(selem)->data, value);
889db44fddSKumar Kartikeya Dwivedi 		/* No need to call check_and_init_map_value as memory is zero-initialized */
89450af8d0SKP Singh 		return selem;
90450af8d0SKP Singh 	}
91450af8d0SKP Singh 
92450af8d0SKP Singh 	if (charge_mem)
93450af8d0SKP Singh 		mem_uncharge(smap, owner, smap->elem_size);
94450af8d0SKP Singh 
95450af8d0SKP Singh 	return NULL;
96450af8d0SKP Singh }
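/* A rough sketch, mirroring bpf_local_storage_update() below, of how a new
 * selem is created and attached when the owner has no storage yet:
 *
 *	selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
 *	if (!selem)
 *		return ERR_PTR(-ENOMEM);
 *	err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
 *	if (err) {
 *		bpf_selem_free(selem, smap, true);
 *		mem_uncharge(smap, owner, smap->elem_size);
 *		return ERR_PTR(err);
 *	}
 *	return SDATA(selem);
 */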
97450af8d0SKP Singh 
984cbd23ccSMartin KaFai Lau static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
990fe4b381SKP Singh {
1000fe4b381SKP Singh 	struct bpf_local_storage *local_storage;
1010fe4b381SKP Singh 
1021288aaa2SMartin KaFai Lau 	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
1031288aaa2SMartin KaFai Lau 	kfree(local_storage);
1041288aaa2SMartin KaFai Lau }
1051288aaa2SMartin KaFai Lau 
1061288aaa2SMartin KaFai Lau static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
1071288aaa2SMartin KaFai Lau {
108d39d1445SHou Tao 	/* If RCU Tasks Trace grace period implies RCU grace period, do
109d39d1445SHou Tao 	 * kfree(), else do kfree_rcu().
110d39d1445SHou Tao 	 */
111d39d1445SHou Tao 	if (rcu_trace_implies_rcu_gp())
1121288aaa2SMartin KaFai Lau 		bpf_local_storage_free_rcu(rcu);
113d39d1445SHou Tao 	else
1141288aaa2SMartin KaFai Lau 		call_rcu(rcu, bpf_local_storage_free_rcu);
1150fe4b381SKP Singh }
1160fe4b381SKP Singh 
117*7e30a847SMartin KaFai Lau static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
118*7e30a847SMartin KaFai Lau 				   bool reuse_now)
119*7e30a847SMartin KaFai Lau {
120*7e30a847SMartin KaFai Lau 	if (!reuse_now)
121*7e30a847SMartin KaFai Lau 		call_rcu_tasks_trace(&local_storage->rcu,
122*7e30a847SMartin KaFai Lau 				     bpf_local_storage_free_trace_rcu);
123*7e30a847SMartin KaFai Lau 	else
124*7e30a847SMartin KaFai Lau 		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
125*7e30a847SMartin KaFai Lau }
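/* reuse_now == false means the storage may still be reachable by sleepable
 * (rcu_read_lock_trace protected) programs, so freeing waits for a Tasks
 * Trace RCU grace period and, if that does not imply a regular RCU grace
 * period, chains another call_rcu() (see bpf_local_storage_free_trace_rcu()
 * above).  reuse_now == true only waits for a regular RCU grace period.
 */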
126*7e30a847SMartin KaFai Lau 
127f8ccf30cSMartin KaFai Lau static void bpf_selem_free_rcu(struct rcu_head *rcu)
1280fe4b381SKP Singh {
1290fe4b381SKP Singh 	struct bpf_local_storage_elem *selem;
1300fe4b381SKP Singh 
1310fe4b381SKP Singh 	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
132e768e3c5SKumar Kartikeya Dwivedi 	kfree(selem);
133f8ccf30cSMartin KaFai Lau }
134f8ccf30cSMartin KaFai Lau 
135f8ccf30cSMartin KaFai Lau static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
136f8ccf30cSMartin KaFai Lau {
137f8ccf30cSMartin KaFai Lau 	if (rcu_trace_implies_rcu_gp())
138f8ccf30cSMartin KaFai Lau 		bpf_selem_free_rcu(rcu);
139d39d1445SHou Tao 	else
140f8ccf30cSMartin KaFai Lau 		call_rcu(rcu, bpf_selem_free_rcu);
1410fe4b381SKP Singh }
1420fe4b381SKP Singh 
143c0d63f30SMartin KaFai Lau void bpf_selem_free(struct bpf_local_storage_elem *selem,
144c0d63f30SMartin KaFai Lau 		    struct bpf_local_storage_map *smap,
145c0d63f30SMartin KaFai Lau 		    bool reuse_now)
146c0d63f30SMartin KaFai Lau {
147c0d63f30SMartin KaFai Lau 	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
148c0d63f30SMartin KaFai Lau 	if (!reuse_now)
149c0d63f30SMartin KaFai Lau 		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
150c0d63f30SMartin KaFai Lau 	else
151c0d63f30SMartin KaFai Lau 		call_rcu(&selem->rcu, bpf_selem_free_rcu);
152c0d63f30SMartin KaFai Lau }
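/* bpf_selem_free() follows the same reuse_now convention as
 * bpf_local_storage_free() above.  bpf_obj_free_fields() releases any BTF
 * managed special fields in the value (e.g. kptrs) before the selem memory
 * itself is queued for freeing.
 */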
153c0d63f30SMartin KaFai Lau 
154450af8d0SKP Singh /* local_storage->lock must be held and selem->local_storage == local_storage.
155450af8d0SKP Singh  * The caller must ensure selem->smap is still valid to be
156450af8d0SKP Singh  * dereferenced for its smap->elem_size and smap->cache_idx.
157450af8d0SKP Singh  */
158c83597faSYonghong Song static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
159450af8d0SKP Singh 					    struct bpf_local_storage_elem *selem,
160a47eabf2SMartin KaFai Lau 					    bool uncharge_mem, bool reuse_now)
161450af8d0SKP Singh {
162450af8d0SKP Singh 	struct bpf_local_storage_map *smap;
163450af8d0SKP Singh 	bool free_local_storage;
164450af8d0SKP Singh 	void *owner;
165450af8d0SKP Singh 
1660fe4b381SKP Singh 	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
167450af8d0SKP Singh 	owner = local_storage->owner;
168450af8d0SKP Singh 
169450af8d0SKP Singh 	/* All uncharging on the owner must be done first.
170450af8d0SKP Singh 	 * The owner may be freed once the last selem is unlinked
171450af8d0SKP Singh 	 * from local_storage.
172450af8d0SKP Singh 	 */
173450af8d0SKP Singh 	if (uncharge_mem)
174450af8d0SKP Singh 		mem_uncharge(smap, owner, smap->elem_size);
175450af8d0SKP Singh 
176450af8d0SKP Singh 	free_local_storage = hlist_is_singular_node(&selem->snode,
177450af8d0SKP Singh 						    &local_storage->list);
178450af8d0SKP Singh 	if (free_local_storage) {
179450af8d0SKP Singh 		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
180450af8d0SKP Singh 		local_storage->owner = NULL;
181450af8d0SKP Singh 
182450af8d0SKP Singh 		/* After this RCU_INIT, owner may be freed and cannot be used */
183450af8d0SKP Singh 		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
184450af8d0SKP Singh 
185450af8d0SKP Singh 		/* local_storage is not freed now.  local_storage->lock is
186450af8d0SKP Singh 		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
187450af8d0SKP Singh 		 * will be done by the caller.
188450af8d0SKP Singh 		 *
189450af8d0SKP Singh 		 * Although the unlock will be done under
190c561d110STom Rix 		 * rcu_read_lock(), it is more intuitive to
1910fe4b381SKP Singh 		 * read if the freeing of the storage is done
192450af8d0SKP Singh 		 * after the raw_spin_unlock_irqrestore(&local_storage->lock).
193450af8d0SKP Singh 		 *
194450af8d0SKP Singh 		 * Hence, a "bool free_local_storage" is returned
1950fe4b381SKP Singh 		 * to the caller, which then frees the storage after
1960fe4b381SKP Singh 		 * all the RCU grace periods have expired.
197450af8d0SKP Singh 		 */
198450af8d0SKP Singh 	}
199450af8d0SKP Singh 	hlist_del_init_rcu(&selem->snode);
200450af8d0SKP Singh 	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
201450af8d0SKP Singh 	    SDATA(selem))
202450af8d0SKP Singh 		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
203450af8d0SKP Singh 
204c0d63f30SMartin KaFai Lau 	bpf_selem_free(selem, smap, reuse_now);
205dcf456c9SKP Singh 
206fc6652aaSMartin KaFai Lau 	if (rcu_access_pointer(local_storage->smap) == smap)
207fc6652aaSMartin KaFai Lau 		RCU_INIT_POINTER(local_storage->smap, NULL);
208fc6652aaSMartin KaFai Lau 
209450af8d0SKP Singh 	return free_local_storage;
210450af8d0SKP Singh }
211450af8d0SKP Singh 
212121f31f3SMartin KaFai Lau static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
213a47eabf2SMartin KaFai Lau 				     bool reuse_now)
214450af8d0SKP Singh {
215450af8d0SKP Singh 	struct bpf_local_storage *local_storage;
216450af8d0SKP Singh 	bool free_local_storage = false;
217a10787e6SSong Liu 	unsigned long flags;
218450af8d0SKP Singh 
2190a09a2f9SKumar Kartikeya Dwivedi 	if (unlikely(!selem_linked_to_storage_lockless(selem)))
220450af8d0SKP Singh 		/* selem has already been unlinked from its local_storage */
221450af8d0SKP Singh 		return;
222450af8d0SKP Singh 
2230fe4b381SKP Singh 	local_storage = rcu_dereference_check(selem->local_storage,
2240fe4b381SKP Singh 					      bpf_rcu_lock_held());
225a10787e6SSong Liu 	raw_spin_lock_irqsave(&local_storage->lock, flags);
226450af8d0SKP Singh 	if (likely(selem_linked_to_storage(selem)))
227450af8d0SKP Singh 		free_local_storage = bpf_selem_unlink_storage_nolock(
228a47eabf2SMartin KaFai Lau 			local_storage, selem, true, reuse_now);
229a10787e6SSong Liu 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
230450af8d0SKP Singh 
231*7e30a847SMartin KaFai Lau 	if (free_local_storage)
232*7e30a847SMartin KaFai Lau 		bpf_local_storage_free(local_storage, reuse_now);
233450af8d0SKP Singh }
234450af8d0SKP Singh 
235450af8d0SKP Singh void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
236450af8d0SKP Singh 				   struct bpf_local_storage_elem *selem)
237450af8d0SKP Singh {
238450af8d0SKP Singh 	RCU_INIT_POINTER(selem->local_storage, local_storage);
23970b97111SMartin KaFai Lau 	hlist_add_head_rcu(&selem->snode, &local_storage->list);
240450af8d0SKP Singh }
241450af8d0SKP Singh 
2424cbd23ccSMartin KaFai Lau static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
243450af8d0SKP Singh {
244450af8d0SKP Singh 	struct bpf_local_storage_map *smap;
245450af8d0SKP Singh 	struct bpf_local_storage_map_bucket *b;
246a10787e6SSong Liu 	unsigned long flags;
247450af8d0SKP Singh 
2480a09a2f9SKumar Kartikeya Dwivedi 	if (unlikely(!selem_linked_to_map_lockless(selem)))
249450af8d0SKP Singh 		/* selem has already been unlinked from smap */
250450af8d0SKP Singh 		return;
251450af8d0SKP Singh 
2520fe4b381SKP Singh 	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
253450af8d0SKP Singh 	b = select_bucket(smap, selem);
254a10787e6SSong Liu 	raw_spin_lock_irqsave(&b->lock, flags);
255450af8d0SKP Singh 	if (likely(selem_linked_to_map(selem)))
256450af8d0SKP Singh 		hlist_del_init_rcu(&selem->map_node);
257a10787e6SSong Liu 	raw_spin_unlock_irqrestore(&b->lock, flags);
258450af8d0SKP Singh }
259450af8d0SKP Singh 
260450af8d0SKP Singh void bpf_selem_link_map(struct bpf_local_storage_map *smap,
261450af8d0SKP Singh 			struct bpf_local_storage_elem *selem)
262450af8d0SKP Singh {
263450af8d0SKP Singh 	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
264a10787e6SSong Liu 	unsigned long flags;
265450af8d0SKP Singh 
266a10787e6SSong Liu 	raw_spin_lock_irqsave(&b->lock, flags);
267450af8d0SKP Singh 	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
268450af8d0SKP Singh 	hlist_add_head_rcu(&selem->map_node, &b->list);
269a10787e6SSong Liu 	raw_spin_unlock_irqrestore(&b->lock, flags);
270450af8d0SKP Singh }
271450af8d0SKP Singh 
272a47eabf2SMartin KaFai Lau void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
273450af8d0SKP Singh {
274450af8d0SKP Singh 	/* Always unlink from map before unlinking from local_storage
275450af8d0SKP Singh 	 * because selem will be freed after successfully unlinked from
276450af8d0SKP Singh 	 * the local_storage.
277450af8d0SKP Singh 	 */
278450af8d0SKP Singh 	bpf_selem_unlink_map(selem);
279a47eabf2SMartin KaFai Lau 	bpf_selem_unlink_storage(selem, reuse_now);
280450af8d0SKP Singh }
281450af8d0SKP Singh 
282e8b02296SMartin KaFai Lau /* If cacheit_lockit is false, this lookup function is lockless */
283450af8d0SKP Singh struct bpf_local_storage_data *
284450af8d0SKP Singh bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
285450af8d0SKP Singh 			 struct bpf_local_storage_map *smap,
286450af8d0SKP Singh 			 bool cacheit_lockit)
287450af8d0SKP Singh {
288450af8d0SKP Singh 	struct bpf_local_storage_data *sdata;
289450af8d0SKP Singh 	struct bpf_local_storage_elem *selem;
290450af8d0SKP Singh 
291450af8d0SKP Singh 	/* Fast path (cache hit) */
2920fe4b381SKP Singh 	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
2930fe4b381SKP Singh 				      bpf_rcu_lock_held());
294450af8d0SKP Singh 	if (sdata && rcu_access_pointer(sdata->smap) == smap)
295450af8d0SKP Singh 		return sdata;
296450af8d0SKP Singh 
297450af8d0SKP Singh 	/* Slow path (cache miss) */
2980fe4b381SKP Singh 	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
2990fe4b381SKP Singh 				  rcu_read_lock_trace_held())
300450af8d0SKP Singh 		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
301450af8d0SKP Singh 			break;
302450af8d0SKP Singh 
303450af8d0SKP Singh 	if (!selem)
304450af8d0SKP Singh 		return NULL;
305450af8d0SKP Singh 
306450af8d0SKP Singh 	sdata = SDATA(selem);
307450af8d0SKP Singh 	if (cacheit_lockit) {
308a10787e6SSong Liu 		unsigned long flags;
309a10787e6SSong Liu 
310450af8d0SKP Singh 		/* spinlock is needed to avoid racing with the
311450af8d0SKP Singh 		 * parallel delete.  Otherwise, publishing an already
312450af8d0SKP Singh 		 * deleted sdata to the cache will become a use-after-free
313450af8d0SKP Singh 		 * problem in the next bpf_local_storage_lookup().
314450af8d0SKP Singh 		 */
315a10787e6SSong Liu 		raw_spin_lock_irqsave(&local_storage->lock, flags);
316450af8d0SKP Singh 		if (selem_linked_to_storage(selem))
317450af8d0SKP Singh 			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
318450af8d0SKP Singh 					   sdata);
319a10787e6SSong Liu 		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
320450af8d0SKP Singh 	}
321450af8d0SKP Singh 
322450af8d0SKP Singh 	return sdata;
323450af8d0SKP Singh }
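/* A rough sketch of a typical caller (e.g. net/core/bpf_sk_storage.c), with
 * error handling elided; cacheit_lockit is normally true on the bpf prog's
 * lookup path so that a hit can be published to local_storage->cache:
 *
 *	sk_storage = rcu_dereference(sk->sk_bpf_storage);
 *	if (!sk_storage)
 *		return NULL;
 *	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
 */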
324450af8d0SKP Singh 
325450af8d0SKP Singh static int check_flags(const struct bpf_local_storage_data *old_sdata,
326450af8d0SKP Singh 		       u64 map_flags)
327450af8d0SKP Singh {
328450af8d0SKP Singh 	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
329450af8d0SKP Singh 		/* elem already exists */
330450af8d0SKP Singh 		return -EEXIST;
331450af8d0SKP Singh 
332450af8d0SKP Singh 	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
333450af8d0SKP Singh 		/* elem doesn't exist, cannot update it */
334450af8d0SKP Singh 		return -ENOENT;
335450af8d0SKP Singh 
336450af8d0SKP Singh 	return 0;
337450af8d0SKP Singh }
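/* With BPF_F_LOCK masked out, the remaining flag value means:
 * BPF_ANY (0)  - create a new elem or update an existing one,
 * BPF_NOEXIST  - create only (-EEXIST if an elem already exists),
 * BPF_EXIST    - update only (-ENOENT if no elem exists yet).
 */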
338450af8d0SKP Singh 
339450af8d0SKP Singh int bpf_local_storage_alloc(void *owner,
340450af8d0SKP Singh 			    struct bpf_local_storage_map *smap,
341b00fa38aSJoanne Koong 			    struct bpf_local_storage_elem *first_selem,
342b00fa38aSJoanne Koong 			    gfp_t gfp_flags)
343450af8d0SKP Singh {
344450af8d0SKP Singh 	struct bpf_local_storage *prev_storage, *storage;
345450af8d0SKP Singh 	struct bpf_local_storage **owner_storage_ptr;
346450af8d0SKP Singh 	int err;
347450af8d0SKP Singh 
348450af8d0SKP Singh 	err = mem_charge(smap, owner, sizeof(*storage));
349450af8d0SKP Singh 	if (err)
350450af8d0SKP Singh 		return err;
351450af8d0SKP Singh 
352e9aae8beSRoman Gushchin 	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
353b00fa38aSJoanne Koong 				  gfp_flags | __GFP_NOWARN);
354450af8d0SKP Singh 	if (!storage) {
355450af8d0SKP Singh 		err = -ENOMEM;
356450af8d0SKP Singh 		goto uncharge;
357450af8d0SKP Singh 	}
358450af8d0SKP Singh 
359fc6652aaSMartin KaFai Lau 	RCU_INIT_POINTER(storage->smap, smap);
360450af8d0SKP Singh 	INIT_HLIST_HEAD(&storage->list);
361450af8d0SKP Singh 	raw_spin_lock_init(&storage->lock);
362450af8d0SKP Singh 	storage->owner = owner;
363450af8d0SKP Singh 
364450af8d0SKP Singh 	bpf_selem_link_storage_nolock(storage, first_selem);
365450af8d0SKP Singh 	bpf_selem_link_map(smap, first_selem);
366450af8d0SKP Singh 
367450af8d0SKP Singh 	owner_storage_ptr =
368450af8d0SKP Singh 		(struct bpf_local_storage **)owner_storage(smap, owner);
369450af8d0SKP Singh 	/* Publish storage to the owner.
370450af8d0SKP Singh 	 * Instead of using any lock of the kernel object (i.e. owner),
371450af8d0SKP Singh 	 * cmpxchg will work with any kernel object regardless of what
372450af8d0SKP Singh 	 * the running context is: bh, irq, etc.
373450af8d0SKP Singh 	 *
374450af8d0SKP Singh 	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
375450af8d0SKP Singh 	 * is protected by the storage->lock.  Hence, when freeing
376450af8d0SKP Singh 	 * the owner->storage, the storage->lock must be held before
377450af8d0SKP Singh 	 * setting owner->storage ptr to NULL.
378450af8d0SKP Singh 	 */
379450af8d0SKP Singh 	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
380450af8d0SKP Singh 	if (unlikely(prev_storage)) {
381450af8d0SKP Singh 		bpf_selem_unlink_map(first_selem);
382450af8d0SKP Singh 		err = -EAGAIN;
383450af8d0SKP Singh 		goto uncharge;
384450af8d0SKP Singh 
385450af8d0SKP Singh 		/* Note that even though first_selem was linked to smap's
386450af8d0SKP Singh 		 * bucket->list, first_selem can be freed immediately
387450af8d0SKP Singh 		 * (instead of kfree_rcu) because
388450af8d0SKP Singh 		 * bpf_local_storage_map_free() does a
3890fe4b381SKP Singh 		 * synchronize_rcu_mult (waiting for both sleepable and
3900fe4b381SKP Singh 		 * normal programs) before walking the bucket->list.
391450af8d0SKP Singh 		 * Hence, no one is accessing selem from the
392450af8d0SKP Singh 		 * bucket->list under rcu_read_lock().
393450af8d0SKP Singh 		 */
394450af8d0SKP Singh 	}
395450af8d0SKP Singh 
396450af8d0SKP Singh 	return 0;
397450af8d0SKP Singh 
398450af8d0SKP Singh uncharge:
399*7e30a847SMartin KaFai Lau 	bpf_local_storage_free(storage, true);
400450af8d0SKP Singh 	mem_uncharge(smap, owner, sizeof(*storage));
401450af8d0SKP Singh 	return err;
402450af8d0SKP Singh }
403450af8d0SKP Singh 
404450af8d0SKP Singh /* The owner (e.g. sk) cannot be going away while it is linking a new elem
405450af8d0SKP Singh  * to its storage (e.g. sk->sk_bpf_storage), i.e. the owner's refcount
406450af8d0SKP Singh  * (sk->sk_refcnt) cannot be 0.  Otherwise, the elem becomes a leak (and
407450af8d0SKP Singh  * causes other memory issues during map destruction).
408450af8d0SKP Singh  */
409450af8d0SKP Singh struct bpf_local_storage_data *
410450af8d0SKP Singh bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
411b00fa38aSJoanne Koong 			 void *value, u64 map_flags, gfp_t gfp_flags)
412450af8d0SKP Singh {
413450af8d0SKP Singh 	struct bpf_local_storage_data *old_sdata = NULL;
414b00fa38aSJoanne Koong 	struct bpf_local_storage_elem *selem = NULL;
415450af8d0SKP Singh 	struct bpf_local_storage *local_storage;
416a10787e6SSong Liu 	unsigned long flags;
417450af8d0SKP Singh 	int err;
418450af8d0SKP Singh 
419450af8d0SKP Singh 	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
420450af8d0SKP Singh 	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
421450af8d0SKP Singh 	    /* BPF_F_LOCK can only be used in a value with spin_lock */
422450af8d0SKP Singh 	    unlikely((map_flags & BPF_F_LOCK) &&
423db559117SKumar Kartikeya Dwivedi 		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
424450af8d0SKP Singh 		return ERR_PTR(-EINVAL);
425450af8d0SKP Singh 
426b00fa38aSJoanne Koong 	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
427b00fa38aSJoanne Koong 		return ERR_PTR(-EINVAL);
428b00fa38aSJoanne Koong 
4290fe4b381SKP Singh 	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
4300fe4b381SKP Singh 					      bpf_rcu_lock_held());
431450af8d0SKP Singh 	if (!local_storage || hlist_empty(&local_storage->list)) {
432450af8d0SKP Singh 		/* Very first elem for the owner */
433450af8d0SKP Singh 		err = check_flags(NULL, map_flags);
434450af8d0SKP Singh 		if (err)
435450af8d0SKP Singh 			return ERR_PTR(err);
436450af8d0SKP Singh 
437b00fa38aSJoanne Koong 		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
438450af8d0SKP Singh 		if (!selem)
439450af8d0SKP Singh 			return ERR_PTR(-ENOMEM);
440450af8d0SKP Singh 
441b00fa38aSJoanne Koong 		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
442450af8d0SKP Singh 		if (err) {
443c0d63f30SMartin KaFai Lau 			bpf_selem_free(selem, smap, true);
444450af8d0SKP Singh 			mem_uncharge(smap, owner, smap->elem_size);
445450af8d0SKP Singh 			return ERR_PTR(err);
446450af8d0SKP Singh 		}
447450af8d0SKP Singh 
448450af8d0SKP Singh 		return SDATA(selem);
449450af8d0SKP Singh 	}
450450af8d0SKP Singh 
451450af8d0SKP Singh 	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
452450af8d0SKP Singh 		/* Hoping to find an old_sdata to do inline update
453450af8d0SKP Singh 		 * such that it can avoid taking the local_storage->lock
454450af8d0SKP Singh 		 * and changing the lists.
455450af8d0SKP Singh 		 */
456450af8d0SKP Singh 		old_sdata =
457450af8d0SKP Singh 			bpf_local_storage_lookup(local_storage, smap, false);
458450af8d0SKP Singh 		err = check_flags(old_sdata, map_flags);
459450af8d0SKP Singh 		if (err)
460450af8d0SKP Singh 			return ERR_PTR(err);
4610a09a2f9SKumar Kartikeya Dwivedi 		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
462450af8d0SKP Singh 			copy_map_value_locked(&smap->map, old_sdata->data,
463450af8d0SKP Singh 					      value, false);
464450af8d0SKP Singh 			return old_sdata;
465450af8d0SKP Singh 		}
466450af8d0SKP Singh 	}
467450af8d0SKP Singh 
468b00fa38aSJoanne Koong 	if (gfp_flags == GFP_KERNEL) {
469b00fa38aSJoanne Koong 		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
470b00fa38aSJoanne Koong 		if (!selem)
471b00fa38aSJoanne Koong 			return ERR_PTR(-ENOMEM);
472b00fa38aSJoanne Koong 	}
473b00fa38aSJoanne Koong 
474a10787e6SSong Liu 	raw_spin_lock_irqsave(&local_storage->lock, flags);
475450af8d0SKP Singh 
476450af8d0SKP Singh 	/* Recheck local_storage->list under local_storage->lock */
477450af8d0SKP Singh 	if (unlikely(hlist_empty(&local_storage->list))) {
478450af8d0SKP Singh 		/* A parallel del is happening and local_storage is going
479450af8d0SKP Singh 		 * away.  It has just been checked before, so very
480450af8d0SKP Singh 		 * unlikely.  Return instead of retry to keep things
481450af8d0SKP Singh 		 * simple.
482450af8d0SKP Singh 		 */
483450af8d0SKP Singh 		err = -EAGAIN;
484450af8d0SKP Singh 		goto unlock_err;
485450af8d0SKP Singh 	}
486450af8d0SKP Singh 
487450af8d0SKP Singh 	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
488450af8d0SKP Singh 	err = check_flags(old_sdata, map_flags);
489450af8d0SKP Singh 	if (err)
490450af8d0SKP Singh 		goto unlock_err;
491450af8d0SKP Singh 
492450af8d0SKP Singh 	if (old_sdata && (map_flags & BPF_F_LOCK)) {
493450af8d0SKP Singh 		copy_map_value_locked(&smap->map, old_sdata->data, value,
494450af8d0SKP Singh 				      false);
495450af8d0SKP Singh 		selem = SELEM(old_sdata);
496450af8d0SKP Singh 		goto unlock;
497450af8d0SKP Singh 	}
498450af8d0SKP Singh 
499b00fa38aSJoanne Koong 	if (gfp_flags != GFP_KERNEL) {
500450af8d0SKP Singh 		/* local_storage->lock is held, so we are sure the
501450af8d0SKP Singh 		 * old_sdata can be unlinked and uncharged successfully
502450af8d0SKP Singh 		 * later.  Hence, instead of charging the new selem now
503450af8d0SKP Singh 		 * and uncharging the old selem later (which may cause
504450af8d0SKP Singh 		 * a potential but unnecessary charge failure), avoid taking
505450af8d0SKP Singh 		 * a charge at all here (the "!old_sdata" check); the
506450af8d0SKP Singh 		 * old_sdata will then not be uncharged later during
507450af8d0SKP Singh 		 * bpf_selem_unlink_storage_nolock().
508450af8d0SKP Singh 		 */
509b00fa38aSJoanne Koong 		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
510450af8d0SKP Singh 		if (!selem) {
511450af8d0SKP Singh 			err = -ENOMEM;
512450af8d0SKP Singh 			goto unlock_err;
513450af8d0SKP Singh 		}
514b00fa38aSJoanne Koong 	}
515450af8d0SKP Singh 
516450af8d0SKP Singh 	/* First, link the new selem to the map */
517450af8d0SKP Singh 	bpf_selem_link_map(smap, selem);
518450af8d0SKP Singh 
519450af8d0SKP Singh 	/* Second, link (and publish) the new selem to local_storage */
520450af8d0SKP Singh 	bpf_selem_link_storage_nolock(local_storage, selem);
521450af8d0SKP Singh 
522450af8d0SKP Singh 	/* Third, remove old selem, SELEM(old_sdata) */
523450af8d0SKP Singh 	if (old_sdata) {
524450af8d0SKP Singh 		bpf_selem_unlink_map(SELEM(old_sdata));
525450af8d0SKP Singh 		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
526a47eabf2SMartin KaFai Lau 						false, false);
527450af8d0SKP Singh 	}
528450af8d0SKP Singh 
529450af8d0SKP Singh unlock:
530a10787e6SSong Liu 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
531450af8d0SKP Singh 	return SDATA(selem);
532450af8d0SKP Singh 
533450af8d0SKP Singh unlock_err:
534a10787e6SSong Liu 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
535b00fa38aSJoanne Koong 	if (selem) {
536b00fa38aSJoanne Koong 		mem_uncharge(smap, owner, smap->elem_size);
537c0d63f30SMartin KaFai Lau 		bpf_selem_free(selem, smap, true);
538b00fa38aSJoanne Koong 	}
539450af8d0SKP Singh 	return ERR_PTR(err);
540450af8d0SKP Singh }
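/* A rough sketch of the syscall-path usage (e.g. bpf_sk_storage.c's
 * update_elem callback), assuming the owner has already been looked up and
 * is held by the caller:
 *
 *	sdata = bpf_local_storage_update(sk, (struct bpf_local_storage_map *)map,
 *					 value, map_flags, GFP_ATOMIC);
 *	return PTR_ERR_OR_ZERO(sdata);
 */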
541450af8d0SKP Singh 
542c83597faSYonghong Song static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
543450af8d0SKP Singh {
544450af8d0SKP Singh 	u64 min_usage = U64_MAX;
545450af8d0SKP Singh 	u16 i, res = 0;
546450af8d0SKP Singh 
547450af8d0SKP Singh 	spin_lock(&cache->idx_lock);
548450af8d0SKP Singh 
549450af8d0SKP Singh 	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
550450af8d0SKP Singh 		if (cache->idx_usage_counts[i] < min_usage) {
551450af8d0SKP Singh 			min_usage = cache->idx_usage_counts[i];
552450af8d0SKP Singh 			res = i;
553450af8d0SKP Singh 
554450af8d0SKP Singh 			/* Found a free cache_idx */
555450af8d0SKP Singh 			if (!min_usage)
556450af8d0SKP Singh 				break;
557450af8d0SKP Singh 		}
558450af8d0SKP Singh 	}
559450af8d0SKP Singh 	cache->idx_usage_counts[res]++;
560450af8d0SKP Singh 
561450af8d0SKP Singh 	spin_unlock(&cache->idx_lock);
562450af8d0SKP Singh 
563450af8d0SKP Singh 	return res;
564450af8d0SKP Singh }
565450af8d0SKP Singh 
566c83597faSYonghong Song static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
567450af8d0SKP Singh 					     u16 idx)
568450af8d0SKP Singh {
569450af8d0SKP Singh 	spin_lock(&cache->idx_lock);
570450af8d0SKP Singh 	cache->idx_usage_counts[idx]--;
571450af8d0SKP Singh 	spin_unlock(&cache->idx_lock);
572450af8d0SKP Singh }
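/* Each local storage flavour (e.g. sk, task, inode, cgrp) keeps one
 * bpf_local_storage_cache.  A newly created map grabs the least used slot
 * via bpf_local_storage_cache_idx_get(), and bpf_local_storage_lookup() uses
 * that slot as its fast-path cache index, so an owner's most frequently used
 * maps avoid the list walk.
 */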
573450af8d0SKP Singh 
574c83597faSYonghong Song int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
575c83597faSYonghong Song {
576c83597faSYonghong Song 	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
577c83597faSYonghong Song 	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
578c83597faSYonghong Song 	    attr->max_entries ||
579c83597faSYonghong Song 	    attr->key_size != sizeof(int) || !attr->value_size ||
580c83597faSYonghong Song 	    /* Enforce BTF for userspace sk dumping */
581c83597faSYonghong Song 	    !attr->btf_key_type_id || !attr->btf_value_type_id)
582c83597faSYonghong Song 		return -EINVAL;
583c83597faSYonghong Song 
584c83597faSYonghong Song 	if (!bpf_capable())
585c83597faSYonghong Song 		return -EPERM;
586c83597faSYonghong Song 
587c83597faSYonghong Song 	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
588c83597faSYonghong Song 		return -E2BIG;
589c83597faSYonghong Song 
590c83597faSYonghong Song 	return 0;
591c83597faSYonghong Song }
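/* A rough sketch (not part of this file) of a map definition that passes the
 * checks above, written with libbpf's BTF-defined map syntax; "struct
 * my_value" is a placeholder:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct my_value);
 *	} sk_stg SEC(".maps");
 *
 * i.e. BPF_F_NO_PREALLOC set, max_entries left at 0, a 4-byte int key, and
 * BTF type ids for both key and value.
 */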
592c83597faSYonghong Song 
593c83597faSYonghong Song int bpf_local_storage_map_check_btf(const struct bpf_map *map,
594c83597faSYonghong Song 				    const struct btf *btf,
595c83597faSYonghong Song 				    const struct btf_type *key_type,
596c83597faSYonghong Song 				    const struct btf_type *value_type)
597c83597faSYonghong Song {
598c83597faSYonghong Song 	u32 int_data;
599c83597faSYonghong Song 
600c83597faSYonghong Song 	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
601c83597faSYonghong Song 		return -EINVAL;
602c83597faSYonghong Song 
603c83597faSYonghong Song 	int_data = *(u32 *)(key_type + 1);
604c83597faSYonghong Song 	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
605c83597faSYonghong Song 		return -EINVAL;
606c83597faSYonghong Song 
607c83597faSYonghong Song 	return 0;
608c83597faSYonghong Song }
609c83597faSYonghong Song 
6102ffcb6fcSMartin KaFai Lau void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
611450af8d0SKP Singh {
612450af8d0SKP Singh 	struct bpf_local_storage_elem *selem;
613c83597faSYonghong Song 	bool free_storage = false;
614c83597faSYonghong Song 	struct hlist_node *n;
6152ffcb6fcSMartin KaFai Lau 	unsigned long flags;
616c83597faSYonghong Song 
617c83597faSYonghong Song 	/* Neither the bpf_prog nor the bpf_map's syscall
618c83597faSYonghong Song 	 * could be modifying the local_storage->list now.
619c83597faSYonghong Song 	 * Thus, no elem can be added to or deleted from the
620c83597faSYonghong Song 	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
621c83597faSYonghong Song 	 *
622c83597faSYonghong Song 	 * It is racing with bpf_local_storage_map_free() alone
623c83597faSYonghong Song 	 * when unlinking elem from the local_storage->list and
624c83597faSYonghong Song 	 * the map's bucket->list.
625c83597faSYonghong Song 	 */
6262ffcb6fcSMartin KaFai Lau 	raw_spin_lock_irqsave(&local_storage->lock, flags);
627c83597faSYonghong Song 	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
628c83597faSYonghong Song 		/* Always unlink from map before unlinking from
629c83597faSYonghong Song 		 * local_storage.
630c83597faSYonghong Song 		 */
631c83597faSYonghong Song 		bpf_selem_unlink_map(selem);
632c83597faSYonghong Song 		/* If local_storage list has only one element, the
633c83597faSYonghong Song 		 * bpf_selem_unlink_storage_nolock() will return true.
634c83597faSYonghong Song 		 * Otherwise, it will return false. The current loop iteration
635c83597faSYonghong Song 		 * intends to remove all local storage. So the last iteration
636c83597faSYonghong Song 		 * of the loop will set free_storage to true.
637c83597faSYonghong Song 		 */
638c83597faSYonghong Song 		free_storage = bpf_selem_unlink_storage_nolock(
639a47eabf2SMartin KaFai Lau 			local_storage, selem, false, true);
640c83597faSYonghong Song 	}
6412ffcb6fcSMartin KaFai Lau 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
642c83597faSYonghong Song 
6432ffcb6fcSMartin KaFai Lau 	if (free_storage)
644*7e30a847SMartin KaFai Lau 		bpf_local_storage_free(local_storage, true);
645c83597faSYonghong Song }
646c83597faSYonghong Song 
6477490b7f1SYafang Shao u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
6487490b7f1SYafang Shao {
6497490b7f1SYafang Shao 	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
6507490b7f1SYafang Shao 	u64 usage = sizeof(*smap);
6517490b7f1SYafang Shao 
6527490b7f1SYafang Shao 	/* The dynamically allocated selems are not counted currently. */
6537490b7f1SYafang Shao 	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
6547490b7f1SYafang Shao 	return usage;
6557490b7f1SYafang Shao }
6567490b7f1SYafang Shao 
657c83597faSYonghong Song struct bpf_map *
658c83597faSYonghong Song bpf_local_storage_map_alloc(union bpf_attr *attr,
659c83597faSYonghong Song 			    struct bpf_local_storage_cache *cache)
660c83597faSYonghong Song {
661c83597faSYonghong Song 	struct bpf_local_storage_map *smap;
66262827d61SMartin KaFai Lau 	unsigned int i;
66362827d61SMartin KaFai Lau 	u32 nbuckets;
664c83597faSYonghong Song 
66562827d61SMartin KaFai Lau 	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
66662827d61SMartin KaFai Lau 	if (!smap)
66762827d61SMartin KaFai Lau 		return ERR_PTR(-ENOMEM);
66862827d61SMartin KaFai Lau 	bpf_map_init_from_attr(&smap->map, attr);
66962827d61SMartin KaFai Lau 
67062827d61SMartin KaFai Lau 	nbuckets = roundup_pow_of_two(num_possible_cpus());
67162827d61SMartin KaFai Lau 	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
67262827d61SMartin KaFai Lau 	nbuckets = max_t(u32, 2, nbuckets);
67362827d61SMartin KaFai Lau 	smap->bucket_log = ilog2(nbuckets);
67462827d61SMartin KaFai Lau 
67562827d61SMartin KaFai Lau 	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
67662827d61SMartin KaFai Lau 					 nbuckets, GFP_USER | __GFP_NOWARN);
67762827d61SMartin KaFai Lau 	if (!smap->buckets) {
67862827d61SMartin KaFai Lau 		bpf_map_area_free(smap);
67962827d61SMartin KaFai Lau 		return ERR_PTR(-ENOMEM);
68062827d61SMartin KaFai Lau 	}
68162827d61SMartin KaFai Lau 
68262827d61SMartin KaFai Lau 	for (i = 0; i < nbuckets; i++) {
68362827d61SMartin KaFai Lau 		INIT_HLIST_HEAD(&smap->buckets[i].list);
68462827d61SMartin KaFai Lau 		raw_spin_lock_init(&smap->buckets[i].lock);
68562827d61SMartin KaFai Lau 	}
68662827d61SMartin KaFai Lau 
68762827d61SMartin KaFai Lau 	smap->elem_size = offsetof(struct bpf_local_storage_elem,
68862827d61SMartin KaFai Lau 				   sdata.data[attr->value_size]);
689c83597faSYonghong Song 
690c83597faSYonghong Song 	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
691c83597faSYonghong Song 	return &smap->map;
692c83597faSYonghong Song }
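/* The bucket array is sized from the number of possible CPUs (rounded up to
 * a power of two, minimum 2) rather than from max_entries, since local
 * storage maps have no fixed capacity; elem_size is the selem header plus
 * the map's value_size through the flexible sdata.data[] array.
 */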
693c83597faSYonghong Song 
694c83597faSYonghong Song void bpf_local_storage_map_free(struct bpf_map *map,
695c83597faSYonghong Song 				struct bpf_local_storage_cache *cache,
696c83597faSYonghong Song 				int __percpu *busy_counter)
697c83597faSYonghong Song {
698450af8d0SKP Singh 	struct bpf_local_storage_map_bucket *b;
699c83597faSYonghong Song 	struct bpf_local_storage_elem *selem;
700c83597faSYonghong Song 	struct bpf_local_storage_map *smap;
701450af8d0SKP Singh 	unsigned int i;
702450af8d0SKP Singh 
703c83597faSYonghong Song 	smap = (struct bpf_local_storage_map *)map;
704c83597faSYonghong Song 	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
705c83597faSYonghong Song 
706450af8d0SKP Singh 	/* Note that this map might be concurrently cloned from
707450af8d0SKP Singh 	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
708450af8d0SKP Singh 	 * RCU read section to finish before proceeding. New RCU
709450af8d0SKP Singh 	 * read sections should be prevented via bpf_map_inc_not_zero.
710450af8d0SKP Singh 	 */
711450af8d0SKP Singh 	synchronize_rcu();
712450af8d0SKP Singh 
713450af8d0SKP Singh 	/* bpf prog and the userspace can no longer access this map
714450af8d0SKP Singh 	 * now.  No new selem (of this map) can be added
715450af8d0SKP Singh 	 * to the owner->storage or to the map bucket's list.
716450af8d0SKP Singh 	 *
717450af8d0SKP Singh 	 * The elem of this map can be cleaned up here
718450af8d0SKP Singh 	 * or when the storage is freed e.g.
719450af8d0SKP Singh 	 * by bpf_sk_storage_free() during __sk_destruct().
720450af8d0SKP Singh 	 */
721450af8d0SKP Singh 	for (i = 0; i < (1U << smap->bucket_log); i++) {
722450af8d0SKP Singh 		b = &smap->buckets[i];
723450af8d0SKP Singh 
724450af8d0SKP Singh 		rcu_read_lock();
725450af8d0SKP Singh 		/* No one is adding to b->list now */
726450af8d0SKP Singh 		while ((selem = hlist_entry_safe(
727450af8d0SKP Singh 				rcu_dereference_raw(hlist_first_rcu(&b->list)),
728450af8d0SKP Singh 				struct bpf_local_storage_elem, map_node))) {
729bc235cdbSSong Liu 			if (busy_counter) {
730bc235cdbSSong Liu 				migrate_disable();
731197827a0SHou Tao 				this_cpu_inc(*busy_counter);
732bc235cdbSSong Liu 			}
733a47eabf2SMartin KaFai Lau 			bpf_selem_unlink(selem, true);
734bc235cdbSSong Liu 			if (busy_counter) {
735197827a0SHou Tao 				this_cpu_dec(*busy_counter);
736bc235cdbSSong Liu 				migrate_enable();
737bc235cdbSSong Liu 			}
738450af8d0SKP Singh 			cond_resched_rcu();
739450af8d0SKP Singh 		}
740450af8d0SKP Singh 		rcu_read_unlock();
741450af8d0SKP Singh 	}
742450af8d0SKP Singh 
743450af8d0SKP Singh 	/* While freeing the storage we may still need to access the map.
744450af8d0SKP Singh 	 *
745450af8d0SKP Singh 	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
746450af8d0SKP Singh 	 * which then made the above while((selem = ...)) loop
747450af8d0SKP Singh 	 * exit immediately.
748450af8d0SKP Singh 	 *
749450af8d0SKP Singh 	 * However, while freeing the storage one still needs to access the
750450af8d0SKP Singh 	 * smap->elem_size to do the uncharging in
751450af8d0SKP Singh 	 * bpf_selem_unlink_storage_nolock().
752450af8d0SKP Singh 	 *
753450af8d0SKP Singh 	 * Hence, wait another rcu grace period for the storage to be freed.
754450af8d0SKP Singh 	 */
755450af8d0SKP Singh 	synchronize_rcu();
756450af8d0SKP Singh 
757450af8d0SKP Singh 	kvfree(smap->buckets);
75873cf09a3SYafang Shao 	bpf_map_area_free(smap);
759450af8d0SKP Singh }
760