// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}
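
/* Example: a minimal charge/uncharge pair as a map front end might provide
 * it.  This is a hedged sketch modeled on bpf_sk_storage.c, which accounts
 * storage against the socket's optmem; names and details are illustrative:
 *
 *	static int sk_storage_charge(struct bpf_local_storage_map *smap,
 *				     void *owner, u32 size)
 *	{
 *		return omem_charge(owner, size);
 *	}
 *
 *	static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
 *					void *owner, u32 size)
 *	{
 *		struct sock *sk = owner;
 *
 *		atomic_sub(size, &sk->sk_omem_alloc);
 *	}
 *
 * Map types that do not implement these ops simply skip the per-owner
 * accounting, as the NULL checks above show.
 */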

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}
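
/* Example: the map_owner_storage_ptr op returns the address of the owner's
 * storage pointer so this file can publish and clear it.  A hedged sketch
 * modeled on bpf_sk_storage.c (names are illustrative):
 *
 *	static struct bpf_local_storage __rcu **sk_storage_ptr(void *owner)
 *	{
 *		struct sock *sk = owner;
 *
 *		return &sk->sk_bpf_storage;
 *	}
 */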

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				gfp_flags | __GFP_NOWARN);
	if (selem) {
		if (value)
			memcpy(SDATA(selem)->data, value, smap->map.value_size);
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

/* Invoked as an RCU-tasks-trace callback.  Reuse the rcu_head to chain one
 * more regular RCU grace period (kfree_rcu) before the storage is actually
 * freed, so that both sleepable and non-sleepable bpf programs are
 * guaranteed to be done with it.
 */
void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	kfree_rcu(local_storage, rcu);
}

/* Same grace-period chaining as above, for an individual element */
static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	kfree_rcu(selem, rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
				     struct bpf_local_storage_elem *selem,
				     bool uncharge_mem, bool use_trace_rcu)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now.  local_storage->lock is
		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_irqrestore(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	if (use_trace_rcu)
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
	else
		kfree_rcu(selem, rcu);

	return free_local_storage;
}

static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				       bool use_trace_rcu)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage(selem)))
		/* selem has already been unlinked from sk */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, use_trace_rcu);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage) {
		if (use_trace_rcu)
			call_rcu_tasks_trace(&local_storage->rcu,
					     bpf_local_storage_free_rcu);
		else
			kfree_rcu(local_storage, rcu);
	}
}

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after it is successfully unlinked
	 * from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	__bpf_selem_unlink_storage(selem, use_trace_rcu);
}

struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}
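
/* Example: a map front end wraps bpf_local_storage_lookup() behind its bpf
 * helper.  A hedged sketch of the sk-storage flavour (simplified from
 * bpf_sk_storage.c; the rcu_dereference_check() variant and error handling
 * are elided):
 *
 *	static struct bpf_local_storage_data *
 *	sk_storage_lookup(struct sock *sk, struct bpf_map *map,
 *			  bool cacheit_lockit)
 *	{
 *		struct bpf_local_storage *sk_storage;
 *		struct bpf_local_storage_map *smap;
 *
 *		sk_storage = rcu_dereference(sk->sk_bpf_storage);
 *		if (!sk_storage)
 *			return NULL;
 *
 *		smap = (struct bpf_local_storage_map *)map;
 *		return bpf_local_storage_lookup(sk_storage, smap,
 *						cacheit_lockit);
 *	}
 */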

static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
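
/* Example: from userspace these flags keep their usual map-update
 * semantics.  A hedged sketch (fd variables and the value struct are
 * illustrative):
 *
 *	struct val v = { .cnt = 1 };
 *
 *	// fails with -EEXIST if sk_fd already has storage in this map
 *	bpf_map_update_elem(map_fd, &sk_fd, &v, BPF_NOEXIST);
 *	// fails with -ENOENT if it does not
 *	bpf_map_update_elem(map_fd, &sk_fd, &v, BPF_EXIST);
 *	// in-place update of a spin_lock-protected value
 *	bpf_map_update_elem(map_fd, &sk_fd, &v, BPF_F_LOCK);
 */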

int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  gfp_flags | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of what
	 * the running context is, bh, irq...etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock.  Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, it can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking a new elem
 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot both be set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(&smap->map)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			kfree(selem);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do an inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	if (gfp_flags == GFP_KERNEL) {
		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away.  It has just been checked before, so very
		 * unlikely.  Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	if (gfp_flags != GFP_KERNEL) {
		/* local_storage->lock is held.  Hence, we are sure
		 * we can unlink and uncharge the old_sdata successfully
		 * later.  Hence, instead of charging the new selem now
		 * and then uncharging the old selem later (which may cause
		 * a potential but unnecessary charge failure), avoid taking
		 * a charge at all here (the "!old_sdata" check) and the
		 * old_sdata will not be uncharged later during
		 * bpf_selem_unlink_storage_nolock().
		 */
		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
		if (!selem) {
			err = -ENOMEM;
			goto unlock_err;
		}
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false, true);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		kfree(selem);
	}
	return ERR_PTR(err);
}
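
/* Example: a front end's map_update_elem op funnels into
 * bpf_local_storage_update().  A hedged sketch of the syscall path for sk
 * storage (simplified from bpf_sk_storage.c; looking up and locking the
 * socket from the fd in *key is elided):
 *
 *	static int sk_storage_update_elem(struct bpf_map *map, void *key,
 *					  void *value, u64 map_flags)
 *	{
 *		struct bpf_local_storage_data *sdata;
 *		struct sock *sk;	// resolved from the fd in *key
 *
 *		sdata = bpf_local_storage_update(
 *			sk, (struct bpf_local_storage_map *)map, value,
 *			map_flags, GFP_ATOMIC);
 *		return PTR_ERR_OR_ZERO(sdata);
 *	}
 */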

u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
				      u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map_bucket *b;
	unsigned int i;

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now.  No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				__this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, false);
			if (busy_counter) {
				__this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}
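
/* Example: task storage passes its per-cpu busy counter so the unlink loop
 * above cannot recurse into a bpf program touching the same storage on this
 * cpu.  A hedged sketch modeled on bpf_task_storage.c (task_cache and
 * bpf_task_storage_busy live in that file):
 *
 *	static void task_storage_map_free(struct bpf_map *map)
 *	{
 *		struct bpf_local_storage_map *smap;
 *
 *		smap = (struct bpf_local_storage_map *)map;
 *		bpf_local_storage_cache_idx_free(&task_cache, smap->cache_idx);
 *		bpf_local_storage_map_free(smap, &bpf_task_storage_busy);
 *	}
 */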

int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}
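
/* Example: a BPF-program-side definition that satisfies these checks
 * (BTF-defined map, BPF_F_NO_PREALLOC, int key, no max_entries).  A hedged
 * sketch using libbpf conventions; the value struct is illustrative:
 *
 *	struct val {
 *		long cnt;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct val);
 *	} sk_stg SEC(".maps");
 */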

struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = kvcalloc(nbuckets, sizeof(*smap->buckets),
				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap->buckets) {
		bpf_map_area_free(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size =
		sizeof(struct bpf_local_storage_elem) + attr->value_size;

	return smap;
}

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}