// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);

static int omem_charge(struct sock *sk, unsigned int size)
{
	/* same check as in sock_kmalloc() */
	if (size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}
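
/* Worked example of the check above (illustrative numbers, assuming the
 * 64-bit default of sysctl_optmem_max, 20480 bytes): a socket that has
 * already charged 20000 bytes can still charge a 400-byte elem, since
 * 20000 + 400 < 20480, but a further 200-byte charge fails with -ENOMEM
 * because 20400 + 200 exceeds the limit.
 */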

static struct bpf_local_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata));

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf map's syscall path
	 * can be modifying the sk_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * sk_storage->list by the bpf_prog or by the bpf map's
	 * syscall.
	 *
	 * It races only with bpf_local_storage_map_free() when
	 * unlinking an elem from the sk_storage->list and from
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
								  selem, true);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap);
}

static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
	return &smap->map;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = sk_storage_delete(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}
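
/* Hedged user-space sketch (libbpf; not part of this file): the three
 * bpf_fd_* handlers above implement the map syscall path, which keys an
 * sk_storage map by a socket file descriptor. "map_fd" and "sock_fd"
 * are assumed to be an sk_storage map fd and a socket fd owned by the
 * caller:
 *
 *	__u64 val = 1;
 *
 *	bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &sock_fd, &val);
 *	bpf_map_delete_elem(map_fd, &sock_fd);
 */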

static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding a new element
		 * here can race with cleanup in bpf_local_storage_map_free().
		 * Try to grab the map refcnt to make sure that it's still
		 * alive and to prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here;
	 * the caller is responsible for calling bpf_sk_storage_free().
	 */

	return ret;
}
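
/* Hedged illustration (BPF-side map definition; not part of this file):
 * bpf_sk_storage_clone() above only copies an elem to the child socket
 * if its map was created with BPF_F_CLONE, e.g. a BTF-defined map like
 * the assumed "sk_stg_clone" below:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} sk_stg_clone SEC(".maps");
 */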

BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	struct bpf_local_storage_data *sdata;

	if (flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add a new elem to a sk that is going away.
	     * Otherwise, the new elem may be leaked (and cause
	     * other memory issues during map destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = sk_storage_delete(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}
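
/* Hedged sketch of the helper from a BPF program (not part of this file;
 * "sk_stg" is an assumed BPF_MAP_TYPE_SK_STORAGE map with a __u64 value,
 * usable here because bpf_sk_storage_get_cg_sock_proto below exposes the
 * helper to cgroup/sock programs):
 *
 *	SEC("cgroup/sock")
 *	int count_create(struct bpf_sock *ctx)
 *	{
 *		__u64 *val;
 *
 *		val = bpf_sk_storage_get(&sk_stg, ctx, NULL,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		return 1;
 *	}
 *
 * In program types that expose it, bpf_sk_storage_delete(&sk_stg, sk)
 * drops the elem again and returns -ENOENT if the socket had none.
 */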

static int sk_storage_charge(struct bpf_local_storage_map *smap,
			     void *owner, u32 size)
{
	return omem_charge(owner, size);
}

static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
				void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = sk_storage_map_alloc,
	.map_free = sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_name = "bpf_local_storage_map",
	.map_btf_id = &sk_storage_map_btf_id,
	.map_local_storage_charge = sk_storage_charge,
	.map_local_storage_uncharge = sk_storage_uncharge,
	.map_owner_storage_ptr = sk_storage_ptr,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_SOCKET,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_SOCKET,
};

const struct bpf_func_proto sk_storage_get_btf_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto sk_storage_delete_btf_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
};

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(value_size);
}
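
/* Worked example (illustrative): with value_size == 8,
 * nla_total_size(0) is 4 (the aligned nest header),
 * nla_total_size(sizeof(u32)) is 8 and nla_total_size_64bit(8) is 12,
 * so one SK_DIAG_BPF_STORAGE nest costs 24 bytes; on arches without
 * efficient unaligned access, nla_total_size_64bit() also accounts
 * for an extra 4-byte pad attribute.
 */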

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN,
	 * matching the check done on the map_alloc_check() side.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			nr_maps++;
	}

	diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
		       GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
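
/* Hedged sketch of the request this parses (user space, libmnl; not part
 * of this file): nla_stgs is expected to be a nest holding one
 * SK_DIAG_BPF_STORAGE_REQ_MAP_FD u32 per map of interest, e.g.:
 *
 *	struct nlattr *nest;
 *
 *	nest = mnl_attr_nest_start(nlh, INET_DIAG_REQ_SK_BPF_STORAGES);
 *	mnl_attr_put_u32(nlh, SK_DIAG_BPF_STORAGE_REQ_MAP_FD, map_fd);
 *	mnl_attr_nest_end(nlh, nest);
 */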

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* The map value cannot exceed the max nlattr payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned int skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < n_buckets; i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	seq_info->map = aux->map;
	return 0;
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start  = bpf_sk_storage_map_seq_start,
	.next   = bpf_sk_storage_map_seq_next,
	.stop   = bpf_sk_storage_map_seq_stop,
	.show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_sk_storage_map_seq_ops,
	.init_seq_private	= bpf_iter_init_sk_storage_map,
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target			= "bpf_sk_storage_map",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
	.seq_info		= &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);
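
/* Hedged sketch of a matching iterator program (BPF side; not part of
 * this file, modeled on the selftests; assumes an sk_storage map with a
 * __u64 value whose fd is supplied in the iter link's link_info):
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_stg(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct sock *sk = ctx->sk;
 *		__u64 *val = ctx->value;
 *
 *		if (!sk || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%llu\n", *val);
 *		return 0;
 *	}
 */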